From e68b4e80191f1b1e819f8cf4bb9cf331bc80cd79 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 9 Nov 2017 12:01:15 -0800 Subject: [PATCH 01/34] lua: fix respond() status code checks (#2034) Signed-off-by: Matt Klein --- source/common/http/filter/lua/lua_filter.cc | 4 ++-- test/common/http/filter/lua/lua_filter_test.cc | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/source/common/http/filter/lua/lua_filter.cc b/source/common/http/filter/lua/lua_filter.cc index f6ca604fa589..0e3af6dc7111 100644 --- a/source/common/http/filter/lua/lua_filter.cc +++ b/source/common/http/filter/lua/lua_filter.cc @@ -117,8 +117,8 @@ int StreamHandleWrapper::luaRespond(lua_State* state) { uint64_t status; if (headers->Status() == nullptr || !StringUtil::atoul(headers->Status()->value().c_str(), status) || status < 200 || - status >= 500) { - luaL_error(state, ":status must be between 100-599"); + status >= 600) { + luaL_error(state, ":status must be between 200-599"); } Buffer::InstancePtr body; diff --git a/test/common/http/filter/lua/lua_filter_test.cc b/test/common/http/filter/lua/lua_filter_test.cc index 48257afc85a7..b54736e7de5c 100644 --- a/test/common/http/filter/lua/lua_filter_test.cc +++ b/test/common/http/filter/lua/lua_filter_test.cc @@ -1171,7 +1171,7 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { const std::string SCRIPT{R"EOF( function envoy_on_request(request_handle) request_handle:respond( - {[":status"] = "403"}, + {[":status"] = "503"}, "nope") -- Should not run @@ -1184,7 +1184,7 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { setup(SCRIPT); TestHeaderMapImpl request_headers{{":path", "/"}}; - TestHeaderMapImpl expected_headers{{":status", "403"}, {"content-length", "4"}}; + TestHeaderMapImpl expected_headers{{":status", "503"}, {"content-length", "4"}}; EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false)); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); 
EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers, false)); @@ -1205,7 +1205,7 @@ TEST_F(LuaHttpFilterTest, ImmediateResponseBadStatus) { TestHeaderMapImpl request_headers{{":path", "/"}}; EXPECT_CALL(*filter_, scriptLog(spdlog::level::err, - StrEq("[string \"...\"]:3: :status must be between 100-599"))); + StrEq("[string \"...\"]:3: :status must be between 200-599"))); EXPECT_EQ(FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); } From 9be0f0b950bc6bbcf9e3b128324d3553d560d4b3 Mon Sep 17 00:00:00 2001 From: Daniel Hochman Date: Thu, 9 Nov 2017 15:46:42 -0800 Subject: [PATCH 02/34] build: check format fix functionality (#2036) Signed-off-by: Daniel Hochman --- tools/check_format.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/check_format.py b/tools/check_format.py index 7f31f35a06c2..cb3d5c8ca955 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -71,9 +71,7 @@ def checkProtobufExternalDeps(file_path): def isBuildFile(file_path): basename = os.path.basename(file_path) - if basename in ["BUILD", "BUILD.bazel"]: - return True - if basename.endswith(".BUILD"): + if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"): return True return False @@ -101,7 +99,7 @@ def checkFilePath(file_path): def fixFilePath(file_path): - if os.path.basename(file_path) == "BUILD": + if isBuildFile(file_path): if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0: printError("envoy_build_fixer rewrite failed for file: %s" % file_path) if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: From 19ed39abd5b25dfbfc82be2d90fcd5cceff98291 Mon Sep 17 00:00:00 2001 From: ccaraman Date: Thu, 9 Nov 2017 15:50:30 -0800 Subject: [PATCH 03/34] redirect: add support to specify response code (#2030) Signed-off-by: Constance Caramanolis --- bazel/repositories.bzl | 2 +- include/envoy/router/BUILD | 1 + include/envoy/router/router.h | 7 
++++ source/common/http/utility.cc | 5 +-- source/common/http/utility.h | 6 ++-- source/common/router/BUILD | 1 + source/common/router/config_impl.cc | 6 ++-- source/common/router/config_impl.h | 3 ++ source/common/router/config_utility.cc | 18 ++++++++++ source/common/router/config_utility.h | 9 +++++ source/common/router/router.cc | 3 +- test/common/router/config_impl_test.cc | 48 ++++++++++++++++++++++++++ test/common/router/router_test.cc | 15 ++++++++ test/mocks/router/mocks.h | 1 + 14 files changed, 117 insertions(+), 8 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 66cc67b1d433..4dcecfd76a04 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -113,7 +113,7 @@ def envoy_api_deps(skip_targets): native.git_repository( name = "envoy_api", remote = REPO_LOCATIONS["data-plane-api"], - commit = "4d18e6d236a6476782076b217cd62d43c30a7dfe", + commit = "971fb1b70f419348a1ac2273508237b7ebd08cf5", ) api_bind_targets = [ diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index 2ac863630c10..aa69ddd8b12e 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -38,6 +38,7 @@ envoy_cc_library( "//include/envoy/access_log:access_log_interface", "//include/envoy/common:optional", "//include/envoy/http:codec_interface", + "//include/envoy/http:codes_interface", "//include/envoy/http:header_map_interface", "//include/envoy/tracing:http_tracer_interface", "//include/envoy/upstream:resource_manager_interface", diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 8ea712a66b89..1b1efe00ac31 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -11,6 +11,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/common/optional.h" #include "envoy/http/codec.h" +#include "envoy/http/codes.h" #include "envoy/http/header_map.h" #include "envoy/tracing/http_tracer.h" #include "envoy/upstream/resource_manager.h" @@ -34,6 +35,12 @@ class 
RedirectEntry { * @return std::string the redirect URL. */ virtual std::string newPath(const Http::HeaderMap& headers) const PURE; + + /** + * Returns the HTTP status code to use when redirecting a request. + * @return Http::Code the redirect response Code. + */ + virtual Http::Code redirectResponseCode() const PURE; }; /** diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 23025c316149..f6112d1ae5ed 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -243,9 +243,10 @@ void Utility::sendLocalReply( } } -void Utility::sendRedirect(StreamDecoderFilterCallbacks& callbacks, const std::string& new_path) { +void Utility::sendRedirect(StreamDecoderFilterCallbacks& callbacks, const std::string& new_path, + Code response_code) { HeaderMapPtr response_headers{ - new HeaderMapImpl{{Headers::get().Status, std::to_string(enumToInt(Code::MovedPermanently))}, + new HeaderMapImpl{{Headers::get().Status, std::to_string(enumToInt(response_code))}, {Headers::get().Location, new_path}}}; callbacks.encodeHeaders(std::move(response_headers), true); diff --git a/source/common/http/utility.h b/source/common/http/utility.h index efad3e00dc15..c5ca55161bd6 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -141,11 +141,13 @@ class Utility { const bool& is_reset, Code response_code, const std::string& body_text); /** - * Send a redirect response (301). + * Send a redirect response. * @param callbacks supplies the filter callbacks to use. * @param new_path supplies the redirect target. + * @param response_code supplies the response code to use. */ - static void sendRedirect(StreamDecoderFilterCallbacks& callbacks, const std::string& new_path); + static void sendRedirect(StreamDecoderFilterCallbacks& callbacks, const std::string& new_path, + Code response_code); /** * Retrieves the last address in x-forwarded-for header. If it isn't set, returns empty string. 
diff --git a/source/common/router/BUILD b/source/common/router/BUILD index dd839deaeee5..c274e1eb42ba 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -42,6 +42,7 @@ envoy_cc_library( hdrs = ["config_utility.h"], external_deps = ["envoy_rds"], deps = [ + "//include/envoy/http:codes_interface", "//include/envoy/upstream:resource_manager_interface", "//source/common/common:assert_lib", "//source/common/common:empty_string", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 623f448cada6..5202fed55bf7 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -232,7 +232,9 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, rate_limit_policy_(route.route().rate_limits()), shadow_policy_(route.route()), priority_(ConfigUtility::parsePriority(route.route().priority())), request_headers_parser_(RequestHeaderParser::parse(route.route().request_headers_to_add())), - opaque_config_(parseOpaqueConfig(route)), decorator_(parseDecorator(route)) { + opaque_config_(parseOpaqueConfig(route)), decorator_(parseDecorator(route)), + redirect_response_code_( + ConfigUtility::parseRedirectResponseCode(route.redirect().response_code())) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); @@ -686,7 +688,7 @@ RouteMatcher::RouteMatcher(const envoy::api::v2::RouteConfiguration& route_confi for (const std::string& domain : virtual_host_config.domains()) { if ("*" == domain) { if (default_virtual_host_) { - throw EnvoyException(fmt::format("Only a single single wildcard domain is permitted")); + throw EnvoyException(fmt::format("Only a single wildcard domain is permitted")); } default_virtual_host_ = virtual_host; } else if (domain.size() > 0 && '*' == domain[0]) { diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h 
index 025d0819c216..600d7b1cca07 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -52,6 +52,7 @@ class SslRedirector : public RedirectEntry { public: // Router::RedirectEntry std::string newPath(const Http::HeaderMap& headers) const override; + Http::Code redirectResponseCode() const override { return Http::Code::MovedPermanently; } }; class SslRedirectRoute : public Route { @@ -329,6 +330,7 @@ class RouteEntryImplBase : public RouteEntry, // Router::RedirectEntry std::string newPath(const Http::HeaderMap& headers) const override; + Http::Code redirectResponseCode() const override { return redirect_response_code_; } // Router::Route const RedirectEntry* redirectEntry() const override; @@ -474,6 +476,7 @@ class RouteEntryImplBase : public RouteEntry, const std::multimap opaque_config_; const DecoratorConstPtr decorator_; + const Http::Code redirect_response_code_; }; /** diff --git a/source/common/router/config_utility.cc b/source/common/router/config_utility.cc index 0beb9d363130..6f374b073fb0 100644 --- a/source/common/router/config_utility.cc +++ b/source/common/router/config_utility.cc @@ -45,5 +45,23 @@ bool ConfigUtility::matchHeaders(const Http::HeaderMap& request_headers, return matches; } +Http::Code ConfigUtility::parseRedirectResponseCode( + const envoy::api::v2::RedirectAction::RedirectResponseCode& code) { + switch (code) { + case envoy::api::v2::RedirectAction::MOVED_PERMANENTLY: + return Http::Code::MovedPermanently; + case envoy::api::v2::RedirectAction::FOUND: + return Http::Code::Found; + case envoy::api::v2::RedirectAction::SEE_OTHER: + return Http::Code::SeeOther; + case envoy::api::v2::RedirectAction::TEMPORARY_REDIRECT: + return Http::Code::TemporaryRedirect; + case envoy::api::v2::RedirectAction::PERMANENT_REDIRECT: + return Http::Code::PermanentRedirect; + default: + NOT_IMPLEMENTED; + } +} + } // namespace Router } // namespace Envoy diff --git a/source/common/router/config_utility.h 
b/source/common/router/config_utility.h index e19686a4e9f8..e52ffb49d857 100644 --- a/source/common/router/config_utility.h +++ b/source/common/router/config_utility.h @@ -4,6 +4,7 @@ #include #include +#include "envoy/http/codes.h" #include "envoy/json/json_object.h" #include "envoy/upstream/resource_manager.h" @@ -57,6 +58,14 @@ class ConfigUtility { */ static bool matchHeaders(const Http::HeaderMap& headers, const std::vector& request_headers); + + /** + * Returns the redirect HTTP Status Code enum parsed from proto. + * @param code supplies the RedirectResponseCode enum. + * @return Returns the Http::Code version of the RedirectResponseCode. + */ + static Http::Code + parseRedirectResponseCode(const envoy::api::v2::RedirectAction::RedirectResponseCode& code); }; } // namespace Router diff --git a/source/common/router/router.cc b/source/common/router/router.cc index da5164302f19..5ad6650dec91 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -212,7 +212,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e // Determine if there is a redirect for the request. 
if (route_->redirectEntry()) { config_.stats_.rq_redirect_.inc(); - Http::Utility::sendRedirect(*callbacks_, route_->redirectEntry()->newPath(headers)); + Http::Utility::sendRedirect(*callbacks_, route_->redirectEntry()->newPath(headers), + route_->redirectEntry()->redirectResponseCode()); return Http::FilterHeadersStatus::StopIteration; } diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index ff22f7a927d0..f2a2b633d2ff 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -46,6 +46,12 @@ envoy::api::v2::RouteConfiguration parseRouteConfigurationFromJson(const std::st return route_config; } +envoy::api::v2::RouteConfiguration parseRouteConfigurationFromV2Yaml(const std::string& yaml) { + envoy::api::v2::RouteConfiguration route_config; + MessageUtil::loadFromYaml(yaml, route_config); + return route_config; +} + TEST(RouteMatcherTest, TestRoutes) { std::string json = R"EOF( { @@ -2927,6 +2933,48 @@ TEST(RoutEntryMetadataMatchTest, ParsesMetadata) { } } +TEST(ConfigUtility, ParseResponseCode) { + const std::vector> + test_set = {std::make_pair(envoy::api::v2::RedirectAction::MOVED_PERMANENTLY, + Http::Code::MovedPermanently), + std::make_pair(envoy::api::v2::RedirectAction::FOUND, Http::Code::Found), + std::make_pair(envoy::api::v2::RedirectAction::SEE_OTHER, Http::Code::SeeOther), + std::make_pair(envoy::api::v2::RedirectAction::TEMPORARY_REDIRECT, + Http::Code::TemporaryRedirect), + std::make_pair(envoy::api::v2::RedirectAction::PERMANENT_REDIRECT, + Http::Code::PermanentRedirect)}; + for (const auto& test_case : test_set) { + EXPECT_EQ(test_case.second, ConfigUtility::parseRedirectResponseCode(test_case.first)); + } +} + +TEST(RouteConfigurationV2, RedirectCode) { + std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: redirect + domains: [redirect.lyft.com] + routes: + - match: { prefix: "/"} + redirect: { host_redirect: new.lyft.com, response_code: 
TEMPORARY_REDIRECT } + + )EOF"; + + NiceMock runtime; + NiceMock cm; + ConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), runtime, cm, true); + + EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); + + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("redirect.lyft.com", "/foo", false, false); + EXPECT_EQ("http://new.lyft.com/foo", + config.route(headers, 0)->redirectEntry()->newPath(headers)); + EXPECT_EQ(Http::Code::TemporaryRedirect, + config.route(headers, 0)->redirectEntry()->redirectResponseCode()); + } +} + } // namespace } // namespace Router } // namespace Envoy diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 002ac6ea7b16..e80e0e680220 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -1405,6 +1405,7 @@ TEST_F(RouterTest, AltStatName) { TEST_F(RouterTest, Redirect) { MockRedirectEntry redirect; EXPECT_CALL(redirect, newPath(_)).WillOnce(Return("hello")); + EXPECT_CALL(redirect, redirectResponseCode()).WillOnce(Return(Http::Code::MovedPermanently)); EXPECT_CALL(*callbacks_.route_, redirectEntry()).WillRepeatedly(Return(&redirect)); Http::TestHeaderMapImpl response_headers{{":status", "301"}, {"location", "hello"}}; @@ -1415,6 +1416,20 @@ TEST_F(RouterTest, Redirect) { EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); } +TEST_F(RouterTest, RedirectFound) { + MockRedirectEntry redirect; + EXPECT_CALL(redirect, newPath(_)).WillOnce(Return("hello")); + EXPECT_CALL(redirect, redirectResponseCode()).WillOnce(Return(Http::Code::Found)); + EXPECT_CALL(*callbacks_.route_, redirectEntry()).WillRepeatedly(Return(&redirect)); + + Http::TestHeaderMapImpl response_headers{{":status", "302"}, {"location", "hello"}}; + EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + Http::TestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + 
EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + TEST(RouterFilterUtilityTest, finalTimeout) { { NiceMock route; diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 9387bb967c70..bedae0a17e59 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -34,6 +34,7 @@ class MockRedirectEntry : public RedirectEntry { // Router::Config MOCK_CONST_METHOD1(newPath, std::string(const Http::HeaderMap& headers)); + MOCK_CONST_METHOD0(redirectResponseCode, Http::Code()); }; class TestCorsPolicy : public CorsPolicy { From b627a64d70a324bcd4f15fef4737facc78495cb2 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 9 Nov 2017 15:53:46 -0800 Subject: [PATCH 04/34] redis: implement drain close functionality (#2037) Signed-off-by: Matt Klein --- .../network_filters/redis_proxy_filter.rst | 12 +++- source/common/redis/BUILD | 1 + source/common/redis/proxy_filter.cc | 36 ++++++---- source/common/redis/proxy_filter.h | 18 ++--- source/server/config/network/redis_proxy.cc | 9 +-- test/common/redis/proxy_filter_test.cc | 67 +++++++++++-------- 6 files changed, 87 insertions(+), 56 deletions(-) diff --git a/docs/configuration/network_filters/redis_proxy_filter.rst b/docs/configuration/network_filters/redis_proxy_filter.rst index 10db8ac058d4..7a7e7744bccd 100644 --- a/docs/configuration/network_filters/redis_proxy_filter.rst +++ b/docs/configuration/network_filters/redis_proxy_filter.rst @@ -64,6 +64,7 @@ following statistics: downstream_cx_total, Counter, Total connections downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered downstream_cx_tx_bytes_total, Counter, Total bytes sent + downstream_cx_drain_close, Counter, Number of connections closed due to draining downstream_rq_active, Gauge, Total active requests downstream_rq_total, Counter, Total requests @@ -81,7 +82,7 @@ The Redis filter will gather statistics for the command splitter in the invalid_request, Counter, "Number of requests with an incorrect number of arguments" 
unsupported_command, Counter, "Number of commands issued which are not recognized by the command splitter" - + Per command statistics ---------------------- @@ -95,3 +96,12 @@ The Redis filter will gather statistics for commands in the total, Counter, Number of commands .. _config_network_filters_redis_proxy_per_command_stats: + +Runtime +------- + +The Redis proxy filter supports the following runtime settings: + +redis.drain_close_enabled + % of connections that will be drain closed if the server is draining and would otherwise + attempt a drain close. Defaults to 100. diff --git a/source/common/redis/BUILD b/source/common/redis/BUILD index 44bb38df90c1..19a7e623dd20 100644 --- a/source/common/redis/BUILD +++ b/source/common/redis/BUILD @@ -59,6 +59,7 @@ envoy_cc_library( srcs = ["proxy_filter.cc"], hdrs = ["proxy_filter.h"], deps = [ + "//include/envoy/network:drain_decision_interface", "//include/envoy/network:filter_interface", "//include/envoy/redis:codec_interface", "//include/envoy/redis:command_splitter_interface", diff --git a/source/common/redis/proxy_filter.cc b/source/common/redis/proxy_filter.cc index d17ddccdc776..3e0e58e66534 100644 --- a/source/common/redis/proxy_filter.cc +++ b/source/common/redis/proxy_filter.cc @@ -9,14 +9,15 @@ #include "fmt/format.h" -// TODO(mattklein123): Graceful drain support. 
- namespace Envoy { namespace Redis { ProxyFilterConfig::ProxyFilterConfig(const Json::Object& config, Upstream::ClusterManager& cm, - Stats::Scope& scope) + Stats::Scope& scope, + const Network::DrainDecision& drain_decision, + Runtime::Loader& runtime) : Json::Validator(config, Json::Schema::REDIS_PROXY_NETWORK_FILTER_SCHEMA), + drain_decision_(drain_decision), runtime_(runtime), cluster_name_(config.getString("cluster_name")), stat_prefix_(fmt::format("redis.{}.", config.getString("stat_prefix"))), stats_(generateStats(stat_prefix_, scope)) { @@ -32,22 +33,22 @@ ProxyFilter::ProxyFilter(DecoderFactory& factory, EncoderPtr&& encoder, CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config) : decoder_(factory.create(*this)), encoder_(std::move(encoder)), splitter_(splitter), config_(config) { - config_->stats().downstream_cx_total_.inc(); - config_->stats().downstream_cx_active_.inc(); + config_->stats_.downstream_cx_total_.inc(); + config_->stats_.downstream_cx_active_.inc(); } ProxyFilter::~ProxyFilter() { ASSERT(pending_requests_.empty()); - config_->stats().downstream_cx_active_.dec(); + config_->stats_.downstream_cx_active_.dec(); } void ProxyFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { callbacks_ = &callbacks; callbacks_->connection().addConnectionCallbacks(*this); - callbacks_->connection().setConnectionStats({config_->stats().downstream_cx_rx_bytes_total_, - config_->stats().downstream_cx_rx_bytes_buffered_, - config_->stats().downstream_cx_tx_bytes_total_, - config_->stats().downstream_cx_tx_bytes_buffered_, + callbacks_->connection().setConnectionStats({config_->stats_.downstream_cx_rx_bytes_total_, + config_->stats_.downstream_cx_rx_bytes_buffered_, + config_->stats_.downstream_cx_tx_bytes_total_, + config_->stats_.downstream_cx_tx_bytes_buffered_, nullptr}); } @@ -89,6 +90,13 @@ void ProxyFilter::onResponse(PendingRequest& request, RespValuePtr&& value) { if (encoder_buffer_.length() > 0) { 
callbacks_->connection().write(encoder_buffer_); } + + // Check for drain close only if there are no pending responses. + if (pending_requests_.empty() && config_->drain_decision_.drainClose() && + config_->runtime_.snapshot().featureEnabled(config_->redis_drain_close_runtime_key_, 100)) { + config_->stats_.downstream_cx_drain_close_.inc(); + callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } } Network::FilterStatus ProxyFilter::onData(Buffer::Instance& data) { @@ -96,7 +104,7 @@ Network::FilterStatus ProxyFilter::onData(Buffer::Instance& data) { decoder_->decode(data); return Network::FilterStatus::Continue; } catch (ProtocolError&) { - config_->stats().downstream_cx_protocol_error_.inc(); + config_->stats_.downstream_cx_protocol_error_.inc(); RespValue error; error.type(RespType::Error); error.asString() = "downstream protocol error"; @@ -108,12 +116,12 @@ Network::FilterStatus ProxyFilter::onData(Buffer::Instance& data) { } ProxyFilter::PendingRequest::PendingRequest(ProxyFilter& parent) : parent_(parent) { - parent.config_->stats().downstream_rq_total_.inc(); - parent.config_->stats().downstream_rq_active_.inc(); + parent.config_->stats_.downstream_rq_total_.inc(); + parent.config_->stats_.downstream_rq_active_.inc(); } ProxyFilter::PendingRequest::~PendingRequest() { - parent_.config_->stats().downstream_rq_active_.dec(); + parent_.config_->stats_.downstream_rq_active_.dec(); } } // namespace Redis diff --git a/source/common/redis/proxy_filter.h b/source/common/redis/proxy_filter.h index 4583e006ac59..dea63f8ce4f0 100644 --- a/source/common/redis/proxy_filter.h +++ b/source/common/redis/proxy_filter.h @@ -5,6 +5,7 @@ #include #include +#include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" #include "envoy/redis/codec.h" #include "envoy/redis/command_splitter.h" @@ -29,6 +30,7 @@ namespace Redis { COUNTER(downstream_cx_protocol_error) \ COUNTER(downstream_cx_total) \ GAUGE (downstream_cx_active) \ + 
COUNTER(downstream_cx_drain_close) \ COUNTER(downstream_rq_total) \ GAUGE (downstream_rq_active) // clang-format on @@ -45,18 +47,18 @@ struct ProxyStats { */ class ProxyFilterConfig : Json::Validator { public: - ProxyFilterConfig(const Json::Object& config, Upstream::ClusterManager& cm, Stats::Scope& scope); - - const std::string& clusterName() { return cluster_name_; } - const std::string& statPrefix() { return stat_prefix_; } - ProxyStats& stats() { return stats_; } - -private: - static ProxyStats generateStats(const std::string& prefix, Stats::Scope& scope); + ProxyFilterConfig(const Json::Object& config, Upstream::ClusterManager& cm, Stats::Scope& scope, + const Network::DrainDecision& drain_decision, Runtime::Loader& runtime); + const Network::DrainDecision& drain_decision_; + Runtime::Loader& runtime_; const std::string cluster_name_; const std::string stat_prefix_; + const std::string redis_drain_close_runtime_key_{"redis.drain_close_enabled"}; ProxyStats stats_; + +private: + static ProxyStats generateStats(const std::string& prefix, Stats::Scope& scope); }; typedef std::shared_ptr ProxyFilterConfigSharedPtr; diff --git a/source/server/config/network/redis_proxy.cc b/source/server/config/network/redis_proxy.cc index d3043abe86b5..e2a4a890fa87 100644 --- a/source/server/config/network/redis_proxy.cc +++ b/source/server/config/network/redis_proxy.cc @@ -17,15 +17,16 @@ namespace Configuration { NetworkFilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactory(const Json::Object& config, FactoryContext& context) { - Redis::ProxyFilterConfigSharedPtr filter_config(std::make_shared( - config, context.clusterManager(), context.scope())); + Redis::ProxyFilterConfigSharedPtr filter_config( + std::make_shared(config, context.clusterManager(), context.scope(), + context.drainDecision(), context.runtime())); Redis::ConnPool::InstancePtr conn_pool( - new Redis::ConnPool::InstanceImpl(filter_config->clusterName(), context.clusterManager(), + new 
Redis::ConnPool::InstanceImpl(filter_config->cluster_name_, context.clusterManager(), Redis::ConnPool::ClientFactoryImpl::instance_, context.threadLocal(), *config.getObject("conn_pool"))); std::shared_ptr splitter( new Redis::CommandSplitter::InstanceImpl(std::move(conn_pool), context.scope(), - filter_config->statPrefix())); + filter_config->stat_prefix_)); return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( diff --git a/test/common/redis/proxy_filter_test.cc b/test/common/redis/proxy_filter_test.cc index 85295dc7a4c5..d570392ed7cc 100644 --- a/test/common/redis/proxy_filter_test.cc +++ b/test/common/redis/proxy_filter_test.cc @@ -27,7 +27,15 @@ using testing::_; namespace Envoy { namespace Redis { -TEST(RedisProxyFilterConfigTest, Normal) { +class RedisProxyFilterConfigTest : public testing::Test { +public: + NiceMock cm_; + Stats::IsolatedStoreImpl store_; + Network::MockDrainDecision drain_decision_; + Runtime::MockLoader runtime_; +}; + +TEST_F(RedisProxyFilterConfigTest, Normal) { std::string json_string = R"EOF( { "cluster_name": "fake_cluster", @@ -37,13 +45,12 @@ TEST(RedisProxyFilterConfigTest, Normal) { )EOF"; Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - NiceMock cm; - Stats::IsolatedStoreImpl store; - ProxyFilterConfig config(*json_config, cm, store); - EXPECT_EQ("fake_cluster", config.clusterName()); + + ProxyFilterConfig config(*json_config, cm_, store_, drain_decision_, runtime_); + EXPECT_EQ("fake_cluster", config.cluster_name_); } -TEST(RedisProxyFilterConfigTest, InvalidCluster) { +TEST_F(RedisProxyFilterConfigTest, InvalidCluster) { std::string json_string = R"EOF( { "cluster_name": "fake_cluster", @@ -53,14 +60,12 @@ TEST(RedisProxyFilterConfigTest, InvalidCluster) { )EOF"; Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - NiceMock cm; - Stats::IsolatedStoreImpl 
store; - EXPECT_CALL(cm, get("fake_cluster")).WillOnce(Return(nullptr)); - EXPECT_THROW_WITH_MESSAGE(ProxyFilterConfig(*json_config, cm, store), EnvoyException, - "redis: unknown cluster 'fake_cluster'"); + EXPECT_CALL(cm_, get("fake_cluster")).WillOnce(Return(nullptr)); + EXPECT_THROW_WITH_MESSAGE(ProxyFilterConfig(*json_config, cm_, store_, drain_decision_, runtime_), + EnvoyException, "redis: unknown cluster 'fake_cluster'"); } -TEST(RedisProxyFilterConfigTest, InvalidAddedByApi) { +TEST_F(RedisProxyFilterConfigTest, InvalidAddedByApi) { std::string json_string = R"EOF( { "cluster_name": "fake_cluster", @@ -70,15 +75,14 @@ TEST(RedisProxyFilterConfigTest, InvalidAddedByApi) { )EOF"; Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - NiceMock cm; - Stats::IsolatedStoreImpl store; - ON_CALL(*cm.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true)); - EXPECT_THROW_WITH_MESSAGE(ProxyFilterConfig(*json_config, cm, store), EnvoyException, + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true)); + EXPECT_THROW_WITH_MESSAGE(ProxyFilterConfig(*json_config, cm_, store_, drain_decision_, runtime_), + EnvoyException, "redis: invalid cluster 'fake_cluster': currently only " "static (non-CDS) clusters are supported"); } -TEST(RedisProxyFilterConfigTest, BadRedisProxyConfig) { +TEST_F(RedisProxyFilterConfigTest, BadRedisProxyConfig) { std::string json_string = R"EOF( { "cluster_name": "fake_cluster", @@ -87,9 +91,8 @@ TEST(RedisProxyFilterConfigTest, BadRedisProxyConfig) { )EOF"; Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - NiceMock cm; - Stats::IsolatedStoreImpl store; - EXPECT_THROW(ProxyFilterConfig(*json_config, cm, store), Json::Exception); + EXPECT_THROW(ProxyFilterConfig(*json_config, cm_, store_, drain_decision_, runtime_), + Json::Exception); } class RedisProxyFilterTest : public testing::Test, public DecoderFactory { @@ -105,12 
+108,12 @@ class RedisProxyFilterTest : public testing::Test, public DecoderFactory { Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); NiceMock cm; - config_.reset(new ProxyFilterConfig(*json_config, cm, store_)); + config_.reset(new ProxyFilterConfig(*json_config, cm, store_, drain_decision_, runtime_)); filter_.reset(new ProxyFilter(*this, EncoderPtr{encoder_}, splitter_, config_)); filter_->initializeReadFilterCallbacks(filter_callbacks_); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); - EXPECT_EQ(1UL, config_->stats().downstream_cx_total_.value()); - EXPECT_EQ(1UL, config_->stats().downstream_cx_active_.value()); + EXPECT_EQ(1UL, config_->stats_.downstream_cx_total_.value()); + EXPECT_EQ(1UL, config_->stats_.downstream_cx_active_.value()); } ~RedisProxyFilterTest() { @@ -131,12 +134,14 @@ class RedisProxyFilterTest : public testing::Test, public DecoderFactory { DecoderCallbacks* decoder_callbacks_{}; CommandSplitter::MockInstance splitter_; Stats::IsolatedStoreImpl store_; + NiceMock drain_decision_; + NiceMock runtime_; ProxyFilterConfigSharedPtr config_; std::unique_ptr filter_; NiceMock filter_callbacks_; }; -TEST_F(RedisProxyFilterTest, OutOfOrderResponse) { +TEST_F(RedisProxyFilterTest, OutOfOrderResponseWithDrainClose) { InSequence s; Buffer::OwnedImpl fake_data; @@ -157,8 +162,8 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponse) { })); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data)); - EXPECT_EQ(2UL, config_->stats().downstream_rq_total_.value()); - EXPECT_EQ(2UL, config_->stats().downstream_rq_active_.value()); + EXPECT_EQ(2UL, config_->stats_.downstream_rq_total_.value()); + EXPECT_EQ(2UL, config_->stats_.downstream_rq_active_.value()); RespValuePtr response2(new RespValue()); RespValue* response2_ptr = response2.get(); @@ -168,9 +173,13 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponse) { EXPECT_CALL(*encoder_, encode(Ref(*response1), _)); EXPECT_CALL(*encoder_, 
encode(Ref(*response2_ptr), _)); EXPECT_CALL(filter_callbacks_.connection_, write(_)); + EXPECT_CALL(drain_decision_, drainClose()).WillOnce(Return(true)); + EXPECT_CALL(runtime_.snapshot_, featureEnabled("redis.drain_close_enabled", 100)) + .WillOnce(Return(true)); + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); request_callbacks1->onResponse(std::move(response1)); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(1UL, config_->stats_.downstream_cx_drain_close_.value()); } TEST_F(RedisProxyFilterTest, OutOfOrderResponseDownstreamDisconnectBeforeFlush) { @@ -194,8 +203,8 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseDownstreamDisconnectBeforeFlush) })); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data)); - EXPECT_EQ(2UL, config_->stats().downstream_rq_total_.value()); - EXPECT_EQ(2UL, config_->stats().downstream_rq_active_.value()); + EXPECT_EQ(2UL, config_->stats_.downstream_rq_total_.value()); + EXPECT_EQ(2UL, config_->stats_.downstream_rq_active_.value()); RespValuePtr response2(new RespValue()); request_callbacks2->onResponse(std::move(response2)); From 8a09c488eda22b1b5e98f428467e2bfaa72f1ea1 Mon Sep 17 00:00:00 2001 From: Daniel Hochman Date: Thu, 9 Nov 2017 15:56:53 -0800 Subject: [PATCH 05/34] runtime: add comment capability (#2019) Signed-off-by: Daniel Hochman --- docs/configuration/overview/runtime.rst | 8 ++++++++ source/common/runtime/runtime_impl.cc | 12 +++++++++++- test/common/runtime/runtime_impl_test.cc | 5 +++++ test/common/runtime/test_data/root/envoy/file5 | 2 ++ test/common/runtime/test_data/root/envoy/file6 | 2 ++ test/common/runtime/test_data/root/envoy/file7 | 2 ++ 6 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 test/common/runtime/test_data/root/envoy/file5 create mode 100644 test/common/runtime/test_data/root/envoy/file6 create mode 100644 test/common/runtime/test_data/root/envoy/file7 diff --git 
a/docs/configuration/overview/runtime.rst b/docs/configuration/overview/runtime.rst index 47c00367b34a..8b623ee2ec5e 100644 --- a/docs/configuration/overview/runtime.rst +++ b/docs/configuration/overview/runtime.rst @@ -70,6 +70,14 @@ that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will firs If found, the value will override any value found in the primary lookup path. This allows the user to customize the runtime values for individual clusters on top of global defaults. +Comments +-------- + +Lines starting with ``#`` as the first character are treated as comments. + +Comments can be used to provide context on an existing value. Comments are also useful in an +otherwise empty file to keep a placeholder for deployment in a time of need. + Updating runtime values via symbolic link swap ---------------------------------------------- diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index bb633674bd57..278aa155a409 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -224,7 +224,17 @@ void SnapshotImpl::walkDirectory(const std::string& path, const std::string& pre // theoretically lead to issues. ENVOY_LOG(debug, "reading file: {}", full_path); Entry entry; - entry.string_value_ = Filesystem::fileReadToEnd(full_path); + + // Read the file and remove any comments. A comment is a line starting with a '#' character. + // Comments are useful for placeholder files with no value. + const std::vector lines = + StringUtil::split(Filesystem::fileReadToEnd(full_path), "\n"); + for (const std::string& line : lines) { + if (!line.empty() && line.at(0) == '#') { + continue; + } + entry.string_value_ += line + "\n"; + } StringUtil::rtrim(entry.string_value_); // As a perf optimization, attempt to convert the string into an integer. 
If we don't diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index abcf97750eec..7f839f40cb83 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -110,6 +110,11 @@ TEST_F(RuntimeImplTest, All) { EXPECT_EQ(2UL, loader->snapshot().getInteger("file3", 1)); EXPECT_EQ(123UL, loader->snapshot().getInteger("file4", 1)); + // Files with comments. + EXPECT_EQ(123UL, loader->snapshot().getInteger("file5", 1)); + EXPECT_EQ("/home#about-us", loader->snapshot().get("file6")); + EXPECT_EQ("", loader->snapshot().get("file7")); + // Feature enablement. EXPECT_CALL(generator, random()).WillOnce(Return(1)); EXPECT_TRUE(loader->snapshot().featureEnabled("file3", 1)); diff --git a/test/common/runtime/test_data/root/envoy/file5 b/test/common/runtime/test_data/root/envoy/file5 new file mode 100644 index 000000000000..5426e4c9e411 --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file5 @@ -0,0 +1,2 @@ +# This is a comment in a file with an integer. +123 diff --git a/test/common/runtime/test_data/root/envoy/file6 b/test/common/runtime/test_data/root/envoy/file6 new file mode 100644 index 000000000000..030ad397da4a --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file6 @@ -0,0 +1,2 @@ +# This is a comment in a file with a string. +/home#about-us diff --git a/test/common/runtime/test_data/root/envoy/file7 b/test/common/runtime/test_data/root/envoy/file7 new file mode 100644 index 000000000000..4235d74a7f1f --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file7 @@ -0,0 +1,2 @@ +# This is a comment in an empty file. +# This file was intentionally left blank. 
From d66879755a783f68290dc6886ede14016f4deb74 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Nov 2017 16:54:15 -0800 Subject: [PATCH 06/34] Add access logging to tcp_proxy (#1953) Signed-off-by: Greg Greenway --- .../{http_conn_man => }/access_log.rst | 112 +++++--- docs/configuration/configuration.rst | 1 + docs/configuration/http_conn_man/headers.rst | 2 +- .../http_conn_man/http_conn_man.rst | 5 +- .../network_filters/tcp_proxy_filter.rst | 5 + docs/intro/arch_overview/access_logging.rst | 19 ++ docs/intro/arch_overview/arch_overview.rst | 1 + .../http_connection_management.rst | 19 +- docs/operations/cli.rst | 2 +- include/envoy/access_log/access_log.h | 6 +- .../common/access_log/access_log_formatter.cc | 51 ++-- .../common/access_log/access_log_formatter.h | 2 +- source/common/access_log/request_info_impl.h | 18 +- source/common/config/filter_json.cc | 2 + source/common/filter/BUILD | 5 + source/common/filter/tcp_proxy.cc | 29 +- source/common/filter/tcp_proxy.h | 9 +- source/common/grpc/http1_bridge_filter.cc | 4 +- source/common/json/config_schemas.cc | 253 +++++++++--------- source/common/json/config_schemas.h | 3 + source/server/config/network/tcp_proxy.cc | 3 +- .../access_log/access_log_formatter_test.cc | 30 ++- .../common/access_log/access_log_impl_test.cc | 12 +- .../access_log/request_info_impl_test.cc | 54 ++-- test/common/filter/BUILD | 2 + test/common/filter/tcp_proxy_test.cc | 201 ++++++++++---- test/common/grpc/http1_bridge_filter_test.cc | 4 +- test/common/json/config_schemas_test.cc | 4 +- .../test_access_log_schema.py | 127 +++++++++ .../test_http_conn_network_filter_schema.py | 118 +------- test/common/network/BUILD | 1 + .../network/filter_manager_impl_test.cc | 27 +- .../router/req_header_formatter_test.cc | 3 +- test/common/tracing/http_tracer_impl_test.cc | 12 +- test/mocks/access_log/mocks.cc | 5 +- test/mocks/access_log/mocks.h | 6 +- 36 files changed, 708 insertions(+), 449 deletions(-) rename 
docs/configuration/{http_conn_man => }/access_log.rst (60%) create mode 100644 docs/intro/arch_overview/access_logging.rst create mode 100644 test/common/json/config_schemas_test_data/test_access_log_schema.py diff --git a/docs/configuration/http_conn_man/access_log.rst b/docs/configuration/access_log.rst similarity index 60% rename from docs/configuration/http_conn_man/access_log.rst rename to docs/configuration/access_log.rst index 8aa645dc0dab..83c23135daba 100644 --- a/docs/configuration/http_conn_man/access_log.rst +++ b/docs/configuration/access_log.rst @@ -1,4 +1,4 @@ -.. _config_http_conn_man_access_log: +.. _config_access_log: Access logging ============== @@ -7,7 +7,7 @@ Configuration ------------------------- Access logs are configured as part of the :ref:`HTTP connection manager config -`. +` or :ref:`TCP Proxy `. .. code-block:: json @@ -21,25 +21,25 @@ Access logs are configured as part of the :ref:`HTTP connection manager config ] } -.. _config_http_conn_man_access_log_path_param: +.. _config_access_log_path_param: path *(required, string)* Path the access log is written to. -.. _config_http_conn_man_access_log_format_param: +.. _config_access_log_format_param: format *(optional, string)* Access log format. Envoy supports :ref:`custom access log formats - ` as well as a :ref:`default format - `. + ` as well as a :ref:`default format + `. -.. _config_http_conn_man_access_log_filter_param: +.. _config_access_log_filter_param: filter *(optional, object)* :ref:`Filter ` which is used to determine if the access log needs to be written. -.. _config_http_con_manager_access_log_format: +.. _config_access_log_format: Format rules ------------ @@ -47,45 +47,76 @@ Format rules The access log format string contains either command operators or other characters interpreted as a plain string. The access log formatter does not make any assumptions about a new line separator, so one has to specified as part of the format string. 
-See the :ref:`default format ` for an example. +See the :ref:`default format ` for an example. Note that the access log line will contain a '-' character for every not set/empty value. +The same format strings are used by different types of access logs (such as HTTP and TCP). Some +fields may have slightly different meanings, depending on what type of log it is. Differences +are noted. + The following command operators are supported: %START_TIME% - Request start time including milliseconds. + HTTP + Request start time including milliseconds. + + TCP + Downstream connection start time including milliseconds. %BYTES_RECEIVED% - Body bytes received. + HTTP + Body bytes received. + + TCP + Downstream bytes received on connection. %PROTOCOL% - Protocol. Currently either *HTTP/1.1* or *HTTP/2*. + HTTP + Protocol. Currently either *HTTP/1.1* or *HTTP/2*. + + TCP + Not implemented ("-"). %RESPONSE_CODE% - HTTP response code. Note that a response code of '0' means that the server never sent the - beginning of a response. This generally means that the (downstream) client disconnected. + HTTP + HTTP response code. Note that a response code of '0' means that the server never sent the + beginning of a response. This generally means that the (downstream) client disconnected. + + TCP + Not implemented ("-"). %BYTES_SENT% - Body bytes sent. + HTTP + Body bytes sent. + + TCP + Downstream bytes sent on connection. %DURATION% - Total duration in milliseconds of the request from the start time to the last byte out. + HTTP + Total duration in milliseconds of the request from the start time to the last byte out. + + TCP + Total duration in milliseconds of the downstream connection. %RESPONSE_FLAGS% - Additional details about the response, if any. Possible values are: - - * **LH**: Local service failed :ref:`health check request ` in addition to 503 response code. - * **UH**: No healthy upstream hosts in upstream cluster in addition to 503 response code. 
- * **UT**: Upstream request timeout in addition to 504 response code. - * **LR**: Connection local reset in addition to 503 response code. - * **UR**: Upstream remote reset in addition to 503 response code. - * **UF**: Upstream connection failure in addition to 503 response code. - * **UC**: Upstream connection termination in addition to 503 response code. - * **UO**: Upstream overflow (:ref:`circuit breaking `) in addition to 503 response code. - * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code. - * **DI**: The request processing was delayed for a period specified via :ref:`fault injection `. - * **FI**: The request was aborted with a response code specified via :ref:`fault injection `. - * **RL**: The request was ratelimited locally by the :ref:`HTTP rate limit filter ` in addition to 429 response code. + Additional details about the response or connection, if any. For TCP connections, the response codes mentioned in + the descriptions do not apply. Possible values are: + + HTTP and TCP + * **UH**: No healthy upstream hosts in upstream cluster in addition to 503 response code. + * **UF**: Upstream connection failure in addition to 503 response code. + * **UO**: Upstream overflow (:ref:`circuit breaking `) in addition to 503 response code. + * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code. + HTTP only + * **LH**: Local service failed :ref:`health check request ` in addition to 503 response code. + * **UT**: Upstream request timeout in addition to 504 response code. + * **LR**: Connection local reset in addition to 503 response code. + * **UR**: Upstream remote reset in addition to 503 response code. + * **UC**: Upstream connection termination in addition to 503 response code. + * **DI**: The request processing was delayed for a period specified via :ref:`fault injection `. + * **FI**: The request was aborted with a response code specified via :ref:`fault injection `. 
+ * **RL**: The request was ratelimited locally by the :ref:`HTTP rate limit filter ` in addition to 429 response code. %UPSTREAM_HOST% Upstream host URL (e.g., tcp://ip:port for TCP connections). @@ -94,15 +125,24 @@ The following command operators are supported: Upstream cluster to which the upstream host belongs to. %REQ(X?Y):Z% - An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an - optional parameter denoting string truncation up to Z characters long. The value is taken from the - HTTP request header named X first and if it's not set, then request header Y is used. If none of - the headers are present '-' symbol will be in the log. + HTTP + An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an + optional parameter denoting string truncation up to Z characters long. The value is taken from + the HTTP request header named X first and if it's not set, then request header Y is used. If + none of the headers are present '-' symbol will be in the log. + + TCP + Not implemented ("-"). %RESP(X?Y):Z% - Same as **%REQ(X?Y):Z%** but taken from HTTP response headers. + HTTP + Same as **%REQ(X?Y):Z%** but taken from HTTP response headers. + + TCP + Not implemented ("-"). + -.. _config_http_con_manager_access_log_default_format: +.. 
_config_access_log_default_format: Default format -------------- diff --git a/docs/configuration/configuration.rst b/docs/configuration/configuration.rst index d31da9bf19c3..5d5fbc1369dc 100644 --- a/docs/configuration/configuration.rst +++ b/docs/configuration/configuration.rst @@ -13,4 +13,5 @@ Configuration reference http_conn_man/http_conn_man http_filters/http_filters cluster_manager/cluster_manager + access_log tools/router_check diff --git a/docs/configuration/http_conn_man/headers.rst b/docs/configuration/http_conn_man/headers.rst index deb7e2eccd95..02f808c15684 100644 --- a/docs/configuration/http_conn_man/headers.rst +++ b/docs/configuration/http_conn_man/headers.rst @@ -203,7 +203,7 @@ is one of the few areas where a thin client library is needed to perform this du is out of scope for this documentation. If *x-request-id* is propagated across all hosts, the following features are available: -* Stable :ref:`access logging ` via the +* Stable :ref:`access logging ` via the :ref:`runtime filter`. * Stable tracing when performing random sampling via the :ref:`tracing.random_sampling ` runtime setting or via forced tracing using the diff --git a/docs/configuration/http_conn_man/http_conn_man.rst b/docs/configuration/http_conn_man/http_conn_man.rst index 713c1f8c52b3..60784e9c94e4 100644 --- a/docs/configuration/http_conn_man/http_conn_man.rst +++ b/docs/configuration/http_conn_man/http_conn_man.rst @@ -167,8 +167,8 @@ drain_timeout_ms when a connection hits the idle timeout or during general server draining. The default grace period is 5000 milliseconds (5 seconds) if this option is not specified. -:ref:`access_log ` - *(optional, array)* Configuration for :ref:`HTTP access logs ` +:ref:`access_log ` + *(optional, array)* Configuration for :ref:`HTTP access logs ` emitted by the connection manager. .. 
_config_http_conn_man_use_remote_address: @@ -218,7 +218,6 @@ generate_request_id route_config/route_config filters - access_log tracing headers header_sanitizing diff --git a/docs/configuration/network_filters/tcp_proxy_filter.rst b/docs/configuration/network_filters/tcp_proxy_filter.rst index e6d345c99384..e398af6d5b13 100644 --- a/docs/configuration/network_filters/tcp_proxy_filter.rst +++ b/docs/configuration/network_filters/tcp_proxy_filter.rst @@ -12,6 +12,7 @@ TCP proxy :ref:`architecture overview `. "config": { "stat_prefix": "...", "route_config": "{...}" + "access_log": "[]" } } @@ -23,6 +24,10 @@ stat_prefix *(required, string)* The prefix to use when emitting :ref:`statistics `. +:ref:`access_log ` + *(optional, array)* Configuration for :ref:`access logs ` + emitted by the this tcp_proxy. + .. _config_network_filters_tcp_proxy_route_config: Route Configuration diff --git a/docs/intro/arch_overview/access_logging.rst b/docs/intro/arch_overview/access_logging.rst new file mode 100644 index 000000000000..19cd82f83a26 --- /dev/null +++ b/docs/intro/arch_overview/access_logging.rst @@ -0,0 +1,19 @@ +.. _arch_overview_access_logs: + +Access logging +=================== + +The :ref:`HTTP connection manager ` and +:ref:`tcp proxy ` supports extensible access logging with the following +features: + +* Any number of access logs per connection manager or tcp proxy. +* Asynchronous IO flushing architecture. Access logging will never block the main network processing + threads. +* Customizable access log formats using predefined fields as well as arbitrary HTTP request and + response headers. +* Customizable access log filters that allow different types of requests and responses to be written + to different access logs. + +Access log :ref:`configuration `. 
+ diff --git a/docs/intro/arch_overview/arch_overview.rst b/docs/intro/arch_overview/arch_overview.rst index 6b42c84f9faa..8011b1f5fd56 100644 --- a/docs/intro/arch_overview/arch_overview.rst +++ b/docs/intro/arch_overview/arch_overview.rst @@ -26,6 +26,7 @@ Architecture overview runtime tracing tcp_proxy + access_logging mongo dynamo redis diff --git a/docs/intro/arch_overview/http_connection_management.rst b/docs/intro/arch_overview/http_connection_management.rst index 65de1b579d42..4f1d415b48e3 100644 --- a/docs/intro/arch_overview/http_connection_management.rst +++ b/docs/intro/arch_overview/http_connection_management.rst @@ -8,7 +8,7 @@ large amount of HTTP specific functionality. Envoy has a built in network level :ref:`HTTP connection manager `. This filter translates raw bytes into HTTP level messages and events (e.g., headers received, body data received, trailers received, etc.). It also handles functionality common to all HTTP connections and requests such as :ref:`access logging -`, :ref:`request ID generation and tracing `, +`, :ref:`request ID generation and tracing `, :ref:`request/response header manipulation `, :ref:`route table ` management, and :ref:`statistics `. @@ -34,23 +34,6 @@ HTTP header sanitizing The HTTP connection manager performs various :ref:`header sanitizing ` actions for security reasons. -.. _arch_overview_http_access_logs: - -HTTP access logging -------------------- - -The HTTP connection manager supports extensible access logging with the following features: - -* Any number of access logs per connection manager. -* Asynchronous IO flushing architecture. Access logging will never block the main network processing - threads. -* Customizable access log formats using predefined fields as well as arbitrary HTTP request and - response headers. -* Customizable access log filters that allow different types of requests and responses to be written - to different access logs. - -HTTP access log :ref:`configuration `. 
- Route table configuration ------------------------- diff --git a/docs/operations/cli.rst b/docs/operations/cli.rst index 297feff8318a..0a7af8122eb5 100644 --- a/docs/operations/cli.rst +++ b/docs/operations/cli.rst @@ -98,7 +98,7 @@ following are the command line options that Envoy supports. This setting is used during file creation to determine the duration between flushes of buffers to files. The buffer will flush every time it gets full, or every time the interval has elapsed, whichever comes first. Adjusting this setting is useful - when tailing :ref:`access logs ` in order to + when tailing :ref:`access logs ` in order to get more (or less) immediate flushing. .. option:: --drain-time-s diff --git a/include/envoy/access_log/access_log.h b/include/envoy/access_log/access_log.h index 955bd72f3ccd..0f34feafc38d 100644 --- a/include/envoy/access_log/access_log.h +++ b/include/envoy/access_log/access_log.h @@ -88,7 +88,7 @@ class RequestInfo { * @return duration from request start to when the entire request was received from the * downstream client in microseconds. Note: if unset, will return 0 microseconds. */ - virtual std::chrono::microseconds requestReceivedDuration() const PURE; + virtual const Optional& requestReceivedDuration() const PURE; /** * Set the duration from request start to when the entire request was received from the @@ -101,7 +101,7 @@ class RequestInfo { * @return the duration from request start to when the entire response was received from the * upstream host in microseconds. Note: if unset, will return 0 microseconds. */ - virtual std::chrono::microseconds responseReceivedDuration() const PURE; + virtual const Optional& responseReceivedDuration() const PURE; /** * Set the duration from request start to when the entire response was received from the @@ -118,7 +118,7 @@ class RequestInfo { /** * @return the protocol of the request. 
*/ - virtual Http::Protocol protocol() const PURE; + virtual const Optional& protocol() const PURE; /** * Set the request's protocol. diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index a1bea89e92ed..650127565147 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -12,7 +12,9 @@ namespace Envoy { namespace AccessLog { -const std::string ResponseFlagUtils::NONE = "-"; +static const std::string UnspecifiedValueString = "-"; + +const std::string ResponseFlagUtils::NONE = UnspecifiedValueString; const std::string ResponseFlagUtils::FAILED_LOCAL_HEALTH_CHECK = "LH"; const std::string ResponseFlagUtils::NO_HEALTHY_UPSTREAM = "UH"; const std::string ResponseFlagUtils::UPSTREAM_REQUEST_TIMEOUT = "UT"; @@ -103,14 +105,19 @@ static const std::string Http10String = "HTTP/1.0"; static const std::string Http11String = "HTTP/1.1"; static const std::string Http2String = "HTTP/2"; -const std::string& AccessLogFormatUtils::protocolToString(Http::Protocol protocol) { - switch (protocol) { - case Http::Protocol::Http10: - return Http10String; - case Http::Protocol::Http11: - return Http11String; - case Http::Protocol::Http2: - return Http2String; +const std::string& +AccessLogFormatUtils::protocolToString(const Optional& protocol) { + if (protocol.valid()) { + switch (protocol.value()) { + case Http::Protocol::Http10: + return Http10String; + case Http::Protocol::Http11: + return Http11String; + case Http::Protocol::Http2: + return Http2String; + } + } else { + return UnspecifiedValueString; } NOT_REACHED; @@ -232,15 +239,23 @@ RequestInfoFormatter::RequestInfoFormatter(const std::string& field_name) { }; } else if (field_name == "REQUEST_DURATION") { field_extractor_ = [](const RequestInfo& request_info) { - return std::to_string(std::chrono::duration_cast( - request_info.requestReceivedDuration()) - .count()); + Optional duration = 
request_info.requestReceivedDuration(); + if (duration.valid()) { + return std::to_string( + std::chrono::duration_cast(duration.value()).count()); + } else { + return UnspecifiedValueString; + } }; } else if (field_name == "RESPONSE_DURATION") { field_extractor_ = [](const RequestInfo& request_info) { - return std::to_string(std::chrono::duration_cast( - request_info.responseReceivedDuration()) - .count()); + Optional duration = request_info.responseReceivedDuration(); + if (duration.valid()) { + return std::to_string( + std::chrono::duration_cast(duration.value()).count()); + } else { + return UnspecifiedValueString; + } }; } else if (field_name == "BYTES_RECEIVED") { field_extractor_ = [](const RequestInfo& request_info) { @@ -274,7 +289,7 @@ RequestInfoFormatter::RequestInfoFormatter(const std::string& field_name) { if (request_info.upstreamHost()) { return request_info.upstreamHost()->address()->asString(); } else { - return std::string("-"); + return UnspecifiedValueString; } }; } else if (field_name == "UPSTREAM_CLUSTER") { @@ -284,7 +299,7 @@ RequestInfoFormatter::RequestInfoFormatter(const std::string& field_name) { upstream_cluster_name = request_info.upstreamHost()->cluster().name(); } - return upstream_cluster_name.empty() ? "-" : upstream_cluster_name; + return upstream_cluster_name.empty() ? 
UnspecifiedValueString : upstream_cluster_name; }; } else { throw EnvoyException(fmt::format("Not supported field in RequestInfo: {}", field_name)); @@ -317,7 +332,7 @@ std::string HeaderFormatter::format(const Http::HeaderMap& headers) const { std::string header_value_string; if (!header) { - header_value_string = "-"; + header_value_string = UnspecifiedValueString; } else { header_value_string = header->value().c_str(); } diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h index 1e5cc1dd57a0..cceafffe55d0 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/access_log/access_log_formatter.h @@ -53,7 +53,7 @@ class AccessLogFormatParser { class AccessLogFormatUtils { public: static FormatterPtr defaultAccessLogFormatter(); - static const std::string& protocolToString(Http::Protocol protocol); + static const std::string& protocolToString(const Optional& protocol); private: AccessLogFormatUtils(); diff --git a/source/common/access_log/request_info_impl.h b/source/common/access_log/request_info_impl.h index 6957fd6d45cd..77baa8676630 100644 --- a/source/common/access_log/request_info_impl.h +++ b/source/common/access_log/request_info_impl.h @@ -9,14 +9,16 @@ namespace Envoy { namespace AccessLog { struct RequestInfoImpl : public RequestInfo { - RequestInfoImpl(Http::Protocol protocol) - : protocol_(protocol), start_time_(std::chrono::system_clock::now()), + RequestInfoImpl() + : start_time_(std::chrono::system_clock::now()), start_time_monotonic_(std::chrono::steady_clock::now()) {} + RequestInfoImpl(Http::Protocol protocol) : RequestInfoImpl() { protocol_ = protocol; } + // AccessLog::RequestInfo SystemTime startTime() const override { return start_time_; } - std::chrono::microseconds requestReceivedDuration() const override { + const Optional& requestReceivedDuration() const override { return request_received_duration_; } void requestReceivedDuration(MonotonicTime time) override 
{ @@ -24,7 +26,7 @@ struct RequestInfoImpl : public RequestInfo { std::chrono::duration_cast(time - start_time_monotonic_); } - std::chrono::microseconds responseReceivedDuration() const override { + const Optional& responseReceivedDuration() const override { return response_received_duration_; } void responseReceivedDuration(MonotonicTime time) override { @@ -34,7 +36,7 @@ struct RequestInfoImpl : public RequestInfo { uint64_t bytesReceived() const override { return bytes_received_; } - Http::Protocol protocol() const override { return protocol_; } + const Optional& protocol() const override { return protocol_; } void protocol(Http::Protocol protocol) override { protocol_ = protocol; } const Optional& responseCode() const override { return response_code_; } @@ -64,11 +66,11 @@ struct RequestInfoImpl : public RequestInfo { const std::string& getDownstreamAddress() const override { return downstream_address_; }; - Http::Protocol protocol_; + Optional protocol_; const SystemTime start_time_; const MonotonicTime start_time_monotonic_; - std::chrono::microseconds request_received_duration_{}; - std::chrono::microseconds response_received_duration_{}; + Optional request_received_duration_{}; + Optional response_received_duration_{}; uint64_t bytes_received_{}; Optional response_code_; uint64_t bytes_sent_{}; diff --git a/source/common/config/filter_json.cc b/source/common/config/filter_json.cc index dd6ff7cb9b6c..4bafa0d7c411 100644 --- a/source/common/config/filter_json.cc +++ b/source/common/config/filter_json.cc @@ -89,6 +89,8 @@ void FilterJson::translateAccessLogFilter( void FilterJson::translateAccessLog(const Json::Object& json_access_log, envoy::api::v2::filter::AccessLog& access_log) { + json_access_log.validateSchema(Json::Schema::ACCESS_LOG_SCHEMA); + envoy::api::v2::filter::FileAccessLog file_access_log; JSON_UTIL_SET_STRING(json_access_log, file_access_log, path); diff --git a/source/common/filter/BUILD b/source/common/filter/BUILD index 
69eab421ef60..3bd3e68ae54a 100644 --- a/source/common/filter/BUILD +++ b/source/common/filter/BUILD @@ -42,20 +42,25 @@ envoy_cc_library( srcs = ["tcp_proxy.cc"], hdrs = ["tcp_proxy.h"], deps = [ + "//include/envoy/access_log:access_log_interface", "//include/envoy/buffer:buffer_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", "//include/envoy/network:connection_interface", "//include/envoy/network:filter_interface", "//include/envoy/router:router_interface", + "//include/envoy/server:filter_config_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan", "//include/envoy/upstream:cluster_manager_interface", "//include/envoy/upstream:upstream_interface", + "//source/common/access_log:access_log_lib", + "//source/common/access_log:request_info_lib", "//source/common/common:assert_lib", "//source/common/common:empty_string", "//source/common/common:logger_lib", + "//source/common/config:filter_json_lib", "//source/common/json:config_schemas_lib", "//source/common/json:json_loader_lib", "//source/common/network:cidr_range_lib", diff --git a/source/common/filter/tcp_proxy.cc b/source/common/filter/tcp_proxy.cc index 28f8303a29d1..f0619f505e20 100644 --- a/source/common/filter/tcp_proxy.cc +++ b/source/common/filter/tcp_proxy.cc @@ -10,11 +10,14 @@ #include "envoy/upstream/cluster_manager.h" #include "envoy/upstream/upstream.h" +#include "common/access_log/access_log_impl.h" #include "common/common/assert.h" #include "common/common/empty_string.h" +#include "common/config/filter_json.h" #include "common/json/config_schemas.h" #include "common/json/json_loader.h" +#include "api/filter/http/http_connection_manager.pb.h" #include "fmt/format.h" namespace Envoy { @@ -43,19 +46,25 @@ TcpProxyConfig::Route::Route(const Json::Object& config) { } TcpProxyConfig::TcpProxyConfig(const Json::Object& config, - Upstream::ClusterManager& cluster_manager, Stats::Scope& 
scope) - : stats_(generateStats(config.getString("stat_prefix"), scope)) { + Server::Configuration::FactoryContext& context) + : stats_(generateStats(config.getString("stat_prefix"), context.scope())) { config.validateSchema(Json::Schema::TCP_PROXY_NETWORK_FILTER_SCHEMA); for (const Json::ObjectSharedPtr& route_desc : config.getObject("route_config")->getObjectArray("routes")) { routes_.emplace_back(Route(*route_desc)); - if (!cluster_manager.get(route_desc->getString("cluster"))) { + if (!context.clusterManager().get(route_desc->getString("cluster"))) { throw EnvoyException(fmt::format("tcp proxy: unknown cluster '{}' in TCP route", route_desc->getString("cluster"))); } } + + for (const Json::ObjectSharedPtr& json_access_log : config.getObjectArray("access_log", true)) { + envoy::api::v2::filter::AccessLog v2_access_log; + Config::FilterJson::translateAccessLog(*json_access_log, v2_access_log); + access_logs_.emplace_back(AccessLog::AccessLogFactory::fromProto(v2_access_log, context)); + } } const std::string& TcpProxyConfig::getRouteFromEntries(Network::Connection& connection) { @@ -93,6 +102,12 @@ TcpProxy::TcpProxy(TcpProxyConfigSharedPtr config, Upstream::ClusterManager& clu upstream_callbacks_(new UpstreamCallbacks(*this)) {} TcpProxy::~TcpProxy() { + if (config_ != nullptr) { + for (const auto& access_log : config_->accessLogs()) { + access_log->log(nullptr, nullptr, request_info_); + } + } + if (upstream_connection_) { read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_destroy_.inc(); read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_active_.dec(); @@ -200,12 +215,14 @@ Network::FilterStatus TcpProxy::initializeUpstreamConnection() { if (config_) { config_->stats().downstream_cx_no_route_.inc(); } + request_info_.setResponseFlag(AccessLog::ResponseFlag::NoRouteFound); onInitFailure(); return Network::FilterStatus::StopIteration; } Upstream::ClusterInfoConstSharedPtr cluster = thread_local_cluster->info(); if 
(!cluster->resourceManager(Upstream::ResourcePriority::Default).connections().canCreate()) { + request_info_.setResponseFlag(AccessLog::ResponseFlag::UpstreamOverflow); cluster->stats().upstream_cx_overflow_.inc(); onInitFailure(); return Network::FilterStatus::StopIteration; @@ -216,10 +233,12 @@ Network::FilterStatus TcpProxy::initializeUpstreamConnection() { upstream_connection_ = std::move(conn_info.connection_); read_callbacks_->upstreamHost(conn_info.host_description_); if (!upstream_connection_) { + request_info_.setResponseFlag(AccessLog::ResponseFlag::NoHealthyUpstream); onInitFailure(); return Network::FilterStatus::StopIteration; } + request_info_.onUpstreamHostSelected(conn_info.host_description_); onUpstreamHostReady(); cluster->resourceManager(Upstream::ResourcePriority::Default).connections().inc(); upstream_connection_->addReadFilter(upstream_callbacks_); @@ -252,6 +271,7 @@ Network::FilterStatus TcpProxy::initializeUpstreamConnection() { void TcpProxy::onConnectTimeout() { ENVOY_CONN_LOG(debug, "connect timeout", read_callbacks_->connection()); read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_connect_timeout_.inc(); + request_info_.setResponseFlag(AccessLog::ResponseFlag::UpstreamConnectionFailure); // This will close the upstream connection as well. 
onConnectTimeoutError(); @@ -259,6 +279,7 @@ void TcpProxy::onConnectTimeout() { Network::FilterStatus TcpProxy::onData(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "received {} bytes", read_callbacks_->connection(), data.length()); + request_info_.bytes_received_ += data.length(); upstream_connection_->write(data); ASSERT(0 == data.length()); return Network::FilterStatus::StopIteration; @@ -277,6 +298,7 @@ void TcpProxy::onDownstreamEvent(Network::ConnectionEvent event) { } void TcpProxy::onUpstreamData(Buffer::Instance& data) { + request_info_.bytes_sent_ += data.length(); read_callbacks_->connection().write(data); ASSERT(0 == data.length()); } @@ -285,6 +307,7 @@ void TcpProxy::onUpstreamEvent(Network::ConnectionEvent event) { if (event == Network::ConnectionEvent::RemoteClose) { read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_destroy_remote_.inc(); if (connect_timeout_timer_) { + request_info_.setResponseFlag(AccessLog::ResponseFlag::UpstreamConnectionFailure); read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_connect_fail_.inc(); read_callbacks_->upstreamHost()->stats().cx_connect_fail_.inc(); } diff --git a/source/common/filter/tcp_proxy.h b/source/common/filter/tcp_proxy.h index 15eddd486c55..1e2c24dfaec5 100644 --- a/source/common/filter/tcp_proxy.h +++ b/source/common/filter/tcp_proxy.h @@ -5,14 +5,17 @@ #include #include +#include "envoy/access_log/access_log.h" #include "envoy/event/timer.h" #include "envoy/network/connection.h" #include "envoy/network/filter.h" +#include "envoy/server/filter_config.h" #include "envoy/stats/stats_macros.h" #include "envoy/stats/timespan.h" #include "envoy/upstream/cluster_manager.h" #include "envoy/upstream/upstream.h" +#include "common/access_log/request_info_impl.h" #include "common/common/logger.h" #include "common/json/json_loader.h" #include "common/network/cidr_range.h" @@ -49,8 +52,7 @@ struct TcpProxyStats { */ class TcpProxyConfig { public: - TcpProxyConfig(const Json::Object& 
config, Upstream::ClusterManager& cluster_manager, - Stats::Scope& scope); + TcpProxyConfig(const Json::Object& config, Server::Configuration::FactoryContext& context); /** * Find out which cluster an upstream connection should be opened to based on the @@ -63,6 +65,7 @@ class TcpProxyConfig { const std::string& getRouteFromEntries(Network::Connection& connection); const TcpProxyStats& stats() { return stats_; } + const std::vector& accessLogs() { return access_logs_; } private: struct Route { @@ -79,6 +82,7 @@ class TcpProxyConfig { std::vector routes_; const TcpProxyStats stats_; + std::vector access_logs_; }; typedef std::shared_ptr TcpProxyConfigSharedPtr; @@ -180,6 +184,7 @@ class TcpProxy : public Network::ReadFilter, Stats::TimespanPtr connected_timespan_; std::shared_ptr upstream_callbacks_; // shared_ptr required for passing as a // read filter. + AccessLog::RequestInfoImpl request_info_; }; } // Filter diff --git a/source/common/grpc/http1_bridge_filter.cc b/source/common/grpc/http1_bridge_filter.cc index 35e9544d9803..33c38d4d7cde 100644 --- a/source/common/grpc/http1_bridge_filter.cc +++ b/source/common/grpc/http1_bridge_filter.cc @@ -28,7 +28,9 @@ Http::FilterHeadersStatus Http1BridgeFilter::decodeHeaders(Http::HeaderMap& head setupStatTracking(headers); } - if (decoder_callbacks_->requestInfo().protocol() != Http::Protocol::Http2 && grpc_request) { + const Optional& protocol = decoder_callbacks_->requestInfo().protocol(); + ASSERT(protocol.valid()); + if (protocol.value() != Http::Protocol::Http2 && grpc_request) { do_bridging_ = true; } diff --git a/source/common/json/config_schemas.cc b/source/common/json/config_schemas.cc index 0d38b560ee62..28b1876ef13b 100644 --- a/source/common/json/config_schemas.cc +++ b/source/common/json/config_schemas.cc @@ -3,6 +3,132 @@ #include namespace Envoy { +const std::string Json::Schema::ACCESS_LOG_SCHEMA(R"EOF( + { + "$schema": "http://json-schema.org/schema#", + "definitions": { + "status_code" : { + "type" : 
"object", + "properties" : { + "type" : { + "type" : "string", + "enum" : ["status_code"] + }, + "op" : { + "type" : "string", + "enum" : [">=", "="] + }, + "value" : { + "type" : "integer", + "minimum" : 0, + "maximum" : 599 + }, + "runtime_key" : {"type" : "string"} + }, + "required" : ["type", "op", "value"], + "additionalProperties" : false + }, + "duration" : { + "type" : "object", + "properties" : { + "type" : { + "type" : "string", + "enum" : ["duration"] + }, + "op" : { + "type" : "string", + "enum" : [">=", "="] + }, + "value" : { + "type" : "integer", + "minimum" : 0 + }, + "runtime_key" : {"type" : "string"} + }, + "required" : ["type", "op", "value"], + "additionalProperties" : false + }, + "not_healthcheck" : { + "type" : "object", + "properties" : { + "type" : { + "type" : "string", + "enum" : ["not_healthcheck"] + } + }, + "required" : ["type"], + "additionalProperties" : false + }, + "traceable_request" : { + "type" : "object", + "properties" : { + "type" : { + "type" : "string", + "enum" : ["traceable_request"] + } + }, + "required" : ["type"], + "additionalProperties" : false + }, + "runtime" : { + "type" : "object", + "properties" : { + "type" : { + "type" : "string", + "enum" : ["runtime"] + }, + "key" : {"type" : "string"} + }, + "required" : ["type", "key"], + "additionalProperties" : false + }, + "logical_filter" : { + "type" : "object", + "properties" : { + "type" : { + "type" : "string", + "enum" : ["logical_and", "logical_or"] + }, + "filters" : { + "type" : "array", + "minItems" : 2, + "items" : { + "oneOf" : [ + {"$ref" : "#/definitions/status_code"}, + {"$ref" : "#/definitions/duration"}, + {"$ref" : "#/definitions/not_healthcheck"}, + {"$ref" : "#/definitions/logical_filter"}, + {"$ref" : "#/definitions/traceable_request"}, + {"$ref" : "#/definitions/runtime"} + ] + } + } + }, + "required" : ["type", "filters"], + "additionalProperties" : false + } + }, + "type" : "object", + "properties" : { + "path" : {"type" : "string"}, + "format" 
: {"type" : "string"}, + "filter" : { + "type" : "object", + "oneOf" : [ + {"$ref" : "#/definitions/not_healthcheck"}, + {"$ref" : "#/definitions/status_code"}, + {"$ref" : "#/definitions/duration"}, + {"$ref" : "#/definitions/traceable_request"}, + {"$ref" : "#/definitions/runtime"}, + {"$ref" : "#/definitions/logical_filter"} + ] + } + }, + "required" : ["path"], + "additionalProperties" : false + } + )EOF"); + const std::string Json::Schema::LISTENER_SCHEMA(R"EOF( { "$schema": "http://json-schema.org/schema#", @@ -124,106 +250,6 @@ const std::string Json::Schema::HTTP_CONN_NETWORK_FILTER_SCHEMA(R"EOF( { "$schema": "http://json-schema.org/schema#", "definitions" : { - "status_code" : { - "type" : "object", - "properties" : { - "type" : { - "type" : "string", - "enum" : ["status_code"] - }, - "op" : { - "type" : "string", - "enum" : [">=", "="] - }, - "value" : { - "type" : "integer", - "minimum" : 0, - "maximum" : 599 - }, - "runtime_key" : {"type" : "string"} - }, - "required" : ["type", "op", "value"], - "additionalProperties" : false - }, - "duration" : { - "type" : "object", - "properties" : { - "type" : { - "type" : "string", - "enum" : ["duration"] - }, - "op" : { - "type" : "string", - "enum" : [">=", "="] - }, - "value" : { - "type" : "integer", - "minimum" : 0 - }, - "runtime_key" : {"type" : "string"} - }, - "required" : ["type", "op", "value"], - "additionalProperties" : false - }, - "not_healthcheck" : { - "type" : "object", - "properties" : { - "type" : { - "type" : "string", - "enum" : ["not_healthcheck"] - } - }, - "required" : ["type"], - "additionalProperties" : false - }, - "traceable_request" : { - "type" : "object", - "properties" : { - "type" : { - "type" : "string", - "enum" : ["traceable_request"] - } - }, - "required" : ["type"], - "additionalProperties" : false - }, - "runtime" : { - "type" : "object", - "properties" : { - "type" : { - "type" : "string", - "enum" : ["runtime"] - }, - "key" : {"type" : "string"} - }, - "required" : 
["type", "key"], - "additionalProperties" : false - }, - "logical_filter" : { - "type" : "object", - "properties" : { - "type" : { - "type" : "string", - "enum" : ["logical_and", "logical_or"] - }, - "filters" : { - "type" : "array", - "minItems" : 2, - "items" : { - "oneOf" : [ - {"$ref" : "#/definitions/status_code"}, - {"$ref" : "#/definitions/duration"}, - {"$ref" : "#/definitions/not_healthcheck"}, - {"$ref" : "#/definitions/logical_filter"}, - {"$ref" : "#/definitions/traceable_request"}, - {"$ref" : "#/definitions/runtime"} - ] - } - } - }, - "required" : ["type", "filters"], - "additionalProperties" : false - }, "tracing" : { "type" : "object", "properties" : { @@ -309,29 +335,7 @@ const std::string Json::Schema::HTTP_CONN_NETWORK_FILTER_SCHEMA(R"EOF( "server_name" : {"type" : "string"}, "idle_timeout_s" : {"type" : "integer"}, "drain_timeout_ms" : {"type" : "integer"}, - "access_log" : { - "type" : "array", - "items" : { - "type" : "object", - "properties" : { - "path" : {"type" : "string"}, - "format" : {"type" : "string"}, - "filter" : { - "type" : "object", - "oneOf" : [ - {"$ref" : "#/definitions/not_healthcheck"}, - {"$ref" : "#/definitions/status_code"}, - {"$ref" : "#/definitions/duration"}, - {"$ref" : "#/definitions/traceable_request"}, - {"$ref" : "#/definitions/runtime"}, - {"$ref" : "#/definitions/logical_filter"} - ] - } - }, - "required" : ["path"], - "additionalProperties" : false - } - }, + "access_log" : { "type": "array" }, "use_remote_address" : {"type" : "boolean"}, "forward_client_cert" : { "type" : "string", @@ -505,7 +509,8 @@ const std::string Json::Schema::TCP_PROXY_NETWORK_FILTER_SCHEMA(R"EOF( } }, "additionalProperties": false - } + }, + "access_log" : { "type": "array" } }, "required": ["stat_prefix", "route_config"], "additionalProperties": false diff --git a/source/common/json/config_schemas.h b/source/common/json/config_schemas.h index c601f0f925c4..09709ddbf596 100644 --- a/source/common/json/config_schemas.h +++ 
b/source/common/json/config_schemas.h @@ -10,6 +10,9 @@ class Schema { // Top Level Config Schemas static const std::string TOP_LEVEL_CONFIG_SCHEMA; + // Access log Schema + static const std::string ACCESS_LOG_SCHEMA; + // Listener Schema static const std::string LISTENER_SCHEMA; static const std::string LDS_SCHEMA; diff --git a/source/server/config/network/tcp_proxy.cc b/source/server/config/network/tcp_proxy.cc index 8f99595602ff..2079d0277674 100644 --- a/source/server/config/network/tcp_proxy.cc +++ b/source/server/config/network/tcp_proxy.cc @@ -13,8 +13,7 @@ namespace Configuration { NetworkFilterFactoryCb TcpProxyConfigFactory::createFilterFactory(const Json::Object& config, FactoryContext& context) { - Filter::TcpProxyConfigSharedPtr filter_config( - new Filter::TcpProxyConfig(config, context.clusterManager(), context.scope())); + Filter::TcpProxyConfigSharedPtr filter_config(new Filter::TcpProxyConfig(config, context)); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(Network::ReadFilterSharedPtr{ new Filter::TcpProxy(filter_config, context.clusterManager())}); diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index 67b12b697cab..dbade7fcad49 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -67,6 +67,7 @@ TEST(AccessLogFormatUtilsTest, protocolToString) { EXPECT_EQ("HTTP/1.0", AccessLogFormatUtils::protocolToString(Http::Protocol::Http10)); EXPECT_EQ("HTTP/1.1", AccessLogFormatUtils::protocolToString(Http::Protocol::Http11)); EXPECT_EQ("HTTP/2", AccessLogFormatUtils::protocolToString(Http::Protocol::Http2)); + EXPECT_EQ("-", AccessLogFormatUtils::protocolToString({})); } TEST(AccessLogFormatterTest, plainStringFormatter) { @@ -93,18 +94,32 @@ TEST(AccessLogFormatterTest, requestInfoFormatter) { { RequestInfoFormatter 
request_duration_format("REQUEST_DURATION"); - std::chrono::microseconds duration{5000}; - EXPECT_CALL(request_info, requestReceivedDuration()).WillOnce(Return(duration)); + Optional duration{std::chrono::microseconds(5000)}; + EXPECT_CALL(request_info, requestReceivedDuration()).WillOnce(ReturnRef(duration)); EXPECT_EQ("5", request_duration_format.format(header, header, request_info)); } + { + RequestInfoFormatter request_duration_format("REQUEST_DURATION"); + Optional duration; + EXPECT_CALL(request_info, requestReceivedDuration()).WillOnce(ReturnRef(duration)); + EXPECT_EQ("-", request_duration_format.format(header, header, request_info)); + } + { RequestInfoFormatter response_duration_format("RESPONSE_DURATION"); - std::chrono::microseconds duration{10000}; - EXPECT_CALL(request_info, responseReceivedDuration()).WillRepeatedly(Return(duration)); + Optional duration{std::chrono::microseconds(10000)}; + EXPECT_CALL(request_info, responseReceivedDuration()).WillRepeatedly(ReturnRef(duration)); EXPECT_EQ("10", response_duration_format.format(header, header, request_info)); } + { + RequestInfoFormatter response_duration_format("RESPONSE_DURATION"); + Optional duration; + EXPECT_CALL(request_info, responseReceivedDuration()).WillOnce(ReturnRef(duration)); + EXPECT_EQ("-", response_duration_format.format(header, header, request_info)); + } + { RequestInfoFormatter bytes_received_format("BYTES_RECEIVED"); EXPECT_CALL(request_info, bytesReceived()).WillOnce(Return(1)); @@ -113,7 +128,8 @@ TEST(AccessLogFormatterTest, requestInfoFormatter) { { RequestInfoFormatter protocol_format("PROTOCOL"); - EXPECT_CALL(request_info, protocol()).WillOnce(Return(Http::Protocol::Http11)); + Optional protocol = Http::Protocol::Http11; + EXPECT_CALL(request_info, protocol()).WillOnce(ReturnRef(protocol)); EXPECT_EQ("HTTP/1.1", protocol_format.format(header, header, request_info)); } @@ -237,8 +253,8 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { "%REQ(FIRST?SECOND)% 
%RESP(FIRST?SECOND)%[]"; FormatterImpl formatter(format); - Http::Protocol protocol = Http::Protocol::Http11; - EXPECT_CALL(request_info, protocol()).WillRepeatedly(Return(protocol)); + Optional protocol = Http::Protocol::Http11; + EXPECT_CALL(request_info, protocol()).WillRepeatedly(ReturnRef(protocol)); EXPECT_EQ("{{HTTP/1.1}} -++test GET PUT[]", formatter.format(request_header, response_header, request_info)); diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 3a5f8f475b4c..ff109478ad2b 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -61,16 +61,16 @@ class TestRequestInfo : public RequestInfo { } SystemTime startTime() const override { return start_time_; } - std::chrono::microseconds requestReceivedDuration() const override { + const Optional& requestReceivedDuration() const override { return request_received_duration_; } void requestReceivedDuration(MonotonicTime time) override { UNREFERENCED_PARAMETER(time); } - std::chrono::microseconds responseReceivedDuration() const override { + const Optional& responseReceivedDuration() const override { return request_received_duration_; } void responseReceivedDuration(MonotonicTime time) override { UNREFERENCED_PARAMETER(time); } uint64_t bytesReceived() const override { return 1; } - Http::Protocol protocol() const override { return protocol_; } + const Optional& protocol() const override { return protocol_; } void protocol(Http::Protocol protocol) override { protocol_ = protocol; } const Optional& responseCode() const override { return response_code_; } uint64_t bytesSent() const override { return 2; } @@ -90,9 +90,9 @@ class TestRequestInfo : public RequestInfo { const std::string& getDownstreamAddress() const override { return downstream_address_; } SystemTime start_time_; - std::chrono::microseconds request_received_duration_{1000}; - std::chrono::microseconds 
response_received_duration_{2000}; - Http::Protocol protocol_{Http::Protocol::Http11}; + Optional request_received_duration_{std::chrono::microseconds(1000)}; + Optional response_received_duration_{std::chrono::microseconds(2000)}; + Optional protocol_{Http::Protocol::Http11}; Optional response_code_; uint64_t response_flags_{}; uint64_t duration_{3000}; diff --git a/test/common/access_log/request_info_impl_test.cc b/test/common/access_log/request_info_impl_test.cc index 380a0b699a33..754d3b97b8f6 100644 --- a/test/common/access_log/request_info_impl_test.cc +++ b/test/common/access_log/request_info_impl_test.cc @@ -52,14 +52,14 @@ TEST(RequestInfoImplTest, TimingTest) { wrapper.checkTimingBounds( [](RequestInfoImpl& request_info) { request_info.requestReceivedDuration(std::chrono::steady_clock::now()); - return request_info.requestReceivedDuration(); + return request_info.requestReceivedDuration().value(); }, "request received"); wrapper.checkTimingBounds( [](RequestInfoImpl& request_info) { request_info.responseReceivedDuration(std::chrono::steady_clock::now()); - return request_info.responseReceivedDuration(); + return request_info.responseReceivedDuration().value(); }, "response received"); @@ -105,25 +105,43 @@ TEST(RequestInfoImplTest, ResponseFlagTest) { } TEST(RequestInfoImplTest, MiscSettersAndGetters) { - RequestInfoImpl request_info(Http::Protocol::Http2); - EXPECT_EQ(Http::Protocol::Http2, request_info.protocol()); - - request_info.protocol(Http::Protocol::Http10); - EXPECT_EQ(Http::Protocol::Http10, request_info.protocol()); + { + RequestInfoImpl request_info(Http::Protocol::Http2); + EXPECT_EQ(Http::Protocol::Http2, request_info.protocol().value()); + + request_info.protocol(Http::Protocol::Http10); + EXPECT_EQ(Http::Protocol::Http10, request_info.protocol().value()); + + EXPECT_FALSE(request_info.responseCode().valid()); + request_info.response_code_ = 200; + ASSERT_TRUE(request_info.responseCode().valid()); + EXPECT_EQ(200, 
request_info.responseCode().value()); + + EXPECT_EQ(nullptr, request_info.upstreamHost()); + Upstream::HostDescriptionConstSharedPtr host(new NiceMock()); + request_info.onUpstreamHostSelected(host); + EXPECT_EQ(host, request_info.upstreamHost()); + + EXPECT_FALSE(request_info.healthCheck()); + request_info.healthCheck(true); + EXPECT_TRUE(request_info.healthCheck()); + } - EXPECT_FALSE(request_info.responseCode().valid()); - request_info.response_code_ = 200; - ASSERT_TRUE(request_info.responseCode().valid()); - EXPECT_EQ(200, request_info.responseCode().value()); + { + RequestInfoImpl request_info; - EXPECT_EQ(nullptr, request_info.upstreamHost()); - Upstream::HostDescriptionConstSharedPtr host(new NiceMock()); - request_info.onUpstreamHostSelected(host); - EXPECT_EQ(host, request_info.upstreamHost()); + // If no value is set, these should be not valid + EXPECT_FALSE(request_info.protocol().valid()); + EXPECT_FALSE(request_info.requestReceivedDuration().valid()); + EXPECT_FALSE(request_info.responseReceivedDuration().valid()); - EXPECT_FALSE(request_info.healthCheck()); - request_info.healthCheck(true); - EXPECT_TRUE(request_info.healthCheck()); + request_info.protocol(Http::Protocol::Http10); + request_info.requestReceivedDuration(std::chrono::steady_clock::now()); + request_info.responseReceivedDuration(std::chrono::steady_clock::now()); + EXPECT_TRUE(request_info.protocol().valid()); + EXPECT_TRUE(request_info.requestReceivedDuration().valid()); + EXPECT_TRUE(request_info.responseReceivedDuration().valid()); + } } } // namespace diff --git a/test/common/filter/BUILD b/test/common/filter/BUILD index 713864c706f9..c77daa23424a 100644 --- a/test/common/filter/BUILD +++ b/test/common/filter/BUILD @@ -34,10 +34,12 @@ envoy_cc_test( "//source/common/stats:stats_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", + "//source/server/config/network:file_access_log_lib", "//test/common/upstream:utility_lib", 
"//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", "//test/mocks/upstream:host_mocks", "//test/mocks/upstream:upstream_mocks", ], diff --git a/test/common/filter/tcp_proxy_test.cc b/test/common/filter/tcp_proxy_test.cc index e0818fab6193..2bc0dfc32b2f 100644 --- a/test/common/filter/tcp_proxy_test.cc +++ b/test/common/filter/tcp_proxy_test.cc @@ -12,6 +12,7 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" #include "test/mocks/upstream/host.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" @@ -19,6 +20,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::MatchesRegex; using testing::NiceMock; using testing::Return; using testing::ReturnRef; @@ -36,10 +38,8 @@ TEST(TcpProxyConfigTest, NoRouteConfig) { )EOF"; Json::ObjectSharedPtr config = Json::Factory::loadFromString(json); - NiceMock cluster_manager; - EXPECT_THROW(TcpProxyConfig(*config, cluster_manager, - cluster_manager.thread_local_cluster_.cluster_.info_->stats_store_), - EnvoyException); + NiceMock factory_context; + EXPECT_THROW(TcpProxyConfig(*config, factory_context), EnvoyException); } TEST(TcpProxyConfigTest, NoCluster) { @@ -57,11 +57,9 @@ TEST(TcpProxyConfigTest, NoCluster) { )EOF"; Json::ObjectSharedPtr config = Json::Factory::loadFromString(json); - NiceMock cluster_manager; - EXPECT_CALL(cluster_manager, get("fake_cluster")).WillOnce(Return(nullptr)); - EXPECT_THROW(TcpProxyConfig(*config, cluster_manager, - cluster_manager.thread_local_cluster_.cluster_.info_->stats_store_), - EnvoyException); + NiceMock factory_context; + EXPECT_CALL(factory_context.cluster_manager_, get("fake_cluster")).WillOnce(Return(nullptr)); + EXPECT_THROW(TcpProxyConfig(*config, factory_context), EnvoyException); } TEST(TcpProxyConfigTest, BadTcpProxyConfig) { @@ -79,10 +77,8 
@@ TEST(TcpProxyConfigTest, BadTcpProxyConfig) { )EOF"; Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - NiceMock cluster_manager; - EXPECT_THROW(TcpProxyConfig(*json_config, cluster_manager, - cluster_manager.thread_local_cluster_.cluster_.info_->stats_store_), - Json::Exception); + NiceMock factory_context; + EXPECT_THROW(TcpProxyConfig(*json_config, factory_context), Json::Exception); } TEST(TcpProxyConfigTest, Routes) { @@ -145,10 +141,9 @@ TEST(TcpProxyConfigTest, Routes) { )EOF"; Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json); - NiceMock cm_; + NiceMock factory_context_; - TcpProxyConfig config_obj(*json_config, cm_, - cm_.thread_local_cluster_.cluster_.info_->stats_store_); + TcpProxyConfig config_obj(*json_config, factory_context_); { // hit route with destination_ip (10.10.10.10/32) @@ -338,38 +333,70 @@ TEST(TcpProxyConfigTest, EmptyRouteConfig) { )EOF"; Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json); - NiceMock cm_; + NiceMock factory_context_; - TcpProxyConfig config_obj(*json_config, cm_, - cm_.thread_local_cluster_.cluster_.info_->stats_store_); + TcpProxyConfig config_obj(*json_config, factory_context_); NiceMock connection; EXPECT_EQ(std::string(""), config_obj.getRouteFromEntries(connection)); } +TEST(TcpProxyConfigTest, AccessLogConfig) { + std::string json = R"EOF( + { + "stat_prefix": "name", + "route_config": { + "routes": [ + ] + }, + "access_log": [ + { + "path": "some_path", + "format": "the format specifier" + }, + { + "path": "another path" + } + ] + } + )EOF"; + + Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json); + NiceMock factory_context_; + + TcpProxyConfig config_obj(*json_config, factory_context_); + EXPECT_EQ(2, config_obj.accessLogs().size()); +} + class TcpProxyTest : public testing::Test { public: TcpProxyTest() { + ON_CALL(*factory_context_.access_log_manager_.file_, write(_)) + .WillByDefault(SaveArg<0>(&access_log_data_)); 
+ } + + void configure(const std::string& accessLogJson) { std::string json = R"EOF( - { + {{ "stat_prefix": "name", - "route_config": { + "route_config": {{ "routes": [ - { + {{ "cluster": "fake_cluster" - } + }} ] - } - } + }}, + "access_log": [ + {} + ] + }} )EOF"; - Json::ObjectSharedPtr config = Json::Factory::loadFromString(json); - config_.reset( - new TcpProxyConfig(*config, cluster_manager_, - cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_)); + Json::ObjectSharedPtr config = Json::Factory::loadFromString(fmt::format(json, accessLogJson)); + config_.reset(new TcpProxyConfig(*config, factory_context_)); } - - void setup(bool return_connection) { + void setup(bool return_connection, const std::string& accessLogJson) { + configure(accessLogJson); if (return_connection) { connect_timer_ = new NiceMock(&filter_callbacks_.connection_.dispatcher_); EXPECT_CALL(*connect_timer_, enableTimer(_)); @@ -378,18 +405,19 @@ class TcpProxyTest : public testing::Test { Upstream::MockHost::MockCreateConnectionData conn_info; conn_info.connection_ = upstream_connection_; conn_info.host_description_ = Upstream::makeTestHost( - cluster_manager_.thread_local_cluster_.cluster_.info_, "tcp://127.0.0.1:80"); - EXPECT_CALL(cluster_manager_, tcpConnForCluster_("fake_cluster", _)) + factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_, + "tcp://127.0.0.1:80"); + EXPECT_CALL(factory_context_.cluster_manager_, tcpConnForCluster_("fake_cluster", _)) .WillOnce(Return(conn_info)); EXPECT_CALL(*upstream_connection_, addReadFilter(_)) .WillOnce(SaveArg<0>(&upstream_read_filter_)); } else { Upstream::MockHost::MockCreateConnectionData conn_info; - EXPECT_CALL(cluster_manager_, tcpConnForCluster_("fake_cluster", _)) + EXPECT_CALL(factory_context_.cluster_manager_, tcpConnForCluster_("fake_cluster", _)) .WillOnce(Return(conn_info)); } - filter_.reset(new TcpProxy(config_, cluster_manager_)); + filter_.reset(new TcpProxy(config_, 
factory_context_.cluster_manager_)); filter_->initializeReadFilterCallbacks(filter_callbacks_); EXPECT_EQ(return_connection ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration, @@ -399,14 +427,16 @@ class TcpProxyTest : public testing::Test { EXPECT_EQ(&filter_callbacks_.connection_, filter_->downstreamConnection()); } + void setup(bool return_connection) { setup(return_connection, std::string()); } + TcpProxyConfigSharedPtr config_; NiceMock filter_callbacks_; - NiceMock cluster_manager_; + NiceMock factory_context_; NiceMock* upstream_connection_{}; Network::ReadFilterSharedPtr upstream_read_filter_; NiceMock* connect_timer_{}; std::unique_ptr filter_; - NiceMock runtime_; + std::string access_log_data_; }; TEST_F(TcpProxyTest, UpstreamDisconnect) { @@ -487,7 +517,12 @@ TEST_F(TcpProxyTest, DownstreamDisconnectLocal) { } TEST_F(TcpProxyTest, UpstreamConnectTimeout) { - setup(true); + setup(true, R"EOF( + { + "path": "unused", + "format": "%RESPONSE_FLAGS%" + } + )EOF"); Buffer::OwnedImpl buffer("hello"); EXPECT_CALL(*upstream_connection_, write(BufferEqual(&buffer))); @@ -496,25 +531,41 @@ TEST_F(TcpProxyTest, UpstreamConnectTimeout) { EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); connect_timer_->callback_(); - EXPECT_EQ(1U, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_ + EXPECT_EQ(1U, factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_cx_connect_timeout") .value()); + + filter_.reset(); + EXPECT_EQ(access_log_data_, "UF"); } TEST_F(TcpProxyTest, NoHost) { EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); - setup(false); + setup(false, R"EOF( + { + "path": "unused", + "format": "%RESPONSE_FLAGS%" + } + )EOF"); + filter_.reset(); + EXPECT_EQ(access_log_data_, "UH"); } TEST_F(TcpProxyTest, 
DisconnectBeforeData) { - filter_.reset(new TcpProxy(config_, cluster_manager_)); + configure(""); + filter_.reset(new TcpProxy(config_, factory_context_.cluster_manager_)); filter_->initializeReadFilterCallbacks(filter_callbacks_); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(TcpProxyTest, UpstreamConnectFailure) { - setup(true); + setup(true, R"EOF( + { + "path": "unused", + "format": "%RESPONSE_FLAGS%" + } + )EOF"); Buffer::OwnedImpl buffer("hello"); EXPECT_CALL(*upstream_connection_, write(BufferEqual(&buffer))); @@ -523,25 +574,71 @@ TEST_F(TcpProxyTest, UpstreamConnectFailure) { EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); EXPECT_CALL(*connect_timer_, disableTimer()); upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_EQ(1U, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_ + EXPECT_EQ(1U, factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_cx_connect_fail") .value()); + + filter_.reset(); + EXPECT_EQ(access_log_data_, "UF"); } TEST_F(TcpProxyTest, UpstreamConnectionLimit) { - cluster_manager_.thread_local_cluster_.cluster_.info_->resource_manager_.reset( - new Upstream::ResourceManagerImpl(runtime_, "fake_key", 0, 0, 0, 0)); + configure(R"EOF( + { + "path": "unused", + "format": "%RESPONSE_FLAGS%" + } + )EOF"); + factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->resource_manager_.reset( + new Upstream::ResourceManagerImpl(factory_context_.runtime_loader_, "fake_key", 0, 0, 0, 0)); // setup sets up expectation for tcpConnForCluster but this test is expected to NOT call that - filter_.reset(new TcpProxy(config_, cluster_manager_)); + filter_.reset(new TcpProxy(config_, factory_context_.cluster_manager_)); // The downstream connection closes if the proxy can't make an upstream connection. 
EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); filter_->initializeReadFilterCallbacks(filter_callbacks_); filter_->onNewConnection(); - EXPECT_EQ(1U, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_ + EXPECT_EQ(1U, factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_cx_overflow") .value()); + + filter_.reset(); + EXPECT_EQ(access_log_data_, "UO"); +} + +TEST_F(TcpProxyTest, AccessLogUpstreamHost) { + setup(true, R"EOF( + { + "path": "unused", + "format": "%UPSTREAM_HOST% %UPSTREAM_CLUSTER%" + } + )EOF"); + filter_.reset(); + EXPECT_EQ(access_log_data_, "127.0.0.1:80 fake_cluster"); +} + +TEST_F(TcpProxyTest, AccessLogBytesRxTxDuration) { + setup(true, R"EOF( + { + "path": "unused", + "format": "bytesreceived=%BYTES_RECEIVED% bytessent=%BYTES_SENT% datetime=%START_TIME% nonzeronum=%DURATION%" + } + )EOF"); + + upstream_connection_->raiseEvent(Network::ConnectionEvent::Connected); + Buffer::OwnedImpl buffer("a"); + filter_->onData(buffer); + Buffer::OwnedImpl response("bb"); + upstream_read_filter_->onData(response); + + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + filter_.reset(); + + EXPECT_THAT(access_log_data_, + MatchesRegex( + "bytesreceived=1 bytessent=2 datetime=[0-9-]+T[0-9:.]+Z nonzeronum=[1-9][0-9]*")); } class TcpProxyRoutingTest : public testing::Test { @@ -562,22 +659,20 @@ class TcpProxyRoutingTest : public testing::Test { )EOF"; Json::ObjectSharedPtr config = Json::Factory::loadFromString(json); - config_.reset( - new TcpProxyConfig(*config, cluster_manager_, - cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_)); + config_.reset(new TcpProxyConfig(*config, factory_context_)); } void setup() { EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(ReturnRef(connection_)); - filter_.reset(new TcpProxy(config_, cluster_manager_)); 
+ filter_.reset(new TcpProxy(config_, factory_context_.cluster_manager_)); filter_->initializeReadFilterCallbacks(filter_callbacks_); } TcpProxyConfigSharedPtr config_; NiceMock connection_; NiceMock filter_callbacks_; - NiceMock cluster_manager_; + NiceMock factory_context_; std::unique_ptr filter_; }; @@ -610,7 +705,7 @@ TEST_F(TcpProxyRoutingTest, RoutableConnection) { EXPECT_CALL(connection_, localAddress()).WillRepeatedly(ReturnRef(local_address)); // Expect filter to try to open a connection to specified cluster - EXPECT_CALL(cluster_manager_, tcpConnForCluster_("fake_cluster", _)); + EXPECT_CALL(factory_context_.cluster_manager_, tcpConnForCluster_("fake_cluster", _)); filter_->onNewConnection(); diff --git a/test/common/grpc/http1_bridge_filter_test.cc b/test/common/grpc/http1_bridge_filter_test.cc index 4e589cca0147..167f6316eafa 100644 --- a/test/common/grpc/http1_bridge_filter_test.cc +++ b/test/common/grpc/http1_bridge_filter_test.cc @@ -24,7 +24,7 @@ class GrpcHttp1BridgeFilterTest : public testing::Test { GrpcHttp1BridgeFilterTest() : filter_(cm_) { filter_.setDecoderFilterCallbacks(decoder_callbacks_); filter_.setEncoderFilterCallbacks(encoder_callbacks_); - ON_CALL(decoder_callbacks_.request_info_, protocol()).WillByDefault(ReturnPointee(&protocol_)); + ON_CALL(decoder_callbacks_.request_info_, protocol()).WillByDefault(ReturnRef(protocol_)); } ~GrpcHttp1BridgeFilterTest() { filter_.onDestroy(); } @@ -33,7 +33,7 @@ class GrpcHttp1BridgeFilterTest : public testing::Test { Http1BridgeFilter filter_; NiceMock decoder_callbacks_; NiceMock encoder_callbacks_; - Http::Protocol protocol_{Http::Protocol::Http11}; + Optional protocol_{Http::Protocol::Http11}; }; TEST_F(GrpcHttp1BridgeFilterTest, NoRoute) { diff --git a/test/common/json/config_schemas_test.cc b/test/common/json/config_schemas_test.cc index eb641c1b092e..e5e4aea31ff8 100644 --- a/test/common/json/config_schemas_test.cc +++ b/test/common/json/config_schemas_test.cc @@ -22,7 +22,7 @@ std::vector 
generateTestInputs() { std::string test_path = TestEnvironment::temporaryDirectory() + "/config_schemas_test"; auto file_list = TestUtility::listFiles(test_path, false); - EXPECT_EQ(19, file_list.size()); + EXPECT_EQ(21, file_list.size()); return file_list; } @@ -47,6 +47,8 @@ TEST_P(ConfigSchemasTest, CheckValidationExpectation) { schema = Schema::CLUSTER_SCHEMA; } else if (schema_name == "TOP_LEVEL_CONFIG_SCHEMA") { schema = Schema::TOP_LEVEL_CONFIG_SCHEMA; + } else if (schema_name == "ACCESS_LOG_SCHEMA") { + schema = Schema::ACCESS_LOG_SCHEMA; } else { FAIL() << fmt::format("Did not recognize schema name {}", schema_name); } diff --git a/test/common/json/config_schemas_test_data/test_access_log_schema.py b/test/common/json/config_schemas_test_data/test_access_log_schema.py new file mode 100644 index 000000000000..6b8e257e4be9 --- /dev/null +++ b/test/common/json/config_schemas_test_data/test_access_log_schema.py @@ -0,0 +1,127 @@ +from util import get_blob +from util import true, false + +ACCESS_LOG_BLOB = { + "access_log": [ + { + "filter": { + "type": "logical_and", + "filters": [ + { + "type": "not_healthcheck" + }, + { + "type": "runtime", + "key": "access_log.front_access_log" + } + ] + }, + "path": "/var/log/envoy/access.log" + }, + { + "filter": { + "type": "logical_or", + "filters": [ + { + "runtime_key": "access_log.access_error.status", + "type": "status_code", + "value": 500, + "op": ">=" + }, + { + "type": "status_code", + "value": 429, + "op": "=" + }, + { + "runtime_key": "access_log.access_error.duration", + "type": "duration", + "value": 1000, + "op": ">=" + }, + { + "type": "traceable_request" + } + ] + }, + "path": "/var/log/envoy/access_error.log" + } + ] +} + + +def test(writer): + for idx, item in enumerate(ACCESS_LOG_BLOB["access_log"]): + writer.write_test_file( + 'Valid_idx_' + str(idx), + schema='ACCESS_LOG_SCHEMA', + data=get_blob(item), + throws=False, + ) + + blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1] + 
blob['filter']['filters'][0]['op'] = '<' + writer.write_test_file( + 'FilterOperatorIsNotSupportedLessThan', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) + + blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1] + blob['filter']['filters'][0]['op'] = '<=' + writer.write_test_file( + 'FilterOperatorIsNotSupportedLessThanEqual', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) + + blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1] + blob['filter']['filters'][0]['op'] = '>' + writer.write_test_file( + 'FilterOperatorIsNotSupportedGreaterThan', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) + + blob = {"path": "/dev/null", "filter": {"type": "unknown"}} + writer.write_test_file( + 'FilterTypeIsNotSupported', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) + + blob = {"path": "/dev/null", "filter": {"type": "logical_or", "filters": []}} + writer.write_test_file( + 'LessThanTwoFiltersInListNoneLogicalOrThrows', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) + + blob = {"path": "/dev/null", "filter": {"type": "logical_and", "filters": []}} + writer.write_test_file( + 'LessThanTwoFiltersInListNoneLogicalAndThrows', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) + + blob = {"path": "/dev/null", "filter": {"type": "logical_or", "filters": [{"type": "not_healthcheck"}]}} + writer.write_test_file( + 'LessThanTwoFiltersInListOneLogicalOrThrows', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) + + blob = {"path": "/dev/null", "filter": {"type": "logical_and", "filters": [{"type": "not_healthcheck"}]}} + writer.write_test_file( + 'LessThanTwoFiltersInListOneLogicalAndThrows', + schema='ACCESS_LOG_SCHEMA', + data=blob, + throws=True, + ) diff --git a/test/common/json/config_schemas_test_data/test_http_conn_network_filter_schema.py b/test/common/json/config_schemas_test_data/test_http_conn_network_filter_schema.py index b866f5dfd3a1..62837bdb89ec 100644 --- 
a/test/common/json/config_schemas_test_data/test_http_conn_network_filter_schema.py +++ b/test/common/json/config_schemas_test_data/test_http_conn_network_filter_schema.py @@ -6,51 +6,7 @@ "stat_prefix": "router", "use_remote_address": true, "server_name": "envoy-123", - "access_log": [ - { - "filter": { - "type": "logical_and", - "filters": [ - { - "type": "not_healthcheck" - }, - { - "type": "runtime", - "key": "access_log.front_access_log" - } - ] - }, - "path": "/var/log/envoy/access.log" - }, - { - "filter": { - "type": "logical_or", - "filters": [ - { - "runtime_key": "access_log.access_error.status", - "type": "status_code", - "value": 500, - "op": ">=" - }, - { - "type": "status_code", - "value": 429, - "op": "=" - }, - { - "runtime_key": "access_log.access_error.duration", - "type": "duration", - "value": 1000, - "op": ">=" - }, - { - "type": "traceable_request" - } - ] - }, - "path": "/var/log/envoy/access_error.log" - } - ], + "access_log": [], "tracing": { "request_headers_for_tags": [ "x-source" @@ -84,75 +40,3 @@ def test(writer): data=get_blob(HTTP_CONN_NETWORK_FILTER_BLOB), throws=False, ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'][1]['filter']['filters'][0]['op'] = '<' - writer.write_test_file( - 'FilterOperatorIsNotSupportedLessThan', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'][1]['filter']['filters'][0]['op'] = '<=' - writer.write_test_file( - 'FilterOperatorIsNotSupportedLessThanEqual', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'][1]['filter']['filters'][0]['op'] = '>' - writer.write_test_file( - 'FilterOperatorIsNotSupportedGreaterThan', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'].append({"path": "/dev/null", 
"filter": {"type": "unknown"}}) - writer.write_test_file( - 'FilterTypeIsNotSupported', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'].append({"path": "/dev/null", "filter": {"type": "logical_or", "filters": []}}) - writer.write_test_file( - 'LessThanTwoFiltersInListNoneLogicalOrThrows', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'].append({"path": "/dev/null", "filter": {"type": "logical_and", "filters": []}}) - writer.write_test_file( - 'LessThanTwoFiltersInListNoneLogicalAndThrows', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'].append({"path": "/dev/null", "filter": {"type": "logical_or", "filters": [{"type": "not_healthcheck"}]}}) - writer.write_test_file( - 'LessThanTwoFiltersInListOneLogicalOrThrows', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) - - blob = get_blob(HTTP_CONN_NETWORK_FILTER_BLOB) - blob['access_log'].append({"path": "/dev/null", "filter": {"type": "logical_and", "filters": [{"type": "not_healthcheck"}]}}) - writer.write_test_file( - 'LessThanTwoFiltersInListOneLogicalAndThrows', - schema='HTTP_CONN_NETWORK_FILTER_SCHEMA', - data=blob, - throws=True, - ) diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 23cad768462d..b94d59af311d 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -87,6 +87,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/ratelimit:ratelimit_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:host_mocks", "//test/mocks/upstream:upstream_mocks", diff --git a/test/common/network/filter_manager_impl_test.cc 
b/test/common/network/filter_manager_impl_test.cc index a79596c6d6c3..2300f2722ce0 100644 --- a/test/common/network/filter_manager_impl_test.cc +++ b/test/common/network/filter_manager_impl_test.cc @@ -13,6 +13,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/host.h" #include "test/mocks/upstream/mocks.h" @@ -97,9 +98,7 @@ TEST_F(NetworkFilterManagerTest, All) { // This is a very important flow so make sure it works correctly in aggregate. TEST_F(NetworkFilterManagerTest, RateLimitAndTcpProxy) { InSequence s; - Stats::IsolatedStoreImpl stats_store; - NiceMock runtime; - NiceMock cm; + NiceMock factory_context; NiceMock connection; FilterManagerImpl manager(connection, *this); @@ -113,15 +112,17 @@ TEST_F(NetworkFilterManagerTest, RateLimitAndTcpProxy) { } )EOF"; - ON_CALL(runtime.snapshot_, featureEnabled("ratelimit.tcp_filter_enabled", 100)) + ON_CALL(factory_context.runtime_loader_.snapshot_, + featureEnabled("ratelimit.tcp_filter_enabled", 100)) .WillByDefault(Return(true)); - ON_CALL(runtime.snapshot_, featureEnabled("ratelimit.tcp_filter_enforcing", 100)) + ON_CALL(factory_context.runtime_loader_.snapshot_, + featureEnabled("ratelimit.tcp_filter_enforcing", 100)) .WillByDefault(Return(true)); Json::ObjectSharedPtr rl_config_loader = Json::Factory::loadFromString(rl_json); - RateLimit::TcpFilter::ConfigSharedPtr rl_config( - new RateLimit::TcpFilter::Config(*rl_config_loader, stats_store, runtime)); + RateLimit::TcpFilter::ConfigSharedPtr rl_config(new RateLimit::TcpFilter::Config( + *rl_config_loader, factory_context.scope_, factory_context.runtime_loader_)); RateLimit::MockClient* rl_client = new RateLimit::MockClient(); manager.addReadFilter(ReadFilterSharedPtr{ new RateLimit::TcpFilter::Instance(rl_config, RateLimit::ClientPtr{rl_client})}); @@ -141,8 +142,9 @@ 
TEST_F(NetworkFilterManagerTest, RateLimitAndTcpProxy) { Json::ObjectSharedPtr tcp_proxy_config_loader = Json::Factory::loadFromString(tcp_proxy_json); Envoy::Filter::TcpProxyConfigSharedPtr tcp_proxy_config( - new Envoy::Filter::TcpProxyConfig(*tcp_proxy_config_loader, cm, stats_store)); - manager.addReadFilter(ReadFilterSharedPtr{new Envoy::Filter::TcpProxy(tcp_proxy_config, cm)}); + new Envoy::Filter::TcpProxyConfig(*tcp_proxy_config_loader, factory_context)); + manager.addReadFilter(ReadFilterSharedPtr{ + new Envoy::Filter::TcpProxy(tcp_proxy_config, factory_context.cluster_manager_)}); RateLimit::RequestCallbacks* request_callbacks{}; EXPECT_CALL(*rl_client, limit(_, "foo", @@ -159,9 +161,10 @@ TEST_F(NetworkFilterManagerTest, RateLimitAndTcpProxy) { new NiceMock(); Upstream::MockHost::MockCreateConnectionData conn_info; conn_info.connection_ = upstream_connection; - conn_info.host_description_ = - Upstream::makeTestHost(cm.thread_local_cluster_.cluster_.info_, "tcp://127.0.0.1:80"); - EXPECT_CALL(cm, tcpConnForCluster_("fake_cluster", _)).WillOnce(Return(conn_info)); + conn_info.host_description_ = Upstream::makeTestHost( + factory_context.cluster_manager_.thread_local_cluster_.cluster_.info_, "tcp://127.0.0.1:80"); + EXPECT_CALL(factory_context.cluster_manager_, tcpConnForCluster_("fake_cluster", _)) + .WillOnce(Return(conn_info)); request_callbacks->complete(RateLimit::LimitStatus::OK); diff --git a/test/common/router/req_header_formatter_test.cc b/test/common/router/req_header_formatter_test.cc index 2af24f2f3f21..a0b90761318a 100644 --- a/test/common/router/req_header_formatter_test.cc +++ b/test/common/router/req_header_formatter_test.cc @@ -39,7 +39,8 @@ TEST(RequestHeaderFormatterTest, TestFormatWithClientIpVariable) { TEST(RequestHeaderFormatterTest, TestFormatWithProtocolVariable) { NiceMock request_info; - ON_CALL(request_info, protocol()).WillByDefault(Return(Envoy::Http::Protocol::Http11)); + Optional protocol = Envoy::Http::Protocol::Http11; + 
ON_CALL(request_info, protocol()).WillByDefault(ReturnRef(protocol)); const std::string variable = "PROTOCOL"; RequestHeaderFormatter requestHeaderFormatter(variable); const std::string formatted_string = requestHeaderFormatter.format(request_info); diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index d756971ca704..d067b4185b91 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -246,10 +246,10 @@ TEST(HttpConnManFinalizerImpl, OriginalAndLongPath) { {"x-forwarded-proto", "http"}}; NiceMock request_info; - Http::Protocol protocol = Http::Protocol::Http2; + Optional protocol = Http::Protocol::Http2; EXPECT_CALL(request_info, bytesReceived()).WillOnce(Return(10)); EXPECT_CALL(request_info, bytesSent()).WillOnce(Return(11)); - EXPECT_CALL(request_info, protocol()).WillOnce(Return(protocol)); + EXPECT_CALL(request_info, protocol()).WillOnce(ReturnRef(protocol)); Optional response_code; EXPECT_CALL(request_info, responseCode()).WillRepeatedly(ReturnRef(response_code)); @@ -314,9 +314,9 @@ TEST(HttpConnManFinalizerImpl, SpanOptionalHeaders) { {"x-forwarded-proto", "https"}}; NiceMock request_info; - Http::Protocol protocol = Http::Protocol::Http10; + Optional protocol = Http::Protocol::Http10; EXPECT_CALL(request_info, bytesReceived()).WillOnce(Return(10)); - EXPECT_CALL(request_info, protocol()).WillOnce(Return(protocol)); + EXPECT_CALL(request_info, protocol()).WillOnce(ReturnRef(protocol)); const std::string service_node = "i-453"; // Check that span is populated correctly. 
@@ -356,8 +356,8 @@ TEST(HttpConnManFinalizerImpl, SpanPopulatedFailureResponse) { request_headers.insertEnvoyDownstreamServiceCluster().value(std::string("downstream_cluster")); request_headers.insertClientTraceId().value(std::string("client_trace_id")); - Http::Protocol protocol = Http::Protocol::Http10; - EXPECT_CALL(request_info, protocol()).WillOnce(Return(protocol)); + Optional protocol = Http::Protocol::Http10; + EXPECT_CALL(request_info, protocol()).WillOnce(ReturnRef(protocol)); EXPECT_CALL(request_info, bytesReceived()).WillOnce(Return(10)); const std::string service_node = "i-453"; diff --git a/test/mocks/access_log/mocks.cc b/test/mocks/access_log/mocks.cc index 0875fc164aa5..e71d752ac15c 100644 --- a/test/mocks/access_log/mocks.cc +++ b/test/mocks/access_log/mocks.cc @@ -4,6 +4,7 @@ #include "gtest/gtest.h" using testing::Return; +using testing::ReturnRef; using testing::_; namespace Envoy { @@ -21,8 +22,8 @@ MockInstance::~MockInstance() {} MockRequestInfo::MockRequestInfo() { ON_CALL(*this, upstreamHost()).WillByDefault(Return(host_)); ON_CALL(*this, startTime()).WillByDefault(Return(start_time_)); - ON_CALL(*this, requestReceivedDuration()).WillByDefault(Return(request_received_duration_)); - ON_CALL(*this, responseReceivedDuration()).WillByDefault(Return(response_received_duration_)); + ON_CALL(*this, requestReceivedDuration()).WillByDefault(ReturnRef(request_received_duration_)); + ON_CALL(*this, responseReceivedDuration()).WillByDefault(ReturnRef(response_received_duration_)); } MockRequestInfo::~MockRequestInfo() {} diff --git a/test/mocks/access_log/mocks.h b/test/mocks/access_log/mocks.h index a02dd2762907..a275de79adb5 100644 --- a/test/mocks/access_log/mocks.h +++ b/test/mocks/access_log/mocks.h @@ -44,12 +44,12 @@ class MockRequestInfo : public RequestInfo { MOCK_METHOD1(setResponseFlag, void(ResponseFlag response_flag)); MOCK_METHOD1(onUpstreamHostSelected, void(Upstream::HostDescriptionConstSharedPtr host)); MOCK_CONST_METHOD0(startTime, 
SystemTime()); - MOCK_CONST_METHOD0(requestReceivedDuration, std::chrono::microseconds()); + MOCK_CONST_METHOD0(requestReceivedDuration, const Optional&()); MOCK_METHOD1(requestReceivedDuration, void(MonotonicTime time)); - MOCK_CONST_METHOD0(responseReceivedDuration, std::chrono::microseconds()); + MOCK_CONST_METHOD0(responseReceivedDuration, const Optional&()); MOCK_METHOD1(responseReceivedDuration, void(MonotonicTime time)); MOCK_CONST_METHOD0(bytesReceived, uint64_t()); - MOCK_CONST_METHOD0(protocol, Http::Protocol()); + MOCK_CONST_METHOD0(protocol, const Optional&()); MOCK_METHOD1(protocol, void(Http::Protocol protocol)); MOCK_CONST_METHOD0(responseCode, Optional&()); MOCK_CONST_METHOD0(bytesSent, uint64_t()); From c6256a7db1db3218ac5e27f4c2337560283cf195 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 9 Nov 2017 17:01:33 -0800 Subject: [PATCH 07/34] listeners: implement drain_type option (#2027) Signed-off-by: Matt Klein --- bazel/repositories.bzl | 2 +- docs/configuration/listeners/listeners.rst | 10 ++++- docs/intro/arch_overview/arch_overview.rst | 1 + docs/intro/arch_overview/draining.rst | 35 ++++++++++++++++++ docs/intro/arch_overview/health_checking.rst | 4 +- include/envoy/server/listener_manager.h | 3 +- source/common/config/lds_json.cc | 7 ++++ source/common/json/config_schemas.cc | 1 + source/exe/main_common.cc | 6 ++- source/server/config_validation/server.h | 4 +- source/server/drain_manager_impl.cc | 7 ++-- source/server/drain_manager_impl.h | 3 +- source/server/listener_manager_impl.cc | 7 ++-- source/server/listener_manager_impl.h | 2 +- test/mocks/server/mocks.h | 6 ++- test/server/drain_manager_impl_test.cc | 39 ++++++++++++++------ test/server/listener_manager_impl_test.cc | 37 ++++++++++++++++--- test/server/utility.h | 6 +++ 18 files changed, 146 insertions(+), 34 deletions(-) create mode 100644 docs/intro/arch_overview/draining.rst diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 4dcecfd76a04..d563f141a763 100644 
--- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -113,7 +113,7 @@ def envoy_api_deps(skip_targets): native.git_repository( name = "envoy_api", remote = REPO_LOCATIONS["data-plane-api"], - commit = "971fb1b70f419348a1ac2273508237b7ebd08cf5", + commit = "e355cdbe0f7d614a110dc12e9d01b3ce817a2e87", ) api_bind_targets = [ diff --git a/docs/configuration/listeners/listeners.rst b/docs/configuration/listeners/listeners.rst index 595112313335..b7f560464bd4 100644 --- a/docs/configuration/listeners/listeners.rst +++ b/docs/configuration/listeners/listeners.rst @@ -25,7 +25,8 @@ Each individual listener configuration has the following format: "bind_to_port": "...", "use_proxy_proto": "...", "use_original_dst": "...", - "per_connection_buffer_limit_bytes": "..." + "per_connection_buffer_limit_bytes": "...", + "drain_type": "..." } .. _config_listeners_name: @@ -79,6 +80,13 @@ per_connection_buffer_limit_bytes *(optional, integer)* Soft limit on size of the listener's new connection read and write buffers. If unspecified, an implementation defined default is applied (1MiB). +.. _config_listeners_drain_type: + +drain_type + *(optional, string)* The type of draining that the listener does. Allowed values include *default* + and *modify_only*. See the :ref:`draining ` architecture overview for + more information. + Statistics ---------- diff --git a/docs/intro/arch_overview/arch_overview.rst b/docs/intro/arch_overview/arch_overview.rst index 8011b1f5fd56..e147985e5c75 100644 --- a/docs/intro/arch_overview/arch_overview.rst +++ b/docs/intro/arch_overview/arch_overview.rst @@ -33,4 +33,5 @@ Architecture overview hot_restart dynamic_configuration init + draining scripting diff --git a/docs/intro/arch_overview/draining.rst b/docs/intro/arch_overview/draining.rst new file mode 100644 index 000000000000..a7ac2aa812fa --- /dev/null +++ b/docs/intro/arch_overview/draining.rst @@ -0,0 +1,35 @@ +.. 
_arch_overview_draining: + +Draining +======== + +Draining is the process by which Envoy attempts to gracefully shed connections in response to +various events. Draining occurs at the following times: + +* The server has been manually health check failed via the :ref:`healthcheck/fail + ` admin endpoint. See the :ref:`health check filter + ` architecture overview for more information. +* The server is being :ref:`hot restarted `. +* Individual listeners are being modified or removed via :ref:`LDS + `. + +Each :ref:`configured listener ` has a :ref:`drain_type +` setting which controls when draining takes place. The currently +supported values are: + +default + Envoy will drain listeners in response to all three cases above (admin drain, hot restart, and + LDS update/remove). This is the default setting. + +modify_only + Envoy will drain listeners only in response to the 2nd and 3rd cases above (hot restart and + LDS update/remove). This setting is useful if Envoy is hosting both ingress and egress listeners. + It may be desirable to set *modify_only* on egress listeners so they only drain during + modifications while relying on ingress listener draining to perform full server draining when + attempting to do a controlled shutdown. + +Note that although draining is a per-listener concept, it must be supported at the network filter +level. Currently the only filters that support graceful draining are +:ref:`HTTP connection manager `, +:ref:`Redis `, and +:ref:`Mongo `. diff --git a/docs/intro/arch_overview/health_checking.rst b/docs/intro/arch_overview/health_checking.rst index 83a517bcca7c..9527903fa665 100644 --- a/docs/intro/arch_overview/health_checking.rst +++ b/docs/intro/arch_overview/health_checking.rst @@ -27,13 +27,13 @@ Passive health checking Envoy also supports passive health checking via :ref:`outlier detection `. -.. _arch_overview_health_checking_filter: - Connection pool interactions ---------------------------- See :ref:`here ` for more information. 
+.. _arch_overview_health_checking_filter: + HTTP health checking filter --------------------------- diff --git a/include/envoy/server/listener_manager.h b/include/envoy/server/listener_manager.h index 44cffcf96454..52a3e30b6ac8 100644 --- a/include/envoy/server/listener_manager.h +++ b/include/envoy/server/listener_manager.h @@ -42,8 +42,9 @@ class ListenerComponentFactory { /** * @return DrainManagerPtr a new drain manager. + * @param drain_type supplies the type of draining to do for the owning listener. */ - virtual DrainManagerPtr createDrainManager() PURE; + virtual DrainManagerPtr createDrainManager(envoy::api::v2::Listener::DrainType drain_type) PURE; /** * @return uint64_t a listener tag usable for connection handler tracking. diff --git a/source/common/config/lds_json.cc b/source/common/config/lds_json.cc index aa8ab279e7c2..76860398bbea 100644 --- a/source/common/config/lds_json.cc +++ b/source/common/config/lds_json.cc @@ -46,6 +46,13 @@ void LdsJson::translateListener(const Json::Object& json_listener, UNREFERENCED_PARAMETER(status); } + const std::string drain_type = json_listener.getString("drain_type", "default"); + if (drain_type == "modify_only") { + listener.set_drain_type(envoy::api::v2::Listener_DrainType_MODIFY_ONLY); + } else { + ASSERT(drain_type == "default"); + } + JSON_UTIL_SET_BOOL(json_listener, *filter_chain, use_proxy_proto); JSON_UTIL_SET_BOOL(json_listener, listener, use_original_dst); JSON_UTIL_SET_BOOL(json_listener, *listener.mutable_deprecated_v1(), bind_to_port); diff --git a/source/common/json/config_schemas.cc b/source/common/json/config_schemas.cc index 28b1876ef13b..86d5db67189d 100644 --- a/source/common/json/config_schemas.cc +++ b/source/common/json/config_schemas.cc @@ -182,6 +182,7 @@ const std::string Json::Schema::LISTENER_SCHEMA(R"EOF( "type" : "array", "items": {"$ref" : "#/definitions/filters"} }, + "drain_type": {"type" : "string", "enum" : ["default", "modify_only"]}, "ssl_context" : {"$ref" : 
"#/definitions/ssl_context"}, "bind_to_port" : {"type": "boolean"}, "use_proxy_proto" : {"type" : "boolean"}, diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index 18ebbfee8619..e3ce501ebdaa 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -28,7 +28,11 @@ class ProdComponentFactory : public ComponentFactory { public: // Server::DrainManagerFactory DrainManagerPtr createDrainManager(Instance& server) override { - return DrainManagerPtr{new DrainManagerImpl(server)}; + return DrainManagerPtr{ + // The global drain manager only triggers on listener modification, which effectively is + // hot restart at the global level. The per-listener drain managers decide whether to + // include /healthcheck/fail status. + new DrainManagerImpl(server, envoy::api::v2::Listener_DrainType_MODIFY_ONLY)}; } Runtime::LoaderPtr createRuntime(Server::Instance& server, diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 666179220e53..cf3a8e3b6548 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -100,7 +100,9 @@ class ValidationInstance : Logger::Loggable, // validation mock.
return nullptr; } - DrainManagerPtr createDrainManager() override { return nullptr; } + DrainManagerPtr createDrainManager(envoy::api::v2::Listener::DrainType) override { + return nullptr; + } uint64_t nextListenerTag() override { return 0; } // Server::WorkerFactory diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index 4dfc46cac561..a616eb694660 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -14,16 +14,17 @@ namespace Envoy { namespace Server { -DrainManagerImpl::DrainManagerImpl(Instance& server) : server_(server) {} +DrainManagerImpl::DrainManagerImpl(Instance& server, envoy::api::v2::Listener::DrainType drain_type) + : server_(server), drain_type_(drain_type) {} bool DrainManagerImpl::drainClose() const { - // If we are actively HC failed, always drain close. + // If we are actively HC failed and the drain type is default, always drain close. // // TODO(mattklein123): In relation to x-envoy-immediate-health-check-fail, it would be better // if even in the case of server health check failure we had some period of drain ramp up. This // would allow the other side to fail health check for the host which will require some thread // jumps versus immediately start GOAWAY/connection thrashing. 
- if (server_.healthCheckFailed()) { + if (drain_type_ == envoy::api::v2::Listener_DrainType_DEFAULT && server_.healthCheckFailed()) { return true; } diff --git a/source/server/drain_manager_impl.h b/source/server/drain_manager_impl.h index da51606955a9..4182edb9e843 100644 --- a/source/server/drain_manager_impl.h +++ b/source/server/drain_manager_impl.h @@ -19,7 +19,7 @@ namespace Server { */ class DrainManagerImpl : Logger::Loggable, public DrainManager { public: - DrainManagerImpl(Instance& server); + DrainManagerImpl(Instance& server, envoy::api::v2::Listener::DrainType drain_type); // Server::DrainManager bool drainClose() const override; @@ -31,6 +31,7 @@ class DrainManagerImpl : Logger::Loggable, public DrainManager void drainSequenceTick(); Instance& server_; + const envoy::api::v2::Listener::DrainType drain_type_; Event::TimerPtr drain_tick_timer_; std::atomic drain_time_completed_{}; Event::TimerPtr parent_shutdown_timer_; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 49c5c3ecbcb3..788c9f893506 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -65,8 +65,9 @@ ProdListenerComponentFactory::createListenSocket(Network::Address::InstanceConst } } -DrainManagerPtr ProdListenerComponentFactory::createDrainManager() { - return DrainManagerPtr{new DrainManagerImpl(server_)}; +DrainManagerPtr +ProdListenerComponentFactory::createDrainManager(envoy::api::v2::Listener::DrainType drain_type) { + return DrainManagerPtr{new DrainManagerImpl(server_, drain_type)}; } ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, ListenerManagerImpl& parent, @@ -85,7 +86,7 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, ListenerManag PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), listener_tag_(parent_.factory_.nextListenerTag()), name_(name), workers_started_(workers_started), hash_(hash), - 
local_drain_manager_(parent.factory_.createDrainManager()) { + local_drain_manager_(parent.factory_.createDrainManager(config.drain_type())) { // TODO(htuch): Support multiple filter chains #1280, add constraint to ensure we have at least on // filter chain #1308. ASSERT(config.filter_chains().size() == 1); diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index ed00ab67eabd..9952e9cd102a 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -39,7 +39,7 @@ class ProdListenerComponentFactory : public ListenerComponentFactory, } Network::ListenSocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address, bool bind_to_port) override; - DrainManagerPtr createDrainManager() override; + DrainManagerPtr createDrainManager(envoy::api::v2::Listener::DrainType drain_type) override; uint64_t nextListenerTag() override { return next_listener_tag_++; } private: diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 0e1bcdcde58e..69d78eb97b9c 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -139,7 +139,9 @@ class MockListenerComponentFactory : public ListenerComponentFactory { MockListenerComponentFactory(); ~MockListenerComponentFactory(); - DrainManagerPtr createDrainManager() override { return DrainManagerPtr{createDrainManager_()}; } + DrainManagerPtr createDrainManager(envoy::api::v2::Listener::DrainType drain_type) override { + return DrainManagerPtr{createDrainManager_(drain_type)}; + } MOCK_METHOD2(createFilterFactoryList, std::vector( @@ -148,7 +150,7 @@ class MockListenerComponentFactory : public ListenerComponentFactory { MOCK_METHOD2(createListenSocket, Network::ListenSocketSharedPtr(Network::Address::InstanceConstSharedPtr address, bool bind_to_port)); - MOCK_METHOD0(createDrainManager_, DrainManager*()); + MOCK_METHOD1(createDrainManager_, DrainManager*(envoy::api::v2::Listener::DrainType drain_type)); 
MOCK_METHOD0(nextListenerTag, uint64_t()); std::shared_ptr socket_; diff --git a/test/server/drain_manager_impl_test.cc b/test/server/drain_manager_impl_test.cc index 284aff67025e..207e3d6debaf 100644 --- a/test/server/drain_manager_impl_test.cc +++ b/test/server/drain_manager_impl_test.cc @@ -15,30 +15,37 @@ using testing::_; namespace Envoy { namespace Server { -TEST(DrainManagerImplTest, All) { - InSequence s; +class DrainManagerImplTest : public testing::Test { +public: + DrainManagerImplTest() { + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(600))); + ON_CALL(server_.options_, parentShutdownTime()) + .WillByDefault(Return(std::chrono::seconds(900))); + } - NiceMock server; - ON_CALL(server.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(600))); - ON_CALL(server.options_, parentShutdownTime()).WillByDefault(Return(std::chrono::seconds(900))); - DrainManagerImpl drain_manager(server); + NiceMock server_; +}; + +TEST_F(DrainManagerImplTest, Default) { + InSequence s; + DrainManagerImpl drain_manager(server_, envoy::api::v2::Listener_DrainType_DEFAULT); // Test parent shutdown. - Event::MockTimer* shutdown_timer = new Event::MockTimer(&server.dispatcher_); + Event::MockTimer* shutdown_timer = new Event::MockTimer(&server_.dispatcher_); EXPECT_CALL(*shutdown_timer, enableTimer(std::chrono::milliseconds(900000))); drain_manager.startParentShutdownSequence(); - EXPECT_CALL(server.hot_restart_, terminateParent()); + EXPECT_CALL(server_.hot_restart_, terminateParent()); shutdown_timer->callback_(); // Verify basic drain close. - EXPECT_CALL(server, healthCheckFailed()).WillOnce(Return(false)); + EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(false)); EXPECT_FALSE(drain_manager.drainClose()); - EXPECT_CALL(server, healthCheckFailed()).WillOnce(Return(true)); + EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(true)); EXPECT_TRUE(drain_manager.drainClose()); // Test drain sequence. 
- Event::MockTimer* drain_timer = new Event::MockTimer(&server.dispatcher_); + Event::MockTimer* drain_timer = new Event::MockTimer(&server_.dispatcher_); EXPECT_CALL(*drain_timer, enableTimer(_)); ReadyWatcher drain_complete; drain_manager.startDrainSequence([&drain_complete]() -> void { drain_complete.ready(); }); @@ -53,9 +60,17 @@ TEST(DrainManagerImplTest, All) { drain_timer->callback_(); } - EXPECT_CALL(server, healthCheckFailed()).WillOnce(Return(false)); + EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(false)); EXPECT_TRUE(drain_manager.drainClose()); } +TEST_F(DrainManagerImplTest, ModifyOnly) { + InSequence s; + DrainManagerImpl drain_manager(server_, envoy::api::v2::Listener_DrainType_MODIFY_ONLY); + + EXPECT_CALL(server_, healthCheckFailed()).Times(0); + EXPECT_FALSE(drain_manager.drainClose()); +} + } // namespace Server } // namespace Envoy diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index a4663814d423..8d6b12e22e8f 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -48,9 +48,11 @@ class ListenerManagerImplTest : public testing::Test { * 3) Stores the factory context for later use. * 4) Creates a mock local drain manager for the listener. 
*/ - ListenerHandle* expectListenerCreate(bool need_init) { + ListenerHandle* expectListenerCreate( + bool need_init, + envoy::api::v2::Listener::DrainType drain_type = envoy::api::v2::Listener_DrainType_DEFAULT) { ListenerHandle* raw_listener = new ListenerHandle(); - EXPECT_CALL(listener_factory_, createDrainManager_()) + EXPECT_CALL(listener_factory_, createDrainManager_(drain_type)) .WillOnce(Return(raw_listener->drain_manager_)); EXPECT_CALL(listener_factory_, createFilterFactoryList(_, _)) .WillOnce(Invoke( @@ -250,6 +252,28 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, StatsScopeTest) { EXPECT_EQ(1UL, server_.stats_store_.counter("listener.127.0.0.1_1234.foo").value()); } +TEST_F(ListenerManagerImplTest, ModifyOnlyDrainType) { + InSequence s; + + // Add foo listener. + const std::string listener_foo_yaml = R"EOF( + name: "foo" + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + drain_type: MODIFY_ONLY + )EOF"; + + ListenerHandle* listener_foo = + expectListenerCreate(false, envoy::api::v2::Listener_DrainType_MODIFY_ONLY); + EXPECT_CALL(listener_factory_, createListenSocket(_, true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml))); + checkStats(1, 0, 0, 0, 1, 0); + + EXPECT_CALL(*listener_foo, onDestroy()); +} + TEST_F(ListenerManagerImplTest, AddListenerAddressNotMatching) { InSequence s; @@ -258,7 +282,8 @@ TEST_F(ListenerManagerImplTest, AddListenerAddressNotMatching) { { "name": "foo", "address": "tcp://127.0.0.1:1234", - "filters": [] + "filters": [], + "drain_type": "default" } )EOF"; @@ -272,11 +297,13 @@ TEST_F(ListenerManagerImplTest, AddListenerAddressNotMatching) { { "name": "foo", "address": "tcp://127.0.0.1:1235", - "filters": [] + "filters": [], + "drain_type": "modify_only" } )EOF"; - ListenerHandle* listener_foo_different_address = expectListenerCreate(false); + ListenerHandle* listener_foo_different_address = + expectListenerCreate(false, 
envoy::api::v2::Listener_DrainType_MODIFY_ONLY); EXPECT_CALL(*listener_foo_different_address, onDestroy()); EXPECT_THROW_WITH_MESSAGE( manager_->addOrUpdateListener(parseListenerFromJson(listener_foo_different_address_json)), diff --git a/test/server/utility.h b/test/server/utility.h index c37b988c72d9..9f9376e62cb1 100644 --- a/test/server/utility.h +++ b/test/server/utility.h @@ -14,6 +14,12 @@ inline envoy::api::v2::Listener parseListenerFromJson(const std::string& json_st return listener; } +inline envoy::api::v2::Listener parseListenerFromV2Yaml(const std::string& yaml) { + envoy::api::v2::Listener listener; + MessageUtil::loadFromYaml(yaml, listener); + return listener; +} + } // namespace } // namespace Server } // namespace Envoy From 491757009657be8c982b7db826e6dae859564bcb Mon Sep 17 00:00:00 2001 From: Gordon Syme Date: Fri, 10 Nov 2017 16:59:35 +0000 Subject: [PATCH 08/34] ci: use YAML references to define the build-image sha once (#2045) Signed-off-by: Gordon Syme --- .circleci/config.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f3207a2c2a97..c003b0b1976d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,11 +1,12 @@ -# TODO(mattklein123): See if there is a way to put build SHA into a variable. -# In the meantime, it's required to edit the SHA in this file as well as envoy_build_sha.sh. 
+references: + envoy-build-image: &envoy-build-image + envoyproxy/envoy-build:516cdcd55326648978f8db1fe3a774ec759f5a13 version: 2 jobs: release: docker: - - image: envoyproxy/envoy-build:516cdcd55326648978f8db1fe3a774ec759f5a13 + - image: *envoy-build-image resource_class: xlarge working_directory: /source steps: @@ -16,7 +17,7 @@ jobs: - run: ci/docker_tag.sh asan: docker: - - image: envoyproxy/envoy-build:516cdcd55326648978f8db1fe3a774ec759f5a13 + - image: *envoy-build-image resource_class: xlarge working_directory: /source steps: @@ -24,7 +25,7 @@ jobs: - run: ci/do_circle_ci.sh bazel.asan tsan: docker: - - image: envoyproxy/envoy-build:516cdcd55326648978f8db1fe3a774ec759f5a13 + - image: *envoy-build-image resource_class: xlarge working_directory: /source steps: @@ -32,7 +33,7 @@ jobs: - run: ci/do_circle_ci.sh bazel.tsan coverage: docker: - - image: envoyproxy/envoy-build:516cdcd55326648978f8db1fe3a774ec759f5a13 + - image: *envoy-build-image resource_class: xlarge working_directory: /source steps: @@ -43,7 +44,7 @@ jobs: path: /build/envoy/generated/coverage format: docker: - - image: envoyproxy/envoy-build:516cdcd55326648978f8db1fe3a774ec759f5a13 + - image: *envoy-build-image resource_class: small working_directory: /source steps: From 0d35346d68ab227d56f9e30f9b7b1be2aaf68a20 Mon Sep 17 00:00:00 2001 From: Shriram Rajagopalan Date: Fri, 10 Nov 2017 12:42:23 -0500 Subject: [PATCH 09/34] migrate buffer filter to v2 api (#2043) Signed-off-by: Shriram Rajagopalan --- source/common/config/BUILD | 1 + source/common/config/filter_json.cc | 22 ++++++++++++------ source/common/config/filter_json.h | 14 +++++++++-- source/common/protobuf/utility.h | 7 ++++++ source/server/config/http/BUILD | 3 ++- source/server/config/http/buffer.cc | 30 ++++++++++++++++++------ source/server/config/http/buffer.h | 14 +++++++++++ test/server/config/http/config_test.cc | 32 ++++++++++++++++++++++++-- 8 files changed, 104 insertions(+), 19 deletions(-) diff --git 
a/source/common/config/BUILD b/source/common/config/BUILD index 578a4044a0a5..47d515d5dfb8 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -89,6 +89,7 @@ envoy_cc_library( hdrs = ["filter_json.h"], external_deps = [ "envoy_filter_http_http_connection_manager", + "envoy_filter_http_buffer", "envoy_filter_http_fault", "envoy_filter_http_router", "envoy_filter_network_mongo_proxy", diff --git a/source/common/config/filter_json.cc b/source/common/config/filter_json.cc index 4bafa0d7c411..e299b1f1a0cf 100644 --- a/source/common/config/filter_json.cc +++ b/source/common/config/filter_json.cc @@ -231,12 +231,12 @@ void FilterJson::translateMongoProxy(const Json::Object& json_mongo_proxy, } } -void FilterJson::translateFaultFilter(const Json::Object& config, +void FilterJson::translateFaultFilter(const Json::Object& json_fault, envoy::api::v2::filter::http::HTTPFault& fault) { - config.validateSchema(Json::Schema::FAULT_HTTP_FILTER_SCHEMA); + json_fault.validateSchema(Json::Schema::FAULT_HTTP_FILTER_SCHEMA); - const Json::ObjectSharedPtr config_abort = config.getObject("abort", true); - const Json::ObjectSharedPtr config_delay = config.getObject("delay", true); + const Json::ObjectSharedPtr config_abort = json_fault.getObject("abort", true); + const Json::ObjectSharedPtr config_delay = json_fault.getObject("delay", true); if (!config_abort->empty()) { auto* abort_fault = fault.mutable_abort(); @@ -253,14 +253,14 @@ void FilterJson::translateFaultFilter(const Json::Object& config, JSON_UTIL_SET_DURATION_FROM_FIELD(*config_delay, *delay, fixed_delay, fixed_duration); } - for (const auto json_header_matcher : config.getObjectArray("headers", true)) { + for (const auto json_header_matcher : json_fault.getObjectArray("headers", true)) { auto* header_matcher = fault.mutable_headers()->Add(); RdsJson::translateHeaderMatcher(*json_header_matcher, *header_matcher); } - JSON_UTIL_SET_STRING(config, fault, upstream_cluster); + 
JSON_UTIL_SET_STRING(json_fault, fault, upstream_cluster); - for (auto json_downstream_node : config.getStringArray("downstream_nodes", true)) { + for (auto json_downstream_node : json_fault.getStringArray("downstream_nodes", true)) { auto* downstream_node = fault.mutable_downstream_nodes()->Add(); *downstream_node = json_downstream_node; } @@ -274,5 +274,13 @@ void FilterJson::translateRouter(const Json::Object& json_router, router.set_start_child_span(json_router.getBoolean("start_child_span", false)); } +void FilterJson::translateBufferFilter(const Json::Object& json_buffer, + envoy::api::v2::filter::http::Buffer& buffer) { + json_buffer.validateSchema(Json::Schema::BUFFER_HTTP_FILTER_SCHEMA); + + JSON_UTIL_SET_INTEGER(json_buffer, buffer, max_request_bytes); + JSON_UTIL_SET_DURATION_SECONDS(json_buffer, buffer, max_request_time); +} + } // namespace Config } // namespace Envoy diff --git a/source/common/config/filter_json.h b/source/common/config/filter_json.h index b96fd8c916de..7fb8acc56292 100644 --- a/source/common/config/filter_json.h +++ b/source/common/config/filter_json.h @@ -2,6 +2,7 @@ #include "envoy/json/json_object.h" +#include "api/filter/http/buffer.pb.h" #include "api/filter/http/fault.pb.h" #include "api/filter/http/http_connection_manager.pb.h" #include "api/filter/http/router.pb.h" @@ -51,11 +52,11 @@ class FilterJson { /** * Translate a v1 JSON Fault filter object to v2 envoy::api::v2::filter::http::HTTPFault. - * @param config source v1 JSON HTTP Fault Filter object. + * @param json_fault source v1 JSON HTTP Fault Filter object. * @param fault destination v2 * envoy::api::v2::filter::http::HTTPFault. 
*/ - static void translateFaultFilter(const Json::Object& config, + static void translateFaultFilter(const Json::Object& json_fault, envoy::api::v2::filter::http::HTTPFault& fault); /* @@ -65,6 +66,15 @@ class FilterJson { */ static void translateRouter(const Json::Object& json_router, envoy::api::v2::filter::http::Router& router); + + /** + * Translate a v1 JSON Buffer filter object to v2 envoy::api::v2::filter::http::Buffer. + * @param json_buffer source v1 JSON HTTP Buffer Filter object. + * @param buffer destination v2 + * envoy::api::v2::filter::http::Buffer. + */ + static void translateBufferFilter(const Json::Object& json_buffer, + envoy::api::v2::filter::http::Buffer& buffer); }; } // namespace Config diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index b55ae1697de8..020453034064 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -35,6 +35,13 @@ ? Protobuf::util::TimeUtil::DurationToMilliseconds((message).field_name()) \ : throw MissingFieldException(#field_name, (message))) +// Obtain the seconds value of a google.protobuf.Duration field if set. Otherwise, throw a +// MissingFieldException. +#define PROTOBUF_GET_SECONDS_REQUIRED(message, field_name) \ + ((message).has_##field_name() \ + ? 
Protobuf::util::TimeUtil::DurationToSeconds((message).field_name()) \ + : throw MissingFieldException(#field_name, (message))) + namespace Envoy { class MissingFieldException : public EnvoyException { diff --git a/source/server/config/http/BUILD b/source/server/config/http/BUILD index 5cd59f86509c..cabd33a79bb5 100644 --- a/source/server/config/http/BUILD +++ b/source/server/config/http/BUILD @@ -15,9 +15,10 @@ envoy_cc_library( deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", + "//source/common/config:filter_json_lib", "//source/common/config:well_known_names", "//source/common/http/filter:buffer_filter_lib", - "//source/common/json:config_schemas_lib", + "//source/common/protobuf:utility_lib", ], ) diff --git a/source/server/config/http/buffer.cc b/source/server/config/http/buffer.cc index 78facea48e6f..fe34c5e704c4 100644 --- a/source/server/config/http/buffer.cc +++ b/source/server/config/http/buffer.cc @@ -6,28 +6,44 @@ #include "envoy/registry/registry.h" +#include "common/config/filter_json.h" #include "common/http/filter/buffer_filter.h" -#include "common/json/config_schemas.h" +#include "common/protobuf/utility.h" namespace Envoy { namespace Server { namespace Configuration { -HttpFilterFactoryCb BufferFilterConfig::createFilterFactory(const Json::Object& json_config, - const std::string& stats_prefix, - FactoryContext& context) { - json_config.validateSchema(Json::Schema::BUFFER_HTTP_FILTER_SCHEMA); +HttpFilterFactoryCb +BufferFilterConfig::createBufferFilter(const envoy::api::v2::filter::http::Buffer& buffer, + const std::string& stats_prefix, FactoryContext& context) { + ASSERT(buffer.has_max_request_bytes()); + ASSERT(buffer.has_max_request_time()); Http::BufferFilterConfigConstSharedPtr config(new Http::BufferFilterConfig{ Http::BufferFilter::generateStats(stats_prefix, context.scope()), - static_cast(json_config.getInteger("max_request_bytes")), - 
std::chrono::seconds(json_config.getInteger("max_request_time_s"))}); + static_cast(buffer.max_request_bytes().value()), + std::chrono::seconds(PROTOBUF_GET_SECONDS_REQUIRED(buffer, max_request_time))}); return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamDecoderFilter( Http::StreamDecoderFilterSharedPtr{new Http::BufferFilter(config)}); }; } +HttpFilterFactoryCb BufferFilterConfig::createFilterFactory(const Json::Object& json_config, + const std::string& stats_prefix, + FactoryContext& context) { + envoy::api::v2::filter::http::Buffer buffer; + Config::FilterJson::translateBufferFilter(json_config, buffer); + return createBufferFilter(buffer, stats_prefix, context); +} + +HttpFilterFactoryCb BufferFilterConfig::createFilterFactoryFromProto( + const Protobuf::Message& config, const std::string& stats_prefix, FactoryContext& context) { + return createBufferFilter(dynamic_cast(config), + stats_prefix, context); +} + /** * Static registration for the buffer filter. @see RegisterFactory. 
*/ diff --git a/source/server/config/http/buffer.h b/source/server/config/http/buffer.h index 1250d7bf2011..51f8b1372e9b 100644 --- a/source/server/config/http/buffer.h +++ b/source/server/config/http/buffer.h @@ -6,6 +6,8 @@ #include "common/config/well_known_names.h" +#include "api/filter/http/buffer.pb.h" + namespace Envoy { namespace Server { namespace Configuration { @@ -18,7 +20,19 @@ class BufferFilterConfig : public NamedHttpFilterConfigFactory { HttpFilterFactoryCb createFilterFactory(const Json::Object& json_config, const std::string& stats_prefix, FactoryContext& context) override; + HttpFilterFactoryCb createFilterFactoryFromProto(const Protobuf::Message& config, + const std::string& stats_prefix, + FactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{new envoy::api::v2::filter::http::Buffer()}; + } + std::string name() override { return Config::HttpFilterNames::get().BUFFER; } + +private: + HttpFilterFactoryCb createBufferFilter(const envoy::api::v2::filter::http::Buffer& buffer, + const std::string& stats_prefix, FactoryContext& context); }; } // namespace Configuration diff --git a/test/server/config/http/config_test.cc b/test/server/config/http/config_test.cc index 3cfdc782ce96..673fc4c1b2d5 100644 --- a/test/server/config/http/config_test.cc +++ b/test/server/config/http/config_test.cc @@ -37,7 +37,7 @@ namespace Envoy { namespace Server { namespace Configuration { -TEST(HttpFilterConfigTest, BufferFilter) { +TEST(HttpFilterConfigTest, CorrectBufferFilterInJson) { std::string json_string = R"EOF( { "max_request_bytes" : 1028, @@ -54,7 +54,7 @@ TEST(HttpFilterConfigTest, BufferFilter) { cb(filter_callback); } -TEST(HttpFilterConfigTest, BadBufferFilterConfig) { +TEST(HttpFilterConfigTest, BadBufferFilterConfigInJson) { std::string json_string = R"EOF( { "max_request_bytes" : 1028, @@ -68,6 +68,34 @@ TEST(HttpFilterConfigTest, BadBufferFilterConfig) { 
EXPECT_THROW(factory.createFilterFactory(*json_config, "stats", context), Json::Exception); } +TEST(HttpFilterConfigTest, CorrectBufferFilterInProto) { + envoy::api::v2::filter::http::Buffer config{}; + config.mutable_max_request_bytes()->set_value(1028); + config.mutable_max_request_time()->set_seconds(2); + + NiceMock context; + BufferFilterConfig factory; + HttpFilterFactoryCb cb = factory.createFilterFactoryFromProto(config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)); + cb(filter_callback); +} + +TEST(HttpFilterConfigTest, BufferFilterWithEmptyProto) { + BufferFilterConfig factory; + envoy::api::v2::filter::http::Buffer config = + *dynamic_cast(factory.createEmptyConfigProto().get()); + + config.mutable_max_request_bytes()->set_value(1028); + config.mutable_max_request_time()->set_seconds(2); + + NiceMock context; + HttpFilterFactoryCb cb = factory.createFilterFactoryFromProto(config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)); + cb(filter_callback); +} + TEST(HttpFilterConfigTest, RateLimitFilter) { std::string json_string = R"EOF( { From e48d42237dfa672106bf573790437e1cdbb700e5 Mon Sep 17 00:00:00 2001 From: Shriram Rajagopalan Date: Fri, 10 Nov 2017 17:09:46 -0500 Subject: [PATCH 10/34] migrate health check filter to v2 api (#2047) Signed-off-by: Shriram Rajagopalan --- source/common/config/BUILD | 1 + source/common/config/filter_json.cc | 10 +++++++ source/common/config/filter_json.h | 10 +++++++ source/server/http/BUILD | 6 ++-- source/server/http/health_check.cc | 40 +++++++++++++++++++-------- source/server/http/health_check.h | 19 ++++++++++++- test/server/http/health_check_test.cc | 40 +++++++++++++++++++++++++-- 7 files changed, 109 insertions(+), 17 deletions(-) diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 47d515d5dfb8..669172558ffb 100644 
--- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -91,6 +91,7 @@ envoy_cc_library( "envoy_filter_http_http_connection_manager", "envoy_filter_http_buffer", "envoy_filter_http_fault", + "envoy_filter_http_health_check", "envoy_filter_http_router", "envoy_filter_network_mongo_proxy", ], diff --git a/source/common/config/filter_json.cc b/source/common/config/filter_json.cc index e299b1f1a0cf..07f5bcbf7ecf 100644 --- a/source/common/config/filter_json.cc +++ b/source/common/config/filter_json.cc @@ -266,6 +266,16 @@ void FilterJson::translateFaultFilter(const Json::Object& json_fault, } } +void FilterJson::translateHealthCheckFilter( + const Json::Object& json_health_check, + envoy::api::v2::filter::http::HealthCheck& health_check) { + json_health_check.validateSchema(Json::Schema::HEALTH_CHECK_HTTP_FILTER_SCHEMA); + + JSON_UTIL_SET_BOOL(json_health_check, health_check, pass_through_mode); + JSON_UTIL_SET_DURATION(json_health_check, health_check, cache_time); + JSON_UTIL_SET_STRING(json_health_check, health_check, endpoint); +} + void FilterJson::translateRouter(const Json::Object& json_router, envoy::api::v2::filter::http::Router& router) { json_router.validateSchema(Json::Schema::ROUTER_HTTP_FILTER_SCHEMA); diff --git a/source/common/config/filter_json.h b/source/common/config/filter_json.h index 7fb8acc56292..663c03b2f184 100644 --- a/source/common/config/filter_json.h +++ b/source/common/config/filter_json.h @@ -4,6 +4,7 @@ #include "api/filter/http/buffer.pb.h" #include "api/filter/http/fault.pb.h" +#include "api/filter/http/health_check.pb.h" #include "api/filter/http/http_connection_manager.pb.h" #include "api/filter/http/router.pb.h" #include "api/filter/network/mongo_proxy.pb.h" @@ -59,6 +60,15 @@ class FilterJson { static void translateFaultFilter(const Json::Object& json_fault, envoy::api::v2::filter::http::HTTPFault& fault); + /** + * Translate a v1 JSON Health Check filter object to v2 envoy::api::v2::filter::http::HealthCheck. 
+ * @param config source v1 JSON Health Check Filter object. + * @param health_check destination v2 + * envoy::api::v2::filter::http::HealthCheck. + */ + static void translateHealthCheckFilter(const Json::Object& config, + envoy::api::v2::filter::http::HealthCheck& health_check); + /* * Translate a v1 JSON Router object to v2 envoy::api::v2::filter::http::Router. * @param json_router source v1 JSON HTTP router object. diff --git a/source/server/http/BUILD b/source/server/http/BUILD index b01bfae09eff..b902a5c168fa 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -52,6 +52,7 @@ envoy_cc_library( name = "health_check_lib", srcs = ["health_check.cc"], hdrs = ["health_check.h"], + external_deps = ["envoy_filter_http_health_check"], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", @@ -60,12 +61,13 @@ envoy_cc_library( "//include/envoy/http:header_map_interface", "//source/common/common:assert_lib", "//source/common/common:enum_to_int", + "//source/common/config:filter_json_lib", + "//source/common/config:well_known_names", "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", - "//source/common/json:config_schemas_lib", - "//source/common/json:json_loader_lib", + "//source/common/protobuf:utility_lib", "//source/server/config/network:http_connection_manager_lib", ], ) diff --git a/source/server/http/health_check.cc b/source/server/http/health_check.cc index 8c2df6b18484..805029e60861 100644 --- a/source/server/http/health_check.cc +++ b/source/server/http/health_check.cc @@ -10,12 +10,12 @@ #include "common/common/assert.h" #include "common/common/enum_to_int.h" +#include "common/config/filter_json.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/utility.h" -#include "common/json/config_schemas.h" -#include 
"common/json/json_loader.h" +#include "common/protobuf/utility.h" #include "server/config/network/http_connection_manager.h" @@ -23,17 +23,15 @@ namespace Envoy { namespace Server { namespace Configuration { -/** - * Config registration for the health check filter. @see NamedHttpFilterConfigFactory. - */ -HttpFilterFactoryCb HealthCheckFilterConfig::createFilterFactory(const Json::Object& config, - const std::string&, - FactoryContext& context) { - config.validateSchema(Json::Schema::HEALTH_CHECK_HTTP_FILTER_SCHEMA); +HttpFilterFactoryCb HealthCheckFilterConfig::createHealthCheckFilter( + const envoy::api::v2::filter::http::HealthCheck& health_check, const std::string&, + FactoryContext& context) { + ASSERT(health_check.has_pass_through_mode()); + ASSERT(!health_check.endpoint().empty()); - bool pass_through_mode = config.getBoolean("pass_through_mode"); - int64_t cache_time_ms = config.getInteger("cache_time_ms", 0); - std::string hc_endpoint = config.getString("endpoint"); + bool pass_through_mode = health_check.pass_through_mode().value(); + int64_t cache_time_ms = PROTOBUF_GET_MS_OR_DEFAULT(health_check, cache_time, 0); + std::string hc_endpoint = health_check.endpoint(); if (!pass_through_mode && cache_time_ms) { throw EnvoyException("cache_time_ms must not be set when path_through_mode is disabled"); @@ -52,6 +50,24 @@ HttpFilterFactoryCb HealthCheckFilterConfig::createFilterFactory(const Json::Obj }; } +/** + * Config registration for the health check filter. @see NamedHttpFilterConfigFactory. 
+ */ +HttpFilterFactoryCb HealthCheckFilterConfig::createFilterFactory(const Json::Object& json_config, + const std::string& stats_prefix, + FactoryContext& context) { + envoy::api::v2::filter::http::HealthCheck health_check; + Config::FilterJson::translateHealthCheckFilter(json_config, health_check); + return createHealthCheckFilter(health_check, stats_prefix, context); +} + +HttpFilterFactoryCb HealthCheckFilterConfig::createFilterFactoryFromProto( + const Protobuf::Message& config, const std::string& stats_prefix, FactoryContext& context) { + return createHealthCheckFilter( + dynamic_cast(config), stats_prefix, + context); +} + /** * Static registration for the health check filter. @see RegisterFactory. */ diff --git a/source/server/http/health_check.h b/source/server/http/health_check.h index d36e5ea097dd..4214d9aceb83 100644 --- a/source/server/http/health_check.h +++ b/source/server/http/health_check.h @@ -9,6 +9,10 @@ #include "envoy/http/filter.h" #include "envoy/server/filter_config.h" +#include "common/config/well_known_names.h" + +#include "api/filter/http/health_check.pb.h" + namespace Envoy { namespace Server { namespace Configuration { @@ -17,7 +21,20 @@ class HealthCheckFilterConfig : public NamedHttpFilterConfigFactory { public: HttpFilterFactoryCb createFilterFactory(const Json::Object& config, const std::string&, FactoryContext& context) override; - std::string name() override { return "envoy.health_check"; } + HttpFilterFactoryCb createFilterFactoryFromProto(const Protobuf::Message& config, + const std::string& stats_prefix, + FactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{new envoy::api::v2::filter::http::HealthCheck()}; + } + + std::string name() override { return Config::HttpFilterNames::get().HEALTH_CHECK; } + +private: + HttpFilterFactoryCb + createHealthCheckFilter(const envoy::api::v2::filter::http::HealthCheck& health_check, + const std::string& 
stats_prefix, FactoryContext& context); }; } // namespace Configuration diff --git a/test/server/http/health_check_test.cc b/test/server/http/health_check_test.cc index 07c1a2acb6e4..ba5c86d5f42e 100644 --- a/test/server/http/health_check_test.cc +++ b/test/server/http/health_check_test.cc @@ -208,7 +208,7 @@ TEST_F(HealthCheckFilterCachingTest, NotHcRequest) { filter_->decodeHeaders(request_headers_no_hc_, true)); } -TEST(HealthCheckFilterConfig, failsWhenNotPassThroughButTimeoutSet) { +TEST(HealthCheckFilterConfig, failsWhenNotPassThroughButTimeoutSetJson) { Server::Configuration::HealthCheckFilterConfig healthCheckFilterConfig; Json::ObjectSharedPtr config = Json::Factory::loadFromString( "{\"pass_through_mode\":false, \"cache_time_ms\":234, \"endpoint\":\"foo\"}"); @@ -218,7 +218,7 @@ TEST(HealthCheckFilterConfig, failsWhenNotPassThroughButTimeoutSet) { EnvoyException); } -TEST(HealthCheckFilterConfig, notFailingWhenNotPassThroughAndTimeoutNotSet) { +TEST(HealthCheckFilterConfig, notFailingWhenNotPassThroughAndTimeoutNotSetJson) { Server::Configuration::HealthCheckFilterConfig healthCheckFilterConfig; Json::ObjectSharedPtr config = Json::Factory::loadFromString("{\"pass_through_mode\":false, \"endpoint\":\"foo\"}"); @@ -226,4 +226,40 @@ TEST(HealthCheckFilterConfig, notFailingWhenNotPassThroughAndTimeoutNotSet) { healthCheckFilterConfig.createFilterFactory(*config, "dummy_stats_prefix", context); } + +TEST(HealthCheckFilterConfig, failsWhenNotPassThroughButTimeoutSetProto) { + Server::Configuration::HealthCheckFilterConfig healthCheckFilterConfig; + envoy::api::v2::filter::http::HealthCheck config{}; + NiceMock context; + + config.mutable_pass_through_mode()->set_value(false); + config.set_endpoint("foo"); + config.mutable_cache_time()->set_seconds(10); + + EXPECT_THROW( + healthCheckFilterConfig.createFilterFactoryFromProto(config, "dummy_stats_prefix", context), + EnvoyException); +} + +TEST(HealthCheckFilterConfig, 
notFailingWhenNotPassThroughAndTimeoutNotSetProto) { + Server::Configuration::HealthCheckFilterConfig healthCheckFilterConfig; + envoy::api::v2::filter::http::HealthCheck config{}; + NiceMock context; + + config.mutable_pass_through_mode()->set_value(false); + config.set_endpoint("foo"); + healthCheckFilterConfig.createFilterFactoryFromProto(config, "dummy_stats_prefix", context); +} + +TEST(HealthCheckFilterConfig, HealthCheckFilterWithEmptyProto) { + Server::Configuration::HealthCheckFilterConfig healthCheckFilterConfig; + NiceMock context; + envoy::api::v2::filter::http::HealthCheck config = + *dynamic_cast( + healthCheckFilterConfig.createEmptyConfigProto().get()); + + config.mutable_pass_through_mode()->set_value(false); + config.set_endpoint("foo"); + healthCheckFilterConfig.createFilterFactoryFromProto(config, "dummy_stats_prefix", context); +} } // namespace Envoy From 0a3ec920e99b4d0450d7cf7dcf40518bbaca7a54 Mon Sep 17 00:00:00 2001 From: jmillikin-stripe Date: Fri, 10 Nov 2017 14:15:28 -0800 Subject: [PATCH 11/34] Move all repository locations to `repository_locations.bzl`. 
(#2042) Fixes https://github.com/envoyproxy/envoy/issues/2041 Signed-off-by: John Millikin --- bazel/external/jinja.BUILD | 6 + bazel/external/markupsafe.BUILD | 5 + bazel/repositories.bzl | 502 ++++++++++++++------------------ bazel/repository_locations.bzl | 85 +++++- 4 files changed, 316 insertions(+), 282 deletions(-) create mode 100644 bazel/external/jinja.BUILD create mode 100644 bazel/external/markupsafe.BUILD diff --git a/bazel/external/jinja.BUILD b/bazel/external/jinja.BUILD new file mode 100644 index 000000000000..285a1aa988cb --- /dev/null +++ b/bazel/external/jinja.BUILD @@ -0,0 +1,6 @@ +py_library( + name = "jinja2", + srcs = glob(["jinja2/**/*.py"]), + visibility = ["//visibility:public"], + deps = ["@com_github_pallets_markupsafe//:markupsafe"], +) diff --git a/bazel/external/markupsafe.BUILD b/bazel/external/markupsafe.BUILD new file mode 100644 index 000000000000..76e9b3ab9646 --- /dev/null +++ b/bazel/external/markupsafe.BUILD @@ -0,0 +1,5 @@ +py_library( + name = "markupsafe", + srcs = glob(["markupsafe/**/*.py"]), + visibility = ["//visibility:public"], +) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index d563f141a763..9bfdadad67ea 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,9 +1,65 @@ +load( + "@bazel_tools//tools/build_defs/repo:git.bzl", + "git_repository", + "new_git_repository", +) load(":genrule_repository.bzl", "genrule_repository") load(":patched_http_archive.bzl", "patched_http_archive") -load(":repository_locations.bzl", "REPO_LOCATIONS") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") load(":target_recipes.bzl", "TARGET_RECIPES") -def _repository_impl(ctxt): +def _repository_impl(name, **kwargs): + # `existing_rule_keys` contains the names of repositories that have already + # been defined in the Bazel workspace. By skipping repos with existing keys, + # users can override dependency versions by using standard Bazel repository + # rules in their WORKSPACE files. 
+ existing_rule_keys = native.existing_rules().keys() + if name in existing_rule_keys: + # This repository has already been defined, probably because the user + # wants to override the version. Do nothing. + return + + location = REPOSITORY_LOCATIONS[name] + + # Git tags are mutable. We want to depend on commit IDs instead. Give the + # user a useful error if they accidentally specify a tag. + if "tag" in location: + fail( + "Refusing to depend on Git tag %r for external dependency %r: use 'commit' instead." + % (location["tag"], name)) + + if "commit" in location: + # Git repository at given commit ID. Add a BUILD file if requested. + if "build_file" in kwargs: + new_git_repository( + name = name, + remote = location["remote"], + commit = location["commit"], + **kwargs) + else: + git_repository( + name = name, + remote = location["remote"], + commit = location["commit"], + **kwargs) + else: # HTTP + # HTTP tarball at a given URL. Add a BUILD file if requested. + if "build_file" in kwargs: + native.new_http_archive( + name = name, + urls = location["urls"], + sha256 = location["sha256"], + strip_prefix = location["strip_prefix"], + **kwargs) + else: + native.http_archive( + name = name, + urls = location["urls"], + sha256 = location["sha256"], + strip_prefix = location["strip_prefix"], + **kwargs) + +def _build_recipe_repository_impl(ctxt): # Setup the build directory with links to the relevant files. 
ctxt.symlink(Label("//bazel:repositories.sh"), "repositories.sh") ctxt.symlink(Label("//ci/build_container:build_and_install_deps.sh"), @@ -33,88 +89,46 @@ def _repository_impl(ctxt): # This error message doesn't appear to the user :( https://github.com/bazelbuild/bazel/issues/3683 fail("External dep build failed") -def py_jinja2_dep(): - BUILD = """ -py_library( - name = "jinja2", - srcs = glob(["jinja2/**/*.py"]), - visibility = ["//visibility:public"], - deps = ["@markupsafe_git//:markupsafe"], -) -""" - native.new_git_repository( - name = "jinja2_git", - remote = REPO_LOCATIONS["jinja2"], - tag = "2.9.6", - build_file_content = BUILD, - ) - -def py_markupsafe_dep(): - BUILD = """ -py_library( - name = "markupsafe", - srcs = glob(["markupsafe/**/*.py"]), - visibility = ["//visibility:public"], -) -""" - native.new_git_repository( - name = "markupsafe_git", - remote = REPO_LOCATIONS["markupsafe"], - tag = "1.0", - build_file_content = BUILD, - ) - # Python dependencies. If these become non-trivial, we might be better off using a virtualenv to # wrap them, but for now we can treat them as first-class Bazel. 
-def python_deps(skip_targets): - if 'markupsafe' not in skip_targets: - py_markupsafe_dep() - native.bind( - name = "markupsafe", - actual = "@markupsafe_git//:markupsafe", - ) - if 'jinja2' not in skip_targets: - py_jinja2_dep() - native.bind( - name = "jinja2", - actual = "@jinja2_git//:jinja2", - ) - -def cc_grpc_httpjson_transcoding_dep(): - native.git_repository( - name = "grpc_httpjson_transcoding", - remote = REPO_LOCATIONS["grpc_transcoding"], - commit = "e4f58aa07b9002befa493a0a82e10f2e98b51fc6", +def _python_deps(): + _repository_impl( + name = "com_github_pallets_markupsafe", + build_file = "@envoy//bazel/external:markupsafe.BUILD", + ) + native.bind( + name = "markupsafe", + actual = "@com_github_pallets_markupsafe//:markupsafe", + ) + _repository_impl( + name = "com_github_pallets_jinja", + build_file = "@envoy//bazel/external:jinja.BUILD", + ) + native.bind( + name = "jinja2", + actual = "@com_github_pallets_jinja//:jinja2", ) # Bazel native C++ dependencies. For the depedencies that doesn't provide autoconf/automake builds. 
-def cc_deps(skip_targets): - if 'grpc-httpjson-transcoding' not in skip_targets: - cc_grpc_httpjson_transcoding_dep() - native.bind( - name = "path_matcher", - actual = "@grpc_httpjson_transcoding//src:path_matcher", - ) - native.bind( - name = "grpc_transcoding", - actual = "@grpc_httpjson_transcoding//src:transcoding", - ) +def _cc_deps(): + _repository_impl("grpc_httpjson_transcoding") + native.bind( + name = "path_matcher", + actual = "@grpc_httpjson_transcoding//src:path_matcher", + ) + native.bind( + name = "grpc_transcoding", + actual = "@grpc_httpjson_transcoding//src:transcoding", + ) -def go_deps(skip_targets): - if 'io_bazel_rules_go' not in skip_targets: - native.git_repository( - name = "io_bazel_rules_go", - remote = "https://github.com/bazelbuild/rules_go.git", - commit = "4374be38e9a75ff5957c3922adb155d32086fe14", - ) +def _go_deps(skip_targets): + # Keep the skip_targets check around until Istio Proxy has stopped using + # it to exclude the Go rules. + if "io_bazel_rules_go" not in skip_targets: + _repository_impl("io_bazel_rules_go") -def envoy_api_deps(skip_targets): - if 'envoy_api' not in skip_targets: - native.git_repository( - name = "envoy_api", - remote = REPO_LOCATIONS["data-plane-api"], - commit = "e355cdbe0f7d614a110dc12e9d01b3ce817a2e87", - ) +def _envoy_api_deps(): + _repository_impl("envoy_api") api_bind_targets = [ "address", @@ -181,10 +195,9 @@ def envoy_api_deps(skip_targets): actual = "@googleapis//:http_api_protos_lib", ) -def envoy_dependencies(path = "@envoy_deps//", skip_com_google_protobuf = False, skip_targets = [], - repository = ""): +def envoy_dependencies(path = "@envoy_deps//", skip_targets = []): envoy_repository = repository_rule( - implementation = _repository_impl, + implementation = _build_recipe_repository_impl, environ = [ "CC", "CXX", @@ -209,41 +222,6 @@ def envoy_dependencies(path = "@envoy_deps//", skip_com_google_protobuf = False, name = "envoy_deps", recipes = recipes.to_list(), ) - - # 
`existing_rule_keys` contains the names of repositories that have already - # been defined in the Bazel workspace. By skipping repos with existing keys, - # users can override dependency versions by using standard Bazel repository - # rules in their WORKSPACE files. - # - # The long repo names (`com_github_fmtlib_fmt` instead of `fmtlib`) are - # semi-standard in the Bazel community, intended to avoid both duplicate - # dependencies and name conflicts. - existing_rule_keys = native.existing_rules().keys() - if not ("backward" in skip_targets or "com_github_bombela_backward" in existing_rule_keys): - com_github_bombela_backward(repository) - if not ("xxhash" in skip_targets or "com_github_cyan4973_xxhash" in existing_rule_keys): - com_github_cyan4973_xxhash(repository) - if not ("tclap" in skip_targets or "com_github_eile_tclap" in existing_rule_keys): - com_github_eile_tclap(repository) - if not ("fmtlib" in skip_targets or "com_github_fmtlib_fmt" in existing_rule_keys): - com_github_fmtlib_fmt(repository) - if not ("spdlog" in skip_targets or "com_github_gabime_spdlog" in existing_rule_keys): - com_github_gabime_spdlog(repository) - if not ("gcovr" in skip_targets or "com_github_gcovr_gcovr" in existing_rule_keys): - com_github_gcovr_gcovr(repository) - if not ("lightstep" in skip_targets or "com_github_lightstep_lightstep_tracer_cpp" in existing_rule_keys): - com_github_lightstep_lightstep_tracer_cpp(repository) - if not ("http_parser" in skip_targets or "com_github_nodejs_http_parser" in existing_rule_keys): - com_github_nodejs_http_parser(repository) - if not ("rapidjson" in skip_targets or "com_github_tencent_rapidjson" in existing_rule_keys): - com_github_tencent_rapidjson(repository) - if not ("googletest" in skip_targets or "com_google_googletest" in existing_rule_keys): - com_google_googletest() - if not (skip_com_google_protobuf or "com_google_protobuf" in existing_rule_keys): - com_google_protobuf() - if not ("subpar" in existing_rule_keys): - subpar() - 
for t in TARGET_RECIPES: if t not in skip_targets: native.bind( @@ -251,176 +229,148 @@ def envoy_dependencies(path = "@envoy_deps//", skip_com_google_protobuf = False, actual = path + ":" + t, ) - python_deps(skip_targets) - cc_deps(skip_targets) - go_deps(skip_targets) - envoy_api_deps(skip_targets) + # The long repo names (`com_github_fmtlib_fmt` instead of `fmtlib`) are + # semi-standard in the Bazel community, intended to avoid both duplicate + # dependencies and name conflicts. + _com_github_bombela_backward() + _com_github_cyan4973_xxhash() + _com_github_eile_tclap() + _com_github_fmtlib_fmt() + _com_github_gabime_spdlog() + _com_github_gcovr_gcovr() + _com_github_lightstep_lightstep_tracer_cpp() + _com_github_nodejs_http_parser() + _com_github_tencent_rapidjson() + _com_google_googletest() + _com_google_protobuf() -def com_github_bombela_backward(repository = ""): - native.new_git_repository( - name = "com_github_bombela_backward", - remote = "https://github.com/bombela/backward-cpp", - commit = "cd1c4bd9e48afe812a0e996d335298c455afcd92", # v1.3 - build_file = repository + "//bazel/external:backward.BUILD", - ) - native.bind( - name = "backward", - actual = "@com_github_bombela_backward//:backward", - ) + # Used for bundling gcovr into a relocatable .par file. 
+ _repository_impl("subpar") -def com_github_cyan4973_xxhash(repository = ""): - native.new_git_repository( - name = "com_github_cyan4973_xxhash", - remote = "https://github.com/Cyan4973/xxHash", - commit = "50a564c33c36b3f0c83f027dd21c25cba2967c72", # v0.6.3 - build_file = repository + "//bazel/external:xxhash.BUILD", - ) - native.bind( - name = "xxhash", - actual = "@com_github_cyan4973_xxhash//:xxhash", - ) + _python_deps() + _cc_deps() + _go_deps(skip_targets) + _envoy_api_deps() -def com_github_eile_tclap(repository = ""): - native.new_git_repository( - name = "com_github_eile_tclap", - remote = "https://github.com/eile/tclap", - commit = "3627d9402e529770df9b0edf2aa8c0e0d6c6bb41", # tclap-1-2-1-release-final - build_file = repository + "//bazel/external:tclap.BUILD", - ) - native.bind( - name = "tclap", - actual = "@com_github_eile_tclap//:tclap", - ) +def _com_github_bombela_backward(): + _repository_impl( + name = "com_github_bombela_backward", + build_file = "@envoy//bazel/external:backward.BUILD", + ) + native.bind( + name = "backward", + actual = "@com_github_bombela_backward//:backward", + ) -def com_github_fmtlib_fmt(repository = ""): - native.new_http_archive( - name = "com_github_fmtlib_fmt", - urls = [ - "https://github.com/fmtlib/fmt/releases/download/4.0.0/fmt-4.0.0.zip", - ], - sha256 = "10a9f184d4d66f135093a08396d3b0a0ebe8d97b79f8b3ddb8559f75fe4fcbc3", - strip_prefix = "fmt-4.0.0", - build_file = repository + "//bazel/external:fmtlib.BUILD", - ) - native.bind( - name="fmtlib", - actual="@com_github_fmtlib_fmt//:fmtlib", - ) +def _com_github_cyan4973_xxhash(): + _repository_impl( + name = "com_github_cyan4973_xxhash", + build_file = "@envoy//bazel/external:xxhash.BUILD", + ) + native.bind( + name = "xxhash", + actual = "@com_github_cyan4973_xxhash//:xxhash", + ) + +def _com_github_eile_tclap(): + _repository_impl( + name = "com_github_eile_tclap", + build_file = "@envoy//bazel/external:tclap.BUILD", + ) + native.bind( + 
name = "tclap", + actual = "@com_github_eile_tclap//:tclap", + ) -def com_github_gabime_spdlog(repository = ""): - native.new_http_archive( - name = "com_github_gabime_spdlog", - urls = [ - "https://github.com/gabime/spdlog/archive/v0.14.0.tar.gz", - ], - sha256 = "eb5beb4e53f4bfff5b32eb4db8588484bdc15a17b90eeefef3a9fc74fec1d83d", - strip_prefix = "spdlog-0.14.0", - build_file = repository + "//bazel/external:spdlog.BUILD", - ) - native.bind( - name="spdlog", - actual="@com_github_gabime_spdlog//:spdlog", - ) +def _com_github_fmtlib_fmt(): + _repository_impl( + name = "com_github_fmtlib_fmt", + build_file = "@envoy//bazel/external:fmtlib.BUILD", + ) + native.bind( + name = "fmtlib", + actual = "@com_github_fmtlib_fmt//:fmtlib", + ) + +def _com_github_gabime_spdlog(): + _repository_impl( + name = "com_github_gabime_spdlog", + build_file = "@envoy//bazel/external:spdlog.BUILD", + ) + native.bind( + name = "spdlog", + actual = "@com_github_gabime_spdlog//:spdlog", + ) -def com_github_gcovr_gcovr(repository = ""): - native.new_git_repository( - name = "com_github_gcovr_gcovr", - remote = "https://github.com/gcovr/gcovr", - commit = "c0d77201039c7b119b18bc7fb991564c602dd75d", - build_file = repository + "//bazel/external:gcovr.BUILD", - ) - native.bind( - name = "gcovr", - actual = "@com_github_gcovr_gcovr//:gcovr", - ) +def _com_github_gcovr_gcovr(): + _repository_impl( + name = "com_github_gcovr_gcovr", + build_file = "@envoy//bazel/external:gcovr.BUILD", + ) + native.bind( + name = "gcovr", + actual = "@com_github_gcovr_gcovr//:gcovr", + ) -def com_github_lightstep_lightstep_tracer_cpp(repository = ""): - genrule_repository( - name = "com_github_lightstep_lightstep_tracer_cpp", - urls = [ - "https://github.com/lightstep/lightstep-tracer-cpp/releases/download/v0_36/lightstep-tracer-cpp-0.36.tar.gz", - ], - sha256 = "f7477e67eca65f904c0b90a6bfec46d58cccfc998a8e75bc3259b6e93157ff84", - strip_prefix = "lightstep-tracer-cpp-0.36", - patches = [ - 
repository + "//bazel/external:lightstep-missing-header.patch", - ], - genrule_cmd_file = repository + "//bazel/external:lightstep.genrule_cmd", - build_file = repository + "//bazel/external:lightstep.BUILD", - ) - native.bind( - name="lightstep", - actual="@com_github_lightstep_lightstep_tracer_cpp//:lightstep", - ) +def _com_github_lightstep_lightstep_tracer_cpp(): + location = REPOSITORY_LOCATIONS[ + "com_github_lightstep_lightstep_tracer_cpp"] + genrule_repository( + name = "com_github_lightstep_lightstep_tracer_cpp", + urls = location["urls"], + sha256 = location["sha256"], + strip_prefix = location["strip_prefix"], + patches = [ + "@envoy//bazel/external:lightstep-missing-header.patch", + ], + genrule_cmd_file = "@envoy//bazel/external:lightstep.genrule_cmd", + build_file = "@envoy//bazel/external:lightstep.BUILD", + ) + native.bind( + name = "lightstep", + actual = "@com_github_lightstep_lightstep_tracer_cpp//:lightstep", + ) -def com_github_tencent_rapidjson(repository = ""): - native.new_git_repository( - name = "com_github_tencent_rapidjson", - remote = "https://github.com/tencent/rapidjson", - commit = "f54b0e47a08782a6131cc3d60f94d038fa6e0a51", # v1.1.0 - build_file = repository + "//bazel/external:rapidjson.BUILD", - ) - native.bind( - name = "rapidjson", - actual = "@com_github_tencent_rapidjson//:rapidjson", - ) +def _com_github_tencent_rapidjson(): + _repository_impl( + name = "com_github_tencent_rapidjson", + build_file = "@envoy//bazel/external:rapidjson.BUILD", + ) + native.bind( + name = "rapidjson", + actual = "@com_github_tencent_rapidjson//:rapidjson", + ) -def com_github_nodejs_http_parser(repository = ""): - native.new_git_repository( - name = "com_github_nodejs_http_parser", - remote = "https://github.com/nodejs/http-parser", - commit = "feae95a3a69f111bc1897b9048d9acbc290992f9", # v2.7.1 - build_file = repository + "//bazel/external:http-parser.BUILD", - ) - native.bind( - name = "http_parser", - actual = 
"@com_github_nodejs_http_parser//:http_parser", - ) +def _com_github_nodejs_http_parser(): + _repository_impl( + name = "com_github_nodejs_http_parser", + build_file = "@envoy//bazel/external:http-parser.BUILD", + ) + native.bind( + name = "http_parser", + actual = "@com_github_nodejs_http_parser//:http_parser", + ) -def com_google_googletest(): - native.git_repository( - name = "com_google_googletest", - remote = "https://github.com/google/googletest", - commit = "43863938377a9ea1399c0596269e0890b5c5515a", - ) - native.bind( - name = "googletest", - actual = "@com_google_googletest//:gtest", - ) +def _com_google_googletest(): + _repository_impl("com_google_googletest") + native.bind( + name = "googletest", + actual = "@com_google_googletest//:gtest", + ) -def com_google_protobuf(): - # TODO(htuch): This can switch back to a point release http_archive at the next - # release (> 3.4.1), we need HEAD proto_library support and - # https://github.com/google/protobuf/pull/3761. - native.http_archive( - name = "com_google_protobuf", - strip_prefix = "protobuf-c4f59dcc5c13debc572154c8f636b8a9361aacde", - sha256 = "5d4551193416861cb81c3bc0a428f22a6878148c57c31fb6f8f2aa4cf27ff635", - url = "https://github.com/google/protobuf/archive/c4f59dcc5c13debc572154c8f636b8a9361aacde.tar.gz", - ) - # Needed for cc_proto_library, Bazel doesn't support aliases today for repos, - # see https://groups.google.com/forum/#!topic/bazel-discuss/859ybHQZnuI and - # https://github.com/bazelbuild/bazel/issues/3219. 
- native.http_archive( - name = "com_google_protobuf_cc", - strip_prefix = "protobuf-c4f59dcc5c13debc572154c8f636b8a9361aacde", - sha256 = "5d4551193416861cb81c3bc0a428f22a6878148c57c31fb6f8f2aa4cf27ff635", - url = "https://github.com/google/protobuf/archive/c4f59dcc5c13debc572154c8f636b8a9361aacde.tar.gz", - ) - native.bind( - name = "protobuf", - actual = "@com_google_protobuf//:protobuf", - ) - native.bind( - name = "protoc", - actual = "@com_google_protobuf_cc//:protoc", - ) +def _com_google_protobuf(): + _repository_impl("com_google_protobuf") -def subpar(): - # I'd love to name this `com_github_google_subpar`, but something in the Subpar - # code assumes its repository name is just `subpar`. - native.git_repository( - name = "subpar", - remote = "https://github.com/google/subpar", - commit = "eb23aa7a5361cabc02464476dd080389340a5522", # HEAD - ) + # Needed for cc_proto_library, Bazel doesn't support aliases today for repos, + # see https://groups.google.com/forum/#!topic/bazel-discuss/859ybHQZnuI and + # https://github.com/bazelbuild/bazel/issues/3219. 
+ location = REPOSITORY_LOCATIONS["com_google_protobuf"] + native.http_archive(name = "com_google_protobuf_cc", **location) + native.bind( + name = "protobuf", + actual = "@com_google_protobuf//:protobuf", + ) + native.bind( + name = "protoc", + actual = "@com_google_protobuf_cc//:protoc", + ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 87502429e868..1f48d4d1018a 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,6 +1,79 @@ -REPO_LOCATIONS = { - "jinja2": "https://github.com/pallets/jinja.git", - "grpc_transcoding": "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding.git", - "data-plane-api": "https://github.com/envoyproxy/data-plane-api.git", - "markupsafe": "https://github.com/pallets/markupsafe.git", -} +REPOSITORY_LOCATIONS = dict( + com_github_bombela_backward = dict( + commit = "cd1c4bd9e48afe812a0e996d335298c455afcd92", # v1.3 + remote = "https://github.com/bombela/backward-cpp", + ), + com_github_cyan4973_xxhash = dict( + commit = "50a564c33c36b3f0c83f027dd21c25cba2967c72", # v0.6.3 + remote = "https://github.com/Cyan4973/xxHash", + ), + com_github_eile_tclap = dict( + commit = "3627d9402e529770df9b0edf2aa8c0e0d6c6bb41", # tclap-1-2-1-release-final + remote = "https://github.com/eile/tclap", + ), + com_github_fmtlib_fmt = dict( + sha256 = "10a9f184d4d66f135093a08396d3b0a0ebe8d97b79f8b3ddb8559f75fe4fcbc3", + strip_prefix = "fmt-4.0.0", + urls = ["https://github.com/fmtlib/fmt/releases/download/4.0.0/fmt-4.0.0.zip"], + ), + com_github_gabime_spdlog = dict( + sha256 = "eb5beb4e53f4bfff5b32eb4db8588484bdc15a17b90eeefef3a9fc74fec1d83d", + strip_prefix = "spdlog-0.14.0", + urls = ["https://github.com/gabime/spdlog/archive/v0.14.0.tar.gz"], + ), + com_github_gcovr_gcovr = dict( + commit = "c0d77201039c7b119b18bc7fb991564c602dd75d", + remote = "https://github.com/gcovr/gcovr", + ), + 
com_github_lightstep_lightstep_tracer_cpp = dict( + sha256 = "f7477e67eca65f904c0b90a6bfec46d58cccfc998a8e75bc3259b6e93157ff84", + strip_prefix = "lightstep-tracer-cpp-0.36", + urls = ["https://github.com/lightstep/lightstep-tracer-cpp/releases/download/v0_36/lightstep-tracer-cpp-0.36.tar.gz"], + ), + com_github_nodejs_http_parser = dict( + commit = "feae95a3a69f111bc1897b9048d9acbc290992f9", # v2.7.1 + remote = "https://github.com/nodejs/http-parser", + ), + com_github_pallets_jinja = dict( + commit = "d78a1b079cd985eea7d636f79124ab4fc44cb538", # 2.9.6 + remote = "https://github.com/pallets/jinja", + ), + com_github_pallets_markupsafe = dict( + commit = "d2a40c41dd1930345628ea9412d97e159f828157", # 1.0 + remote = "https://github.com/pallets/markupsafe", + ), + com_github_tencent_rapidjson = dict( + commit = "f54b0e47a08782a6131cc3d60f94d038fa6e0a51", # v1.1.0 + remote = "https://github.com/tencent/rapidjson", + ), + com_google_googletest = dict( + commit = "43863938377a9ea1399c0596269e0890b5c5515a", + remote = "https://github.com/google/googletest", + ), + # TODO(htuch): Protobuf can switch back to a point release http_archive at the next + # release (> 3.4.1), we need HEAD proto_library support and + # https://github.com/google/protobuf/pull/3761. 
+ com_google_protobuf = dict( + sha256 = "5d4551193416861cb81c3bc0a428f22a6878148c57c31fb6f8f2aa4cf27ff635", + strip_prefix = "protobuf-c4f59dcc5c13debc572154c8f636b8a9361aacde", + urls = ["https://github.com/google/protobuf/archive/c4f59dcc5c13debc572154c8f636b8a9361aacde.tar.gz"], + ), + envoy_api = dict( + commit = "e355cdbe0f7d614a110dc12e9d01b3ce817a2e87", + remote = "https://github.com/envoyproxy/data-plane-api", + ), + grpc_httpjson_transcoding = dict( + commit = "e4f58aa07b9002befa493a0a82e10f2e98b51fc6", + remote = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding", + ), + io_bazel_rules_go = dict( + commit = "4374be38e9a75ff5957c3922adb155d32086fe14", + remote = "https://github.com/bazelbuild/rules_go", + ), + # I'd love to name this `com_github_google_subpar`, but something in the Subpar + # code assumes its repository name is just `subpar`. + subpar = dict( + commit = "eb23aa7a5361cabc02464476dd080389340a5522", # HEAD + remote = "https://github.com/google/subpar", + ), +) From 5811662f2ca1e7bce965abe410161d67240069b0 Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 10 Nov 2017 18:49:46 -0500 Subject: [PATCH 12/34] docs: migrate to data-plane-api. 
(#2049) Signed-off-by: Harvey Tuch --- .circleci/config.yml | 7 +- bazel/git_repository_info.py | 20 + bazel/repository_locations.bzl | 2 +- ci/README.md | 1 + ci/do_ci.sh | 13 + docs/Doxyfile | 2427 ----------------- docs/_static/docker_compose_v0.1.svg | 4 - docs/_static/double_proxy.svg | 4 - docs/_static/front_proxy.svg | 4 - docs/_static/placeholder | 0 docs/_static/service_to_service.svg | 4 - docs/about_docs.rst | 19 - docs/build.sh | 16 - docs/conf.py | 293 -- docs/configuration/access_log.rst | 308 --- docs/configuration/cluster_manager/cds.rst | 66 - .../configuration/cluster_manager/cluster.rst | 201 -- .../cluster_circuit_breakers.rst | 73 - .../cluster_manager/cluster_hc.rst | 140 - .../cluster_manager/cluster_manager.rst | 67 - .../cluster_outlier_detection.rst | 85 - .../cluster_manager/cluster_runtime.rst | 130 - .../cluster_manager/cluster_ssl.rst | 82 - .../cluster_manager/cluster_stats.rst | 193 -- .../configuration/cluster_manager/outlier.rst | 15 - docs/configuration/cluster_manager/sds.rst | 24 - .../configuration/cluster_manager/sds_api.rst | 60 - docs/configuration/configuration.rst | 17 - docs/configuration/http_conn_man/filters.rst | 21 - .../http_conn_man/header_sanitizing.rst | 35 - docs/configuration/http_conn_man/headers.rst | 276 -- .../http_conn_man/http_conn_man.rst | 226 -- docs/configuration/http_conn_man/rds.rst | 86 - .../route_config/rate_limits.rst | 255 -- .../http_conn_man/route_config/route.rst | 509 ---- .../route_config/route_config.rst | 90 - .../route_config/route_matching.rst | 14 - .../route_config/traffic_splitting.rst | 136 - .../http_conn_man/route_config/vcluster.rst | 47 - .../http_conn_man/route_config/vhost.rst | 84 - docs/configuration/http_conn_man/runtime.rst | 25 - docs/configuration/http_conn_man/stats.rst | 85 - docs/configuration/http_conn_man/tracing.rst | 23 - .../http_filters/buffer_filter.rst | 38 - .../http_filters/cors_filter.rst | 65 - .../http_filters/dynamodb_filter.rst | 82 - 
.../http_filters/fault_filter.rst | 177 -- .../http_filters/grpc_http1_bridge_filter.rst | 55 - .../grpc_json_transcoder_filter.rst | 85 - .../http_filters/grpc_web_filter.rst | 16 - .../http_filters/health_check_filter.rst | 35 - .../http_filters/http_filters.rst | 20 - .../http_filters/ip_tagging_filter.rst | 47 - .../configuration/http_filters/lua_filter.rst | 353 --- .../http_filters/rate_limit_filter.rst | 79 - .../http_filters/router_filter.rst | 307 --- docs/configuration/listeners/filters.rst | 21 - docs/configuration/listeners/lds.rst | 84 - docs/configuration/listeners/listeners.rst | 107 - docs/configuration/listeners/runtime.rst | 8 - docs/configuration/listeners/ssl.rst | 125 - docs/configuration/listeners/stats.rst | 24 - .../client_ssl_auth_filter.rst | 98 - .../network_filters/echo_filter.rst | 12 - .../network_filters/mongo_proxy_filter.rst | 212 -- .../network_filters/network_filters.rst | 18 - .../network_filters/rate_limit_filter.rst | 71 - .../network_filters/redis_proxy_filter.rst | 107 - .../network_filters/tcp_proxy_filter.rst | 146 - docs/configuration/overview/admin.rst | 27 - docs/configuration/overview/overview.rst | 120 - docs/configuration/overview/rate_limit.rst | 37 - docs/configuration/overview/runtime.rst | 107 - docs/configuration/overview/tracing.rst | 69 - docs/configuration/tools/router_check.rst | 170 -- docs/extending/extending.rst | 10 - docs/favicon.ico | Bin 67646 -> 0 bytes docs/index.rst | 12 - docs/install/building.rst | 8 - docs/install/install.rst | 14 - docs/install/installation.rst | 6 - docs/install/ref_configs.rst | 70 - docs/install/requirements.rst | 37 - docs/install/sandboxes/front_proxy.rst | 228 -- docs/install/sandboxes/grpc_bridge.rst | 68 - docs/install/sandboxes/jaeger_tracing.rst | 81 - docs/install/sandboxes/local_docker_build.rst | 35 - docs/install/sandboxes/sandboxes.rst | 17 - docs/install/sandboxes/zipkin_tracing.rst | 82 - docs/install/tools/config_load_check_tool.rst | 30 - 
docs/install/tools/route_table_check_tool.rst | 65 - .../tools/schema_validator_check_tool.rst | 33 - docs/install/tools/tools.rst | 9 - docs/intro/arch_overview/access_logging.rst | 19 - docs/intro/arch_overview/arch_overview.rst | 37 - docs/intro/arch_overview/circuit_breaking.rst | 38 - docs/intro/arch_overview/cluster_manager.rst | 26 - .../arch_overview/connection_pooling.rst | 37 - docs/intro/arch_overview/draining.rst | 35 - .../arch_overview/dynamic_configuration.rst | 81 - docs/intro/arch_overview/dynamo.rst | 18 - .../arch_overview/global_rate_limiting.rst | 31 - docs/intro/arch_overview/grpc.rst | 26 - docs/intro/arch_overview/health_checking.rst | 98 - docs/intro/arch_overview/hot_restart.rst | 28 - .../http_connection_management.rst | 44 - docs/intro/arch_overview/http_filters.rst | 24 - docs/intro/arch_overview/http_routing.rst | 94 - docs/intro/arch_overview/init.rst | 24 - docs/intro/arch_overview/listeners.rst | 23 - docs/intro/arch_overview/load_balancing.rst | 282 -- docs/intro/arch_overview/mongo.rst | 18 - docs/intro/arch_overview/network_filters.rst | 22 - docs/intro/arch_overview/outlier.rst | 139 - docs/intro/arch_overview/redis.rst | 209 -- docs/intro/arch_overview/runtime.rst | 16 - docs/intro/arch_overview/scripting.rst | 5 - .../intro/arch_overview/service_discovery.rst | 130 - docs/intro/arch_overview/ssl.rst | 44 - docs/intro/arch_overview/statistics.rst | 25 - docs/intro/arch_overview/tcp_proxy.rst | 18 - docs/intro/arch_overview/terminology.rst | 32 - docs/intro/arch_overview/threading_model.rst | 13 - docs/intro/arch_overview/tracing.rst | 105 - docs/intro/arch_overview/websocket.rst | 36 - docs/intro/comparison.rst | 137 - .../deployment_types/deployment_types.rst | 12 - docs/intro/deployment_types/double_proxy.rst | 26 - docs/intro/deployment_types/front_proxy.rst | 26 - .../deployment_types/service_to_service.rst | 62 - docs/intro/getting_help.rst | 15 - docs/intro/intro.rst | 14 - docs/intro/version_history.rst | 188 -- 
docs/intro/what_is_envoy.rst | 125 - docs/operations/admin.rst | 143 - docs/operations/cli.rst | 136 - docs/operations/faq/overview.rst | 12 - docs/operations/faq/zipkin_tracing.rst | 7 - docs/operations/faq/zone_aware_routing.rst | 61 - docs/operations/fs_flags.rst | 13 - docs/operations/hot_restarter.rst | 37 - docs/operations/operations.rst | 15 - docs/operations/runtime.rst | 8 - docs/operations/stats_overview.rst | 13 - docs/requirements.txt | 19 - 145 files changed, 39 insertions(+), 12721 deletions(-) create mode 100755 bazel/git_repository_info.py delete mode 100644 docs/Doxyfile delete mode 100644 docs/_static/docker_compose_v0.1.svg delete mode 100644 docs/_static/double_proxy.svg delete mode 100644 docs/_static/front_proxy.svg delete mode 100644 docs/_static/placeholder delete mode 100644 docs/_static/service_to_service.svg delete mode 100644 docs/about_docs.rst delete mode 100755 docs/build.sh delete mode 100644 docs/conf.py delete mode 100644 docs/configuration/access_log.rst delete mode 100644 docs/configuration/cluster_manager/cds.rst delete mode 100644 docs/configuration/cluster_manager/cluster.rst delete mode 100644 docs/configuration/cluster_manager/cluster_circuit_breakers.rst delete mode 100644 docs/configuration/cluster_manager/cluster_hc.rst delete mode 100644 docs/configuration/cluster_manager/cluster_manager.rst delete mode 100644 docs/configuration/cluster_manager/cluster_outlier_detection.rst delete mode 100644 docs/configuration/cluster_manager/cluster_runtime.rst delete mode 100644 docs/configuration/cluster_manager/cluster_ssl.rst delete mode 100644 docs/configuration/cluster_manager/cluster_stats.rst delete mode 100644 docs/configuration/cluster_manager/outlier.rst delete mode 100644 docs/configuration/cluster_manager/sds.rst delete mode 100644 docs/configuration/cluster_manager/sds_api.rst delete mode 100644 docs/configuration/configuration.rst delete mode 100644 docs/configuration/http_conn_man/filters.rst delete mode 100644 
docs/configuration/http_conn_man/header_sanitizing.rst delete mode 100644 docs/configuration/http_conn_man/headers.rst delete mode 100644 docs/configuration/http_conn_man/http_conn_man.rst delete mode 100644 docs/configuration/http_conn_man/rds.rst delete mode 100644 docs/configuration/http_conn_man/route_config/rate_limits.rst delete mode 100644 docs/configuration/http_conn_man/route_config/route.rst delete mode 100644 docs/configuration/http_conn_man/route_config/route_config.rst delete mode 100644 docs/configuration/http_conn_man/route_config/route_matching.rst delete mode 100644 docs/configuration/http_conn_man/route_config/traffic_splitting.rst delete mode 100644 docs/configuration/http_conn_man/route_config/vcluster.rst delete mode 100644 docs/configuration/http_conn_man/route_config/vhost.rst delete mode 100644 docs/configuration/http_conn_man/runtime.rst delete mode 100644 docs/configuration/http_conn_man/stats.rst delete mode 100644 docs/configuration/http_conn_man/tracing.rst delete mode 100644 docs/configuration/http_filters/buffer_filter.rst delete mode 100644 docs/configuration/http_filters/cors_filter.rst delete mode 100644 docs/configuration/http_filters/dynamodb_filter.rst delete mode 100644 docs/configuration/http_filters/fault_filter.rst delete mode 100644 docs/configuration/http_filters/grpc_http1_bridge_filter.rst delete mode 100644 docs/configuration/http_filters/grpc_json_transcoder_filter.rst delete mode 100644 docs/configuration/http_filters/grpc_web_filter.rst delete mode 100644 docs/configuration/http_filters/health_check_filter.rst delete mode 100644 docs/configuration/http_filters/http_filters.rst delete mode 100644 docs/configuration/http_filters/ip_tagging_filter.rst delete mode 100644 docs/configuration/http_filters/lua_filter.rst delete mode 100644 docs/configuration/http_filters/rate_limit_filter.rst delete mode 100644 docs/configuration/http_filters/router_filter.rst delete mode 100644 docs/configuration/listeners/filters.rst 
delete mode 100644 docs/configuration/listeners/lds.rst delete mode 100644 docs/configuration/listeners/listeners.rst delete mode 100644 docs/configuration/listeners/runtime.rst delete mode 100644 docs/configuration/listeners/ssl.rst delete mode 100644 docs/configuration/listeners/stats.rst delete mode 100644 docs/configuration/network_filters/client_ssl_auth_filter.rst delete mode 100644 docs/configuration/network_filters/echo_filter.rst delete mode 100644 docs/configuration/network_filters/mongo_proxy_filter.rst delete mode 100644 docs/configuration/network_filters/network_filters.rst delete mode 100644 docs/configuration/network_filters/rate_limit_filter.rst delete mode 100644 docs/configuration/network_filters/redis_proxy_filter.rst delete mode 100644 docs/configuration/network_filters/tcp_proxy_filter.rst delete mode 100644 docs/configuration/overview/admin.rst delete mode 100644 docs/configuration/overview/overview.rst delete mode 100644 docs/configuration/overview/rate_limit.rst delete mode 100644 docs/configuration/overview/runtime.rst delete mode 100644 docs/configuration/overview/tracing.rst delete mode 100644 docs/configuration/tools/router_check.rst delete mode 100644 docs/extending/extending.rst delete mode 100644 docs/favicon.ico delete mode 100644 docs/index.rst delete mode 100644 docs/install/building.rst delete mode 100644 docs/install/install.rst delete mode 100644 docs/install/installation.rst delete mode 100644 docs/install/ref_configs.rst delete mode 100644 docs/install/requirements.rst delete mode 100644 docs/install/sandboxes/front_proxy.rst delete mode 100644 docs/install/sandboxes/grpc_bridge.rst delete mode 100644 docs/install/sandboxes/jaeger_tracing.rst delete mode 100644 docs/install/sandboxes/local_docker_build.rst delete mode 100644 docs/install/sandboxes/sandboxes.rst delete mode 100644 docs/install/sandboxes/zipkin_tracing.rst delete mode 100644 docs/install/tools/config_load_check_tool.rst delete mode 100644 
docs/install/tools/route_table_check_tool.rst delete mode 100644 docs/install/tools/schema_validator_check_tool.rst delete mode 100644 docs/install/tools/tools.rst delete mode 100644 docs/intro/arch_overview/access_logging.rst delete mode 100644 docs/intro/arch_overview/arch_overview.rst delete mode 100644 docs/intro/arch_overview/circuit_breaking.rst delete mode 100644 docs/intro/arch_overview/cluster_manager.rst delete mode 100644 docs/intro/arch_overview/connection_pooling.rst delete mode 100644 docs/intro/arch_overview/draining.rst delete mode 100644 docs/intro/arch_overview/dynamic_configuration.rst delete mode 100644 docs/intro/arch_overview/dynamo.rst delete mode 100644 docs/intro/arch_overview/global_rate_limiting.rst delete mode 100644 docs/intro/arch_overview/grpc.rst delete mode 100644 docs/intro/arch_overview/health_checking.rst delete mode 100644 docs/intro/arch_overview/hot_restart.rst delete mode 100644 docs/intro/arch_overview/http_connection_management.rst delete mode 100644 docs/intro/arch_overview/http_filters.rst delete mode 100644 docs/intro/arch_overview/http_routing.rst delete mode 100644 docs/intro/arch_overview/init.rst delete mode 100644 docs/intro/arch_overview/listeners.rst delete mode 100644 docs/intro/arch_overview/load_balancing.rst delete mode 100644 docs/intro/arch_overview/mongo.rst delete mode 100644 docs/intro/arch_overview/network_filters.rst delete mode 100644 docs/intro/arch_overview/outlier.rst delete mode 100644 docs/intro/arch_overview/redis.rst delete mode 100644 docs/intro/arch_overview/runtime.rst delete mode 100644 docs/intro/arch_overview/scripting.rst delete mode 100644 docs/intro/arch_overview/service_discovery.rst delete mode 100644 docs/intro/arch_overview/ssl.rst delete mode 100644 docs/intro/arch_overview/statistics.rst delete mode 100644 docs/intro/arch_overview/tcp_proxy.rst delete mode 100644 docs/intro/arch_overview/terminology.rst delete mode 100644 docs/intro/arch_overview/threading_model.rst delete mode 
100644 docs/intro/arch_overview/tracing.rst delete mode 100644 docs/intro/arch_overview/websocket.rst delete mode 100644 docs/intro/comparison.rst delete mode 100644 docs/intro/deployment_types/deployment_types.rst delete mode 100644 docs/intro/deployment_types/double_proxy.rst delete mode 100644 docs/intro/deployment_types/front_proxy.rst delete mode 100644 docs/intro/deployment_types/service_to_service.rst delete mode 100644 docs/intro/getting_help.rst delete mode 100644 docs/intro/intro.rst delete mode 100644 docs/intro/version_history.rst delete mode 100644 docs/intro/what_is_envoy.rst delete mode 100644 docs/operations/admin.rst delete mode 100644 docs/operations/cli.rst delete mode 100644 docs/operations/faq/overview.rst delete mode 100644 docs/operations/faq/zipkin_tracing.rst delete mode 100644 docs/operations/faq/zone_aware_routing.rst delete mode 100644 docs/operations/fs_flags.rst delete mode 100644 docs/operations/hot_restarter.rst delete mode 100644 docs/operations/operations.rst delete mode 100644 docs/operations/runtime.rst delete mode 100644 docs/operations/stats_overview.rst delete mode 100644 docs/requirements.txt diff --git a/.circleci/config.yml b/.circleci/config.yml index c003b0b1976d..75a9efb544d5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -63,11 +63,12 @@ jobs: - run: ci/build_container/docker_push.sh docs: docker: - - image: circleci/python:2.7 - resource_class: small + - image: *envoy-build-image + resource_class: xlarge + working_directory: /source steps: - checkout - - run: docs/build.sh + - run: ci/do_circle_ci.sh docs - add_ssh_keys - run: docs/publish.sh mac: diff --git a/bazel/git_repository_info.py b/bazel/git_repository_info.py new file mode 100755 index 000000000000..e8ffa485976f --- /dev/null +++ b/bazel/git_repository_info.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +# Quick-and-dirty Python to fetch git repository info in bazel/repository_locations.bzl. 
+ +import imp +import sys +import subprocess as sp + +repolocs = imp.load_source('replocs', 'bazel/repository_locations.bzl') + +if __name__ == '__main__': + if len(sys.argv) != 2: + print 'Usage: %s ' % sys.argv[0] + sys.exit(1) + repo = sys.argv[1] + if repo not in repolocs.REPOSITORY_LOCATIONS: + print 'Unknown repository: %s' % repo + sys.exit(1) + repoloc = repolocs.REPOSITORY_LOCATIONS[repo] + print '%s %s' % (repoloc['remote'], repoloc['commit']) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 1f48d4d1018a..3b63a93a1755 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -59,7 +59,7 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/protobuf/archive/c4f59dcc5c13debc572154c8f636b8a9361aacde.tar.gz"], ), envoy_api = dict( - commit = "e355cdbe0f7d614a110dc12e9d01b3ce817a2e87", + commit = "320b5afbd0f6ed88df8d8e361545decabfea7777", remote = "https://github.com/envoyproxy/data-plane-api", ), grpc_httpjson_transcoding = dict( diff --git a/ci/README.md b/ci/README.md index 71a081bd01cd..d6273bf2f8d6 100644 --- a/ci/README.md +++ b/ci/README.md @@ -82,6 +82,7 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang-5.0. * `check_format`— run `clang-format` 5.0 and `buildifier` on entire source tree. * `fix_format`— run and enforce `clang-format` 5.0 and `buildifier` on entire source tree. +* `doc`— build documentation tree in `generated/docs`. 
# Testing changes to the build image as a developer diff --git a/ci/do_ci.sh b/ci/do_ci.sh index facb6da9f9ca..e4fbd16a6e85 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -153,6 +153,19 @@ elif [[ "$1" == "check_format" ]]; then cd "${ENVOY_SRCDIR}" ./tools/check_format.py check exit 0 +elif [[ "$1" == "docs" ]]; then + DOCS_BUILD_DIR="${BUILD_DIR}"/docs + rm -rf "${DOCS_BUILD_DIR}" generated/docs generated/rst + mkdir -p "${DOCS_BUILD_DIR}" + ENVOY_API=$(bazel/git_repository_info.py envoy_api) + read -a GIT_INFO <<< "${ENVOY_API}" + pushd "${DOCS_BUILD_DIR}" + git clone "${GIT_INFO[0]}" + cd data-plane-api + git checkout "${GIT_INFO[1]}" + ./docs/build.sh + popd + rsync -av "${DOCS_BUILD_DIR}"/data-plane-api/generated/* generated/ else echo "Invalid do_ci.sh target, see ci/README.md for valid targets." exit 1 diff --git a/docs/Doxyfile b/docs/Doxyfile deleted file mode 100644 index e3dff7830195..000000000000 --- a/docs/Doxyfile +++ /dev/null @@ -1,2427 +0,0 @@ -# Doxyfile 1.8.11 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. 
See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "Envoy" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = _doxygen/ - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. 
Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. 
-# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. 
-# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. 
Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. 
- -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. 
- -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. 
- -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# If one adds a struct or class to a group and this option is enabled, then also -# any nested class or struct is added to the same group. By default this option -# is disabled and one has to add nested compounds explicitly via \ingroup. -# The default value is: NO. - -GROUP_NESTED_COMPOUNDS = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. 
- -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. 
- -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. 
- -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. 
- -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. 
- -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. 
- -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. 
The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. 
You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. 
- -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. 
Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = ../include - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. -# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f, *.for, *.tcl, -# *.vhd, *.vhdl, *.ucf, *.qsf, *.as and *.js. - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. 
- -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. 
- -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# -# -# where is the value of the INPUT_FILTER tag, and is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. 
when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. 
- -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. 
- -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the -# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the -# cost of reduced performance. This can be particularly helpful with template -# rich C++ code for which doxygen's built-in parser lacks the necessary type -# information. -# Note: The availability of this option depends on whether or not doxygen was -# generated with the -Duse-libclang=ON option for CMake. -# The default value is: NO. - -CLANG_ASSISTED_PARSING = NO - -# If clang assisted parsing is enabled you can provide the compiler with command -# line options that you would normally use when invoking the compiler. Note that -# the include paths will already be set by doxygen for the files and directories -# specified with INPUT and INCLUDE_PATH. -# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. - -CLANG_OPTIONS = - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. 
- -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. 
-# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. 
the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. 
The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = NO - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. 
- -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). 
-# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. 
- -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. 
To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. 
Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. 
However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use + S -# (what the is depends on the OS and browser, but it is typically -# , /