From e674640f70a33d9bf235696ebcb194a5d8009794 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 28 Aug 2019 00:57:36 -0700 Subject: [PATCH 01/31] build: link C++ stdlib dynamically in sanitizer runs (#8019) Description: Sanitizers doesn't support static link, reverts #7929 and link lib(std)c++ dynamically in sanitizer runs. Addresses test issue for #4251. Added workaround in ASAN for #7647. Risk Level: Low (test only) Testing: CI, local libc++ runs Docs Changes: N/A Release Notes: N/A Fixes #7928 --- .bazelrc | 24 ++++++----- bazel/BUILD | 31 +++++++++++++- bazel/envoy_binary.bzl | 3 +- bazel/envoy_internal.bzl | 7 ++++ bazel/envoy_test.bzl | 5 ++- bazel/io_opentracing_cpp.patch | 17 ++++++++ bazel/repositories.bzl | 7 +++- .../configs/clang/bazel_0.28.1/cc/BUILD | 1 - .../clang_libcxx/bazel_0.28.1/cc/BUILD | 1 - .../configs/gcc/bazel_0.28.1/cc/BUILD | 3 +- bazel/toolchains/configs/versions.bzl | 6 +-- bazel/toolchains/rbe_toolchains_config.bzl | 6 +-- test/exe/BUILD | 5 +-- test/integration/fake_upstream.cc | 42 ++++++------------- 14 files changed, 99 insertions(+), 59 deletions(-) mode change 100755 => 100644 bazel/BUILD create mode 100644 bazel/io_opentracing_cpp.patch diff --git a/.bazelrc b/.bazelrc index 6b73d8942498..5235dc6265ac 100644 --- a/.bazelrc +++ b/.bazelrc @@ -16,7 +16,7 @@ build --experimental_local_memory_estimate build --experimental_strict_action_env=true build --host_force_python=PY2 build --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a -build --action_env=BAZEL_LINKOPTS=-lm:-static-libgcc +build --action_env=BAZEL_LINKOPTS=-lm build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 build --javabase=@bazel_tools//tools/jdk:remote_jdk11 @@ -28,19 +28,21 @@ build --action_env=CC build --action_env=CXX build --action_env=PATH +# Common flags for sanitizers +build:sanitizer --define tcmalloc=disabled +build:sanitizer --linkopt -ldl +build:sanitizer --build_tag_filters=-no_san +build:sanitizer --test_tag_filters=-no_san + # Basic ASAN/UBSAN that 
works for gcc -build:asan --action_env=BAZEL_LINKLIBS= -build:asan --action_env=BAZEL_LINKOPTS=-lstdc++:-lm +build:asan --config=sanitizer +# ASAN install its signal handler, disable ours so the stacktrace will be printed by ASAN +build:asan --define signal_trace=disabled build:asan --define ENVOY_CONFIG_ASAN=1 build:asan --copt -fsanitize=address,undefined build:asan --linkopt -fsanitize=address,undefined build:asan --copt -fno-sanitize=vptr build:asan --linkopt -fno-sanitize=vptr -build:asan --linkopt -ldl -build:asan --define tcmalloc=disabled -build:asan --build_tag_filters=-no_asan -build:asan --test_tag_filters=-no_asan -build:asan --define signal_trace=disabled build:asan --copt -DADDRESS_SANITIZER=1 build:asan --copt -D__SANITIZE_ADDRESS__ build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true:detect_odr_violation=1 @@ -62,21 +64,20 @@ build:macos-asan --copt -DGRPC_BAZEL_BUILD build:macos-asan --dynamic_mode=off # Clang TSAN +build:clang-tsan --config=sanitizer build:clang-tsan --define ENVOY_CONFIG_TSAN=1 build:clang-tsan --copt -fsanitize=thread build:clang-tsan --linkopt -fsanitize=thread build:clang-tsan --linkopt -fuse-ld=lld -build:clang-tsan --linkopt -static-libsan -build:clang-tsan --define tcmalloc=disabled # Needed due to https://github.com/libevent/libevent/issues/777 build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE # Clang MSAN - broken today since we need to rebuild lib[std]c++ and external deps with MSAN # support (see https://github.com/envoyproxy/envoy/issues/443). 
+build:clang-msan --config=sanitizer build:clang-msan --define ENVOY_CONFIG_MSAN=1 build:clang-msan --copt -fsanitize=memory build:clang-msan --linkopt -fsanitize=memory -build:clang-msan --define tcmalloc=disabled build:clang-msan --copt -fsanitize-memory-track-origins=2 # Clang with libc++ @@ -108,6 +109,7 @@ build:rbe-toolchain-clang-libc++ --config=rbe-toolchain build:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain build:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin +build:rbe-toolchain-clang-libc++ --define force_libcpp=enabled build:rbe-toolchain-gcc --config=rbe-toolchain build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain diff --git a/bazel/BUILD b/bazel/BUILD old mode 100755 new mode 100644 index e60a4ae9c06c..202ecfc9d381 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -1,11 +1,14 @@ licenses(["notice"]) # Apache 2 -package(default_visibility = ["//visibility:public"]) +load("//bazel:envoy_build_system.bzl", "envoy_package") + +envoy_package() + +load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") exports_files([ "gen_sh_test_runner.sh", "sh_test_wrapper.sh", - "cc_wrapper.py", ]) genrule( @@ -37,6 +40,25 @@ genrule( stamp = 1, ) +# A target to optionally link C++ standard library dynamically in sanitizer runs. 
+# TSAN doesn't support libc/libstdc++ static linking per doc: +# http://releases.llvm.org/8.0.1/tools/clang/docs/ThreadSanitizer.html +cc_library( + name = "dynamic_stdlib", + linkopts = envoy_select_force_libcpp( + ["-lc++"], + ["-lstdc++"], + ), +) + +cc_library( + name = "static_stdlib", + linkopts = select({ + "//bazel:linux": ["-static-libgcc"], + "//conditions:default": [], + }), +) + config_setting( name = "windows_opt_build", values = { @@ -81,6 +103,11 @@ config_setting( values = {"define": "ENVOY_CONFIG_ASAN=1"}, ) +config_setting( + name = "tsan_build", + values = {"define": "ENVOY_CONFIG_TSAN=1"}, +) + config_setting( name = "coverage_build", values = {"define": "ENVOY_CONFIG_COVERAGE=1"}, diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl index edcdd09ae03b..8a9d396dcfbc 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -4,6 +4,7 @@ load( ":envoy_internal.bzl", "envoy_copts", "envoy_external_dep_path", + "envoy_stdlib_deps", "tcmalloc_external_dep", ) @@ -24,7 +25,7 @@ def envoy_cc_binary( if stamped: linkopts = linkopts + _envoy_stamped_linkopts() deps = deps + _envoy_stamped_deps() - deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + envoy_stdlib_deps() native.cc_binary( name = name, srcs = srcs, diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 2562390311de..5dca0a028f82 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -89,6 +89,13 @@ def envoy_select_force_libcpp(if_libcpp, default = None): "//conditions:default": default or [], }) +def envoy_stdlib_deps(): + return select({ + "@envoy//bazel:asan_build": ["@envoy//bazel:dynamic_stdlib"], + "@envoy//bazel:tsan_build": ["@envoy//bazel:dynamic_stdlib"], + "//conditions:default": ["@envoy//bazel:static_stdlib"], + }) + # Dependencies on tcmalloc_and_profiler should be wrapped with this function. 
def tcmalloc_external_dep(repository): return select({ diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 4e7e91e0070b..e6985f92a1f3 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -8,6 +8,7 @@ load( "envoy_external_dep_path", "envoy_linkstatic", "envoy_select_force_libcpp", + "envoy_stdlib_deps", "tcmalloc_external_dep", ) @@ -80,7 +81,7 @@ def envoy_cc_fuzz_test(name, corpus, deps = [], tags = [], **kwargs): test_lib_name = name + "_lib" envoy_cc_test_library( name = test_lib_name, - deps = deps + ["//test/fuzz:fuzz_runner_lib"], + deps = deps + ["//test/fuzz:fuzz_runner_lib", "//bazel:dynamic_stdlib"], **kwargs ) native.cc_test( @@ -163,7 +164,7 @@ def envoy_cc_test( linkopts = _envoy_test_linkopts(), linkstatic = envoy_linkstatic(), malloc = tcmalloc_external_dep(repository), - deps = [ + deps = envoy_stdlib_deps() + [ ":" + name + "_lib_internal_only", repository + "//test:main", ], diff --git a/bazel/io_opentracing_cpp.patch b/bazel/io_opentracing_cpp.patch new file mode 100644 index 000000000000..6389489d65a0 --- /dev/null +++ b/bazel/io_opentracing_cpp.patch @@ -0,0 +1,17 @@ +diff --git a/src/dynamic_load_unix.cpp b/src/dynamic_load_unix.cpp +index 17e08fd..d25e0c8 100644 +--- a/src/dynamic_load_unix.cpp ++++ b/src/dynamic_load_unix.cpp +@@ -35,7 +35,11 @@ DynamicallyLoadTracingLibrary(const char* shared_library, + std::string& error_message) noexcept try { + dlerror(); // Clear any existing error. 
+ +- const auto handle = dlopen(shared_library, RTLD_NOW | RTLD_LOCAL); ++ const auto handle = dlopen(shared_library, RTLD_NOW | RTLD_LOCAL ++#ifdef __SANITIZE_ADDRESS__ ++ | RTLD_NODELETE ++#endif ++ ); + if (handle == nullptr) { + error_message = dlerror(); + return make_unexpected(dynamic_load_failure_error); diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index b80d6f0bfe22..4f2a28fab99d 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -335,7 +335,12 @@ def _com_github_nghttp2_nghttp2(): ) def _io_opentracing_cpp(): - _repository_impl("io_opentracing_cpp") + _repository_impl( + name = "io_opentracing_cpp", + patch_args = ["-p1"], + # Workaround for LSAN false positive in https://github.com/envoyproxy/envoy/issues/7647 + patches = ["@envoy//bazel:io_opentracing_cpp.patch"], + ) native.bind( name = "opentracing", actual = "@io_opentracing_cpp//:opentracing", diff --git a/bazel/toolchains/configs/clang/bazel_0.28.1/cc/BUILD b/bazel/toolchains/configs/clang/bazel_0.28.1/cc/BUILD index 1726bdef2c05..015b58ead104 100755 --- a/bazel/toolchains/configs/clang/bazel_0.28.1/cc/BUILD +++ b/bazel/toolchains/configs/clang/bazel_0.28.1/cc/BUILD @@ -116,7 +116,6 @@ cc_toolchain_config( "-Wl,-z,relro,-z,now", "-B/usr/lib/llvm-8/bin", "-lm", - "-static-libgcc", "-fuse-ld=lld"], link_libs = ["-l:libstdc++.a"], opt_link_flags = ["-Wl,--gc-sections"], diff --git a/bazel/toolchains/configs/clang_libcxx/bazel_0.28.1/cc/BUILD b/bazel/toolchains/configs/clang_libcxx/bazel_0.28.1/cc/BUILD index dae58f58c97a..8a2ac6331467 100755 --- a/bazel/toolchains/configs/clang_libcxx/bazel_0.28.1/cc/BUILD +++ b/bazel/toolchains/configs/clang_libcxx/bazel_0.28.1/cc/BUILD @@ -114,7 +114,6 @@ cc_toolchain_config( "-Wl,-z,relro,-z,now", "-B/usr/lib/llvm-8/bin", "-lm", - "-static-libgcc", "-pthread", "-fuse-ld=lld"], link_libs = ["-l:libc++.a", diff --git a/bazel/toolchains/configs/gcc/bazel_0.28.1/cc/BUILD b/bazel/toolchains/configs/gcc/bazel_0.28.1/cc/BUILD index 
e936d4b91522..443b34aa3eff 100755 --- a/bazel/toolchains/configs/gcc/bazel_0.28.1/cc/BUILD +++ b/bazel/toolchains/configs/gcc/bazel_0.28.1/cc/BUILD @@ -115,8 +115,7 @@ cc_toolchain_config( "-Wl,-z,relro,-z,now", "-B/usr/bin", "-pass-exit-codes", - "-lm", - "-static-libgcc"], + "-lm"], link_libs = ["-l:libstdc++.a"], opt_link_flags = ["-Wl,--gc-sections"], unfiltered_compile_flags = ["-fno-canonical-system-headers", diff --git a/bazel/toolchains/configs/versions.bzl b/bazel/toolchains/configs/versions.bzl index b7fee4d50322..c21f70f9cb14 100644 --- a/bazel/toolchains/configs/versions.bzl +++ b/bazel/toolchains/configs/versions.bzl @@ -1,9 +1,9 @@ # Generated file, do not modify by hand # Generated by 'rbe_ubuntu_gcc_gen' rbe_autoconfig rule """Definitions to be used in rbe_repo attr of an rbe_autoconf rule """ -toolchain_config_spec0 = struct(config_repos = [], create_cc_configs = True, create_java_configs = False, env = {"BAZEL_COMPILER": "clang", "BAZEL_LINKLIBS": "-l%:libstdc++.a", "BAZEL_LINKOPTS": "-lm:-static-libgcc:-fuse-ld=lld", "BAZEL_USE_LLVM_NATIVE_COVERAGE": "1", "GCOV": "llvm-profdata", "CC": "clang", "CXX": "clang++", "PATH": "/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin"}, java_home = None, name = "clang") -toolchain_config_spec1 = struct(config_repos = [], create_cc_configs = True, create_java_configs = False, env = {"BAZEL_COMPILER": "clang", "BAZEL_LINKLIBS": "-l%:libc++.a:-l%:libc++abi.a", "BAZEL_LINKOPTS": "-lm:-static-libgcc:-pthread:-fuse-ld=lld", "BAZEL_USE_LLVM_NATIVE_COVERAGE": "1", "GCOV": "llvm-profdata", "CC": "clang", "CXX": "clang++", "PATH": "/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin", "BAZEL_CXXOPTS": "-stdlib=libc++", "CXXFLAGS": "-stdlib=libc++"}, java_home = None, name = "clang_libcxx") -toolchain_config_spec2 = struct(config_repos = [], create_cc_configs = True, create_java_configs = False, env = {"BAZEL_COMPILER": "gcc", "BAZEL_LINKLIBS": "-l%:libstdc++.a", "BAZEL_LINKOPTS": "-lm:-static-libgcc", "CC": "gcc", "CXX": 
"g++", "PATH": "/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin"}, java_home = None, name = "gcc") +toolchain_config_spec0 = struct(config_repos = [], create_cc_configs = True, create_java_configs = False, env = {"BAZEL_COMPILER": "clang", "BAZEL_LINKLIBS": "-l%:libstdc++.a", "BAZEL_LINKOPTS": "-lm:-fuse-ld=lld", "BAZEL_USE_LLVM_NATIVE_COVERAGE": "1", "GCOV": "llvm-profdata", "CC": "clang", "CXX": "clang++", "PATH": "/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin"}, java_home = None, name = "clang") +toolchain_config_spec1 = struct(config_repos = [], create_cc_configs = True, create_java_configs = False, env = {"BAZEL_COMPILER": "clang", "BAZEL_LINKLIBS": "-l%:libc++.a:-l%:libc++abi.a", "BAZEL_LINKOPTS": "-lm:-pthread:-fuse-ld=lld", "BAZEL_USE_LLVM_NATIVE_COVERAGE": "1", "GCOV": "llvm-profdata", "CC": "clang", "CXX": "clang++", "PATH": "/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin", "BAZEL_CXXOPTS": "-stdlib=libc++", "CXXFLAGS": "-stdlib=libc++"}, java_home = None, name = "clang_libcxx") +toolchain_config_spec2 = struct(config_repos = [], create_cc_configs = True, create_java_configs = False, env = {"BAZEL_COMPILER": "gcc", "BAZEL_LINKLIBS": "-l%:libstdc++.a", "BAZEL_LINKOPTS": "-lm", "CC": "gcc", "CXX": "g++", "PATH": "/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin"}, java_home = None, name = "gcc") _TOOLCHAIN_CONFIG_SPECS = [toolchain_config_spec0,toolchain_config_spec1,toolchain_config_spec2] _BAZEL_TO_CONFIG_SPEC_NAMES = {"0.28.1": ["clang", "clang_libcxx", "gcc"]} LATEST = "sha256:d1f6087fdeb6a6e5d4fd52a5dc06b15f43f49e2c20fc813bcaaa12333485a70b" diff --git a/bazel/toolchains/rbe_toolchains_config.bzl b/bazel/toolchains/rbe_toolchains_config.bzl index fd7210db1f87..96c38c79e1f5 100644 --- a/bazel/toolchains/rbe_toolchains_config.bzl +++ b/bazel/toolchains/rbe_toolchains_config.bzl @@ -10,7 +10,7 @@ _CONFIGS_OUTPUT_BASE = "bazel/toolchains/configs" _CLANG_ENV = { "BAZEL_COMPILER": "clang", "BAZEL_LINKLIBS": "-l%:libstdc++.a", - "BAZEL_LINKOPTS": 
"-lm:-static-libgcc:-fuse-ld=lld", + "BAZEL_LINKOPTS": "-lm:-fuse-ld=lld", "BAZEL_USE_LLVM_NATIVE_COVERAGE": "1", "GCOV": "llvm-profdata", "CC": "clang", @@ -20,7 +20,7 @@ _CLANG_ENV = { _CLANG_LIBCXX_ENV = dicts.add(_CLANG_ENV, { "BAZEL_LINKLIBS": "-l%:libc++.a:-l%:libc++abi.a", - "BAZEL_LINKOPTS": "-lm:-static-libgcc:-pthread:-fuse-ld=lld", + "BAZEL_LINKOPTS": "-lm:-pthread:-fuse-ld=lld", "BAZEL_CXXOPTS": "-stdlib=libc++", "CXXFLAGS": "-stdlib=libc++", }) @@ -28,7 +28,7 @@ _CLANG_LIBCXX_ENV = dicts.add(_CLANG_ENV, { _GCC_ENV = { "BAZEL_COMPILER": "gcc", "BAZEL_LINKLIBS": "-l%:libstdc++.a", - "BAZEL_LINKOPTS": "-lm:-static-libgcc", + "BAZEL_LINKOPTS": "-lm", "CC": "gcc", "CXX": "g++", "PATH": "/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/llvm-8/bin", diff --git a/test/exe/BUILD b/test/exe/BUILD index 12235850ca35..8ed8e3334dcd 100644 --- a/test/exe/BUILD +++ b/test/exe/BUILD @@ -24,9 +24,8 @@ envoy_sh_test( srcs = ["envoy_static_test.sh"], coverage = False, data = ["//source/exe:envoy-static"], - # NOTE: In some environments, ASAN causes dynamic linking no matter what, so don't run this - # test when doing ASAN. - tags = ["no_asan"], + # Sanitizers doesn't like statically linked lib(std)c++ and libgcc, skip this test in that context. + tags = ["no_san"], ) envoy_sh_test( diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index b30deb3aee70..f55806361987 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -73,30 +73,22 @@ void FakeStream::decodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) { } void FakeStream::encode100ContinueHeaders(const Http::HeaderMapImpl& headers) { - // TSan complains about thread-safety of std::shared_ptr when linked against libc++. 
- // See: https://github.com/envoyproxy/envoy/pull/7929 - std::unique_ptr headers_copy( + std::shared_ptr headers_copy( new Http::HeaderMapImpl(static_cast(headers))); - parent_.connection().dispatcher().post([this, headers = headers_copy.release()]() -> void { - encoder_.encode100ContinueHeaders(*headers); - delete headers; - }); + parent_.connection().dispatcher().post( + [this, headers_copy]() -> void { encoder_.encode100ContinueHeaders(*headers_copy); }); } void FakeStream::encodeHeaders(const Http::HeaderMapImpl& headers, bool end_stream) { - // TSan complains about thread-safety of std::shared_ptr when linked against libc++. - // See: https://github.com/envoyproxy/envoy/pull/7929 - std::unique_ptr headers_copy( + std::shared_ptr headers_copy( new Http::HeaderMapImpl(static_cast(headers))); if (add_served_by_header_) { headers_copy->addCopy(Http::LowerCaseString("x-served-by"), parent_.connection().localAddress()->asString()); } - parent_.connection().dispatcher().post( - [this, headers = headers_copy.release(), end_stream]() -> void { - encoder_.encodeHeaders(*headers, end_stream); - delete headers; - }); + parent_.connection().dispatcher().post([this, headers_copy, end_stream]() -> void { + encoder_.encodeHeaders(*headers_copy, end_stream); + }); } void FakeStream::encodeData(absl::string_view data, bool end_stream) { @@ -114,24 +106,16 @@ void FakeStream::encodeData(uint64_t size, bool end_stream) { } void FakeStream::encodeData(Buffer::Instance& data, bool end_stream) { - // TSan complains about thread-safety of std::shared_ptr when linked against libc++. 
- // See: https://github.com/envoyproxy/envoy/pull/7929 - std::unique_ptr data_copy(new Buffer::OwnedImpl(data)); - parent_.connection().dispatcher().post([this, data = data_copy.release(), end_stream]() -> void { - encoder_.encodeData(*data, end_stream); - delete data; - }); + std::shared_ptr data_copy(new Buffer::OwnedImpl(data)); + parent_.connection().dispatcher().post( + [this, data_copy, end_stream]() -> void { encoder_.encodeData(*data_copy, end_stream); }); } void FakeStream::encodeTrailers(const Http::HeaderMapImpl& trailers) { - // TSan complains about thread-safety of std::shared_ptr when linked against libc++. - // See: https://github.com/envoyproxy/envoy/pull/7929 - std::unique_ptr trailers_copy( + std::shared_ptr trailers_copy( new Http::HeaderMapImpl(static_cast(trailers))); - parent_.connection().dispatcher().post([this, trailers = trailers_copy.release()]() -> void { - encoder_.encodeTrailers(*trailers); - delete trailers; - }); + parent_.connection().dispatcher().post( + [this, trailers_copy]() -> void { encoder_.encodeTrailers(*trailers_copy); }); } void FakeStream::encodeResetStream() { From b020b63d1ae05fa009a0581e0ecf135cc3e77726 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 28 Aug 2019 09:38:01 -0400 Subject: [PATCH 02/31] test: cleaning up test runtime (#8012) Using the new runtime utility to clean up a bunch of test gorp. Yay utils! 
Risk Level: n/a (test only) Testing: tests pass Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- test/common/http/http1/BUILD | 1 + test/common/http/http1/codec_impl_test.cc | 46 ++----------------- test/common/http/http2/BUILD | 1 + test/common/http/http2/codec_impl_test.cc | 30 ++---------- test/extensions/filters/http/buffer/BUILD | 1 + .../filters/http/buffer/buffer_filter_test.cc | 27 ++--------- test/test_common/test_runtime.h | 1 - 7 files changed, 15 insertions(+), 92 deletions(-) diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index bbc146417792..b59485efb6ae 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -28,6 +28,7 @@ envoy_cc_test( "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", ], ) diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index f2b04b6e088a..6329e8bdc1bf 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -12,15 +12,10 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/init/mocks.h" -#include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/protobuf/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/thread_local/mocks.h" #include "test/test_common/logging.h" #include "test/test_common/printers.h" -#include "test/test_common/utility.h" +#include "test/test_common/test_runtime.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -38,17 +33,6 @@ namespace Http1 { class Http1ServerConnectionImplTest : public testing::Test { public: - Http1ServerConnectionImplTest() : api_(Api::createApiForTest()) { - envoy::config::bootstrap::v2::LayeredRuntime config; - config.add_layers()->mutable_admin_layer(); - - // Create a runtime loader, so that tests can 
manually manipulate runtime - // guarded features. - loader_ = std::make_unique(Runtime::LoaderPtr{ - new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, init_manager_, store_, - generator_, validation_visitor_, *api_)}); - } - void initialize() { codec_ = std::make_unique(connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_); @@ -65,15 +49,7 @@ class Http1ServerConnectionImplTest : public testing::Test { protected: uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; - Event::MockDispatcher dispatcher_; - NiceMock tls_; Stats::IsolatedStoreImpl store_; - Runtime::MockRandomGenerator generator_; - Api::ApiPtr api_; - NiceMock local_info_; - Init::MockManager init_manager_; - NiceMock validation_visitor_; - std::unique_ptr loader_; }; void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_url, @@ -366,6 +342,7 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { // Ensures that requests with invalid HTTP header values are not rejected // when the runtime guard is not enabled for the feature. TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { + TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is NOT enabled, invalid header values // should be accepted by the codec. Runtime::LoaderSingleton::getExisting()->mergeValues( @@ -384,6 +361,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { // Ensures that requests with invalid HTTP header values are properly rejected // when the runtime guard is enabled for the feature. TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { + TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is enabled, invalid header values // should result in a rejection. 
Runtime::LoaderSingleton::getExisting()->mergeValues( @@ -850,16 +828,6 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { class Http1ClientConnectionImplTest : public testing::Test { public: - Http1ClientConnectionImplTest() : api_(Api::createApiForTest()) { - envoy::config::bootstrap::v2::LayeredRuntime config; - - // Create a runtime loader, so that tests can manually manipulate runtime - // guarded features. - loader_ = std::make_unique(Runtime::LoaderPtr{ - new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, init_manager_, store_, - generator_, validation_visitor_, *api_)}); - } - void initialize() { codec_ = std::make_unique(connection_, store_, callbacks_); } @@ -869,15 +837,7 @@ class Http1ClientConnectionImplTest : public testing::Test { std::unique_ptr codec_; protected: - Event::MockDispatcher dispatcher_; - NiceMock tls_; Stats::IsolatedStoreImpl store_; - Runtime::MockRandomGenerator generator_; - Api::ApiPtr api_; - NiceMock local_info_; - Init::MockManager init_manager_; - NiceMock validation_visitor_; - std::unique_ptr loader_; }; TEST_F(Http1ClientConnectionImplTest, SimpleGet) { diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index ae51864b72d9..3a93f99146e4 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -30,6 +30,7 @@ envoy_cc_test( "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index e738c140aff0..7592ec0985d0 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -11,12 +11,9 @@ #include "test/common/http/common.h" #include "test/common/http/http2/http2_frame.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/init/mocks.h" -#include 
"test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/protobuf/mocks.h" -#include "test/mocks/thread_local/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "codec_impl_test_util.h" @@ -169,17 +166,7 @@ class Http2CodecImplTest : public ::testing::TestWithParam(GetParam()), ::testing::get<1>(GetParam())), - api_(Api::createApiForTest()) { - envoy::config::bootstrap::v2::LayeredRuntime config; - config.add_layers()->mutable_admin_layer(); - - // Create a runtime loader, so that tests can manually manipulate runtime - // guarded features. - loader_ = std::make_unique( - std::make_unique(dispatcher_, tls_, config, local_info_, init_manager_, - store_, generator_, validation_visitor_, *api_)); - } + : Http2CodecImplTestFixture(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam())) {} protected: void priorityFlood() { @@ -242,16 +229,9 @@ class Http2CodecImplTest : public ::testing::TestWithParam tls_; - Stats::IsolatedStoreImpl store_; - Runtime::MockRandomGenerator generator_; - Api::ApiPtr api_; - NiceMock local_info_; - Init::MockManager init_manager_; - NiceMock validation_visitor_; - std::unique_ptr loader_; + // Make sure the test fixture has a fake runtime, for the tests which use + // Runtime::LoaderSingleton::getExisting()->mergeValues(...) 
+ TestScopedRuntime scoped_runtime_; }; TEST_P(Http2CodecImplTest, ShutdownNotice) { diff --git a/test/extensions/filters/http/buffer/BUILD b/test/extensions/filters/http/buffer/BUILD index 0cbbdb99fea3..64c60dcba9c7 100644 --- a/test/extensions/filters/http/buffer/BUILD +++ b/test/extensions/filters/http/buffer/BUILD @@ -27,6 +27,7 @@ envoy_extension_cc_test( "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", ], ) diff --git a/test/extensions/filters/http/buffer/buffer_filter_test.cc b/test/extensions/filters/http/buffer/buffer_filter_test.cc index 12b715f5d29f..7f8a46899cd6 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_test.cc @@ -12,12 +12,8 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/init/mocks.h" -#include "test/mocks/local_info/mocks.h" -#include "test/mocks/protobuf/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/thread_local/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -42,16 +38,8 @@ class BufferFilterTest : public testing::Test { return std::make_shared(proto_config); } - BufferFilterTest() : config_(setupConfig()), filter_(config_), api_(Api::createApiForTest()) { + BufferFilterTest() : config_(setupConfig()), filter_(config_) { filter_.setDecoderFilterCallbacks(callbacks_); - - // Create a runtime loader, so that tests can manually manipulate runtime - // guarded features. 
- envoy::config::bootstrap::v2::LayeredRuntime config; - config.add_layers()->mutable_admin_layer(); - loader_ = std::make_unique(Runtime::LoaderPtr{ - new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, init_manager_, store_, - generator_, validation_visitor_, *api_)}); } void routeLocalConfig(const Router::RouteSpecificFilterConfig* route_settings, @@ -66,15 +54,8 @@ class BufferFilterTest : public testing::Test { NiceMock callbacks_; BufferFilterConfigSharedPtr config_; BufferFilter filter_; - Event::MockDispatcher dispatcher_; - NiceMock tls_; - Stats::IsolatedStoreImpl store_; - Runtime::MockRandomGenerator generator_; - Api::ApiPtr api_; - NiceMock local_info_; - Init::MockManager init_manager_; - NiceMock validation_visitor_; - std::unique_ptr loader_; + // Create a runtime loader, so that tests can manually manipulate runtime guarded features. + TestScopedRuntime scoped_runtime; }; TEST_F(BufferFilterTest, HeaderOnlyRequest) { diff --git a/test/test_common/test_runtime.h b/test/test_common/test_runtime.h index ba9900578fa7..ca796ca23f65 100644 --- a/test/test_common/test_runtime.h +++ b/test/test_common/test_runtime.h @@ -25,7 +25,6 @@ namespace Envoy { -// TODO(alyssawilk) move existing runtime tests over to using this. class TestScopedRuntime { public: TestScopedRuntime() : api_(Api::createApiForTest()) { From 64243c97628369ceb365b4da6d73b43dd8bccba5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 28 Aug 2019 11:04:24 -0400 Subject: [PATCH 03/31] test: improved coverage and handling of deprecated config (#8057) Making ENVOY_DISABLE_DEPRECATED_FEATURES work for unit tests without runtime configured. 
Fixing up a handful of unit tests to remove legacy code or use the handy DEPRECATED_FEATURE_TEST macro Adding back coverage of cors.enabled() and redis.catch_all_route() Risk Level: Low (test only) Testing: new unit tests Docs Changes: n/a Release Notes: n/a Fixes #8013 Fixes #7548 Signed-off-by: Alyssa Wilk --- source/common/protobuf/utility.cc | 4 ++ test/common/router/config_impl_test.cc | 52 +++++++++-------- test/config/integration/server.yaml | 17 ++++-- test/config_test/example_configs_test.cc | 2 +- .../http/cors/cors_filter_integration_test.cc | 27 +++++++++ .../network/dubbo_proxy/conn_manager_test.cc | 3 +- .../network/dubbo_proxy/route_matcher_test.cc | 6 +- .../network/redis_proxy/config_test.cc | 57 +++++++++++++++++-- .../thrift_proxy/route_matcher_test.cc | 3 +- test/integration/integration_admin_test.cc | 8 +-- test/server/BUILD | 1 + .../invalid_legacy_runtime_bootstrap.yaml | 4 ++ test/server/invalid_runtime_bootstrap.yaml | 10 ++-- test/server/server_test.cc | 5 ++ 14 files changed, 153 insertions(+), 46 deletions(-) create mode 100644 test/server/invalid_legacy_runtime_bootstrap.yaml diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 33185a00fc5e..4df4964b768f 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -192,7 +192,11 @@ void MessageUtil::checkForDeprecation(const Protobuf::Message& message, Runtime: continue; } +#ifdef ENVOY_DISABLE_DEPRECATED_FEATURES + bool warn_only = false; +#else bool warn_only = true; +#endif absl::string_view filename = filenameFromPath(field->file()->name()); // Allow runtime to be null both to not crash if this is called before server initialization, // and so proto validation works in context where runtime singleton is not set up (e.g. 
diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index ebed75c45251..ff6aeb4555d6 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -116,7 +116,8 @@ class ConfigImplTestBase { class RouteMatcherTest : public testing::Test, public ConfigImplTestBase {}; -TEST_F(RouteMatcherTest, TestRoutes) { +// TODO(alyssawilk) go through all these tests and update or duplicate. +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { const std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -617,7 +618,7 @@ TEST_F(RouteMatcherTest, TestRoutesWithWildcardAndDefaultOnly) { config.route(genHeaders("example.com", "/", "GET"), 0)->routeEntry()->clusterName()); } -TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutesWithInvalidRegex)) { std::string invalid_route = R"EOF( virtual_hosts: - name: regex @@ -1077,7 +1078,7 @@ name: foo } } -TEST_F(RouteMatcherTest, Priority) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Priority)) { const std::string yaml = R"EOF( virtual_hosts: - name: local_service @@ -1164,7 +1165,7 @@ TEST_F(RouteMatcherTest, NoAutoRewriteAndAutoRewriteHeader) { EnvoyException); } -TEST_F(RouteMatcherTest, HeaderMatchedRouting) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(HeaderMatchedRouting)) { const std::string yaml = R"EOF( virtual_hosts: - name: local_service @@ -1288,7 +1289,7 @@ TEST_F(RouteMatcherTest, HeaderMatchedRouting) { } // Verify the fixes for https://github.com/envoyproxy/envoy/issues/2406 -TEST_F(RouteMatcherTest, InvalidHeaderMatchedRoutingConfig) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidHeaderMatchedRoutingConfig)) { std::string value_with_regex_chars = R"EOF( virtual_hosts: - name: local_service @@ -1323,7 +1324,7 @@ TEST_F(RouteMatcherTest, InvalidHeaderMatchedRoutingConfig) { EnvoyException, "Invalid regex"); } -TEST_F(RouteMatcherTest, 
QueryParamMatchedRouting) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(QueryParamMatchedRouting)) { const std::string yaml = R"EOF( virtual_hosts: - name: local_service @@ -1438,7 +1439,7 @@ TEST_F(RouteMatcherTest, QueryParamMatchedRouting) { } // Verify the fixes for https://github.com/envoyproxy/envoy/issues/2406 -TEST_F(RouteMatcherTest, InvalidQueryParamMatchedRoutingConfig) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidQueryParamMatchedRoutingConfig)) { std::string value_with_regex_chars = R"EOF( virtual_hosts: - name: local_service @@ -2249,7 +2250,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig404) { config.route(headers, 0)->routeEntry()->clusterNotFoundResponseCode()); } -TEST_F(RouteMatcherTest, Shadow) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { const std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -2307,7 +2308,7 @@ TEST_F(RouteMatcherTest, Shadow) { class RouteConfigurationV2 : public testing::Test, public ConfigImplTestBase {}; -TEST_F(RouteConfigurationV2, RequestMirrorPolicy) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RequestMirrorPolicy)) { const std::string yaml = R"EOF( name: foo virtual_hosts: @@ -4264,7 +4265,7 @@ TEST_F(RoutePropertyTest, excludeVHRateLimits) { EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); } -TEST_F(RoutePropertyTest, TestVHostCorsConfig) { +TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsConfig)) { const std::string yaml = R"EOF( virtual_hosts: - name: "default" @@ -4326,7 +4327,7 @@ TEST_F(RoutePropertyTest, TestVHostCorsConfig) { EXPECT_EQ(cors_policy->allowCredentials(), true); } -TEST_F(RoutePropertyTest, TestRouteCorsConfig) { +TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsConfig)) { const std::string yaml = R"EOF( virtual_hosts: - name: "default" @@ -4379,7 +4380,7 @@ TEST_F(RoutePropertyTest, TestRouteCorsConfig) { EXPECT_EQ(cors_policy->allowCredentials(), 
true); } -TEST_F(RoutePropertyTest, TestVHostCorsLegacyConfig) { +TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsLegacyConfig)) { const std::string yaml = R"EOF( virtual_hosts: - name: default @@ -4418,7 +4419,7 @@ TEST_F(RoutePropertyTest, TestVHostCorsLegacyConfig) { EXPECT_EQ(cors_policy->allowCredentials(), true); } -TEST_F(RoutePropertyTest, TestRouteCorsLegacyConfig) { +TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsLegacyConfig)) { const std::string yaml = R"EOF( virtual_hosts: - name: default @@ -4909,7 +4910,7 @@ name: foo Envoy::EnvoyException, "Cannot create a Baz when metadata is empty."); } -TEST_F(RouteConfigurationV2, RouteConfigGetters) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RouteConfigGetters)) { const std::string yaml = R"EOF( name: foo virtual_hosts: @@ -4948,7 +4949,7 @@ name: foo EXPECT_EQ("foo", route_entry->virtualHost().routeConfig().name()); } -TEST_F(RouteConfigurationV2, RouteTracingConfig) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RouteTracingConfig)) { const std::string yaml = R"EOF( name: foo virtual_hosts: @@ -5003,7 +5004,7 @@ name: foo } // Test to check Prefix Rewrite for redirects -TEST_F(RouteConfigurationV2, RedirectPrefixRewrite) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RedirectPrefixRewrite)) { std::string RedirectPrefixRewrite = R"EOF( name: AllRedirects virtual_hosts: @@ -5190,7 +5191,7 @@ name: AllRedirects } } -TEST_F(RouteMatcherTest, HeaderMatchedRoutingV2) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(HeaderMatchedRoutingV2)) { const std::string yaml = R"EOF( name: foo virtual_hosts: @@ -5365,7 +5366,8 @@ name: foo } } -TEST_F(RouteConfigurationV2, RegexPrefixWithNoRewriteWorksWhenPathChanged) { +TEST_F(RouteConfigurationV2, + DEPRECATED_FEATURE_TEST(RegexPrefixWithNoRewriteWorksWhenPathChanged)) { // Setup regex route entry. the regex is trivial, that's ok as we only want to test that // path change works. 
@@ -5396,7 +5398,7 @@ name: RegexNoMatch } } -TEST_F(RouteConfigurationV2, NoIdleTimeout) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(NoIdleTimeout)) { const std::string NoIdleTimeout = R"EOF( name: NoIdleTimeout virtual_hosts: @@ -5414,7 +5416,7 @@ name: NoIdleTimeout EXPECT_EQ(absl::nullopt, route_entry->idleTimeout()); } -TEST_F(RouteConfigurationV2, ZeroIdleTimeout) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(ZeroIdleTimeout)) { const std::string ZeroIdleTimeout = R"EOF( name: ZeroIdleTimeout virtual_hosts: @@ -5433,7 +5435,7 @@ name: ZeroIdleTimeout EXPECT_EQ(0, route_entry->idleTimeout().value().count()); } -TEST_F(RouteConfigurationV2, ExplicitIdleTimeout) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(ExplicitIdleTimeout)) { const std::string ExplicitIdleTimeout = R"EOF( name: ExplicitIdleTimeout virtual_hosts: @@ -5453,7 +5455,7 @@ name: ExplicitIdleTimeout EXPECT_EQ(7 * 1000, route_entry->idleTimeout().value().count()); } -TEST_F(RouteConfigurationV2, RetriableStatusCodes) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RetriableStatusCodes)) { const std::string ExplicitIdleTimeout = R"EOF( name: RetriableStatusCodes virtual_hosts: @@ -5475,7 +5477,7 @@ name: RetriableStatusCodes EXPECT_EQ(expected_codes, retry_policy.retriableStatusCodes()); } -TEST_F(RouteConfigurationV2, UpgradeConfigs) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(UpgradeConfigs)) { const std::string UpgradeYaml = R"EOF( name: RetriableStatusCodes virtual_hosts: @@ -5499,7 +5501,7 @@ name: RetriableStatusCodes EXPECT_FALSE(upgrade_map.find("disabled")->second); } -TEST_F(RouteConfigurationV2, DuplicateUpgradeConfigs) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(DuplicateUpgradeConfigs)) { const std::string yaml = R"EOF( name: RetriableStatusCodes virtual_hosts: @@ -5522,7 +5524,7 @@ name: RetriableStatusCodes // Verifies that we're creating a new instance of the retry plugins on each call instead of always // returning the 
same one. -TEST_F(RouteConfigurationV2, RetryPluginsAreNotReused) { +TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RetryPluginsAreNotReused)) { const std::string ExplicitIdleTimeout = R"EOF( name: RetriableStatusCodes virtual_hosts: diff --git a/test/config/integration/server.yaml b/test/config/integration/server.yaml index 88d34049619b..95a9a439efa4 100644 --- a/test/config/integration/server.yaml +++ b/test/config/integration/server.yaml @@ -168,10 +168,19 @@ stats_sinks: "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink tcp_cluster_name: statsd watchdog: {} -runtime: - symlink_root: "{{ test_tmpdir }}/test/common/runtime/test_data/current" - subdirectory: envoy - override_subdirectory: envoy_override +layered_runtime: + layers: + - name: root + disk_layer: + symlink_root: "{{ test_tmpdir }}/test/common/runtime/test_data/current" + subdirectory: envoy + - name: override + disk_layer: + symlink_root: "{{ test_tmpdir }}/test/common/runtime/test_data/current" + subdirectory: envoy_override + append_service_cluster: true + - name: admin + admin_layer: {} admin: access_log_path: "/dev/null" profile_path: "{{ test_tmpdir }}/envoy.prof" diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index 6da6d5e55f0a..fef29e44112c 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -5,7 +5,7 @@ #include "gtest/gtest.h" namespace Envoy { -TEST(ExampleConfigsTest, All) { +TEST(ExampleConfigsTest, DEPRECATED_FEATURE_TEST(All)) { TestEnvironment::exec( {TestEnvironment::runfilesPath("test/config_test/example_configs_test_setup.sh")}); diff --git a/test/extensions/filters/http/cors/cors_filter_integration_test.cc b/test/extensions/filters/http/cors/cors_filter_integration_test.cc index b313d6162497..06a9120c8c4a 100644 --- a/test/extensions/filters/http/cors/cors_filter_integration_test.cc +++ b/test/extensions/filters/http/cors/cors_filter_integration_test.cc @@ -195,6 
+195,33 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestCorsDisabled)) { }); } +TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestLegacyCorsDisabled)) { + config_helper_.addConfigModifier( + [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) + -> void { + auto* route_config = hcm.mutable_route_config(); + auto* virtual_host = route_config->mutable_virtual_hosts(0); + auto* route = virtual_host->add_routes(); + route->mutable_match()->set_prefix("/legacy-no-cors"); + route->mutable_route()->set_cluster("cluster_0"); + route->mutable_route()->mutable_cors()->mutable_enabled()->set_value(false); + }); + testNormalRequest( + Http::TestHeaderMapImpl{ + {":method", "OPTIONS"}, + {":path", "/legacy-no-cors/test"}, + {":scheme", "http"}, + {":authority", "test-host"}, + {"access-control-request-method", "GET"}, + {"origin", "test-origin"}, + }, + Http::TestHeaderMapImpl{ + {"server", "envoy"}, + {"content-length", "0"}, + {":status", "200"}, + }); +} + TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeaders)) { testNormalRequest( Http::TestHeaderMapImpl{ diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index bd9de61eeb2a..f93f40b47f5d 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -1156,7 +1156,8 @@ TEST_F(ConnectionManagerTest, PendingMessageEnd) { EXPECT_EQ(1U, store_.gauge("test.request_active", Stats::Gauge::ImportMode::Accumulate).value()); } -TEST_F(ConnectionManagerTest, Routing) { +// TODO(alyssawilk) update. 
+TEST_F(ConnectionManagerTest, DEPRECATED_FEATURE_TEST(Routing)) { const std::string yaml = R"EOF( stat_prefix: test protocol_type: Dubbo diff --git a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc index ad648d51fdc4..398743469bcb 100644 --- a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc @@ -38,7 +38,8 @@ parseDubboProxyFromV2Yaml(const std::string& yaml) { } // namespace -TEST(DubboRouteMatcherTest, RouteByServiceNameWithAnyMethod) { +// TODO(alyssawilk) update. +TEST(DubboRouteMatcherTest, DEPRECATED_FEATURE_TEST(RouteByServiceNameWithAnyMethod)) { { const std::string yaml = R"EOF( name: local_route @@ -291,7 +292,8 @@ interface: org.apache.dubbo.demo.DemoService EXPECT_EQ("user_service_dubbo_server", matcher.route(metadata, 0)->routeEntry()->clusterName()); } -TEST(DubboRouteMatcherTest, RouteByMethodWithRegexMatch) { +// TODO(alyssawilk) update. 
+TEST(DubboRouteMatcherTest, DEPRECATED_FEATURE_TEST(RouteByMethodWithRegexMatch)) { const std::string yaml = R"EOF( name: local_route interface: org.apache.dubbo.demo.DemoService diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index ff348c9c1a06..8c34fb502fd7 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -40,7 +40,9 @@ TEST(RedisProxyFilterConfigFactoryTest, NoUpstreamDefined) { TEST(RedisProxyFilterConfigFactoryTest, RedisProxyNoSettings) { const std::string yaml = R"EOF( -cluster: fake_cluster +prefix_routes: + catch_all_route: + cluster: fake_cluster stat_prefix: foo )EOF"; @@ -51,7 +53,9 @@ stat_prefix: foo TEST(RedisProxyFilterConfigFactoryTest, RedisProxyNoOpTimeout) { const std::string yaml = R"EOF( -cluster: fake_cluster +prefix_routes: + catch_all_route: + cluster: fake_cluster stat_prefix: foo settings: {} )EOF"; @@ -61,7 +65,8 @@ settings: {} ProtoValidationException, "embedded message failed validation"); } -TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectProto) { +TEST(RedisProxyFilterConfigFactoryTest, + DEPRECATED_FEATURE_TEST(RedisProxyCorrectProtoLegacyCluster)) { const std::string yaml = R"EOF( cluster: fake_cluster stat_prefix: foo @@ -80,9 +85,53 @@ stat_prefix: foo cb(connection); } +TEST(RedisProxyFilterConfigFactoryTest, + DEPRECATED_FEATURE_TEST(RedisProxyCorrectProtoLegacyCatchAllCluster)) { + const std::string yaml = R"EOF( +prefix_routes: + catch_all_cluster: fake_cluster +stat_prefix: foo +settings: + op_timeout: 0.02s + )EOF"; + + envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config{}; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + NiceMock context; + RedisProxyFilterConfigFactory factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); + 
EXPECT_TRUE(factory.isTerminalFilter()); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); +} + +TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectProto) { + const std::string yaml = R"EOF( +prefix_routes: + catch_all_route: + cluster: fake_cluster +stat_prefix: foo +settings: + op_timeout: 0.02s + )EOF"; + + envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config{}; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + NiceMock context; + RedisProxyFilterConfigFactory factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); + EXPECT_TRUE(factory.isTerminalFilter()); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); +} + TEST(RedisProxyFilterConfigFactoryTest, RedisProxyEmptyProto) { const std::string yaml = R"EOF( -cluster: fake_cluster +prefix_routes: + catch_all_route: + cluster: fake_cluster stat_prefix: foo settings: op_timeout: 0.02s diff --git a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc index d29c89833022..c7efe51ec11c 100644 --- a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc @@ -331,7 +331,8 @@ name: config EXPECT_EQ("cluster1", route->routeEntry()->clusterName()); } -TEST(ThriftRouteMatcherTest, RouteByRegexHeaderMatcher) { +// TODO(alyssawilk) update. 
+TEST(ThriftRouteMatcherTest, DEPRECATED_FEATURE_TEST(RouteByRegexHeaderMatcher)) { const std::string yaml = R"EOF( name: config routes: diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 43c3a867bfd6..41085b762d9c 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -542,21 +542,21 @@ TEST_P(StatsMatcherIntegrationTest, ExcludePrefixServerDot) { EXPECT_THAT(response_->body(), testing::Not(testing::HasSubstr("server."))); } -TEST_P(StatsMatcherIntegrationTest, ExcludeRequests) { +TEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(ExcludeRequests)) { stats_matcher_.mutable_exclusion_list()->add_patterns()->set_regex(".*requests.*"); initialize(); makeRequest(); EXPECT_THAT(response_->body(), testing::Not(testing::HasSubstr("requests"))); } -TEST_P(StatsMatcherIntegrationTest, ExcludeExact) { +TEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(ExcludeExact)) { stats_matcher_.mutable_exclusion_list()->add_patterns()->set_exact("server.concurrency"); initialize(); makeRequest(); EXPECT_THAT(response_->body(), testing::Not(testing::HasSubstr("server.concurrency"))); } -TEST_P(StatsMatcherIntegrationTest, ExcludeMultipleExact) { +TEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(ExcludeMultipleExact)) { stats_matcher_.mutable_exclusion_list()->add_patterns()->set_exact("server.concurrency"); stats_matcher_.mutable_exclusion_list()->add_patterns()->set_regex(".*live"); initialize(); @@ -569,7 +569,7 @@ TEST_P(StatsMatcherIntegrationTest, ExcludeMultipleExact) { // `listener_manager.listener_create_success` must be instantiated, because BaseIntegrationTest // blocks on its creation (see waitForCounterGe and the suite of waitFor* functions). // If this invariant is changed, this test must be rewritten. 
-TEST_P(StatsMatcherIntegrationTest, IncludeExact) { +TEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(IncludeExact)) { // Stats matching does not play well with LDS, at least in test. See #7215. use_lds_ = false; stats_matcher_.mutable_inclusion_list()->add_patterns()->set_exact( diff --git a/test/server/BUILD b/test/server/BUILD index 7ffe0fcfd36b..e46a0106c687 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -261,6 +261,7 @@ envoy_cc_test( ":invalid_layered_runtime_duplicate_name.yaml", ":invalid_layered_runtime_missing_name.yaml", ":invalid_layered_runtime_no_layer_specifier.yaml", + ":invalid_legacy_runtime_bootstrap.yaml", ":invalid_runtime_bootstrap.yaml", ":node_bootstrap.yaml", ":node_bootstrap_no_admin_port.yaml", diff --git a/test/server/invalid_legacy_runtime_bootstrap.yaml b/test/server/invalid_legacy_runtime_bootstrap.yaml new file mode 100644 index 000000000000..99c67b7d2d9c --- /dev/null +++ b/test/server/invalid_legacy_runtime_bootstrap.yaml @@ -0,0 +1,4 @@ +runtime: + base: + foo: + - bar: baz diff --git a/test/server/invalid_runtime_bootstrap.yaml b/test/server/invalid_runtime_bootstrap.yaml index 99c67b7d2d9c..3ed04a71c3b0 100644 --- a/test/server/invalid_runtime_bootstrap.yaml +++ b/test/server/invalid_runtime_bootstrap.yaml @@ -1,4 +1,6 @@ -runtime: - base: - foo: - - bar: baz +layered_runtime: + layers: + - name: some_static_layer + static_layer: + foo: + - bar: baz diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 1b9b0deeee19..ab0d89ebad28 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -575,6 +575,11 @@ TEST_P(ServerInstanceImplTest, RuntimeNoAdminLayer) { EXPECT_EQ("No admin layer specified", response_body); } +TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(InvalidLegacyBootstrapRuntime)) { + EXPECT_THROW_WITH_MESSAGE(initialize("test/server/invalid_runtime_bootstrap.yaml"), + EnvoyException, "Invalid runtime entry value for foo"); +} + // Validate invalid 
runtime in bootstrap is rejected. TEST_P(ServerInstanceImplTest, InvalidBootstrapRuntime) { EXPECT_THROW_WITH_MESSAGE(initialize("test/server/invalid_runtime_bootstrap.yaml"), From f8e42ae3456baba1990ad91e3c43fbe22ea9b6ab Mon Sep 17 00:00:00 2001 From: Colin Schoen Date: Wed, 28 Aug 2019 08:35:41 -0700 Subject: [PATCH 04/31] [Docs typo] Remote Executioon -> Remote Execution (#8061) Fixes mispelling of `Executioon` -> `Execution` Signed-off-by: Colin Schoen --- bazel/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bazel/README.md b/bazel/README.md index 24340fdd0fce..b99b9e50621e 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -102,7 +102,7 @@ CI Docker image. ## Building Envoy with Remote Execution -Envoy can also be built with Bazel [Remote Executioon](https://docs.bazel.build/versions/master/remote-execution.html), +Envoy can also be built with Bazel [Remote Execution](https://docs.bazel.build/versions/master/remote-execution.html), part of the CI is running with the hosted [GCP RBE](https://blog.bazel.build/2018/10/05/remote-build-execution.html) service. To build Envoy with a remote build services, run Bazel with your remote build service flags and with `--config=remote-clang`. From f80188ebc4b592754449c740f831123b4de41ab3 Mon Sep 17 00:00:00 2001 From: Bryce Anderson Date: Wed, 28 Aug 2019 09:40:38 -0600 Subject: [PATCH 05/31] api: Fix duplicate java_outer_classname declarations (#8059) The java_outer_classname is unintentionally duplicated in the new udp_listener_config and regex proto files. This changes them to unique names that match the predominant naming scheme. 
Signed-off-by: Bryce Anderson --- api/envoy/api/v2/listener/udp_listener_config.proto | 2 +- api/envoy/type/matcher/regex.proto | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/envoy/api/v2/listener/udp_listener_config.proto b/api/envoy/api/v2/listener/udp_listener_config.proto index 88a2a35d3cfc..f75383bab232 100644 --- a/api/envoy/api/v2/listener/udp_listener_config.proto +++ b/api/envoy/api/v2/listener/udp_listener_config.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2.listener; -option java_outer_classname = "ListenerProto"; +option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option go_package = "listener"; diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index b3b7194441eb..048a576cc8a6 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.type.matcher; -option java_outer_classname = "StringProto"; +option java_outer_classname = "RegexProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type.matcher"; option go_package = "matcher"; From b8966cbbfff4566b08eaed7b2b060a6c12eec168 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 28 Aug 2019 13:40:18 -0400 Subject: [PATCH 06/31] http: making the behavior of the response Server header configurable (#8014) Default behavior remains unchanged, but now Envoy can override, override iff there's no server header from upstream, or always leave the server header (or lack thereof) unmodified. 
Risk Level: low (config guarded change) Testing: new unit tests Docs Changes: n/a Release Notes: inline Fixes #6716 Signed-off-by: Alyssa Wilk --- .../v2/http_connection_manager.proto | 20 ++++- docs/root/intro/version_history.rst | 2 + source/common/http/conn_manager_config.h | 9 +++ source/common/http/conn_manager_impl.cc | 7 +- .../network/http_connection_manager/BUILD | 1 + .../network/http_connection_manager/config.cc | 2 + .../network/http_connection_manager/config.h | 5 ++ source/server/http/admin.h | 3 + .../http/conn_manager_impl_fuzz_test.cc | 5 ++ test/common/http/conn_manager_impl_test.cc | 79 +++++++++++++++++++ test/common/http/conn_manager_utility_test.cc | 2 + .../http_connection_manager/config_test.cc | 62 +++++++++++++++ test/server/http/admin_test.cc | 12 +++ 13 files changed, 207 insertions(+), 2 deletions(-) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 4d662fc2c524..bf1195731b86 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -25,7 +25,7 @@ import "gogoproto/gogo.proto"; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. -// [#comment:next free field: 34] +// [#comment:next free field: 35] message HttpConnectionManager { enum CodecType { option (gogoproto.goproto_enum_prefix) = false; @@ -144,6 +144,24 @@ message HttpConnectionManager { // header in responses. If not set, the default is *envoy*. string server_name = 10; + enum ServerHeaderTransformation { + option (gogoproto.goproto_enum_prefix) = false; + + // Overwrite any Server header with the contents of server_name. 
+ OVERWRITE = 0; + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + APPEND_IF_ABSENT = 1; + // Pass through the value of the server header, and do not append a header + // if none is present. + PASS_THROUGH = 2; + } + // Defines the action to be applied to the Server header on the response path. + // By default, Envoy will overwrite the header with the value specified in + // server_name. + ServerHeaderTransformation server_header_transformation = 34 + [(validate.rules).enum.defined_only = true]; + // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. // Requests that exceed this limit will receive a 431 response. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 147c65f5ce60..715ec0fa839f 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -27,6 +27,8 @@ Version history * grpc-json: added support for :ref:`ignoring unknown query parameters`. * header to metadata: added :ref:`PROTOBUF_VALUE ` and :ref:`ValueEncode ` to support protobuf Value and Base64 encoding. * http: added the ability to reject HTTP/1.1 requests with invalid HTTP header values, using the runtime feature `envoy.reloadable_features.strict_header_validation`. +* http: changed Envoy to forward existing x-forwarded-proto from upstream trusted proxies. Guarded by `envoy.reloadable_features.trusted_forwarded_proto` which defaults true. +* http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation` field. * http: changed Envoy to forward existing x-forwarded-proto from downstream trusted proxies. Guarded by `envoy.reloadable_features.trusted_forwarded_proto` which defaults true. * http: added the ability to :ref:`merge adjacent slashes` in the path. 
* listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 52ec547ea9d5..b32d8f520e40 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/config/config_provider.h" +#include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" #include "envoy/http/filter.h" #include "envoy/router/rds.h" #include "envoy/stats/scope.h" @@ -170,6 +171,9 @@ class DefaultInternalAddressConfig : public Http::InternalAddressConfig { */ class ConnectionManagerConfig { public: + using HttpConnectionManagerProto = + envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager; + virtual ~ConnectionManagerConfig() = default; /** @@ -265,6 +269,11 @@ class ConnectionManagerConfig { */ virtual const std::string& serverName() PURE; + /** + * @return ServerHeaderTransformation the transformation to apply to Server response headers. + */ + virtual HttpConnectionManagerProto::ServerHeaderTransformation serverHeaderTransformation() PURE; + /** * @return ConnectionManagerStats& the stats to write to. */ diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 580ba1e6c365..c830114f5107 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1353,7 +1353,12 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte // Base headers. connection_manager_.config_.dateProvider().setDateHeader(headers); // Following setReference() is safe because serverName() is constant for the life of the listener. 
- headers.insertServer().value().setReference(connection_manager_.config_.serverName()); + const auto transformation = connection_manager_.config_.serverHeaderTransformation(); + if (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE || + (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::APPEND_IF_ABSENT && + headers.Server() == nullptr)) { + headers.insertServer().value().setReference(connection_manager_.config_.serverName()); + } ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), connection_manager_.config_.via()); diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 1f5474a651e2..fbe72b257b15 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -40,5 +40,6 @@ envoy_cc_library( "//source/common/router:scoped_rds_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", + "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager_cc", ], ) diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 82fb5274bb96..81a45e934761 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -286,6 +286,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( access_logs_.push_back(current_access_log); } + server_transformation_ = config.server_header_transformation(); + if (!config.server_name().empty()) { server_name_ = config.server_name(); } else { diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 
8e8132e0aa01..0385762236c1 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -116,6 +116,9 @@ class HttpConnectionManagerConfig : Logger::Loggable, return scoped_routes_config_provider_.get(); } const std::string& serverName() override { return server_name_; } + HttpConnectionManagerProto::ServerHeaderTransformation serverHeaderTransformation() override { + return server_transformation_; + } Http::ConnectionManagerStats& stats() override { return stats_; } Http::ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; } bool useRemoteAddress() override { return use_remote_address_; } @@ -166,6 +169,8 @@ class HttpConnectionManagerConfig : Logger::Loggable, CodecType codec_type_; const Http::Http2Settings http2_settings_; const Http::Http1Settings http1_settings_; + HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{ + HttpConnectionManagerProto::OVERWRITE}; std::string server_name_; Http::TracingConnectionManagerConfigPtr tracing_config_; absl::optional user_agent_; diff --git a/source/server/http/admin.h b/source/server/http/admin.h index ac01902068a7..f54d72fee7ff 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -121,6 +121,9 @@ class AdminImpl : public Admin, return &scoped_route_config_provider_; } const std::string& serverName() override { return Http::DefaultServerString::get(); } + HttpConnectionManagerProto::ServerHeaderTransformation serverHeaderTransformation() override { + return HttpConnectionManagerProto::OVERWRITE; + } Http::ConnectionManagerStats& stats() override { return stats_; } Http::ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; } bool useRemoteAddress() override { return true; } diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 47f151b5b1db..df9a0c2b4b35 100644 --- 
a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -91,6 +91,9 @@ class FuzzConfig : public ConnectionManagerConfig { return &scoped_route_config_provider_; } const std::string& serverName() override { return server_name_; } + HttpConnectionManagerProto::ServerHeaderTransformation serverHeaderTransformation() override { + return server_transformation_; + } ConnectionManagerStats& stats() override { return stats_; } ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; } bool useRemoteAddress() override { return use_remote_address_; } @@ -124,6 +127,8 @@ class FuzzConfig : public ConnectionManagerConfig { ConnectionManagerImplHelper::RouteConfigProvider route_config_provider_; ConnectionManagerImplHelper::ScopedRouteConfigProvider scoped_route_config_provider_; std::string server_name_; + HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{ + HttpConnectionManagerProto::OVERWRITE}; Stats::IsolatedStoreImpl fake_stats_; ConnectionManagerStats stats_; ConnectionManagerTracingStats tracing_stats_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 251c62394907..496b2af6c7ea 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -227,6 +227,21 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan conn_manager_->onData(fake_input, false); } + HeaderMap* sendResponseHeaders(HeaderMapPtr&& response_headers) { + HeaderMap* altered_response_headers = nullptr; + + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _)) + .WillOnce(Invoke([&](HeaderMap& headers, bool) -> FilterHeadersStatus { + altered_response_headers = &headers; + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, 
encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); + return altered_response_headers; + } + void expectOnDestroy() { for (auto filter : decoder_filters_) { EXPECT_CALL(*filter, onDestroy()); @@ -261,6 +276,9 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan return &scoped_route_config_provider_; } const std::string& serverName() override { return server_name_; } + HttpConnectionManagerProto::ServerHeaderTransformation serverHeaderTransformation() override { + return server_transformation_; + } ConnectionManagerStats& stats() override { return stats_; } ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; } bool useRemoteAddress() override { return use_remote_address_; } @@ -301,6 +319,8 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan NiceMock drain_close_; std::unique_ptr conn_manager_; std::string server_name_; + HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{ + HttpConnectionManagerProto::OVERWRITE}; Network::Address::Ipv4Instance local_address_{"127.0.0.1"}; bool use_remote_address_{true}; Http::DefaultInternalAddressConfig internal_address_config_; @@ -537,6 +557,65 @@ TEST_F(HttpConnectionManagerImplTest, PauseResume100Continue) { decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), false); } +// By default, Envoy will set the server header to the server name, here "custom-value" +TEST_F(HttpConnectionManagerImplTest, ServerHeaderOverwritten) { + setup(false, "custom-value", false); + setUpEncoderAndDecoder(false, false); + + sendRequestHeadersAndData(); + const HeaderMap* altered_headers = sendResponseHeaders( + HeaderMapPtr{new TestHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + EXPECT_EQ("custom-value", altered_headers->Server()->value().getStringView()); +} + +// When configured APPEND_IF_ABSENT if the server header is present it will be 
retained. +TEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendPresent) { + server_transformation_ = HttpConnectionManagerProto::APPEND_IF_ABSENT; + setup(false, "custom-value", false); + setUpEncoderAndDecoder(false, false); + + sendRequestHeadersAndData(); + const HeaderMap* altered_headers = sendResponseHeaders( + HeaderMapPtr{new TestHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + EXPECT_EQ("foo", altered_headers->Server()->value().getStringView()); +} + +// When configured APPEND_IF_ABSENT if the server header is absent the server name will be set. +TEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendAbsent) { + server_transformation_ = HttpConnectionManagerProto::APPEND_IF_ABSENT; + setup(false, "custom-value", false); + setUpEncoderAndDecoder(false, false); + + sendRequestHeadersAndData(); + const HeaderMap* altered_headers = + sendResponseHeaders(HeaderMapPtr{new TestHeaderMapImpl{{":status", "200"}}}); + EXPECT_EQ("custom-value", altered_headers->Server()->value().getStringView()); +} + +// When configured PASS_THROUGH, the server name will pass through. +TEST_F(HttpConnectionManagerImplTest, ServerHeaderPassthroughPresent) { + server_transformation_ = HttpConnectionManagerProto::PASS_THROUGH; + setup(false, "custom-value", false); + setUpEncoderAndDecoder(false, false); + + sendRequestHeadersAndData(); + const HeaderMap* altered_headers = sendResponseHeaders( + HeaderMapPtr{new TestHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + EXPECT_EQ("foo", altered_headers->Server()->value().getStringView()); +} + +// When configured PASS_THROUGH, the server header will not be added if absent. 
+TEST_F(HttpConnectionManagerImplTest, ServerHeaderPassthroughAbsent) { + server_transformation_ = HttpConnectionManagerProto::PASS_THROUGH; + setup(false, "custom-value", false); + setUpEncoderAndDecoder(false, false); + + sendRequestHeadersAndData(); + const HeaderMap* altered_headers = + sendResponseHeaders(HeaderMapPtr{new TestHeaderMapImpl{{":status", "200"}}}); + EXPECT_TRUE(altered_headers->Server() == nullptr); +} + TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { InSequence s; setup(false, ""); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 025a21ab1caf..b0c8e005124f 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -64,6 +64,8 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD0(routeConfigProvider, Router::RouteConfigProvider*()); MOCK_METHOD0(scopedRouteConfigProvider, Config::ConfigProvider*()); MOCK_METHOD0(serverName, const std::string&()); + MOCK_METHOD0(serverHeaderTransformation, + HttpConnectionManagerProto::ServerHeaderTransformation()); MOCK_METHOD0(stats, ConnectionManagerStats&()); MOCK_METHOD0(tracingStats, ConnectionManagerTracingStats&()); MOCK_METHOD0(useRemoteAddress, bool()); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index c300885c09fb..0b048d28e3dc 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -237,6 +237,8 @@ stat_prefix: router ContainerEq(config.tracingConfig()->request_headers_for_tags_)); EXPECT_EQ(*context_.local_info_.address_, config.localAddress()); EXPECT_EQ("foo", config.serverName()); + EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE, + config.serverHeaderTransformation()); EXPECT_EQ(5 * 60 
* 1000, config.streamIdleTimeout().count()); } @@ -388,6 +390,66 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { EXPECT_EQ(0, config.streamIdleTimeout().count()); } +TEST_F(HttpConnectionManagerConfigTest, ServerOverwrite) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + server_header_transformation: OVERWRITE + route_config: + name: local_route + http_filters: + - name: envoy.router + )EOF"; + + EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) + .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, + &Runtime::MockSnapshot::featureEnabledDefault)); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_); + EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE, + config.serverHeaderTransformation()); +} + +TEST_F(HttpConnectionManagerConfigTest, ServerAppendIfAbsent) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + server_header_transformation: APPEND_IF_ABSENT + route_config: + name: local_route + http_filters: + - name: envoy.router + )EOF"; + + EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) + .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, + &Runtime::MockSnapshot::featureEnabledDefault)); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_); + EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::APPEND_IF_ABSENT, + config.serverHeaderTransformation()); +} + +TEST_F(HttpConnectionManagerConfigTest, ServerPassThrough) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + server_header_transformation: PASS_THROUGH + route_config: + name: local_route + http_filters: + - name: envoy.router + )EOF"; + + 
EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) + .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, + &Runtime::MockSnapshot::featureEnabledDefault)); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_); + EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::PASS_THROUGH, + config.serverHeaderTransformation()); +} + // Validated that by default we don't normalize paths // unless set build flag path_normalization_by_default=true TEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) { diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index 24dbed7b348e..38b87e72fb7b 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -21,6 +21,7 @@ #include "extensions/transport_sockets/tls/context_config_impl.h" +#include "test/mocks/http/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" @@ -89,6 +90,17 @@ class AdminFilterTest : public testing::TestWithParam Date: Wed, 28 Aug 2019 12:35:10 -0700 Subject: [PATCH 07/31] use bazelversion for filter-example too (#8069) Signed-off-by: Lizan Zhou --- ci/build_setup.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 53379d9ab8da..9dc53668825c 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -95,6 +95,7 @@ if [ "$1" != "-nofetch" ]; then # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. 
(cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f 1995c1e0eccea84bbb39f64e75ef3e9102d1ae82) sed -e "s|{ENVOY_SRCDIR}|${ENVOY_SRCDIR}|" "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example > "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/WORKSPACE + cp -f "${ENVOY_SRCDIR}"/.bazelversion "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/.bazelversion fi # Also setup some space for building Envoy standalone. From c2e8edad54609c752d4594b69252048af84e798e Mon Sep 17 00:00:00 2001 From: Hans Duedal Date: Wed, 28 Aug 2019 23:56:21 +0200 Subject: [PATCH 08/31] grpc-httpjson-transcode: Update for RFC2045 support (#8065) RFC2045 (MIME) Base64 decoding support has been fixed upstream Description: The grpc transcoding filter has been updated to support RFC2045 (MIME) based inputs for protobuf type "Bytes". This is important since Base64 is often using the RFC2045 format for inputs. Also see: grpc-ecosystem/grpc-httpjson-transcoding#34 Risk Level: Low Testing: Integration / Manual Tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Hans Viken Duedal --- bazel/repository_locations.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 2abff095453a..5dec6ea9bb10 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -212,10 +212,10 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.8.0/protobuf-all-3.8.0.tar.gz"], ), grpc_httpjson_transcoding = dict( - sha256 = "dedd76b0169eb8c72e479529301a1d9b914a4ccb4d2b5ddb4ebe92d63a7b2152", - strip_prefix = "grpc-httpjson-transcoding-64d6ac985360b624d8e95105701b64a3814794cd", - # 2018-12-19 - urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/64d6ac985360b624d8e95105701b64a3814794cd.tar.gz"], + sha256 = "a447458b47ea4dc1d31499f555769af437c5d129d988ec1e13d5fdd0a6a36b4e", + strip_prefix = 
"grpc-httpjson-transcoding-2feabd5d64436e670084091a937855972ee35161", + # 2019-08-28 + urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/2feabd5d64436e670084091a937855972ee35161.tar.gz"], ), io_bazel_rules_go = dict( sha256 = "96b1f81de5acc7658e1f5a86d7dc9e1b89bc935d83799b711363a748652c471a", From 5e45d4874db85be56259df2daa8c69af78987fcf Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Thu, 29 Aug 2019 07:26:01 +0900 Subject: [PATCH 09/31] stats: Clean up all calls to Scope::counter() et al in production code. (#7842) * Convert a few more counter() references to use the StatName interface. Signed-off-by: Joshua Marantz --- source/common/stats/symbol_table_impl.h | 11 ++- .../filters/http/fault/fault_filter.cc | 22 +++-- .../filters/http/fault/fault_filter.h | 16 +++- .../extensions/filters/http/ip_tagging/BUILD | 1 + .../http/ip_tagging/ip_tagging_filter.cc | 52 +++++++++- .../http/ip_tagging/ip_tagging_filter.h | 49 +++------- .../filters/network/mongo_proxy/BUILD | 11 +++ .../filters/network/mongo_proxy/config.cc | 7 +- .../network/mongo_proxy/mongo_stats.cc | 47 +++++++++ .../filters/network/mongo_proxy/mongo_stats.h | 56 +++++++++++ .../filters/network/mongo_proxy/proxy.cc | 96 ++++++++++++------- .../filters/network/mongo_proxy/proxy.h | 17 +++- .../stat_sinks/common/statsd/statsd.cc | 6 +- source/extensions/transport_sockets/tls/BUILD | 1 + .../transport_sockets/tls/context_impl.cc | 60 ++++++++++-- .../transport_sockets/tls/context_impl.h | 8 ++ source/server/BUILD | 2 + source/server/guarddog_impl.cc | 9 +- source/server/overload_manager_impl.cc | 24 +++-- .../filters/http/fault/fault_filter_test.cc | 2 +- .../http/ip_tagging/ip_tagging_filter_test.cc | 2 +- .../filters/network/mongo_proxy/proxy_test.cc | 10 +- .../transport_sockets/tls/ssl_socket_test.cc | 3 +- tools/check_format.py | 19 ---- tools/spelling_dictionary.txt | 2 + 25 files changed, 390 insertions(+), 143 deletions(-) create mode 100644 
source/extensions/filters/network/mongo_proxy/mongo_stats.cc create mode 100644 source/extensions/filters/network/mongo_proxy/mongo_stats.h diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 925ff26368a4..dbbb633dcbe8 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -652,10 +652,13 @@ class StatNameSet { void rememberBuiltin(absl::string_view str); /** - * Finds a StatName by name. If 'token' has been remembered as a built-in, then - * no lock is required. Otherwise we first consult dynamic_stat_names_ under a - * lock that's private to the StatNameSet. If that's empty, we need to create - * the StatName in the pool, which requires taking a global lock. + * Finds a StatName by name. If 'token' has been remembered as a built-in, + * then no lock is required. Otherwise we must consult dynamic_stat_names_ + * under a lock that's private to the StatNameSet. If that's empty, we need to + * create the StatName in the pool, which requires taking a global lock, and + * then remember the new StatName in the dynamic_stat_names_. This allows + * subsequent lookups of the same string to take only the set's lock, and not + * the whole symbol-table lock. 
* * TODO(jmarantz): Potential perf issue here with contention, both on this * set's mutex and also the SymbolTable mutex which must be taken during diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 0649366a7e5c..0fb09f1c95f1 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -78,7 +78,17 @@ FaultFilterConfig::FaultFilterConfig(const envoy::config::filter::http::fault::v Runtime::Loader& runtime, const std::string& stats_prefix, Stats::Scope& scope, TimeSource& time_source) : settings_(fault), runtime_(runtime), stats_(generateStats(stats_prefix, scope)), - stats_prefix_(stats_prefix), scope_(scope), time_source_(time_source) {} + scope_(scope), time_source_(time_source), stat_name_set_(scope.symbolTable()), + aborts_injected_(stat_name_set_.add("aborts_injected")), + delays_injected_(stat_name_set_.add("delays_injected")), + stats_prefix_(stat_name_set_.add(absl::StrCat(stats_prefix, "fault"))) {} + +void FaultFilterConfig::incCounter(absl::string_view downstream_cluster, + Stats::StatName stat_name) { + Stats::SymbolTable::StoragePtr storage = scope_.symbolTable().join( + {stats_prefix_, stat_name_set_.getStatName(downstream_cluster), stat_name}); + scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); +} FaultFilter::FaultFilter(FaultFilterConfigSharedPtr config) : config_(config) {} @@ -279,10 +289,7 @@ uint64_t FaultFilter::abortHttpStatus() { void FaultFilter::recordDelaysInjectedStats() { // Downstream specific stats. if (!downstream_cluster_.empty()) { - const std::string stats_counter = - fmt::format("{}fault.{}.delays_injected", config_->statsPrefix(), downstream_cluster_); - - config_->scope().counter(stats_counter).inc(); + config_->incDelays(downstream_cluster_); } // General stats. All injected faults are considered a single aggregate active fault. 
@@ -293,10 +300,7 @@ void FaultFilter::recordDelaysInjectedStats() { void FaultFilter::recordAbortsInjectedStats() { // Downstream specific stats. if (!downstream_cluster_.empty()) { - const std::string stats_counter = - fmt::format("{}fault.{}.aborts_injected", config_->statsPrefix(), downstream_cluster_); - - config_->scope().counter(stats_counter).inc(); + config_->incAborts(downstream_cluster_); } // General stats. All injected faults are considered a single aggregate active fault. diff --git a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index be6ef435420b..b0d3a0f1bf30 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -16,6 +16,7 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/token_bucket_impl.h" #include "common/http/header_utility.h" +#include "common/stats/symbol_table_impl.h" #include "extensions/filters/common/fault/fault_config.h" @@ -111,20 +112,31 @@ class FaultFilterConfig { Runtime::Loader& runtime() { return runtime_; } FaultFilterStats& stats() { return stats_; } - const std::string& statsPrefix() { return stats_prefix_; } Stats::Scope& scope() { return scope_; } const FaultSettings* settings() { return &settings_; } TimeSource& timeSource() { return time_source_; } + void incDelays(absl::string_view downstream_cluster) { + incCounter(downstream_cluster, delays_injected_); + } + + void incAborts(absl::string_view downstream_cluster) { + incCounter(downstream_cluster, aborts_injected_); + } + private: static FaultFilterStats generateStats(const std::string& prefix, Stats::Scope& scope); + void incCounter(absl::string_view downstream_cluster, Stats::StatName stat_name); const FaultSettings settings_; Runtime::Loader& runtime_; FaultFilterStats stats_; - const std::string stats_prefix_; Stats::Scope& scope_; TimeSource& time_source_; + Stats::StatNameSet stat_name_set_; + const 
Stats::StatName aborts_injected_; + const Stats::StatName delays_injected_; + const Stats::StatName stats_prefix_; // Includes ".fault". }; using FaultFilterConfigSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/http/ip_tagging/BUILD b/source/extensions/filters/http/ip_tagging/BUILD index 583893eadb23..bc88c1313356 100644 --- a/source/extensions/filters/http/ip_tagging/BUILD +++ b/source/extensions/filters/http/ip_tagging/BUILD @@ -22,6 +22,7 @@ envoy_cc_library( "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/common/network:lc_trie_lib", + "//source/common/stats:symbol_table_lib", "@envoy_api//envoy/config/filter/http/ip_tagging/v2:ip_tagging_cc", ], ) diff --git a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc index 1d82239c7dfd..d3b2549a87f7 100644 --- a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc +++ b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc @@ -10,6 +10,52 @@ namespace Extensions { namespace HttpFilters { namespace IpTagging { +IpTaggingFilterConfig::IpTaggingFilterConfig( + const envoy::config::filter::http::ip_tagging::v2::IPTagging& config, + const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime) + : request_type_(requestTypeEnum(config.request_type())), scope_(scope), runtime_(runtime), + stat_name_set_(scope.symbolTable()), + stats_prefix_(stat_name_set_.add(stat_prefix + "ip_tagging")), + hit_(stat_name_set_.add("hit")), no_hit_(stat_name_set_.add("no_hit")), + total_(stat_name_set_.add("total")) { + + // Once loading IP tags from a file system is supported, the restriction on the size + // of the set should be removed and observability into what tags are loaded needs + // to be implemented. + // TODO(ccaraman): Remove size check once file system support is implemented. + // Work is tracked by issue https://github.com/envoyproxy/envoy/issues/2695. 
+ if (config.ip_tags().empty()) { + throw EnvoyException("HTTP IP Tagging Filter requires ip_tags to be specified."); + } + + std::vector>> tag_data; + tag_data.reserve(config.ip_tags().size()); + for (const auto& ip_tag : config.ip_tags()) { + std::vector cidr_set; + cidr_set.reserve(ip_tag.ip_list().size()); + for (const envoy::api::v2::core::CidrRange& entry : ip_tag.ip_list()) { + + // Currently, CidrRange::create doesn't guarantee that the CidrRanges are valid. + Network::Address::CidrRange cidr_entry = Network::Address::CidrRange::create(entry); + if (cidr_entry.isValid()) { + cidr_set.emplace_back(std::move(cidr_entry)); + } else { + throw EnvoyException( + fmt::format("invalid ip/mask combo '{}/{}' (format is /<# mask bits>)", + entry.address_prefix(), entry.prefix_len().value())); + } + } + tag_data.emplace_back(ip_tag.ip_tag_name(), cidr_set); + } + trie_ = std::make_unique>(tag_data); +} + +void IpTaggingFilterConfig::incCounter(Stats::StatName name, absl::string_view tag) { + Stats::SymbolTable::StoragePtr storage = + scope_.symbolTable().join({stats_prefix_, stat_name_set_.getStatName(tag), name}); + scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); +} + IpTaggingFilter::IpTaggingFilter(IpTaggingFilterConfigSharedPtr config) : config_(config) {} IpTaggingFilter::~IpTaggingFilter() = default; @@ -42,12 +88,12 @@ Http::FilterHeadersStatus IpTaggingFilter::decodeHeaders(Http::HeaderMap& header // If there are use cases with a large set of tags, a way to opt into these stats // should be exposed and other observability options like logging tags need to be implemented. 
for (const std::string& tag : tags) { - config_->scope().counter(fmt::format("{}{}.hit", config_->statsPrefix(), tag)).inc(); + config_->incHit(tag); } } else { - config_->scope().counter(fmt::format("{}no_hit", config_->statsPrefix())).inc(); + config_->incNoHit(); } - config_->scope().counter(fmt::format("{}total", config_->statsPrefix())).inc(); + config_->incTotal(); return Http::FilterHeadersStatus::Continue; } diff --git a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h index b79103aab9bd..6cf2b19b0e74 100644 --- a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h +++ b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h @@ -14,6 +14,7 @@ #include "common/network/cidr_range.h" #include "common/network/lc_trie.h" +#include "common/stats/symbol_table_impl.h" namespace Envoy { namespace Extensions { @@ -32,46 +33,16 @@ class IpTaggingFilterConfig { public: IpTaggingFilterConfig(const envoy::config::filter::http::ip_tagging::v2::IPTagging& config, const std::string& stat_prefix, Stats::Scope& scope, - Runtime::Loader& runtime) - : request_type_(requestTypeEnum(config.request_type())), scope_(scope), runtime_(runtime), - stats_prefix_(stat_prefix + "ip_tagging.") { - - // Once loading IP tags from a file system is supported, the restriction on the size - // of the set should be removed and observability into what tags are loaded needs - // to be implemented. - // TODO(ccaraman): Remove size check once file system support is implemented. - // Work is tracked by issue https://github.com/envoyproxy/envoy/issues/2695. 
- if (config.ip_tags().empty()) { - throw EnvoyException("HTTP IP Tagging Filter requires ip_tags to be specified."); - } - - std::vector>> tag_data; - tag_data.reserve(config.ip_tags().size()); - for (const auto& ip_tag : config.ip_tags()) { - std::vector cidr_set; - cidr_set.reserve(ip_tag.ip_list().size()); - for (const envoy::api::v2::core::CidrRange& entry : ip_tag.ip_list()) { - - // Currently, CidrRange::create doesn't guarantee that the CidrRanges are valid. - Network::Address::CidrRange cidr_entry = Network::Address::CidrRange::create(entry); - if (cidr_entry.isValid()) { - cidr_set.emplace_back(std::move(cidr_entry)); - } else { - throw EnvoyException( - fmt::format("invalid ip/mask combo '{}/{}' (format is /<# mask bits>)", - entry.address_prefix(), entry.prefix_len().value())); - } - } - tag_data.emplace_back(ip_tag.ip_tag_name(), cidr_set); - } - trie_ = std::make_unique>(tag_data); - } + Runtime::Loader& runtime); Runtime::Loader& runtime() { return runtime_; } Stats::Scope& scope() { return scope_; } FilterRequestType requestType() const { return request_type_; } const Network::LcTrie::LcTrie& trie() const { return *trie_; } - const std::string& statsPrefix() const { return stats_prefix_; } + + void incHit(absl::string_view tag) { incCounter(hit_, tag); } + void incNoHit() { incCounter(no_hit_); } + void incTotal() { incCounter(total_); } private: static FilterRequestType requestTypeEnum( @@ -88,10 +59,16 @@ class IpTaggingFilterConfig { } } + void incCounter(Stats::StatName name1, absl::string_view tag = ""); + const FilterRequestType request_type_; Stats::Scope& scope_; Runtime::Loader& runtime_; - const std::string stats_prefix_; + Stats::StatNameSet stat_name_set_; + const Stats::StatName stats_prefix_; + const Stats::StatName hit_; + const Stats::StatName no_hit_; + const Stats::StatName total_; std::unique_ptr> trie_; }; diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index 
36a94de85abc..f2be1d6a9774 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -58,6 +58,7 @@ envoy_cc_library( deps = [ ":codec_interface", ":codec_lib", + ":mongo_stats_lib", ":utility_lib", "//include/envoy/access_log:access_log_interface", "//include/envoy/common:time_interface", @@ -81,6 +82,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "mongo_stats_lib", + srcs = ["mongo_stats.cc"], + hdrs = ["mongo_stats.h"], + deps = [ + "//include/envoy/stats:stats_interface", + "//source/common/stats:symbol_table_lib", + ], +) + envoy_cc_library( name = "utility_lib", srcs = ["utility.cc"], diff --git a/source/extensions/filters/network/mongo_proxy/config.cc b/source/extensions/filters/network/mongo_proxy/config.cc index a8989947e75a..24539b6bd953 100644 --- a/source/extensions/filters/network/mongo_proxy/config.cc +++ b/source/extensions/filters/network/mongo_proxy/config.cc @@ -32,12 +32,13 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP fault_config = std::make_shared(proto_config.delay()); } + auto stats = std::make_shared(context.scope(), stat_prefix); const bool emit_dynamic_metadata = proto_config.emit_dynamic_metadata(); - return [stat_prefix, &context, access_log, fault_config, - emit_dynamic_metadata](Network::FilterManager& filter_manager) -> void { + return [stat_prefix, &context, access_log, fault_config, emit_dynamic_metadata, + stats](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared( stat_prefix, context.scope(), context.runtime(), access_log, fault_config, - context.drainDecision(), context.dispatcher().timeSource(), emit_dynamic_metadata)); + context.drainDecision(), context.dispatcher().timeSource(), emit_dynamic_metadata, stats)); }; } diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc new file mode 100644 
index 000000000000..11dd1877cefc --- /dev/null +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc @@ -0,0 +1,47 @@ +#include "extensions/filters/network/mongo_proxy/mongo_stats.h" + +#include +#include +#include + +#include "envoy/stats/scope.h" + +#include "common/stats/symbol_table_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace MongoProxy { + +MongoStats::MongoStats(Stats::Scope& scope, const std::string& prefix) + : scope_(scope), stat_name_set_(scope.symbolTable()), prefix_(stat_name_set_.add(prefix)), + callsite_(stat_name_set_.add("callsite")), cmd_(stat_name_set_.add("cmd")), + collection_(stat_name_set_.add("collection")), multi_get_(stat_name_set_.add("multi_get")), + reply_num_docs_(stat_name_set_.add("reply_num_docs")), + reply_size_(stat_name_set_.add("reply_size")), + reply_time_ms_(stat_name_set_.add("reply_time_ms")), time_ms_(stat_name_set_.add("time_ms")), + query_(stat_name_set_.add("query")), scatter_get_(stat_name_set_.add("scatter_get")), + total_(stat_name_set_.add("total")) {} + +Stats::SymbolTable::StoragePtr MongoStats::addPrefix(const std::vector& names) { + std::vector names_with_prefix; + names_with_prefix.reserve(1 + names.size()); + names_with_prefix.push_back(prefix_); + names_with_prefix.insert(names_with_prefix.end(), names.begin(), names.end()); + return scope_.symbolTable().join(names_with_prefix); +} + +void MongoStats::incCounter(const std::vector& names) { + const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); + scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); +} + +void MongoStats::recordHistogram(const std::vector& names, uint64_t sample) { + const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); + scope_.histogramFromStatName(Stats::StatName(stat_name_storage.get())).recordValue(sample); +} + +} // namespace MongoProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy 
diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.h b/source/extensions/filters/network/mongo_proxy/mongo_stats.h new file mode 100644 index 000000000000..d27a8478824a --- /dev/null +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.h @@ -0,0 +1,56 @@ +#pragma once + +#include +#include +#include + +#include "envoy/stats/scope.h" + +#include "common/stats/symbol_table_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace MongoProxy { + +class MongoStats { +public: + MongoStats(Stats::Scope& scope, const std::string& prefix); + + void incCounter(const std::vector& names); + void recordHistogram(const std::vector& names, uint64_t sample); + + /** + * Finds or creates a StatName by string, taking a global lock if needed. + * + * TODO(jmarantz): Potential perf issue here with mutex contention for names + * that have not been remembered as builtins in the constructor. + */ + Stats::StatName getStatName(const std::string& str) { return stat_name_set_.getStatName(str); } + +private: + Stats::SymbolTable::StoragePtr addPrefix(const std::vector& names); + + Stats::Scope& scope_; + Stats::StatNameSet stat_name_set_; + +public: + const Stats::StatName prefix_; + const Stats::StatName callsite_; + const Stats::StatName cmd_; + const Stats::StatName collection_; + const Stats::StatName multi_get_; + const Stats::StatName reply_num_docs_; + const Stats::StatName reply_size_; + const Stats::StatName reply_time_ms_; + const Stats::StatName time_ms_; + const Stats::StatName query_; + const Stats::StatName scatter_get_; + const Stats::StatName total_; +}; +using MongoStatsSharedPtr = std::shared_ptr; + +} // namespace MongoProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index be08a04fc638..bc35859113f8 100644 --- 
a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -58,11 +58,11 @@ ProxyFilter::ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime, AccessLogSharedPtr access_log, const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config, const Network::DrainDecision& drain_decision, TimeSource& time_source, - bool emit_dynamic_metadata) - : stat_prefix_(stat_prefix), scope_(scope), stats_(generateStats(stat_prefix, scope)), - runtime_(runtime), drain_decision_(drain_decision), access_log_(access_log), - fault_config_(fault_config), time_source_(time_source), - emit_dynamic_metadata_(emit_dynamic_metadata) { + bool emit_dynamic_metadata, const MongoStatsSharedPtr& mongo_stats) + : stat_prefix_(stat_prefix), stats_(generateStats(stat_prefix, scope)), runtime_(runtime), + drain_decision_(drain_decision), access_log_(access_log), fault_config_(fault_config), + time_source_(time_source), emit_dynamic_metadata_(emit_dynamic_metadata), + mongo_stats_(mongo_stats) { if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().ConnectionLoggingEnabled, 100)) { // If we are not logging at the connection level, just release the shared pointer so that we @@ -145,21 +145,23 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { ActiveQueryPtr active_query(new ActiveQuery(*this, *message)); if (!active_query->query_info_.command().empty()) { // First field key is the operation. - scope_.counter(fmt::format("{}cmd.{}.total", stat_prefix_, active_query->query_info_.command())) - .inc(); + mongo_stats_->incCounter({mongo_stats_->cmd_, + mongo_stats_->getStatName(active_query->query_info_.command()), + mongo_stats_->total_}); } else { // Normal query, get stats on a per collection basis first. 
- std::string collection_stat_prefix = - fmt::format("{}collection.{}", stat_prefix_, active_query->query_info_.collection()); QueryMessageInfo::QueryType query_type = active_query->query_info_.type(); - chargeQueryStats(collection_stat_prefix, query_type); + Stats::StatNameVec names; + names.reserve(6); // 2 entries are added by chargeQueryStats(). + names.push_back(mongo_stats_->collection_); + names.push_back(mongo_stats_->getStatName(active_query->query_info_.collection())); + chargeQueryStats(names, query_type); // Callsite stats if we have it. if (!active_query->query_info_.callsite().empty()) { - std::string callsite_stat_prefix = - fmt::format("{}collection.{}.callsite.{}", stat_prefix_, - active_query->query_info_.collection(), active_query->query_info_.callsite()); - chargeQueryStats(callsite_stat_prefix, query_type); + names.push_back(mongo_stats_->callsite_); + names.push_back(mongo_stats_->getStatName(active_query->query_info_.callsite())); + chargeQueryStats(names, query_type); } // Global stats. @@ -176,14 +178,26 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { active_query_list_.emplace_back(std::move(active_query)); } -void ProxyFilter::chargeQueryStats(const std::string& prefix, +void ProxyFilter::chargeQueryStats(Stats::StatNameVec& names, QueryMessageInfo::QueryType query_type) { - scope_.counter(fmt::format("{}.query.total", prefix)).inc(); + // names come in containing {"collection", collection}. Report stats for 1 or + // 2 variations on this array, and then return with the array in the same + // state it had on entry. Both of these variations by appending {"query", "total"}. + size_t orig_size = names.size(); + ASSERT(names.capacity() - orig_size >= 2); // Ensures the caller has reserved() enough memory. + names.push_back(mongo_stats_->query_); + names.push_back(mongo_stats_->total_); + mongo_stats_->incCounter(names); + + // And now replace "total" with either "scatter_get" or "multi_get" if depending on query_type. 
if (query_type == QueryMessageInfo::QueryType::ScatterGet) { - scope_.counter(fmt::format("{}.query.scatter_get", prefix)).inc(); + names.back() = mongo_stats_->scatter_get_; + mongo_stats_->incCounter(names); } else if (query_type == QueryMessageInfo::QueryType::MultiGet) { - scope_.counter(fmt::format("{}.query.multi_get", prefix)).inc(); + names.back() = mongo_stats_->multi_get_; + mongo_stats_->incCounter(names); } + names.resize(orig_size); } void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { @@ -208,21 +222,25 @@ void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { } if (!active_query.query_info_.command().empty()) { - std::string stat_prefix = - fmt::format("{}cmd.{}", stat_prefix_, active_query.query_info_.command()); - chargeReplyStats(active_query, stat_prefix, *message); + Stats::StatNameVec names{mongo_stats_->cmd_, + mongo_stats_->getStatName(active_query.query_info_.command())}; + chargeReplyStats(active_query, names, *message); } else { // Collection stats first. - std::string stat_prefix = - fmt::format("{}collection.{}.query", stat_prefix_, active_query.query_info_.collection()); - chargeReplyStats(active_query, stat_prefix, *message); + Stats::StatNameVec names{mongo_stats_->collection_, + mongo_stats_->getStatName(active_query.query_info_.collection()), + mongo_stats_->query_}; + chargeReplyStats(active_query, names, *message); // Callsite stats if we have it. if (!active_query.query_info_.callsite().empty()) { - std::string callsite_stat_prefix = - fmt::format("{}collection.{}.callsite.{}.query", stat_prefix_, - active_query.query_info_.collection(), active_query.query_info_.callsite()); - chargeReplyStats(active_query, callsite_stat_prefix, *message); + // Currently, names == {"collection", collection, "query"} and we are going + // to mutate the array to {"collection", collection, "callsite", callsite, "query"}. + ASSERT(names.size() == 3); + names.back() = mongo_stats_->callsite_; // Replaces "query". 
+ names.push_back(mongo_stats_->getStatName(active_query.query_info_.callsite())); + names.push_back(mongo_stats_->query_); + chargeReplyStats(active_query, names, *message); } } @@ -270,20 +288,26 @@ void ProxyFilter::onDrainClose() { read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); } -void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, const std::string& prefix, +void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, Stats::StatNameVec& names, const ReplyMessage& message) { uint64_t reply_documents_byte_size = 0; for (const Bson::DocumentSharedPtr& document : message.documents()) { reply_documents_byte_size += document->byteSize(); } - scope_.histogram(fmt::format("{}.reply_num_docs", prefix)) - .recordValue(message.documents().size()); - scope_.histogram(fmt::format("{}.reply_size", prefix)).recordValue(reply_documents_byte_size); - scope_.histogram(fmt::format("{}.reply_time_ms", prefix)) - .recordValue(std::chrono::duration_cast( - time_source_.monotonicTime() - active_query.start_time_) - .count()); + // Write 3 different histograms; appending 3 different suffixes to the name + // that was passed in. Here we overwrite the passed-in names, but we restore + // names to its original state upon return. 
+ const size_t orig_size = names.size(); + names.push_back(mongo_stats_->reply_num_docs_); + mongo_stats_->recordHistogram(names, message.documents().size()); + names[orig_size] = mongo_stats_->reply_size_; + mongo_stats_->recordHistogram(names, reply_documents_byte_size); + names[orig_size] = mongo_stats_->reply_time_ms_; + mongo_stats_->recordHistogram(names, std::chrono::duration_cast( + time_source_.monotonicTime() - active_query.start_time_) + .count()); + names.resize(orig_size); } void ProxyFilter::doDecode(Buffer::Instance& buffer) { diff --git a/source/extensions/filters/network/mongo_proxy/proxy.h b/source/extensions/filters/network/mongo_proxy/proxy.h index f81ef86017ef..da85af19f281 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.h +++ b/source/extensions/filters/network/mongo_proxy/proxy.h @@ -25,6 +25,7 @@ #include "extensions/filters/common/fault/fault_config.h" #include "extensions/filters/network/mongo_proxy/codec.h" +#include "extensions/filters/network/mongo_proxy/mongo_stats.h" #include "extensions/filters/network/mongo_proxy/utility.h" namespace Envoy { @@ -110,7 +111,7 @@ class ProxyFilter : public Network::Filter, AccessLogSharedPtr access_log, const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config, const Network::DrainDecision& drain_decision, TimeSource& time_system, - bool emit_dynamic_metadata); + bool emit_dynamic_metadata, const MongoStatsSharedPtr& stats); ~ProxyFilter() override; virtual DecoderPtr createDecoder(DecoderCallbacks& callbacks) PURE; @@ -164,9 +165,17 @@ class ProxyFilter : public Network::Filter, POOL_HISTOGRAM_PREFIX(scope, prefix))}; } - void chargeQueryStats(const std::string& prefix, QueryMessageInfo::QueryType query_type); - void chargeReplyStats(ActiveQuery& active_query, const std::string& prefix, + // Increment counters related to queries. 
'names' is passed by non-const + // reference so the implementation can mutate it without copying, though it + // always restores it to its prior state prior to return. + void chargeQueryStats(Stats::StatNameVec& names, QueryMessageInfo::QueryType query_type); + + // Add samples to histograms related to replies. 'names' is passed by + // non-const reference so the implementation can mutate it without copying, + // though it always restores it to its prior state prior to return. + void chargeReplyStats(ActiveQuery& active_query, Stats::StatNameVec& names, const ReplyMessage& message); + void doDecode(Buffer::Instance& buffer); void logMessage(Message& message, bool full); void onDrainClose(); @@ -176,7 +185,6 @@ class ProxyFilter : public Network::Filter, std::unique_ptr decoder_; std::string stat_prefix_; - Stats::Scope& scope_; MongoProxyStats stats_; Runtime::Loader& runtime_; const Network::DrainDecision& drain_decision_; @@ -191,6 +199,7 @@ class ProxyFilter : public Network::Filter, Event::TimerPtr drain_close_timer_; TimeSource& time_source_; const bool emit_dynamic_metadata_; + MongoStatsSharedPtr mongo_stats_; }; class ProdProxyFilter : public ProxyFilter { diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index 64d7238bba07..4dc3a5dae872 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -13,6 +13,7 @@ #include "common/common/fmt.h" #include "common/common/utility.h" #include "common/config/utility.h" +#include "common/stats/symbol_table_impl.h" namespace Envoy { namespace Extensions { @@ -99,8 +100,9 @@ TcpStatsdSink::TcpStatsdSink(const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, Stats::Scope& scope, const std::string& prefix) : prefix_(prefix.empty() ? 
Statsd::getDefaultPrefix() : prefix), tls_(tls.allocateSlot()), - cluster_manager_(cluster_manager), cx_overflow_stat_(scope.counter("statsd.cx_overflow")) { - + cluster_manager_(cluster_manager), + cx_overflow_stat_(scope.counterFromStatName( + Stats::StatNameManagedStorage("statsd.cx_overflow", scope.symbolTable()).statName())) { Config::Utility::checkClusterAndLocalInfo("tcp statsd", cluster_name, cluster_manager, local_info); cluster_info_ = cluster_manager.get(cluster_name)->info(); diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index cb8bb9ec02ea..3345a9d87439 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -101,6 +101,7 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/network:address_lib", "//source/common/protobuf:utility_lib", + "//source/common/stats:symbol_table_lib", "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", "@envoy_api//envoy/admin/v2alpha:certs_cc", ], diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index f4795c0db2b1..85928172612e 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -51,7 +51,11 @@ bool cbsContainsU16(CBS& cbs, uint16_t n) { ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& config, TimeSource& time_source) : scope_(scope), stats_(generateStats(scope)), time_source_(time_source), - tls_max_version_(config.maxProtocolVersion()) { + tls_max_version_(config.maxProtocolVersion()), stat_name_set_(scope.symbolTable()), + ssl_ciphers_(stat_name_set_.add("ssl.ciphers")), + ssl_versions_(stat_name_set_.add("ssl.versions")), + ssl_curves_(stat_name_set_.add("ssl.curves")), + ssl_sigalgs_(stat_name_set_.add("ssl.sigalgs")) { const auto tls_certificates = 
config.tlsCertificates(); tls_contexts_.resize(std::max(static_cast(1), tls_certificates.size())); @@ -369,6 +373,35 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c } parsed_alpn_protocols_ = parseAlpnProtocols(config.alpnProtocols()); + + // To enumerate the required builtin ciphers, curves, algorithms, and + // versions, uncomment '#define LOG_BUILTIN_STAT_NAMES' below, and run + // bazel test //test/extensions/transport_sockets/tls/... --test_output=streamed + // | grep " Builtin ssl." | sort | uniq + // #define LOG_BUILTIN_STAT_NAMES + // + // TODO(#8035): improve tooling to find any other built-ins needed to avoid + // contention. + + // Ciphers + stat_name_set_.rememberBuiltin("AEAD-AES128-GCM-SHA256"); + stat_name_set_.rememberBuiltin("ECDHE-ECDSA-AES128-GCM-SHA256"); + stat_name_set_.rememberBuiltin("ECDHE-RSA-AES128-GCM-SHA256"); + stat_name_set_.rememberBuiltin("ECDHE-RSA-AES128-SHA"); + stat_name_set_.rememberBuiltin("ECDHE-RSA-CHACHA20-POLY1305"); + + // Curves + stat_name_set_.rememberBuiltin("X25519"); + + // Algorithms + stat_name_set_.rememberBuiltin("ecdsa_secp256r1_sha256"); + stat_name_set_.rememberBuiltin("rsa_pss_rsae_sha256"); + + // Versions + stat_name_set_.rememberBuiltin("TLSv1"); + stat_name_set_.rememberBuiltin("TLSv1.1"); + stat_name_set_.rememberBuiltin("TLSv1.2"); + stat_name_set_.rememberBuiltin("TLSv1.3"); } int ServerContextImpl::alpnSelectCallback(const unsigned char** out, unsigned char* outlen, @@ -477,6 +510,18 @@ int ContextImpl::verifyCertificate(X509* cert, const std::vector& v return 1; } +void ContextImpl::incCounter(const Stats::StatName name, absl::string_view value) const { + Stats::SymbolTable& symbol_table = scope_.symbolTable(); + Stats::SymbolTable::StoragePtr storage = + symbol_table.join({name, stat_name_set_.getStatName(value)}); + scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); + +#ifdef LOG_BUILTIN_STAT_NAMES + std::cerr << absl::StrCat("Builtin ", 
symbol_table.toString(name), ": ", value, "\n") + << std::flush; +#endif +} + void ContextImpl::logHandshake(SSL* ssl) const { stats_.handshake_.inc(); @@ -484,22 +529,19 @@ void ContextImpl::logHandshake(SSL* ssl) const { stats_.session_reused_.inc(); } - const char* cipher = SSL_get_cipher_name(ssl); - scope_.counter(fmt::format("ssl.ciphers.{}", std::string{cipher})).inc(); - - const char* version = SSL_get_version(ssl); - scope_.counter(fmt::format("ssl.versions.{}", std::string{version})).inc(); + incCounter(ssl_ciphers_, SSL_get_cipher_name(ssl)); + incCounter(ssl_versions_, SSL_get_version(ssl)); uint16_t curve_id = SSL_get_curve_id(ssl); if (curve_id) { - const char* curve = SSL_get_curve_name(curve_id); - scope_.counter(fmt::format("ssl.curves.{}", std::string{curve})).inc(); + // Note: in the unit tests, this curve name is always literal "X25519" + incCounter(ssl_curves_, SSL_get_curve_name(curve_id)); } uint16_t sigalg_id = SSL_get_peer_signature_algorithm(ssl); if (sigalg_id) { const char* sigalg = SSL_get_signature_algorithm_name(sigalg_id, 1 /* include curve */); - scope_.counter(fmt::format("ssl.sigalgs.{}", std::string{sigalg})).inc(); + incCounter(ssl_sigalgs_, sigalg); } bssl::UniquePtr cert(SSL_get_peer_certificate(ssl)); diff --git a/source/extensions/transport_sockets/tls/context_impl.h b/source/extensions/transport_sockets/tls/context_impl.h index ccb984a63efa..ce5007fd5a34 100644 --- a/source/extensions/transport_sockets/tls/context_impl.h +++ b/source/extensions/transport_sockets/tls/context_impl.h @@ -12,6 +12,8 @@ #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" +#include "common/stats/symbol_table_impl.h" + #include "extensions/transport_sockets/tls/context_manager_impl.h" #include "absl/synchronization/mutex.h" @@ -126,6 +128,7 @@ class ContextImpl : public virtual Envoy::Ssl::Context { static SslStats generateStats(Stats::Scope& scope); std::string getCaFileName() const { return ca_file_path_; }; + void 
incCounter(const Stats::StatName name, absl::string_view value) const; Envoy::Ssl::CertificateDetailsPtr certificateDetails(X509* cert, const std::string& path) const; @@ -167,6 +170,11 @@ class ContextImpl : public virtual Envoy::Ssl::Context { std::string cert_chain_file_path_; TimeSource& time_source_; const unsigned tls_max_version_; + mutable Stats::StatNameSet stat_name_set_; + const Stats::StatName ssl_ciphers_; + const Stats::StatName ssl_versions_; + const Stats::StatName ssl_curves_; + const Stats::StatName ssl_sigalgs_; }; using ContextImplSharedPtr = std::shared_ptr; diff --git a/source/server/BUILD b/source/server/BUILD index 5750846d1dca..169f7d3a3cc3 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -107,6 +107,7 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/common:thread_lib", "//source/common/event:libevent_lib", + "//source/common/stats:symbol_table_lib", ], ) @@ -228,6 +229,7 @@ envoy_cc_library( "//include/envoy/thread_local:thread_local_interface", "//source/common/common:logger_lib", "//source/common/config:utility_lib", + "//source/common/stats:symbol_table_lib", "//source/server:resource_monitor_config_lib", "@envoy_api//envoy/config/overload/v2alpha:overload_cc", ], diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index a9e3e583e318..5b0b80163478 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -8,6 +8,7 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/lock_guard.h" +#include "common/stats/symbol_table_impl.h" #include "server/watchdog_impl.h" @@ -30,8 +31,12 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio multikillEnabled() ? 
multi_kill_timeout_ : min_of_nonfatal, min_of_nonfatal}); }()), - watchdog_miss_counter_(stats_scope.counter("server.watchdog_miss")), - watchdog_megamiss_counter_(stats_scope.counter("server.watchdog_mega_miss")), + watchdog_miss_counter_(stats_scope.counterFromStatName( + Stats::StatNameManagedStorage("server.watchdog_miss", stats_scope.symbolTable()) + .statName())), + watchdog_megamiss_counter_(stats_scope.counterFromStatName( + Stats::StatNameManagedStorage("server.watchdog_mega_miss", stats_scope.symbolTable()) + .statName())), dispatcher_(api.allocateDispatcher()), loop_timer_(dispatcher_->createTimer([this]() { step(); })), run_thread_(true) { start(api); diff --git a/source/server/overload_manager_impl.cc b/source/server/overload_manager_impl.cc index 7795802b6c76..484be6f075a9 100644 --- a/source/server/overload_manager_impl.cc +++ b/source/server/overload_manager_impl.cc @@ -5,6 +5,7 @@ #include "common/common/fmt.h" #include "common/config/utility.h" #include "common/protobuf/utility.h" +#include "common/stats/symbol_table_impl.h" #include "server/resource_monitor_config_impl.h" @@ -33,16 +34,25 @@ class ThresholdTriggerImpl : public OverloadAction::Trigger { absl::optional value_; }; -std::string StatsName(const std::string& a, const std::string& b) { - return absl::StrCat("overload.", a, ".", b); +Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) { + Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", a, ".", b), + scope.symbolTable()); + return scope.counterFromStatName(stat_name.statName()); +} + +Stats::Gauge& makeGauge(Stats::Scope& scope, absl::string_view a, absl::string_view b, + Stats::Gauge::ImportMode import_mode) { + Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", a, ".", b), + scope.symbolTable()); + return scope.gaugeFromStatName(stat_name.statName(), import_mode); } } // namespace OverloadAction::OverloadAction(const envoy::config::overload::v2alpha::OverloadAction& 
config, Stats::Scope& stats_scope) - : active_gauge_(stats_scope.gauge(StatsName(config.name(), "active"), - Stats::Gauge::ImportMode::Accumulate)) { + : active_gauge_( + makeGauge(stats_scope, config.name(), "active", Stats::Gauge::ImportMode::Accumulate)) { for (const auto& trigger_config : config.triggers()) { TriggerPtr trigger; @@ -213,9 +223,9 @@ OverloadManagerImpl::Resource::Resource(const std::string& name, ResourceMonitor OverloadManagerImpl& manager, Stats::Scope& stats_scope) : name_(name), monitor_(std::move(monitor)), manager_(manager), pending_update_(false), pressure_gauge_( - stats_scope.gauge(StatsName(name, "pressure"), Stats::Gauge::ImportMode::NeverImport)), - failed_updates_counter_(stats_scope.counter(StatsName(name, "failed_updates"))), - skipped_updates_counter_(stats_scope.counter(StatsName(name, "skipped_updates"))) {} + makeGauge(stats_scope, name, "pressure", Stats::Gauge::ImportMode::NeverImport)), + failed_updates_counter_(makeCounter(stats_scope, name, "failed_updates")), + skipped_updates_counter_(makeCounter(stats_scope, name, "skipped_updates")) {} void OverloadManagerImpl::Resource::update() { if (!pending_update_) { diff --git a/test/extensions/filters/http/fault/fault_filter_test.cc b/test/extensions/filters/http/fault/fault_filter_test.cc index d4f8ad2eb90e..f4e0bc1fcb58 100644 --- a/test/extensions/filters/http/fault/fault_filter_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_test.cc @@ -151,6 +151,7 @@ class FaultFilterTest : public testing::Test { void TestPerFilterConfigFault(const Router::RouteSpecificFilterConfig* route_fault, const Router::RouteSpecificFilterConfig* vhost_fault); + Stats::IsolatedStoreImpl stats_; FaultFilterConfigSharedPtr config_; std::unique_ptr filter_; NiceMock decoder_filter_callbacks_; @@ -158,7 +159,6 @@ class FaultFilterTest : public testing::Test { Http::TestHeaderMapImpl request_headers_; Http::TestHeaderMapImpl response_headers_; Buffer::OwnedImpl data_; - 
Stats::IsolatedStoreImpl stats_; NiceMock runtime_; Event::MockTimer* timer_{}; Event::SimulatedTimeSystem time_system_; diff --git a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc index 50679f090c42..774f4e54c216 100644 --- a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc +++ b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc @@ -52,11 +52,11 @@ request_type: internal ~IpTaggingFilterTest() override { filter_->onDestroy(); } + NiceMock stats_; IpTaggingFilterConfigSharedPtr config_; std::unique_ptr filter_; NiceMock filter_callbacks_; Buffer::OwnedImpl data_; - NiceMock stats_; NiceMock runtime_; }; diff --git a/test/extensions/filters/network/mongo_proxy/proxy_test.cc b/test/extensions/filters/network/mongo_proxy/proxy_test.cc index 01d0c8082332..a70c1a6f279c 100644 --- a/test/extensions/filters/network/mongo_proxy/proxy_test.cc +++ b/test/extensions/filters/network/mongo_proxy/proxy_test.cc @@ -8,6 +8,7 @@ #include "extensions/filters/network/mongo_proxy/bson_impl.h" #include "extensions/filters/network/mongo_proxy/codec_impl.h" +#include "extensions/filters/network/mongo_proxy/mongo_stats.h" #include "extensions/filters/network/mongo_proxy/proxy.h" #include "extensions/filters/network/well_known_names.h" @@ -62,7 +63,7 @@ class TestProxyFilter : public ProxyFilter { class MongoProxyFilterTest : public testing::Test { public: - MongoProxyFilterTest() { setup(); } + MongoProxyFilterTest() : mongo_stats_(std::make_shared(store_, "test")) { setup(); } void setup() { ON_CALL(runtime_.snapshot_, featureEnabled("mongo.proxy_enabled", 100)) @@ -82,9 +83,9 @@ class MongoProxyFilterTest : public testing::Test { } void initializeFilter(bool emit_dynamic_metadata = false) { - filter_ = std::make_unique("test.", store_, runtime_, access_log_, - fault_config_, drain_decision_, - dispatcher_.timeSource(), emit_dynamic_metadata); + filter_ = 
std::make_unique( + "test.", store_, runtime_, access_log_, fault_config_, drain_decision_, + dispatcher_.timeSource(), emit_dynamic_metadata, mongo_stats_); filter_->initializeReadFilterCallbacks(read_filter_callbacks_); filter_->onNewConnection(); @@ -114,6 +115,7 @@ class MongoProxyFilterTest : public testing::Test { Buffer::OwnedImpl fake_data_; NiceMock store_; + MongoStatsSharedPtr mongo_stats_; NiceMock runtime_; NiceMock dispatcher_; std::shared_ptr file_{ diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 8b6dbb4b0263..4fc6090dde14 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -699,7 +699,8 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { dispatcher->run(Event::Dispatcher::RunType::Block); if (!options.expectedServerStats().empty()) { - EXPECT_EQ(1UL, server_stats_store.counter(options.expectedServerStats()).value()); + EXPECT_EQ(1UL, server_stats_store.counter(options.expectedServerStats()).value()) + << options.expectedServerStats(); } if (!options.expectedClientStats().empty()) { diff --git a/tools/check_format.py b/tools/check_format.py index 2adcc22e8c35..26b3e621848f 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -42,20 +42,6 @@ "./test/test_common/utility.cc", "./test/test_common/utility.h", "./test/integration/integration.h") -# Files matching these directories can use stats by string for now. These should -# be eliminated but for now we don't want to grow this work. The goal for this -# whitelist is to eliminate it by making code transformations similar to -# https://github.com/envoyproxy/envoy/pull/7573 and others. -# -# TODO(#4196): Eliminate this list completely and then merge #4980. 
-STAT_FROM_STRING_WHITELIST = ("./source/extensions/filters/http/fault/fault_filter.cc", - "./source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc", - "./source/extensions/filters/network/mongo_proxy/proxy.cc", - "./source/extensions/stat_sinks/common/statsd/statsd.cc", - "./source/extensions/transport_sockets/tls/context_impl.cc", - "./source/server/guarddog_impl.cc", - "./source/server/overload_manager_impl.cc") - # Files in these paths can use MessageLite::SerializeAsString SERIALIZE_AS_STRING_WHITELIST = ("./test/common/protobuf/utility_test.cc", "./test/common/grpc/codec_test.cc") @@ -339,10 +325,6 @@ def whitelistedForJsonStringToMessage(file_path): return file_path in JSON_STRING_TO_MESSAGE_WHITELIST -def whitelistedForStatFromString(file_path): - return file_path in STAT_FROM_STRING_WHITELIST - - def whitelistedForStdRegex(file_path): return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST @@ -591,7 +573,6 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") if isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \ - not whitelistedForStatFromString(file_path) and \ ('.counter(' in line or '.gauge(' in line or '.histogram(' in line): reportError("Don't lookup stats by name at runtime; use StatName saved during construction") diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index bb101fa95946..e0ccece9cfc5 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -777,6 +777,7 @@ uint un unacked unary +uncomment unconfigurable unconfigured uncontended @@ -792,6 +793,7 @@ unescaping unindexed uninsantiated uninstantiated +uniq unix unlink unlinked From 7f060b63fee64d4fb9d7f19661f1058fcebb3357 Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Wed, 28 Aug 2019 19:10:07 -0700 Subject: [PATCH 10/31] tls_inspector: inline the recv in the onAccept (#7951) Description: As discussed in 
#7864 this PR is the attempt to peek the socket at the invoke of onAccept. Usually client_hello packet should be in the buffer when tls_inspector is peeking, we could save a poll cycle for this connection. Once we agree on the solution I can apply to http_inspector as well. The expecting latency improvement especially when poll cycle is large. Benchmark: Env: hardware Intel(R) Xeon(R) CPU @ 2.20GHz envoy: concurrency = 1, tls_inspector as listener filter. One tls filter chain, and one plain text filter chain. load background: a [sniper](https://github.com/lubia/sniper) client with concurrency = 5 hitting the server with tls handshake, aiming to hit using the tls_filter chain. The qps is about 170/s Another load client hitting the plain text filter chain but would go through tls_inspector with concurrency = 1 This PR: TransactionTime: 10.3 - 11.0 ms(mean) Master TransactionTime: 12.3 - 12.8 ms(mean) Risk Level: Med (ActiveSocket code is affected to adopt the side effect of onAccept) Testing: Docs Changes: Release Notes: Fixes #7864 Signed-off-by: Yuchen Dai --- .../common/network/io_socket_handle_impl.cc | 6 +- .../listener/tls_inspector/tls_inspector.cc | 78 ++++++++++++------- .../listener/tls_inspector/tls_inspector.h | 12 ++- source/server/connection_handler_impl.cc | 17 +++- ...dr_family_aware_socket_option_impl_test.cc | 1 + test/config_test/config_test.cc | 1 + .../proxy_protocol/proxy_protocol_test.cc | 25 ++++-- .../tls_inspector/tls_inspector_test.cc | 37 +++++++++ test/mocks/api/mocks.cc | 8 +- test/server/BUILD | 1 + test/server/connection_handler_test.cc | 63 ++++++++++++++- test/server/listener_manager_impl_test.cc | 24 +++--- 12 files changed, 217 insertions(+), 56 deletions(-) diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index cce957655380..b44ec48fb3fe 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -26,9 +26,11 
@@ IoSocketHandleImpl::~IoSocketHandleImpl() { Api::IoCallUint64Result IoSocketHandleImpl::close() { ASSERT(fd_ != -1); - const int rc = ::close(fd_); + auto& os_syscalls = Api::OsSysCallsSingleton::get(); + const auto& result = os_syscalls.close(fd_); fd_ = -1; - return Api::IoCallUint64Result(rc, Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); + return Api::IoCallUint64Result(result.rc_, + Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); } bool IoSocketHandleImpl::isOpen() const { return fd_ != -1; } diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index f5d2a8e4cef1..13b52cdc6ba0 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -72,23 +72,47 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { ENVOY_LOG(debug, "tls inspector: new connection accepted"); Network::ConnectionSocket& socket = cb.socket(); ASSERT(file_event_ == nullptr); - - file_event_ = cb.dispatcher().createFileEvent( - socket.ioHandle().fd(), - [this](uint32_t events) { - if (events & Event::FileReadyType::Closed) { - config_->stats().connection_closed_.inc(); - done(false); - return; - } - - ASSERT(events == Event::FileReadyType::Read); - onRead(); - }, - Event::FileTriggerType::Edge, Event::FileReadyType::Read | Event::FileReadyType::Closed); - cb_ = &cb; - return Network::FilterStatus::StopIteration; + + ParseState parse_state = onRead(); + switch (parse_state) { + case ParseState::Error: + // As per discussion in https://github.com/envoyproxy/envoy/issues/7864 + // we don't add new enum in FilterStatus so we have to signal the caller + // the new condition. 
+ cb.socket().close(); + return Network::FilterStatus::StopIteration; + case ParseState::Done: + return Network::FilterStatus::Continue; + case ParseState::Continue: + // do nothing but create the event + file_event_ = cb.dispatcher().createFileEvent( + socket.ioHandle().fd(), + [this](uint32_t events) { + if (events & Event::FileReadyType::Closed) { + config_->stats().connection_closed_.inc(); + done(false); + return; + } + + ASSERT(events == Event::FileReadyType::Read); + ParseState parse_state = onRead(); + switch (parse_state) { + case ParseState::Error: + done(false); + break; + case ParseState::Done: + done(true); + break; + case ParseState::Continue: + // do nothing but wait for the next event + break; + } + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read | Event::FileReadyType::Closed); + return Network::FilterStatus::StopIteration; + } + NOT_REACHED_GCOVR_EXCL_LINE } void Filter::onALPN(const unsigned char* data, unsigned int len) { @@ -122,7 +146,7 @@ void Filter::onServername(absl::string_view name) { clienthello_success_ = true; } -void Filter::onRead() { +ParseState Filter::onRead() { // This receive code is somewhat complicated, because it must be done as a MSG_PEEK because // there is no way for a listener-filter to pass payload data to the ConnectionImpl and filters // that get created later. 
@@ -141,11 +165,10 @@ void Filter::onRead() { ENVOY_LOG(trace, "tls inspector: recv: {}", result.rc_); if (result.rc_ == -1 && result.errno_ == EAGAIN) { - return; + return ParseState::Continue; } else if (result.rc_ < 0) { config_->stats().read_error_.inc(); - done(false); - return; + return ParseState::Error; } // Because we're doing a MSG_PEEK, data we've seen before gets returned every time, so @@ -154,8 +177,9 @@ void Filter::onRead() { const uint8_t* data = buf_ + read_; const size_t len = result.rc_ - read_; read_ = result.rc_; - parseClientHello(data, len); + return parseClientHello(data, len); } + return ParseState::Continue; } void Filter::done(bool success) { @@ -164,7 +188,7 @@ void Filter::done(bool success) { cb_->continueFilterChain(success); } -void Filter::parseClientHello(const void* data, size_t len) { +ParseState Filter::parseClientHello(const void* data, size_t len) { // Ownership is passed to ssl_ in SSL_set_bio() bssl::UniquePtr bio(BIO_new_mem_buf(data, len)); @@ -185,9 +209,9 @@ void Filter::parseClientHello(const void* data, size_t len) { // We've hit the specified size limit. This is an unreasonably large ClientHello; // indicate failure. 
config_->stats().client_hello_too_large_.inc(); - done(false); + return ParseState::Error; } - break; + return ParseState::Continue; case SSL_ERROR_SSL: if (clienthello_success_) { config_->stats().tls_found_.inc(); @@ -200,11 +224,9 @@ void Filter::parseClientHello(const void* data, size_t len) { } else { config_->stats().tls_not_found_.inc(); } - done(true); - break; + return ParseState::Done; default: - done(false); - break; + return ParseState::Error; } } diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.h b/source/extensions/filters/listener/tls_inspector/tls_inspector.h index cf75fe87e954..ee353ce9ef08 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.h +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.h @@ -36,6 +36,14 @@ struct TlsInspectorStats { ALL_TLS_INSPECTOR_STATS(GENERATE_COUNTER_STRUCT) }; +enum class ParseState { + // Parse result is out. It could be tls or not. + Done, + // Parser expects more data. + Continue, + // Parser reports unrecoverable error. + Error +}; /** * Global configuration for TLS inspector. */ @@ -68,8 +76,8 @@ class Filter : public Network::ListenerFilter, Logger::Loggable std::string { return api_->fileSystem().fileReadToEnd(file); })); + ON_CALL(os_sys_calls_, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0})); // Here we setup runtime to mimic the actual deprecated feature list used in the // production code. 
Note that this test is actually more strict than production because diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index cdc225696c3d..1f4d8a750c85 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -287,7 +287,10 @@ TEST_P(ProxyProtocolTest, errorRecv_2) { const ssize_t rc = ::readv(fd, iov, iovcnt); return Api::SysCallSizeResult{rc, errno}; })); - + EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](int fd) { + const int rc = ::close(fd); + return Api::SysCallIntResult{rc, errno}; + })); connect(false); write(buffer, sizeof(buffer)); @@ -316,7 +319,10 @@ TEST_P(ProxyProtocolTest, errorFIONREAD_1) { const ssize_t rc = ::readv(fd, iov, iovcnt); return Api::SysCallSizeResult{rc, errno}; })); - + EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](int fd) { + const int rc = ::close(fd); + return Api::SysCallIntResult{rc, errno}; + })); connect(false); write(buffer, sizeof(buffer)); @@ -527,7 +533,10 @@ TEST_P(ProxyProtocolTest, v2ParseExtensionsIoctlError) { const ssize_t rc = ::readv(fd, iov, iovcnt); return Api::SysCallSizeResult{rc, errno}; })); - + EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](int fd) { + const int rc = ::close(fd); + return Api::SysCallIntResult{rc, errno}; + })); connect(false); write(buffer, sizeof(buffer)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -656,7 +665,10 @@ TEST_P(ProxyProtocolTest, v2Fragmented3Error) { const ssize_t rc = ::readv(fd, iov, iovcnt); return Api::SysCallSizeResult{rc, errno}; })); - + EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](int fd) { + const int rc = ::close(fd); + return Api::SysCallIntResult{rc, errno}; + })); connect(false); write(buffer, 17); @@ -702,7 
+714,10 @@ TEST_P(ProxyProtocolTest, v2Fragmented4Error) { const ssize_t rc = ::readv(fd, iov, iovcnt); return Api::SysCallSizeResult{rc, errno}; })); - + EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](int fd) { + const int rc = ::close(fd); + return Api::SysCallIntResult{rc, errno}; + })); connect(false); write(buffer, 10); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index 63cff699cffe..d9793c98a8e6 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -43,6 +43,13 @@ class TlsInspectorTest : public testing::Test { EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_)); + // Prepare the first recv attempt during + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce( + Invoke([](int fd, void* buffer, size_t length, int flag) -> Api::SysCallSizeResult { + ENVOY_LOG_MISC(error, "In mock syscall recv {} {} {} {}", fd, buffer, length, flag); + return Api::SysCallSizeResult{static_cast(0), 0}; + })); EXPECT_CALL(dispatcher_, createFileEvent_(_, _, Event::FileTriggerType::Edge, Event::FileReadyType::Read | Event::FileReadyType::Closed)) @@ -231,6 +238,36 @@ TEST_F(TlsInspectorTest, NotSsl) { EXPECT_EQ(1, cfg_->stats().tls_not_found_.value()); } +TEST_F(TlsInspectorTest, InlineReadSucceed) { + filter_ = std::make_unique(cfg_); + + EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_)); + EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); + EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_)); + const std::vector alpn_protos = {absl::string_view("h2")}; + const std::string servername("example.com"); + std::vector client_hello = 
Tls::Test::generateClientHello(servername, "\x02h2"); + + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Invoke( + [&client_hello](int fd, void* buffer, size_t length, int flag) -> Api::SysCallSizeResult { + ENVOY_LOG_MISC(trace, "In mock syscall recv {} {} {} {}", fd, buffer, length, flag); + ASSERT(length >= client_hello.size()); + memcpy(buffer, client_hello.data(), client_hello.size()); + return Api::SysCallSizeResult{ssize_t(client_hello.size()), 0}; + })); + + // No event is created if the inline recv parses the hello. + EXPECT_CALL(dispatcher_, + createFileEvent_(_, _, Event::FileTriggerType::Edge, + Event::FileReadyType::Read | Event::FileReadyType::Closed)) + .Times(0); + + EXPECT_CALL(socket_, setRequestedServerName(Eq(servername))); + EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); + EXPECT_CALL(socket_, setDetectedTransportProtocol(absl::string_view("tls"))); + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onAccept(cb_)); +} } // namespace } // namespace TlsInspector } // namespace ListenerFilters diff --git a/test/mocks/api/mocks.cc b/test/mocks/api/mocks.cc index 77f6a4463dd3..0132f89b272e 100644 --- a/test/mocks/api/mocks.cc +++ b/test/mocks/api/mocks.cc @@ -7,6 +7,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::Invoke; using testing::Return; namespace Envoy { @@ -26,7 +27,12 @@ Event::DispatcherPtr MockApi::allocateDispatcher(Buffer::WatermarkFactoryPtr&& w return Event::DispatcherPtr{allocateDispatcher_(std::move(watermark_factory), time_system_)}; } -MockOsSysCalls::MockOsSysCalls() = default; +MockOsSysCalls::MockOsSysCalls() { + ON_CALL(*this, close(_)).WillByDefault(Invoke([](int fd) { + const int rc = ::close(fd); + return SysCallIntResult{rc, errno}; + })); +} MockOsSysCalls::~MockOsSysCalls() = default; diff --git a/test/server/BUILD b/test/server/BUILD index e46a0106c687..821e257bd2d7 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -58,6 +58,7 @@ envoy_cc_test(
"//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", + "//test/test_common:threadsafe_singleton_injector_lib", ], ) diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 092ff2780649..eb53c6261cfa 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -3,6 +3,7 @@ #include "common/common/utility.h" #include "common/network/address_impl.h" +#include "common/network/io_socket_handle_impl.h" #include "common/network/raw_buffer_socket.h" #include "common/network/utility.h" @@ -11,6 +12,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -27,7 +29,6 @@ using testing::ReturnRef; namespace Envoy { namespace Server { namespace { - class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable { public: ConnectionHandlerTest() @@ -110,6 +111,8 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable factory_; std::list listeners_; const Network::FilterChainSharedPtr filter_chain_; + NiceMock os_sys_calls_; + TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; }; TEST_F(ConnectionHandlerTest, RemoveListener) { @@ -611,9 +614,11 @@ TEST_F(ConnectionHandlerTest, ListenerFilterTimeout) { .WillOnce(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus { return Network::FilterStatus::StopIteration; })); + Network::MockConnectionSocket* accepted_socket = new NiceMock(); + Network::IoSocketHandleImpl io_handle{42}; + EXPECT_CALL(*accepted_socket, ioHandle()).WillRepeatedly(ReturnRef(io_handle)); Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_); EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _)); - Network::MockConnectionSocket* accepted_socket = new 
NiceMock(); listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket}, true); Stats::Gauge& downstream_pre_cx_active = stats_store_.gauge("downstream_pre_cx_active", Stats::Gauge::ImportMode::Accumulate); @@ -659,9 +664,11 @@ TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { .WillOnce(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus { return Network::FilterStatus::StopIteration; })); + Network::MockConnectionSocket* accepted_socket = new NiceMock(); + Network::IoSocketHandleImpl io_handle{42}; + EXPECT_CALL(*accepted_socket, ioHandle()).WillRepeatedly(ReturnRef(io_handle)); Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_); EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _)); - Network::MockConnectionSocket* accepted_socket = new NiceMock(); listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket}, true); Stats::Gauge& downstream_pre_cx_active = stats_store_.gauge("downstream_pre_cx_active", Stats::Gauge::ImportMode::Accumulate); @@ -708,9 +715,12 @@ TEST_F(ConnectionHandlerTest, ListenerFilterTimeoutResetOnSuccess) { listener_filter_cb = &cb; return Network::FilterStatus::StopIteration; })); + Network::MockConnectionSocket* accepted_socket = new NiceMock(); + Network::IoSocketHandleImpl io_handle{42}; + EXPECT_CALL(*accepted_socket, ioHandle()).WillRepeatedly(ReturnRef(io_handle)); + Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_); EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _)); - Network::MockConnectionSocket* accepted_socket = new NiceMock(); listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket}, true); EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr)); @@ -755,6 +765,51 @@ TEST_F(ConnectionHandlerTest, ListenerFilterDisabledTimeout) { EXPECT_CALL(*listener, onDestroy()); } +// Listener Filter could close socket in the context of listener callback. 
+TEST_F(ConnectionHandlerTest, ListenerFilterReportError) { + InSequence s; + + TestListener* test_listener = addListener(1, true, false, "test_listener"); + Network::MockListener* listener = new Network::MockListener(); + Network::ListenerCallbacks* listener_callbacks; + EXPECT_CALL(dispatcher_, createListener_(_, _, _, false)) + .WillOnce(Invoke( + [&](Network::Socket&, Network::ListenerCallbacks& cb, bool, bool) -> Network::Listener* { + listener_callbacks = &cb; + return listener; + })); + EXPECT_CALL(test_listener->socket_, localAddress()); + handler_->addListener(*test_listener); + + Network::MockListenerFilter* first_filter = new Network::MockListenerFilter(); + Network::MockListenerFilter* last_filter = new Network::MockListenerFilter(); + + EXPECT_CALL(factory_, createListenerFilterChain(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { + manager.addAcceptFilter(Network::ListenerFilterPtr{first_filter}); + manager.addAcceptFilter(Network::ListenerFilterPtr{last_filter}); + return true; + })); + // The first filter closes the socket + EXPECT_CALL(*first_filter, onAccept(_)) + .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus { + cb.socket().close(); + return Network::FilterStatus::StopIteration; + })); + // The last filter won't be invoked + EXPECT_CALL(*last_filter, onAccept(_)).Times(0); + Network::MockConnectionSocket* accepted_socket = new NiceMock(); + listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket}, true); + + dispatcher_.clearDeferredDeleteList(); + // Make sure the error leads to no listener timer being created. + EXPECT_CALL(dispatcher_, createTimer_(_)).Times(0); + // Make sure we never try to match the filter chain since listener filter doesn't complete.
+ EXPECT_CALL(manager_, findFilterChain(_)).Times(0); + + EXPECT_CALL(*listener, onDestroy()); +} + // Ensure an exception is thrown if there are no filters registered for a UDP listener TEST_F(ConnectionHandlerTest, UdpListenerNoFilterThrowsException) { InSequence s; diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 768468f8c96e..e5b570919e19 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -176,6 +176,7 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { socket_ = std::make_unique>(); local_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); remote_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); + EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); } const Network::FilterChain* @@ -265,11 +266,9 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { const envoy::api::v2::core::SocketOption::SocketState& expected_state, const Network::SocketOptionName& expected_option, int expected_value, uint32_t expected_num_options = 1) { - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); if (expected_option.has_value()) { expectCreateListenSocket(expected_state, expected_num_options); - expectSetsockopt(os_sys_calls, expected_option.level(), expected_option.option(), + expectSetsockopt(os_sys_calls_, expected_option.level(), expected_option.option(), expected_value, expected_num_options); manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -280,6 +279,10 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { } } +protected: + NiceMock os_sys_calls_; + TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; + private: std::unique_ptr socket_; Network::Address::InstanceConstSharedPtr local_address_; @@ -390,9 +393,8 @@ 
TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, Network::Address::SocketType::Datagram, _, true)); - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - EXPECT_CALL(os_sys_calls, setsockopt_(_, _, _, _, _)).Times(testing::AtLeast(1)); + EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)).Times(testing::AtLeast(1)); + EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); } @@ -694,6 +696,7 @@ drain_type: default ON_CALL(os_sys_calls, socket(AF_INET, _, 0)).WillByDefault(Return(Api::SysCallIntResult{5, 0})); ON_CALL(os_sys_calls, socket(AF_INET6, _, 0)).WillByDefault(Return(Api::SysCallIntResult{-1, 0})); + ON_CALL(os_sys_calls, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0})); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true)); @@ -726,6 +729,7 @@ drain_type: default ON_CALL(os_sys_calls, socket(AF_INET, _, 0)).WillByDefault(Return(Api::SysCallIntResult{-1, 0})); ON_CALL(os_sys_calls, socket(AF_INET6, _, 0)).WillByDefault(Return(Api::SysCallIntResult{5, 0})); + ON_CALL(os_sys_calls, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0})); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true)); @@ -2926,7 +2930,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentFreebindListenerDisabl TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentListenerEnabled) { auto listener = createIPv4Listener("TransparentListener"); listener.mutable_transparent()->set_value(true); - testSocketOption(listener, envoy::api::v2::core::SocketOption::STATE_PREBIND, ENVOY_SOCKET_IP_TRANSPARENT, /* expected_value */ 1, /* expected_num_options */ 2); @@ -2961,9 +2964,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, FastOpenListenerEnabled) { } 
TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { - NiceMock os_sys_calls; - TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - const envoy::api::v2::Listener listener = parseListenerFromV2Yaml(R"EOF( name: SockoptsListener address: @@ -2981,11 +2981,11 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { expectCreateListenSocket(envoy::api::v2::core::SocketOption::STATE_PREBIND, /* expected_num_options */ 3); - expectSetsockopt(os_sys_calls, + expectSetsockopt(os_sys_calls_, /* expected_sockopt_level */ 1, /* expected_sockopt_name */ 2, /* expected_value */ 3); - expectSetsockopt(os_sys_calls, + expectSetsockopt(os_sys_calls_, /* expected_sockopt_level */ 4, /* expected_sockopt_name */ 5, /* expected_value */ 6); From 0fde42edabd4b076e745a3153a7a2cd8ed64416f Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Wed, 28 Aug 2019 19:13:17 -0700 Subject: [PATCH 11/31] Fixes gcc 8.3.1 build failure due to FilterChainBenchmarkFixture::SetUp hiding base-class virtual functions (#8071) Description: I'm seeing "bazel-out/k8-fastbuild/bin/external/com_github_google_benchmark/_virtual_includes/benchmark/benchmark/benchmark.h:1071:16: error: 'virtual void benchmark::Fixture::SetUp(benchmark::State&)' was hidden" when running tests. This resolves the issue with hiding of the base-class functions. 
Risk Level: low Testing: Docs Changes: Release Notes: Signed-off-by: Dmitri Dolguikh --- test/server/filter_chain_benchmark_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 0517635414c7..1ce0577c141e 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -149,6 +149,7 @@ const char YamlSingleDstPortBottom[] = R"EOF( class FilterChainBenchmarkFixture : public benchmark::Fixture { public: + using Fixture::SetUp; void SetUp(const ::benchmark::State& state) override { int64_t input_size = state.range(0); std::vector port_chains; From 9ac491af4ae700fde1f8135717f7dea0b407627a Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 29 Aug 2019 09:12:05 -0400 Subject: [PATCH 12/31] test: fix ups for various deprecated fields (#8068) Takeaways: we've lost the ability to do empty regex (which was covered in router tests and is proto constraint validated on the new safe regex) as well as negative lookahead (also covered in tests) along with a host of other things conveniently documented as not supported here: https://github.com/google/re2/wiki/Syntax Otherwise split up a bunch of tests, duplicated and tagged a bunch of tests, and cleaning up after we finally can remove deprecated fields again will be an order of magnitude easier. Also fixing a dup relnote from #8014 Risk Level: n/a (test only) Testing: yes. yes there is. 
Docs Changes: no Release Notes: no Signed-off-by: Alyssa Wilk --- docs/root/intro/version_history.rst | 1 - test/common/router/config_impl_test.cc | 458 ++++++++++++++---- .../network/dubbo_proxy/conn_manager_test.cc | 7 +- .../network/dubbo_proxy/route_matcher_test.cc | 26 +- .../thrift_proxy/route_matcher_test.cc | 7 +- 5 files changed, 402 insertions(+), 97 deletions(-) diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 715ec0fa839f..c3598a5c7c7a 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -29,7 +29,6 @@ Version history * http: added the ability to reject HTTP/1.1 requests with invalid HTTP header values, using the runtime feature `envoy.reloadable_features.strict_header_validation`. * http: changed Envoy to forward existing x-forwarded-proto from upstream trusted proxies. Guarded by `envoy.reloadable_features.trusted_forwarded_proto` which defaults true. * http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation` field. -* http: changed Envoy to forward existing x-forwarded-proto from downstream trusted proxies. Guarded by `envoy.reloadable_features.trusted_forwarded_proto` which defaults true. * http: added the ability to :ref:`merge adjacent slashes` in the path. * listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. * listeners: added :ref:`HTTP inspector listener filter `. diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index ff6aeb4555d6..717bf35651a7 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -116,8 +116,169 @@ class ConfigImplTestBase { class RouteMatcherTest : public testing::Test, public ConfigImplTestBase {}; -// TODO(alyssawilk) go through all these tests and update or duplicate. 
-TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { +// When removing legacy fields this test can be removed. +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: regex + domains: + - bat.com + routes: + - match: + regex: "/t[io]c" + route: + cluster: clock + - match: + safe_regex: + google_re2: {} + regex: "/baa+" + route: + cluster: sheep + - match: + regex: ".*/\\d{3}$" + route: + cluster: three_numbers + prefix_rewrite: "/rewrote" + - match: + regex: ".*" + route: + cluster: regex_default +- name: regex2 + domains: + - bat2.com + routes: + - match: + regex: '' + route: + cluster: nothingness + - match: + regex: ".*" + route: + cluster: regex_default +- name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: instant-server + timeout: 30s + virtual_clusters: + - pattern: "^/rides$" + method: POST + name: ride_request + - pattern: "^/rides/\\d+$" + method: PUT + name: update_ride + - pattern: "^/users/\\d+/chargeaccounts$" + method: POST + name: cc_add + - pattern: "^/users/\\d+/chargeaccounts/(?!validate)\\w+$" + method: PUT + name: cc_add + - pattern: "^/users$" + method: POST + name: create_user_login + - pattern: "^/users/\\d+$" + method: PUT + name: update_user + )EOF"; + + NiceMock stream_info; + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + + // Regular Expression matching + EXPECT_EQ("clock", + config.route(genHeaders("bat.com", "/tic", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("clock", + config.route(genHeaders("bat.com", "/toc", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("regex_default", + config.route(genHeaders("bat.com", "/tac", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("regex_default", + config.route(genHeaders("bat.com", "", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("regex_default", + config.route(genHeaders("bat.com", "/tick", "GET"), 
0)->routeEntry()->clusterName()); + EXPECT_EQ("regex_default", + config.route(genHeaders("bat.com", "/tic/toc", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("sheep", + config.route(genHeaders("bat.com", "/baa", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ( + "sheep", + config.route(genHeaders("bat.com", "/baaaaaaaaaaaa", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("regex_default", + config.route(genHeaders("bat.com", "/ba", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("nothingness", + config.route(genHeaders("bat2.com", "", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("regex_default", + config.route(genHeaders("bat2.com", "/foo", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("regex_default", + config.route(genHeaders("bat2.com", " ", "GET"), 0)->routeEntry()->clusterName()); + + // Regular Expression matching with query string params + EXPECT_EQ( + "clock", + config.route(genHeaders("bat.com", "/tic?tac=true", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ( + "regex_default", + config.route(genHeaders("bat.com", "/tac?tic=true", "GET"), 0)->routeEntry()->clusterName()); + + // Virtual cluster testing. 
+ { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/rides", "GET"); + EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/rides/blah", "POST"); + EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/rides", "POST"); + EXPECT_EQ("ride_request", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/rides/123", "PUT"); + EXPECT_EQ("update_ride", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/rides/123/456", "POST"); + EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = + genHeaders("api.lyft.com", "/users/123/chargeaccounts", "POST"); + EXPECT_EQ("cc_add", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = + genHeaders("api.lyft.com", "/users/123/chargeaccounts/hello123", "PUT"); + EXPECT_EQ("cc_add", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = + genHeaders("api.lyft.com", "/users/123/chargeaccounts/validate", "PUT"); + EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/foo/bar", "PUT"); + EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/users", "POST"); + EXPECT_EQ("create_user_login", + virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = 
genHeaders("api.lyft.com", "/users/123", "PUT"); + EXPECT_EQ("update_user", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } + { + Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/something/else", "GET"); + EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); + } +} + +TEST_F(RouteMatcherTest, TestRoutes) { const std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -172,7 +333,9 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { - bat.com routes: - match: - regex: "/t[io]c" + safe_regex: + google_re2: {} + regex: "/t[io]c" route: cluster: clock - match: @@ -182,12 +345,16 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { route: cluster: sheep - match: - regex: ".*/\\d{3}$" + safe_regex: + google_re2: {} + regex: ".*/\\d{3}$" route: cluster: three_numbers prefix_rewrite: "/rewrote" - match: - regex: ".*" + safe_regex: + google_re2: {} + regex: ".*" route: cluster: regex_default - name: regex2 @@ -195,11 +362,9 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { - bat2.com routes: - match: - regex: '' - route: - cluster: nothingness - - match: - regex: ".*" + safe_regex: + google_re2: {} + regex: ".*" route: cluster: regex_default - name: default @@ -280,23 +445,45 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { cluster: instant-server timeout: 30s virtual_clusters: - - pattern: "^/rides$" - method: POST + - headers: + - name: ":path" + safe_regex_match: + google_re2: {} + regex: "^/rides$" + - name: ":method" + exact_match: POST name: ride_request - - pattern: "^/rides/\\d+$" - method: PUT + - headers: + - name: ":path" + safe_regex_match: + google_re2: {} + regex: "^/rides/\\d+$" + - name: ":method" + exact_match: PUT name: update_ride - - pattern: "^/users/\\d+/chargeaccounts$" - method: POST - name: cc_add - - pattern: "^/users/\\d+/chargeaccounts/(?!validate)\\w+$" - method: PUT + - headers: + - name: ":path" + 
safe_regex_match: + google_re2: {} + regex: "^/users/\\d+/chargeaccounts$" + - name: ":method" + exact_match: POST name: cc_add - - pattern: "^/users$" - method: POST + - headers: + - name: ":path" + safe_regex_match: + google_re2: {} + regex: "^/users$" + - name: ":method" + exact_match: POST name: create_user_login - - pattern: "^/users/\\d+$" - method: PUT + - headers: + - name: ":path" + safe_regex_match: + google_re2: {} + regex: "^/users/\\d+$" + - name: ":method" + exact_match: PUT name: update_user - headers: - name: ":path" @@ -372,8 +559,6 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { config.route(genHeaders("bat.com", "/baaaaaaaaaaaa", "GET"), 0)->routeEntry()->clusterName()); EXPECT_EQ("regex_default", config.route(genHeaders("bat.com", "/ba", "GET"), 0)->routeEntry()->clusterName()); - EXPECT_EQ("nothingness", - config.route(genHeaders("bat2.com", "", "GET"), 0)->routeEntry()->clusterName()); EXPECT_EQ("regex_default", config.route(genHeaders("bat2.com", "/foo", "GET"), 0)->routeEntry()->clusterName()); EXPECT_EQ("regex_default", @@ -556,21 +741,6 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutes)) { Http::TestHeaderMapImpl headers = genHeaders("api.lyft.com", "/rides/123/456", "POST"); EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); } - { - Http::TestHeaderMapImpl headers = - genHeaders("api.lyft.com", "/users/123/chargeaccounts", "POST"); - EXPECT_EQ("cc_add", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); - } - { - Http::TestHeaderMapImpl headers = - genHeaders("api.lyft.com", "/users/123/chargeaccounts/hello123", "PUT"); - EXPECT_EQ("cc_add", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); - } - { - Http::TestHeaderMapImpl headers = - genHeaders("api.lyft.com", "/users/123/chargeaccounts/validate", "PUT"); - EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); - } { Http::TestHeaderMapImpl 
headers = genHeaders("api.lyft.com", "/foo/bar", "PUT"); EXPECT_EQ("other", virtualClusterName(config.route(headers, 0)->routeEntry(), headers)); @@ -618,7 +788,8 @@ TEST_F(RouteMatcherTest, TestRoutesWithWildcardAndDefaultOnly) { config.route(genHeaders("example.com", "/", "GET"), 0)->routeEntry()->clusterName()); } -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutesWithInvalidRegex)) { +// When deprecating regex: this test can be removed. +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutesWithInvalidRegexLegacy)) { std::string invalid_route = R"EOF( virtual_hosts: - name: regex @@ -651,6 +822,46 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutesWithInvalidRegex)) { EnvoyException, "Invalid regex '\\^/\\(\\+invalid\\)':"); } +TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { + std::string invalid_route = R"EOF( +virtual_hosts: + - name: regex + domains: ["*"] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/(+invalid)" + route: { cluster: "regex" } + )EOF"; + + std::string invalid_virtual_cluster = R"EOF( +virtual_hosts: + - name: regex + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: "regex" } + virtual_clusters: + name: "invalid" + headers: + name: "invalid" + safe_regex_match: + google_re2: {} + regex: "^/(+invalid)" + )EOF"; + + NiceMock stream_info; + + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_route), factory_context_, true), + EnvoyException, "no argument for repetition operator:"); + + EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_virtual_cluster), + factory_context_, true), + EnvoyException, "no argument for repetition operator"); +} + // Virtual cluster that contains neither pattern nor regex. This must be checked while pattern is // deprecated. 
TEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) { @@ -1078,7 +1289,7 @@ name: foo } } -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Priority)) { +TEST_F(RouteMatcherTest, Priority) { const std::string yaml = R"EOF( virtual_hosts: - name: local_service @@ -1094,10 +1305,6 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Priority)) { prefix: "/bar" route: cluster: local_service_grpc - virtual_clusters: - - pattern: "^/bar$" - method: POST - name: foo )EOF"; TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); @@ -1165,7 +1372,7 @@ TEST_F(RouteMatcherTest, NoAutoRewriteAndAutoRewriteHeader) { EnvoyException); } -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(HeaderMatchedRouting)) { +TEST_F(RouteMatcherTest, HeaderMatchedRouting) { const std::string yaml = R"EOF( virtual_hosts: - name: local_service @@ -1199,7 +1406,9 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(HeaderMatchedRouting)) { prefix: "/" headers: - name: test_header_pattern - regex_match: "^user=test-\\d+$" + safe_regex_match: + google_re2: {} + regex: "^user=test-\\d+$" route: cluster: local_service_with_header_pattern_set_regex - match: @@ -1289,7 +1498,8 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(HeaderMatchedRouting)) { } // Verify the fixes for https://github.com/envoyproxy/envoy/issues/2406 -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidHeaderMatchedRoutingConfig)) { +// When removing regex_match this test can be removed entirely. 
+TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidHeaderMatchedRoutingConfigLegacy)) { std::string value_with_regex_chars = R"EOF( virtual_hosts: - name: local_service @@ -1324,6 +1534,45 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidHeaderMatchedRoutingConf EnvoyException, "Invalid regex"); } +// Verify the fixes for https://github.com/envoyproxy/envoy/issues/2406 +TEST_F(RouteMatcherTest, InvalidHeaderMatchedRoutingConfig) { + std::string value_with_regex_chars = R"EOF( +virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + headers: + - name: test_header + exact_match: "(+not a regex)" + route: { cluster: "local_service" } + )EOF"; + + std::string invalid_regex = R"EOF( +virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + headers: + - name: test_header + safe_regex_match: + google_re2: {} + regex: "(+invalid regex)" + route: { cluster: "local_service" } + )EOF"; + + EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(value_with_regex_chars), + factory_context_, true)); + + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_regex), factory_context_, true), + EnvoyException, "no argument for repetition operator"); +} + +// When removing value: simply remove that section of the config and the relevant test. TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(QueryParamMatchedRouting)) { const std::string yaml = R"EOF( virtual_hosts: @@ -1438,7 +1687,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(QueryParamMatchedRouting)) { } } -// Verify the fixes for https://github.com/envoyproxy/envoy/issues/2406 +// When removing value: this test can be removed. 
TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidQueryParamMatchedRoutingConfig)) { std::string value_with_regex_chars = R"EOF( virtual_hosts: @@ -2250,7 +2499,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig404) { config.route(headers, 0)->routeEntry()->clusterNotFoundResponseCode()); } -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { +TEST_F(RouteMatcherTest, Shadow) { const std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -2268,7 +2517,11 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { route: request_mirror_policy: cluster: some_cluster2 - runtime_key: foo + runtime_fraction: + default_value: + numerator: 20 + denominator: HUNDRED + runtime_key: foo cluster: www2 - match: prefix: "/baz" @@ -2308,6 +2561,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { class RouteConfigurationV2 : public testing::Test, public ConfigImplTestBase {}; +// When removing runtime_key: this test can be removed. TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RequestMirrorPolicy)) { const std::string yaml = R"EOF( name: foo @@ -4265,6 +4519,8 @@ TEST_F(RoutePropertyTest, excludeVHRateLimits) { EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); } +// When allow_origin: and allow_origin_regex: are removed, simply remove them +// and the relevant checks below. 
TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsConfig)) { const std::string yaml = R"EOF( virtual_hosts: @@ -4327,7 +4583,7 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsConfig)) { EXPECT_EQ(cors_policy->allowCredentials(), true); } -TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsConfig)) { +TEST_F(RoutePropertyTest, TestRouteCorsConfig) { const std::string yaml = R"EOF( virtual_hosts: - name: "default" @@ -4338,7 +4594,8 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsConfig)) { route: cluster: "ats" cors: - allow_origin: ["test-origin"] + allow_origin_string_match: + - exact: "test-origin" allow_methods: "test-methods" allow_headers: "test-headers" expose_headers: "test-expose-headers" @@ -4380,7 +4637,8 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsConfig)) { EXPECT_EQ(cors_policy->allowCredentials(), true); } -TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsLegacyConfig)) { +// When allow-origin: is removed, this test can be removed. +TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TTestVHostCorsLegacyConfig)) { const std::string yaml = R"EOF( virtual_hosts: - name: default @@ -4419,6 +4677,7 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsLegacyConfig)) { EXPECT_EQ(cors_policy->allowCredentials(), true); } +// When allow-origin: is removed, this test can be removed. 
TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsLegacyConfig)) { const std::string yaml = R"EOF( virtual_hosts: @@ -4910,14 +5169,17 @@ name: foo Envoy::EnvoyException, "Cannot create a Baz when metadata is empty."); } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RouteConfigGetters)) { +TEST_F(RouteConfigurationV2, RouteConfigGetters) { const std::string yaml = R"EOF( name: foo virtual_hosts: - name: bar domains: ["*"] routes: - - match: { regex: "/rege[xy]" } + - match: + safe_regex: + google_re2: {} + regex: "/rege[xy]" route: { cluster: ww2 } - match: { path: "/exact-path" } route: { cluster: ww2 } @@ -4949,19 +5211,25 @@ name: foo EXPECT_EQ("foo", route_entry->virtualHost().routeConfig().name()); } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RouteTracingConfig)) { +TEST_F(RouteConfigurationV2, RouteTracingConfig) { const std::string yaml = R"EOF( name: foo virtual_hosts: - name: bar domains: ["*"] routes: - - match: { regex: "/first" } + - match: + safe_regex: + google_re2: {} + regex: "/first" tracing: client_sampling: numerator: 1 route: { cluster: ww2 } - - match: { regex: "/second" } + - match: + safe_regex: + google_re2: {} + regex: "/second" tracing: overall_sampling: numerator: 1 @@ -5004,7 +5272,7 @@ name: foo } // Test to check Prefix Rewrite for redirects -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RedirectPrefixRewrite)) { +TEST_F(RouteConfigurationV2, RedirectPrefixRewrite) { std::string RedirectPrefixRewrite = R"EOF( name: AllRedirects virtual_hosts: @@ -5017,7 +5285,10 @@ name: AllRedirects redirect: { prefix_rewrite: "/new/path/" } - match: { prefix: "/host/prefix" } redirect: { host_redirect: new.lyft.com, prefix_rewrite: "/new/prefix"} - - match: { regex: "/[r][e][g][e][x].*"} + - match: + safe_regex: + google_re2: {} + regex: "/[r][e][g][e][x].*" redirect: { prefix_rewrite: "/new/regex-prefix/" } - match: { prefix: "/http/prefix"} redirect: { prefix_rewrite: "/https/prefix" , https_redirect: true } 
@@ -5191,7 +5462,7 @@ name: AllRedirects } } -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(HeaderMatchedRoutingV2)) { +TEST_F(RouteMatcherTest, HeaderMatchedRoutingV2) { const std::string yaml = R"EOF( name: foo virtual_hosts: @@ -5224,7 +5495,9 @@ name: foo prefix: "/" headers: - name: test_header_pattern - regex_match: "^user=test-\\d+$" + safe_regex_match: + google_re2: {} + regex: "^user=test-\\d+$" route: cluster: local_service_with_header_pattern_set_regex - match: @@ -5366,8 +5639,7 @@ name: foo } } -TEST_F(RouteConfigurationV2, - DEPRECATED_FEATURE_TEST(RegexPrefixWithNoRewriteWorksWhenPathChanged)) { +TEST_F(RouteConfigurationV2, RegexPrefixWithNoRewriteWorksWhenPathChanged) { // Setup regex route entry. the regex is trivial, that's ok as we only want to test that // path change works. @@ -5377,7 +5649,10 @@ name: RegexNoMatch - name: regex domains: [regex.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: { cluster: some-cluster } )EOF"; @@ -5398,14 +5673,17 @@ name: RegexNoMatch } } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(NoIdleTimeout)) { +TEST_F(RouteConfigurationV2, NoIdleTimeout) { const std::string NoIdleTimeout = R"EOF( name: NoIdleTimeout virtual_hosts: - name: regex domains: [idle.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: cluster: some-cluster )EOF"; @@ -5416,14 +5694,17 @@ name: NoIdleTimeout EXPECT_EQ(absl::nullopt, route_entry->idleTimeout()); } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(ZeroIdleTimeout)) { +TEST_F(RouteConfigurationV2, ZeroIdleTimeout) { const std::string ZeroIdleTimeout = R"EOF( name: ZeroIdleTimeout virtual_hosts: - name: regex domains: [idle.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: cluster: some-cluster idle_timeout: 0s @@ -5435,14 +5716,17 @@ name: ZeroIdleTimeout EXPECT_EQ(0, 
route_entry->idleTimeout().value().count()); } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(ExplicitIdleTimeout)) { +TEST_F(RouteConfigurationV2, ExplicitIdleTimeout) { const std::string ExplicitIdleTimeout = R"EOF( name: ExplicitIdleTimeout virtual_hosts: - name: regex domains: [idle.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: cluster: some-cluster idle_timeout: 7s @@ -5455,14 +5739,17 @@ name: ExplicitIdleTimeout EXPECT_EQ(7 * 1000, route_entry->idleTimeout().value().count()); } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RetriableStatusCodes)) { +TEST_F(RouteConfigurationV2, RetriableStatusCodes) { const std::string ExplicitIdleTimeout = R"EOF( name: RetriableStatusCodes virtual_hosts: - name: regex domains: [idle.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: cluster: some-cluster retry_policy: @@ -5477,14 +5764,17 @@ name: RetriableStatusCodes EXPECT_EQ(expected_codes, retry_policy.retriableStatusCodes()); } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(UpgradeConfigs)) { +TEST_F(RouteConfigurationV2, UpgradeConfigs) { const std::string UpgradeYaml = R"EOF( name: RetriableStatusCodes virtual_hosts: - name: regex domains: [idle.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: cluster: some-cluster upgrade_configs: @@ -5501,14 +5791,17 @@ name: RetriableStatusCodes EXPECT_FALSE(upgrade_map.find("disabled")->second); } -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(DuplicateUpgradeConfigs)) { +TEST_F(RouteConfigurationV2, DuplicateUpgradeConfigs) { const std::string yaml = R"EOF( name: RetriableStatusCodes virtual_hosts: - name: regex domains: [idle.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: cluster: some-cluster upgrade_configs: @@ -5524,14 +5817,17 @@ 
name: RetriableStatusCodes // Verifies that we're creating a new instance of the retry plugins on each call instead of always // returning the same one. -TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RetryPluginsAreNotReused)) { +TEST_F(RouteConfigurationV2, RetryPluginsAreNotReused) { const std::string ExplicitIdleTimeout = R"EOF( name: RetriableStatusCodes virtual_hosts: - name: regex domains: [idle.lyft.com] routes: - - match: { regex: "/regex"} + - match: + safe_regex: + google_re2: {} + regex: "/regex" route: cluster: some-cluster retry_policy: diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index f93f40b47f5d..536e167b0209 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -1156,8 +1156,7 @@ TEST_F(ConnectionManagerTest, PendingMessageEnd) { EXPECT_EQ(1U, store_.gauge("test.request_active", Stats::Gauge::ImportMode::Accumulate).value()); } -// TODO(alyssawilk) update. -TEST_F(ConnectionManagerTest, DEPRECATED_FEATURE_TEST(Routing)) { +TEST_F(ConnectionManagerTest, Routing) { const std::string yaml = R"EOF( stat_prefix: test protocol_type: Dubbo @@ -1169,7 +1168,9 @@ serialization_type: Hessian2 - match: method: name: - regex: "(.*?)" + safe_regex: + google_re2: {} + regex: "(.*?)" route: cluster: user_service_dubbo_server )EOF"; diff --git a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc index 398743469bcb..1099862c5417 100644 --- a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc @@ -38,8 +38,7 @@ parseDubboProxyFromV2Yaml(const std::string& yaml) { } // namespace -// TODO(alyssawilk) update. 
-TEST(DubboRouteMatcherTest, DEPRECATED_FEATURE_TEST(RouteByServiceNameWithAnyMethod)) { +TEST(DubboRouteMatcherTest, RouteByServiceNameWithAnyMethod) { { const std::string yaml = R"EOF( name: local_route @@ -48,7 +47,9 @@ interface: org.apache.dubbo.demo.DemoService - match: method: name: - regex: "(.*?)" + safe_regex: + google_re2: {} + regex: "(.*?)" route: cluster: user_service_dubbo_server )EOF"; @@ -95,7 +96,9 @@ group: test - match: method: name: - regex: "(.*?)" + safe_regex: + google_re2: {} + regex: "(.*?)" route: cluster: user_service_dubbo_server )EOF"; @@ -129,7 +132,9 @@ version: 1.0.0 - match: method: name: - regex: "(.*?)" + safe_regex: + google_re2: {} + regex: "(.*?)" route: cluster: user_service_dubbo_server )EOF"; @@ -168,7 +173,9 @@ group: HSF - match: method: name: - regex: "(.*?)" + safe_regex: + google_re2: {} + regex: "(.*?)" route: cluster: user_service_dubbo_server )EOF"; @@ -292,8 +299,7 @@ interface: org.apache.dubbo.demo.DemoService EXPECT_EQ("user_service_dubbo_server", matcher.route(metadata, 0)->routeEntry()->clusterName()); } -// TODO(alyssawilk) update. 
-TEST(DubboRouteMatcherTest, DEPRECATED_FEATURE_TEST(RouteByMethodWithRegexMatch)) { +TEST(DubboRouteMatcherTest, RouteByMethodWithRegexMatch) { const std::string yaml = R"EOF( name: local_route interface: org.apache.dubbo.demo.DemoService @@ -301,7 +307,9 @@ interface: org.apache.dubbo.demo.DemoService - match: method: name: - regex: "\\d{3}test" + safe_regex: + google_re2: {} + regex: "\\d{3}test" route: cluster: user_service_dubbo_server )EOF"; diff --git a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc index c7efe51ec11c..75b096d15bf0 100644 --- a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc @@ -331,8 +331,7 @@ name: config EXPECT_EQ("cluster1", route->routeEntry()->clusterName()); } -// TODO(alyssawilk) update. -TEST(ThriftRouteMatcherTest, DEPRECATED_FEATURE_TEST(RouteByRegexHeaderMatcher)) { +TEST(ThriftRouteMatcherTest, RouteByRegexHeaderMatcher) { const std::string yaml = R"EOF( name: config routes: @@ -340,7 +339,9 @@ name: config method_name: "method1" headers: - name: "x-version" - regex_match: "0.[5-9]" + safe_regex_match: + google_re2: {} + regex: "0.[5-9]" route: cluster: "cluster1" )EOF"; From 8556a755fba78406813aa9c572c7a7ee9b0b415b Mon Sep 17 00:00:00 2001 From: "Tejasvi (Teju) Nareddy" Date: Thu, 29 Aug 2019 08:49:12 -0700 Subject: [PATCH 13/31] include: add log dependency header to connection_handler.h (#8072) Signed-off-by: Teju Nareddy --- include/envoy/network/connection_handler.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/envoy/network/connection_handler.h b/include/envoy/network/connection_handler.h index 6c24a814db5f..2e5f8057db2f 100644 --- a/include/envoy/network/connection_handler.h +++ b/include/envoy/network/connection_handler.h @@ -9,6 +9,8 @@ #include "envoy/network/listener.h" #include "envoy/ssl/context.h" +#include "spdlog/spdlog.h" 
+ namespace Envoy { namespace Network { From 4f2c5a4630126ed6b42b1cdaac88a8d399faa2a7 Mon Sep 17 00:00:00 2001 From: danzh Date: Thu, 29 Aug 2019 12:07:06 -0400 Subject: [PATCH 14/31] quiche: Update QUICHE dep (#8044) Update QUICHE tar ball to 4abb566fbbc63df8fe7c1ac30b21632b9eb18d0c. Add some new impl's for newly added api. Risk Level: low Testing: using quiche build in tests. Part of #2557 Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 225 ++++++++-- bazel/repository_locations.bzl | 6 +- source/extensions/quic_listeners/quiche/BUILD | 3 + .../quic_listeners/quiche/platform/BUILD | 9 + .../quiche/platform/flags_list.h | 408 +++++++++--------- .../platform/quic_mem_slice_storage_impl.h | 2 + .../quiche/platform/quic_text_utils_impl.h | 4 + .../quiche/platform/spdy_containers_impl.h | 2 + .../quiche/platform/spdy_map_util_impl.h | 18 + test/extensions/quic_listeners/quiche/BUILD | 1 + .../quic_listeners/quiche/platform/BUILD | 13 + 11 files changed, 443 insertions(+), 248 deletions(-) create mode 100644 source/extensions/quic_listeners/quiche/platform/spdy_map_util_impl.h diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index e08d8a5987eb..7f59318c97d5 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -711,6 +711,7 @@ envoy_cc_library( "quiche/spdy/platform/api/spdy_flags.h", "quiche/spdy/platform/api/spdy_logging.h", "quiche/spdy/platform/api/spdy_macros.h", + "quiche/spdy/platform/api/spdy_map_util.h", "quiche/spdy/platform/api/spdy_mem_slice.h", "quiche/spdy/platform/api/spdy_ptr_util.h", "quiche/spdy/platform/api/spdy_string.h", @@ -766,6 +767,16 @@ envoy_cc_library( deps = [":spdy_platform"], ) +envoy_cc_library( + name = "spdy_core_fifo_write_scheduler_lib", + hdrs = ["quiche/spdy/core/fifo_write_scheduler.h"], + repository = "@envoy", + deps = [ + ":spdy_core_write_scheduler_lib", + ":spdy_platform", + ], +) + envoy_cc_library( name = "spdy_core_framer_lib", srcs = [ @@ -849,6 +860,34 @@ 
envoy_cc_library( ], ) +envoy_cc_library( + name = "spdy_core_lifo_write_scheduler_lib", + hdrs = ["quiche/spdy/core/lifo_write_scheduler.h"], + repository = "@envoy", + deps = [ + ":spdy_core_write_scheduler_lib", + ":spdy_platform", + ], +) + +envoy_cc_library( + name = "spdy_core_intrusive_list_lib", + hdrs = ["quiche/spdy/core/spdy_intrusive_list.h"], + repository = "@envoy", +) + +envoy_cc_library( + name = "spdy_core_http2_priority_write_scheduler_lib", + hdrs = ["quiche/spdy/core/http2_priority_write_scheduler.h"], + repository = "@envoy", + deps = [ + ":spdy_core_intrusive_list_lib", + ":spdy_core_protocol_lib", + ":spdy_core_write_scheduler_lib", + ":spdy_platform", + ], +) + envoy_cc_library( name = "spdy_core_hpack_hpack_lib", srcs = [ @@ -976,6 +1015,7 @@ envoy_cc_library( "quiche/quic/platform/api/quic_pcc_sender.h", ], repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_time_lib", @@ -1021,6 +1061,7 @@ envoy_cc_library( # "quiche/quic/platform/api/quic_test_loopback.h", ], repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_export", @@ -1033,6 +1074,7 @@ envoy_cc_library( name = "quic_platform_bbr2_sender", hdrs = ["quiche/quic/platform/api/quic_bbr2_sender.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_bbr2_sender_impl_lib"], ) @@ -1040,6 +1082,7 @@ envoy_cc_test_library( name = "quic_platform_epoll_lib", hdrs = ["quiche/quic/platform/api/quic_epoll.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_epoll_impl_lib"], ) @@ -1047,6 +1090,7 @@ envoy_cc_test_library( name = "quic_platform_expect_bug", hdrs = ["quiche/quic/platform/api/quic_expect_bug.h"], repository = "@envoy", + tags = ["nofips"], deps = 
["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_expect_bug_impl_lib"], ) @@ -1054,6 +1098,7 @@ envoy_cc_library( name = "quic_platform_export", hdrs = ["quiche/quic/platform/api/quic_export.h"], repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_export_impl_lib"], ) @@ -1062,6 +1107,7 @@ envoy_cc_library( name = "quic_platform_ip_address_family", hdrs = ["quiche/quic/platform/api/quic_ip_address_family.h"], repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], ) @@ -1071,6 +1117,7 @@ envoy_cc_library( hdrs = ["quiche/quic/platform/api/quic_ip_address.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_base", @@ -1083,6 +1130,7 @@ envoy_cc_test_library( name = "quic_platform_mock_log", hdrs = ["quiche/quic/platform/api/quic_mock_log.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_mock_log_impl_lib"], ) @@ -1090,6 +1138,7 @@ envoy_cc_test_library( name = "quic_platform_port_utils", hdrs = ["quiche/quic/platform/api/quic_port_utils.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_impl_lib"], ) @@ -1097,6 +1146,7 @@ envoy_cc_test_library( name = "quic_platform_sleep", hdrs = ["quiche/quic/platform/api/quic_sleep.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_sleep_impl_lib"], ) @@ -1106,6 +1156,7 @@ envoy_cc_library( hdrs = ["quiche/quic/platform/api/quic_socket_address.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_export", @@ -1117,6 +1168,7 @@ envoy_cc_test_library( name = "quic_platform_test", hdrs 
= ["quiche/quic/platform/api/quic_test.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_impl_lib"], ) @@ -1124,6 +1176,7 @@ envoy_cc_test_library( name = "quic_platform_test_output", hdrs = ["quiche/quic/platform/api/quic_test_output.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_output_impl_lib"], ) @@ -1131,6 +1184,7 @@ envoy_cc_test_library( name = "quic_platform_system_event_loop", hdrs = ["quiche/quic/platform/api/quic_system_event_loop.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_system_event_loop_impl_lib"], ) @@ -1138,6 +1192,7 @@ envoy_cc_test_library( name = "quic_platform_thread", hdrs = ["quiche/quic/platform/api/quic_thread.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_thread_impl_lib"], ) @@ -1156,6 +1211,7 @@ envoy_cc_library( name = "quic_core_proto_cached_network_parameters_proto_header", hdrs = ["quiche/quic/core/proto/cached_network_parameters_proto.h"], repository = "@envoy", + tags = ["nofips"], deps = [":quic_core_proto_cached_network_parameters_proto_cc"], ) @@ -1174,6 +1230,7 @@ envoy_cc_library( name = "quic_core_proto_source_address_token_proto_header", hdrs = ["quiche/quic/core/proto/source_address_token_proto.h"], repository = "@envoy", + tags = ["nofips"], deps = [":quic_core_proto_source_address_token_proto_cc"], ) @@ -1191,6 +1248,7 @@ envoy_cc_library( name = "quic_core_proto_crypto_server_config_proto_header", hdrs = ["quiche/quic/core/proto/crypto_server_config_proto.h"], repository = "@envoy", + tags = ["nofips"], deps = [":quic_core_proto_crypto_server_config_proto_cc"], ) @@ -1278,6 +1336,7 @@ envoy_cc_library( "quiche/quic/core/quic_simple_buffer_allocator.h", ], repository = "@envoy", + tags = 
["nofips"], visibility = ["//visibility:public"], deps = [":quic_platform_export"], ) @@ -1311,6 +1370,8 @@ envoy_cc_library( tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", + ":quic_core_congestion_control_congestion_control_interface_lib", + ":quic_core_congestion_control_windowed_filter_lib", ":quic_core_packet_number_indexed_queue_lib", ":quic_core_packets_lib", ":quic_core_time_lib", @@ -1903,6 +1964,14 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_http_http_constants_lib", + hdrs = ["quiche/quic/core/http/http_constants.h"], + copts = quiche_copt, + repository = "@envoy", + deps = [":quic_core_types_lib"], +) + envoy_cc_library( name = "quic_core_http_client_lib", srcs = [ @@ -2003,6 +2072,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/http/spdy_server_push_utils.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_base", @@ -2039,6 +2109,7 @@ envoy_cc_library( ":quic_core_crypto_crypto_handshake_lib", ":quic_core_error_codes_lib", ":quic_core_http_header_list_lib", + ":quic_core_http_http_constants_lib", ":quic_core_http_http_decoder_lib", ":quic_core_http_http_encoder_lib", ":quic_core_http_spdy_stream_body_buffer_lib", @@ -2230,6 +2301,9 @@ envoy_cc_library( ":quic_core_versions_lib", ":quic_platform", ":quic_platform_socket_address", + ":spdy_core_fifo_write_scheduler_lib", + ":spdy_core_http2_priority_write_scheduler_lib", + ":spdy_core_lifo_write_scheduler_lib", ":spdy_core_priority_write_scheduler_lib", ], ) @@ -2260,29 +2334,13 @@ envoy_cc_library( ) envoy_cc_library( - name = "quic_core_qpack_qpack_instruction_encoder_lib", - srcs = ["quiche/quic/core/qpack/qpack_instruction_encoder.cc"], - hdrs = ["quiche/quic/core/qpack/qpack_instruction_encoder.h"], - copts = quiche_copt, - repository = "@envoy", - deps = [ - ":http2_hpack_huffman_hpack_huffman_encoder_lib", - ":http2_hpack_varint_hpack_varint_encoder_lib", - 
":quic_core_qpack_qpack_constants_lib", - ":quic_platform", - ], -) - -envoy_cc_library( - name = "quic_core_qpack_qpack_instruction_decoder_lib", - srcs = ["quiche/quic/core/qpack/qpack_instruction_decoder.cc"], - hdrs = ["quiche/quic/core/qpack/qpack_instruction_decoder.h"], - copts = quiche_copt, + name = "quic_core_qpack_blocking_manager_lib", + srcs = ["quiche/quic/core/qpack/qpack_blocking_manager.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_blocking_manager.h"], repository = "@envoy", + tags = ["nofips"], deps = [ - ":http2_hpack_huffman_hpack_huffman_decoder_lib", - ":http2_hpack_varint_hpack_varint_decoder_lib", - ":quic_core_qpack_qpack_constants_lib", + ":quic_core_types_lib", ":quic_platform_base", ], ) @@ -2293,21 +2351,42 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_constants.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":quic_platform_base"], ) +envoy_cc_library( + name = "quic_core_qpack_qpack_decoder_lib", + srcs = ["quiche/quic/core/qpack/qpack_decoder.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_decoder.h"], + copts = quiche_copt, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_qpack_qpack_decoder_stream_sender_lib", + ":quic_core_qpack_qpack_encoder_stream_receiver_lib", + ":quic_core_qpack_qpack_header_table_lib", + ":quic_core_qpack_qpack_progressive_decoder_lib", + ":quic_core_types_lib", + ":quic_platform_base", + ], +) + envoy_cc_library( name = "quic_core_qpack_qpack_encoder_lib", srcs = ["quiche/quic/core/qpack/qpack_encoder.cc"], hdrs = ["quiche/quic/core/qpack/qpack_encoder.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ + ":quic_core_qpack_blocking_manager_lib", ":quic_core_qpack_qpack_constants_lib", ":quic_core_qpack_qpack_decoder_stream_receiver_lib", ":quic_core_qpack_qpack_encoder_stream_sender_lib", ":quic_core_qpack_qpack_header_table_lib", ":quic_core_qpack_qpack_instruction_encoder_lib", + 
":quic_core_qpack_qpack_required_insert_count_lib", ":quic_core_qpack_value_splitting_header_list_lib", ":quic_core_types_lib", ":quic_platform_base", @@ -2315,49 +2394,76 @@ envoy_cc_library( ) envoy_cc_library( - name = "quic_core_qpack_qpack_progressive_decoder_lib", - srcs = ["quiche/quic/core/qpack/qpack_progressive_decoder.cc"], - hdrs = ["quiche/quic/core/qpack/qpack_progressive_decoder.h"], + name = "quic_core_qpack_qpack_header_table_lib", + srcs = ["quiche/quic/core/qpack/qpack_header_table.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_header_table.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ + ":quic_core_qpack_qpack_static_table_lib", + ":quic_platform_base", + ":spdy_core_hpack_hpack_lib", + ], +) + +envoy_cc_library( + name = "quic_core_qpack_qpack_instruction_decoder_lib", + srcs = ["quiche/quic/core/qpack/qpack_instruction_decoder.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_instruction_decoder.h"], + copts = quiche_copt, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":http2_hpack_huffman_hpack_huffman_decoder_lib", + ":http2_hpack_varint_hpack_varint_decoder_lib", ":quic_core_qpack_qpack_constants_lib", - ":quic_core_qpack_qpack_decoder_stream_sender_lib", - ":quic_core_qpack_qpack_encoder_stream_receiver_lib", - ":quic_core_qpack_qpack_header_table_lib", - ":quic_core_qpack_qpack_instruction_decoder_lib", - ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( - name = "quic_core_qpack_qpack_decoder_lib", - srcs = ["quiche/quic/core/qpack/qpack_decoder.cc"], - hdrs = ["quiche/quic/core/qpack/qpack_decoder.h"], + name = "quic_core_qpack_qpack_instruction_encoder_lib", + srcs = ["quiche/quic/core/qpack/qpack_instruction_encoder.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_instruction_encoder.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], + deps = [ + ":http2_hpack_huffman_hpack_huffman_encoder_lib", + ":http2_hpack_varint_hpack_varint_encoder_lib", + 
":quic_core_qpack_qpack_constants_lib", + ":quic_platform", + ], +) + +envoy_cc_library( + name = "quic_core_qpack_qpack_progressive_decoder_lib", + srcs = ["quiche/quic/core/qpack/qpack_progressive_decoder.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_progressive_decoder.h"], + copts = quiche_copt, + repository = "@envoy", + tags = ["nofips"], deps = [ + ":quic_core_qpack_qpack_constants_lib", ":quic_core_qpack_qpack_decoder_stream_sender_lib", ":quic_core_qpack_qpack_encoder_stream_receiver_lib", ":quic_core_qpack_qpack_header_table_lib", - ":quic_core_qpack_qpack_progressive_decoder_lib", + ":quic_core_qpack_qpack_instruction_decoder_lib", + ":quic_core_qpack_qpack_required_insert_count_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( - name = "quic_core_qpack_qpack_header_table_lib", - srcs = ["quiche/quic/core/qpack/qpack_header_table.cc"], - hdrs = ["quiche/quic/core/qpack/qpack_header_table.h"], + name = "quic_core_qpack_qpack_required_insert_count_lib", + srcs = ["quiche/quic/core/qpack/qpack_required_insert_count.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_required_insert_count.h"], copts = quiche_copt, repository = "@envoy", - deps = [ - ":quic_core_qpack_qpack_static_table_lib", - ":quic_platform_base", - ":spdy_core_hpack_hpack_lib", - ], + tags = ["nofips"], + deps = [":quic_platform_base"], ) envoy_cc_library( @@ -2365,6 +2471,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_utils.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":quic_core_qpack_qpack_stream_sender_delegate_lib"], ) @@ -2374,6 +2481,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_encoder_stream_sender.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_qpack_qpack_constants_lib", ":quic_core_qpack_qpack_instruction_encoder_lib", @@ -2388,6 +2496,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_encoder_stream_receiver.h"], copts = quiche_copt, 
repository = "@envoy", + tags = ["nofips"], deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", @@ -2404,6 +2513,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_decoder_stream_sender.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_qpack_qpack_constants_lib", ":quic_core_qpack_qpack_instruction_encoder_lib", @@ -2419,6 +2529,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_decoder_stream_receiver.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", @@ -2436,6 +2547,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_static_table.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_platform_base", ":spdy_core_hpack_hpack_lib", @@ -2447,6 +2559,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_stream_receiver.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":quic_platform_base"], ) @@ -2456,6 +2569,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_decoded_headers_accumulator.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_http_header_list_lib", ":quic_core_qpack_qpack_decoder_lib", @@ -2471,6 +2585,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/value_splitting_header_list.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_platform_base", ":spdy_core_header_block_lib", @@ -2502,6 +2617,7 @@ envoy_cc_library( hdrs = ["quiche/quic/core/qpack/qpack_stream_sender_delegate.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":quic_platform_base"], ) @@ -2767,6 +2883,7 @@ envoy_cc_library( srcs = ["quiche/quic/core/quic_time.cc"], hdrs = ["quiche/quic/core/quic_time.h"], repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = 
[":quic_platform_base"], ) @@ -2819,10 +2936,12 @@ envoy_cc_library( "quiche/quic/core/quic_types.h", ], copts = quiche_copt, + external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ + ":quic_core_crypto_random_lib", ":quic_core_error_codes_lib", ":quic_core_time_lib", ":quic_platform_base", @@ -2915,6 +3034,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/quic_config_peer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_config_lib", ":quic_core_packets_lib", @@ -2928,6 +3048,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/quic_framer_peer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", @@ -2942,6 +3063,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/mock_clock.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_time_lib", ":quic_platform", @@ -2954,6 +3076,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/mock_random.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":quic_core_crypto_random_lib"], ) @@ -2963,6 +3086,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/quic_packet_generator_peer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_packet_creator_lib", ":quic_core_packet_generator_lib", @@ -2976,6 +3100,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/quic_sent_packet_manager_peer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_packets_lib", @@ -2990,6 +3115,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/simple_quic_framer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", @@ -3004,6 
+3130,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/quic_stream_send_buffer_peer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":quic_core_stream_send_buffer_lib"], ) @@ -3013,6 +3140,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/quic_stream_peer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_packets_lib", ":quic_core_session_lib", @@ -3043,6 +3171,7 @@ envoy_cc_test_library( copts = quiche_copt, external_deps = ["ssl"], repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_buffer_allocator_lib", ":quic_core_congestion_control_congestion_control_interface_lib", @@ -3086,6 +3215,7 @@ envoy_cc_test_library( hdrs = ["quiche/quic/test_tools/quic_unacked_packet_map_peer.h"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":quic_core_unacked_packet_map_lib"], ) @@ -3103,6 +3233,7 @@ envoy_cc_test_library( "quiche/epoll_server/platform/api/epoll_time.h", ], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:epoll_server_platform_impl_lib"], ) @@ -3124,6 +3255,7 @@ envoy_cc_test_library( }), copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":epoll_server_platform"], ) @@ -3135,6 +3267,7 @@ envoy_cc_library( "quiche/common/platform/api/quiche_unordered_containers.h", ], repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_impl_lib"], ) @@ -3143,6 +3276,7 @@ envoy_cc_test_library( name = "quiche_common_platform_test", hdrs = ["quiche/common/platform/api/quiche_test.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quiche_common_platform_test_impl_lib"], ) @@ -3150,6 +3284,7 @@ envoy_cc_library( name = "quiche_common_lib", hdrs = ["quiche/common/simple_linked_hash_map.h"], 
repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = [":quiche_common_platform"], ) @@ -3162,6 +3297,7 @@ envoy_cc_test( }), copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [":epoll_server_lib"], ) @@ -3170,6 +3306,7 @@ envoy_cc_test( srcs = ["quiche/common/simple_linked_hash_map_test.cc"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quiche_common_lib", ":quiche_common_platform_test", @@ -3183,6 +3320,7 @@ envoy_cc_test( "quiche/http2/test_tools/http2_random_test.cc", ], repository = "@envoy", + tags = ["nofips"], deps = [ ":http2_platform", ":http2_test_tools_random", @@ -3193,6 +3331,7 @@ envoy_cc_test( name = "spdy_platform_api_test", srcs = ["quiche/spdy/platform/api/spdy_string_utils_test.cc"], repository = "@envoy", + tags = ["nofips"], deps = [ ":spdy_platform", ":spdy_platform_test", @@ -3206,6 +3345,7 @@ envoy_cc_library( ], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], visibility = ["//visibility:public"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_span_impl_lib"], ) @@ -3214,6 +3354,7 @@ envoy_cc_test_library( name = "quic_platform_test_mem_slice_vector_lib", hdrs = ["quiche/quic/platform/api/quic_test_mem_slice_vector.h"], repository = "@envoy", + tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_mem_slice_vector_impl_lib"], ) @@ -3230,6 +3371,7 @@ envoy_cc_test( srcs = ["quiche/spdy/core/spdy_header_block_test.cc"], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":spdy_core_header_block_lib", ":spdy_core_test_utils_lib", @@ -3250,6 +3392,7 @@ envoy_cc_test( ], copts = quiche_copt, repository = "@envoy", + tags = ["nofips"], deps = [ ":quic_core_buffer_allocator_lib", ":quic_platform", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 5dec6ea9bb10..6a28c7026cca 100644 --- 
a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -243,9 +243,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/curl/curl/releases/download/curl-7_65_3/curl-7.65.3.tar.gz"], ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/2a930469533c3b541443488a629fe25cd8ff53d0.tar.gz - sha256 = "fcdebf54c89d839ffa7eefae166c8e4b551c765559db13ff15bff98047f344fb", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/2a930469533c3b541443488a629fe25cd8ff53d0.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/4abb566fbbc63df8fe7c1ac30b21632b9eb18d0c.tar.gz + sha256 = "c60bca3cf7f58b91394a89da96080657ff0fbe4d5675be9b21e90da8f68bc06f", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/4abb566fbbc63df8fe7c1ac30b21632b9eb18d0c.tar.gz"], ), com_google_cel_cpp = dict( sha256 = "f027c551d57d38fb9f0b5e4f21a2b0b8663987119e23b1fd8dfcc7588e9a2350", diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index b4b3369fe9bf..c71bb49c6f35 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -40,6 +40,7 @@ envoy_cc_library( srcs = ["envoy_quic_packet_writer.cc"], hdrs = ["envoy_quic_packet_writer.h"], external_deps = ["quiche_quic_platform"], + tags = ["nofips"], deps = [ ":envoy_quic_utils_lib", "@com_googlesource_quiche//:quic_core_packet_writer_interface_lib", @@ -71,6 +72,7 @@ envoy_cc_library( envoy_cc_library( name = "spdy_server_push_utils_for_envoy_lib", srcs = ["spdy_server_push_utils_for_envoy.cc"], + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ "//source/common/common:assert_lib", @@ -82,5 +84,6 @@ envoy_cc_library( name = "envoy_quic_utils_lib", hdrs = ["envoy_quic_utils.h"], external_deps = ["quiche_quic_platform"], + tags = ["nofips"], deps = ["//source/common/network:address_lib"], ) diff --git 
a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 08c42a1364a8..8fc764386a2f 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -94,6 +94,7 @@ envoy_cc_library( envoy_cc_library( name = "quic_platform_export_impl_lib", hdrs = ["quic_export_impl.h"], + tags = ["nofips"], visibility = ["//visibility:public"], ) @@ -104,6 +105,7 @@ envoy_cc_library( "quic_bug_tracker_impl.h", "quic_logging_impl.h", ], + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ "//source/common/common:assert_lib", @@ -153,6 +155,7 @@ envoy_cc_library( "abseil_optional", "googletest", ], + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":flags_impl_lib", @@ -191,6 +194,7 @@ envoy_cc_library( "abseil_time", "ssl", ], + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ "//source/common/common:assert_lib", @@ -205,6 +209,7 @@ envoy_cc_library( srcs = ["quic_mem_slice_span_impl.cc"], hdrs = ["quic_mem_slice_span_impl.h"], copts = ["-Wno-unused-parameter"], + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ "//include/envoy/buffer:buffer_interface", @@ -222,6 +227,7 @@ envoy_cc_library( "-Wno-error=invalid-offsetof", "-Wno-unused-parameter", ], + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ "@com_googlesource_quiche//:quic_core_buffer_allocator_lib", @@ -233,6 +239,7 @@ envoy_cc_library( envoy_cc_library( name = "quic_platform_bbr2_sender_impl_lib", hdrs = ["quic_bbr2_sender_impl.h"], + tags = ["nofips"], visibility = ["//visibility:public"], deps = ["@com_googlesource_quiche//:quic_core_congestion_control_bbr_lib"], ) @@ -241,6 +248,7 @@ envoy_cc_library( name = "envoy_quic_clock_lib", srcs = ["envoy_quic_clock.cc"], hdrs = ["envoy_quic_clock.h"], + tags = ["nofips"], visibility = ["//visibility:public"], deps = [ "//include/envoy/event:dispatcher_interface", @@ 
-277,6 +285,7 @@ envoy_cc_library( "spdy_flags_impl.h", "spdy_logging_impl.h", "spdy_macros_impl.h", + "spdy_map_util_impl.h", "spdy_mem_slice_impl.h", "spdy_ptr_util_impl.h", "spdy_string_impl.h", diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index 31ba6c3686f5..1b0defc6824d 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -1,5 +1,5 @@ // This file intentionally does not have header guards, it's intended to be -// included multiple times, each time with a different definition of QUIC_FLAG. +// included multiple times, each time with a different definition of QUICHE_FLAG. // NOLINT(namespace-envoy) @@ -18,24 +18,33 @@ QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, fa QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_enable_quic_stateless_reject_support, true, - "Enables server-side support for QUIC stateless rejects.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_active_streams_never_negative, false, + "If true, static streams should never be closed before QuicSession " + "destruction.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_upper_limit_of_buffered_control_frames, false, + "If true, close connection if there are too many (> 1000) buffered " + "control frames.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_aggressive_connection_aliveness, false, + "If true, QuicSession::ShouldKeepConnectionAlive() will not consider " + "locally closed streams whose highest byte offset is not received yet.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false, "If true, check backend response header for X-Response-Ttl. If it is " "provided, the stream TTL is set. 
A QUIC stream will be immediately " "canceled when tries to write data if this TTL expired.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, false, + "If true, allow client to enable BBRv2 on server via connection " + "option 'B2ON'.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false, "Support different QUIC sessions, as indicated by ALPN. Used for QBONE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_always_reset_short_header_packets, true, - "If true, instead of send encryption none termination packets, send " - "stateless reset in response to short headers.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_app_limited_recovery, false, - "When you're app-limited entering recovery, stay app-limited until " - "you exit recovery in QUIC BBR.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_avoid_empty_frame_after_empty_headers, true, + "If enabled, do not call OnStreamFrame() with empty frame after " + "receiving empty or too large headers with FIN.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false, "When true and the BBR9 connection option is present, BBR only considers " @@ -60,15 +69,29 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_startup_rate_reduction, false, "When true, enables the BBS4 and BBS5 connection options, which reduce " "BBR's pacing rate in STARTUP as more losses occur as a fraction of CWND.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_change_default_lumpy_pacing_size_to_two, false, + "If true and --quic_lumpy_pacing_size is 1, QUIC will use a lumpy " + "size of two for pacing.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_clear_queued_packets_on_connection_close, false, + "Calls ClearQueuedPackets after sending a connection close packet") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, + "If true, set burst token to 2 in cwnd bootstrapping experiment.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, 
false, + "If true, uses conservative cwnd gain and pacing gain when cwnd gets " + "bootstrapped.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_debug_wrong_qos, false, "If true, consider getting QoS after stream has been detached as GFE bug.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr, true, "When true, defaults to BBR congestion control instead of Cubic.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_deprecate_ack_bundling_mode, false, - "If true, stop using AckBundling mode to send ACK, also deprecate " - "ack_queued from QuicConnection.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false, + "If true, use BBRv2 as the default congestion controller. Takes " + "precedence over --quic_default_to_bbr.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_connection_migration_for_udp_proxying, true, "If true, GFE disables connection migration in connection option for " @@ -77,9 +100,21 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_connection_migration_for_udp QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_39, false, "If true, disable QUIC version 39.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_44, true, + "If true, disable QUIC version 44.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false, + "In v44 and above, where STOP_WAITING is never sent, close the " + "connection if it's received.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_drop_invalid_small_initial_connection_id, true, + "When true, QuicDispatcher will drop packets that have an initial " + "destination connection ID that is too short, instead of responding " + "with a Version Negotiation packet to reject it.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_eighth_rtt_loss_detection, false, "When true, the LOSS connection option 
allows for 1/8 RTT of " "reording instead of the current 1/8th threshold which has been " @@ -89,49 +124,57 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ack_decimation, false, "Default enables QUIC ack decimation and adds a connection option to " "disable it.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_pcc3, false, - "If true, enable experiment for testing PCC congestion-control.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_43, true, - "If true, enable QUIC version 43.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_fifo_write_scheduler, false, + "If true and FIFO connection option is received, write_blocked_streams " + "uses FIFO(stream with smallest ID has highest priority) write scheduler.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_44, true, - "If true, enable version 44 which uses IETF header format.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_lifo_write_scheduler, false, + "If true and LIFO connection option is received, write_blocked_streams " + "uses LIFO(stream with largest ID has highest priority) write scheduler.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_46, true, - "If true, enable QUIC version 46.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_pcc3, false, + "If true, enable experiment for testing PCC congestion-control.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_47, false, "If true, enable QUIC version 47 which adds support for variable " "length connection IDs.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_48, false, + "If true, enable QUIC version 48.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_99, false, "If true, enable version 99.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_faster_interval_add_in_sequence_buffer, false, - "If true, QuicStreamSequencerBuffer will switch to a new " - "QuicIntervalSet::AddOptimizedForAppend method in 
OnStreamData().") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_adaptive_time_loss, false, - "Simplify QUIC's adaptive time loss detection to measure the " + "Simplify QUICHE's adaptive time loss detection to measure the " "necessary reordering window for every spurious retransmit.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_has_pending_crypto_data, false, - "If true, QuicSession::HasPendingCryptoData checks whether the " - "crypto stream's send buffer is empty. This flag fixes a bug where " - "the retransmission alarm mode is wrong for the first CHLO packet.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true, + "If true, adjust congestion window when doing bandwidth resumption in BBR.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_get_packet_header_size, false, + "Fixes quic::GetPacketHeaderSize and callsites when " + "QuicVersionHasLongHeaderLengths is false.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_packets_acked, true, + "If true, when detecting losses, use packets_acked of corresponding " + "packet number space.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_spurious_ack_alarm, false, - "If true, do not schedule ack alarm if should_send_ack is set in the " - "generator.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_rto_retransmission2, false, + "If true, when RTO fires and there is no packet to be RTOed, let " + "connection send.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_termination_packets, false, - "If true, GFE time wait list will send termination packets based on " - "current packet's encryption level.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_handle_staticness_for_spdy_stream, false, + "If true, QuicSpdySession::GetSpdyDataStream() will close the " + "connection if the returned stream is static.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_tlpr_if_no_pending_stream_data, false, + "If true, ignore TLPR if there is no pending stream data") -QUICHE_FLAG(bool, 
quic_reloadable_flag_quic_limit_window_updates_in_traces, false, - "Limits the number of window update events recorded in Tracegraf logs.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_inline_getorcreatedynamicstream, false, + "If true, QuicSession::GetOrCreateDynamicStream() is deprecated, and " + "its contents are moved to GetOrCreateStream().") QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, "If true, QuicListener::OnSocketIsWritable will always return false, " @@ -141,20 +184,16 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_log_cert_name_for_empty_sct, true, "If true, log leaf cert subject name into warning log.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_log_is_proxy_in_tcs, false, - "If true, log whether a GFE QUIC server session is UDP proxied and whether " - "it is a health check connection, in transport connection stats.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_logging_frames_in_tracegraf, false, - "If true, populate frames info when logging tracegraf.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_loss_removes_from_inflight, true, + "When true, remove packets from inflight where they're declared " + "lost, rather than in MarkForRetransmission. Also no longer marks " + "handshake packets as no longer inflight when they're retransmitted.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_monotonic_epoll_clock, false, "If true, QuicEpollClock::Now() will monotonically increase.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_client_conn_ver_negotiation, false, - "If true, a client connection would be closed when a version " - "negotiation packet is received. 
It would be the higher layer's " - "responsibility to do the reconnection.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, false, + "If true, will negotiate the ACK delay time.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_cloud_domain_sni_lookup_on_missing_sni, false, "Do not attempt to match an empty Server Name Indication (SNI) " @@ -164,68 +203,66 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false, "If true, transport connection stats doesn't report duplicated " "experiments for same connection.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_goaway_for_proxied_port_change, false, - "If true, for proxied quic sessions, GFE will not send a GOAWAY in " - "response to a client port change.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_stream_data_after_reset, false, + "If true, QuicStreamSequencer will not take in new data if the stream is reset.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_v2_scaling_factor, false, "When true, don't use an extra scaling factor when reading packets " - "from QUIC's RX_RING with TPACKET_V2.") + "from QUICHE's RX_RING with TPACKET_V2.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_optimize_inflight_check, false, - "Stop checking QuicUnackedPacketMap::HasUnackedRetransmittableFrames " - "and instead rely on the existing check that bytes_in_flight > 0") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_window_update_on_read_only_stream, false, + "If true, QuicConnection will be closed if a WindowUpdate frame is " + "received on a READ_UNIDIRECTIONAL stream.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_check_toss_on_insertion_failure, false, "If true, enable the code that fixes a race condition for quic udp " "proxying in L0. 
See b/70036019.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_munge_response_for_healthcheck, true, - "If true, for udp proxy, the health check packets from L1 to L0 will " - "be munged.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_read_packed_strings, true, "If true, QuicProxyDispatcher will prefer to extract client_address " "and server_vip from packed_client_address and packed_server_vip, " "respectively.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_supports_length_prefix, false, + "When true, QuicProxyUtils::GetConnectionId supports length prefixed " + "connection IDs.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false, "If true, QuicProxyDispatcher will write packed_client_address and " "packed_server_vip in TcpProxyHeaderProto.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_length_prefix_from_packet_info, false, + "When true, QuicDispatcher::MaybeDispatchPacket will use packet_info.use_length_prefix " + "instead of an incorrect local computation.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, false, "If true, for L1 GFE, as requests come in, record frontend service to VIP " "mapping which is used to announce VIP in SHLO for proxied sessions. 
") QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_unprocessable_packets_statelessly, false, + "If true, do not add connection ID of packets with unknown connection ID " + "and no version to time wait list, instead, send appropriate responses " + "depending on the packets' sizes and drop them.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false, "If true, require handshake confirmation for QUIC connections, " "functionally disabling 0-rtt handshakes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_rpm_decides_when_to_send_acks, false, - "If both this flag and " - "gfe2_reloadable_flag_quic_deprecate_ack_bundling_mode are true, " - "QuicReceivedPacketManager decides when to send ACKs.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, "When the STMP connection option is sent by the client, timestamps " "in the QUIC ACK frame are sent and processed.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, - "If true, enable server push feature on QUIC.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_set_transmission_type_for_next_frame, true, - "If true, QuicPacketCreator::SetTransmissionType will set the " - "transmission type of the next successfully added frame.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_sent_packet_manager_cleanup, false, + "When true, remove obsolete functionality intended to test IETF QUIC " + "recovery.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_simplify_build_connectivity_probing_packet, true, - "If true, simplifies the implementation of " - "QuicFramer::BuildConnectivityProbingPacket().") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, + "If true, enable server push feature on QUICHE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_stateless_proxy, false, - "If true, UDP proxy will not drop versionless packets, in other " - "words, it will proxy all packets from client.") 
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_simplify_stop_waiting, true, + "If true, do not send STOP_WAITING if no_stop_waiting_frame_.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_reading_when_level_triggered, false, "When true, calling StopReading() on a level-triggered QUIC stream " @@ -237,31 +274,37 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true, "A testonly reloadable flag that will always default to true.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_tolerate_reneging, false, - "If true, do not close connection if received largest acked decreases.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_tracegraf_populate_ack_packet_number, false, + "If true, populate packet_number of received ACK in tracegraf.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_track_ack_height_in_bandwidth_sampler, false, + "If true, QUIC will track max ack height in BandwidthSampler.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false, "When true, set the initial congestion control window from connection " "options in QuicSentPacketManager rather than TcpCubicSenderBytes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_cheap_stateless_rejects, true, - "If true, QUIC will use cheap stateless rejects without creating a full " - "connection. 
Prerequisite: --quic_allow_chlo_buffering has to be true.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_common_stream_check, false, "If true, use common code for checking whether a new stream ID may " "be allocated.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_connection_clock_for_last_ack_time, false, + "If true, QuicFasterStatsGatherer will use a GFEConnectionClock to " + "get the time when the last ack is received.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false, "If true, use header stage idle list for QUIC connections in GFE.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_http2_priority_write_scheduler, false, + "If true and H2PR connection option is received, write_blocked_streams_ " + "uses HTTP2 (tree-style) priority write scheduler.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_leto_key_exchange, false, "If true, QUIC will attempt to use the Leto key exchange service and " "only fall back to local key exchange if that fails.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_new_append_connection_id, false, - "When true QuicFramer will use AppendIetfConnectionIdsNew instead of " - "AppendIetfConnectionId.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_parse_public_header, false, + "When true, QuicDispatcher will always use QuicFramer::ParsePublicHeader") QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_pigeon_sockets, false, "Use USPS Direct Path for QUIC egress.") @@ -270,103 +313,40 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_quic_time_for_received_timestamp "If true, use QuicClock::Now() for the fallback source of packet " "received time instead of WallNow().") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_uber_loss_algorithm, false, - "If true, use one loss algorithm per encryption level.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_uber_received_packet_manager, false, - "If this flag and quic_rpm_decides_when_to_send_acks is true, use uber " - "received packet manager 
instead of the single received packet manager.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_validate_packet_number_post_decryption, false, - "If true, a QUIC endpoint will valid a received packet number after " - "successfully decrypting the packet.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_eliminate_static_stream_map_3, false, - "If true, static streams in a QuicSession will be stored inside dynamic stream map. " - "static_stream_map will no longer be used.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_simplify_stop_waiting, false, - "Do not send STOP_WAITING if no_stop_waiting_frame_ is true.") - -QUICHE_FLAG(bool, quic_reloadable_flag_send_quic_fallback_server_config_on_leto_error, false, - "If true and using Leto for QUIC shared-key calculations, GFE will react to a failure " - "to contact Leto by sending a REJ containing a fallback ServerConfig, allowing the " - "client to continue the handshake.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true, - " If true, adjust congestion window when doing bandwidth resumption in BBR.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false, - "If true, uses conservative cwnd gain and pacing gain.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false, - "In v44 and above, where STOP_WAITING is never sent, close the connection if it's received.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_loss_removes_from_inflight, false, - "When true, remove packets from inflight where they're declared lost, rather than in " - "MarkForRetransmission. 
Also no longer marks handshake packets as no longer inflight " - "when they're retransmitted.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, - "If true, set burst token to 2 in cwnd bootstrapping experiment.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_deprecate_queued_control_frames, false, - "If true, deprecate queued_control_frames_ from QuicPacketGenerator.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_check_connected_before_flush, false, - "If true, check whether connection is connected before flush.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_tlpr_if_sending_ping, false, - "If true, ignore TLPR for retransmission delay when sending pings from ping alarm.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_terminate_gquic_connection_as_ietf, false, - "If true, terminate Google QUIC connections similarly as IETF QUIC.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_44, false, - "If true, disable QUIC version 44.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_fix_packets_acked, false, - "If true, when detecting losses, use packets_acked of corresponding packet number space.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_tlpr_if_no_pending_stream_data, false, - "If true, ignore TLPR if there is no pending stream data") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_drop_invalid_small_initial_connection_id, false, - "When true, QuicDispatcher will drop packets that have an initial destination connection ID " - "that is too short, instead of responding with a Version Negotiation packet to reject it.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_version_negotiation_grease, false, - "When true, QUIC Version Negotiation packets will randomly include fake versions.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_fix_get_packet_header_size, false, - "Fixes quic::GetPacketHeaderSize and callsites when QuicVersionHasLongHeaderLengths is false.") - -QUICHE_FLAG( - bool, 
quic_reloadable_flag_quic_change_default_lumpy_pacing_size_to_two, false, - "If true and --quic_lumpy_pacing_size is 1, QUIC will use a lumpy size of two for pacing.") + "When true, QUIC Version Negotiation packets will randomly include " + "fake versions.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_window_update_on_read_only_stream, false, - "If true, QuicConnection will be closed if a WindowUpdate frame is received on a " - "READ_UNIDIRECTIONAL stream.") +QUICHE_FLAG(bool, quic_reloadable_flag_send_quic_fallback_server_config_on_leto_error, false, + "If true and using Leto for QUIC shared-key calculations, GFE will react " + "to a failure to contact Leto by sending a REJ containing a fallback " + "ServerConfig, allowing the client to continue the handshake.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_clear_queued_packets_on_connection_close, false, - "Calls ClearQueuedPackets after sending a connection close packet") +QUICHE_FLAG(bool, quic_reloadable_flag_simplify_spdy_quic_https_scheme_detection, false, + "If true, simplify the logic for detecting REQUEST_HAS_HTTPS_SCHEME in " + "NetSpdyRequester::SetRequestUrlAndHost and " + "NetQuicRequester::SetRequestUrlAndHost. 
Fixes a bug where internal " + "redirects for QUIC connections would be treated as having an http scheme.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_48, false, - "If true, enable QUIC version 48.") +QUICHE_FLAG(bool, quic_restart_flag_do_not_create_raw_socket_selector_if_quic_enabled, false, + "If true, do not create the RawSocketSelector in " + "QuicListener::Initialize() if QUIC is disabled by flag.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_avoid_empty_frame_after_empty_headers, false, - "If enabled, do not call OnStreamFrame() with empty frame after receiving empty or too " - "large headers with FIN.") +QUICHE_FLAG(bool, quic_restart_flag_dont_fetch_quic_private_keys_from_leto, false, + "If true, GFE will not request private keys when fetching QUIC " + "ServerConfigs from Leto.") QUICHE_FLAG(bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, "If true, inspects QUIC CHLOs for kLOAS and early creates sessions " "to allow multi-packet CHLOs") +QUICHE_FLAG(bool, quic_restart_flag_quic_connection_id_use_siphash, false, + "When true, QuicConnectionId::Hash uses SipHash instead of XOR.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_dispatcher_hands_chlo_extractor_one_version, false, + "When true, QuicDispatcher will pass the version from the packet to " + "the ChloExtractor instead of all supported versions.") + QUICHE_FLAG(bool, quic_restart_flag_quic_enable_gso_for_udp_egress, false, "If 1) flag is true, 2) UDP egress_method is used in GFE config, and " "3) UDP GSO is supported by the kernel, GFE will use UDP GSO for " @@ -378,9 +358,8 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_enable_sendmmsg_for_udp_egress, false, "gso is not supported by kernel, GFE will use sendmmsg for egress, " "except for UDP proxy.") -QUICHE_FLAG(bool, quic_restart_flag_quic_no_server_conn_ver_negotiation2, false, - "If true, dispatcher passes in a single version when creating a server " - "connection, such that version negotiation is not supported in 
connection.") +QUICHE_FLAG(bool, quic_restart_flag_quic_no_fallback_for_pigeon_socket, false, + "If true, GFEs using USPS egress will not fallback to raw ip socket.") QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, "If true, QUIC offload pacing when using USPS as egress method.") @@ -402,6 +381,10 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_false, false, QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_true, true, "A testonly restart flag that will always default to true.") +QUICHE_FLAG(bool, quic_restart_flag_quic_use_allocated_connection_ids, true, + "When true, QuicConnectionId will allocate long connection IDs on " + "the heap instead of inline in the object.") + QUICHE_FLAG(bool, quic_restart_flag_quic_use_leto_for_quic_configs, false, "If true, use Leto to fetch QUIC server configs instead of using the " "seeds from Memento.") @@ -410,27 +393,12 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false, "If true, create a shared pigeon socket for all quic to backend " "connections and switch to use it after successful handshake.") -QUICHE_FLAG(bool, quic_restart_flag_quic_do_not_override_connection_id, false, - " When true, QuicFramer will not override connection IDs in headers and will instead " - "respect the source/destination direction as expected by IETF QUIC.") - -QUICHE_FLAG(bool, quic_restart_flag_quic_no_framer_object_in_dispatcher, false, - "If true, make QuicDispatcher no longer have an instance of QuicFramer.") - -QUICHE_FLAG( - bool, quic_restart_flag_dont_fetch_quic_private_keys_from_leto, false, - "If true, GFE will not request private keys when fetching QUIC ServerConfigs from Leto.") - -QUICHE_FLAG(bool, quic_restart_flag_quic_use_allocated_connection_ids, false, - "When true, QuicConnectionId will allocate long connection IDs on the heap instead of " - "inline in the object.") - QUICHE_FLAG(bool, quic_allow_chlo_buffering, true, "If true, allows packets to be buffered 
in anticipation of a " "future CHLO, and allow CHLO packets to be buffered until next " "iteration of the event loop.") -QUICHE_FLAG(bool, quic_disable_pacing_for_perf_tests, false, "If true, disable pacing in QUIC") +QUICHE_FLAG(bool, quic_disable_pacing_for_perf_tests, false, "If true, disable pacing in QUICHE") QUICHE_FLAG(bool, quic_enforce_single_packet_chlo, true, "If true, enforce that QUIC CHLOs fit in one packet") @@ -440,54 +408,86 @@ QUICHE_FLAG(bool, quic_enforce_single_packet_chlo, true, // 200 seconds * 1000 qps = 200000. // Of course, there are usually many queries per QUIC connection, so we allow a // factor of 3 leeway. -QUICHE_FLAG(int64_t, quic_time_wait_list_max_connections, 600000, +QUICHE_FLAG(int64_t, // allow-non-std-int + quic_time_wait_list_max_connections, 600000, "Maximum number of connections on the time-wait list. " "A negative value implies no configured limit.") -QUICHE_FLAG(int64_t, quic_time_wait_list_seconds, 200, +QUICHE_FLAG(int64_t, // allow-non-std-int + quic_time_wait_list_seconds, 200, "Time period for which a given connection_id should live in " "the time-wait state.") QUICHE_FLAG(double, quic_bbr_cwnd_gain, 2.0f, "Congestion window gain for QUIC BBR during PROBE_BW phase.") -QUICHE_FLAG(int32_t, quic_buffered_data_threshold, 8 * 1024, +QUICHE_FLAG(int32_t, // allow-non-std-int + quic_buffered_data_threshold, 8 * 1024, "If buffered data in QUIC stream is less than this " "threshold, buffers all provided data or asks upper layer for more data") -QUICHE_FLAG(int32_t, quic_ietf_draft_version, 0, - "Mechanism to override version label and ALPN for IETF interop.") - -QUICHE_FLAG(int32_t, quic_send_buffer_max_data_slice_size, 4 * 1024, +QUICHE_FLAG(int32_t, // allow-non-std-int + quic_send_buffer_max_data_slice_size, 4 * 1024, "Max size of data slice in bytes for QUIC stream send buffer.") QUICHE_FLAG(bool, quic_supports_tls_handshake, false, "If true, QUIC supports both QUIC Crypto and TLS 1.3 for the " "handshake protocol") 
-QUICHE_FLAG(int32_t, quic_lumpy_pacing_size, 1, +QUICHE_FLAG(int32_t, // allow-non-std-int + quic_lumpy_pacing_size, 1, "Number of packets that the pacing sender allows in bursts during pacing.") QUICHE_FLAG(double, quic_lumpy_pacing_cwnd_fraction, 0.25f, "Congestion window fraction that the pacing sender allows in bursts " "during pacing.") -QUICHE_FLAG(int32_t, quic_max_pace_time_into_future_ms, 10, +QUICHE_FLAG(int32_t, // allow-non-std-int + quic_max_pace_time_into_future_ms, 10, "Max time that QUIC can pace packets into the future in ms.") QUICHE_FLAG(double, quic_pace_time_into_future_srtt_fraction, 0.125f, // One-eighth smoothed RTT "Smoothed RTT fraction that a connection can pace packets into the future.") +QUICHE_FLAG(int32_t, // allow-non-std-int + quic_ietf_draft_version, 0, + "Mechanism to override version label and ALPN for IETF interop.") + QUICHE_FLAG(bool, quic_export_server_num_packets_per_write_histogram, false, "If true, export number of packets written per write operation histogram.") -QUICHE_FLAG(int64_t, quic_headers_stream_id_in_v99, 0, - "QUIC version 99 will use this stream ID for the headers stream.") - QUICHE_FLAG(bool, quic_disable_version_negotiation_grease_randomness, false, "If true, use predictable version negotiation versions.") +QUICHE_FLAG(int64_t, // allow-non-std-int + quic_max_tracked_packet_count, 10000, "Maximum number of tracked packets.") + +QUICHE_FLAG(bool, quic_prober_uses_length_prefixed_connection_ids, false, + "If true, QuicFramer::WriteClientVersionNegotiationProbePacket uses " + "length-prefixed connection IDs.") + +QUICHE_FLAG(bool, quic_client_convert_http_header_name_to_lowercase, true, + "If true, HTTP request header names sent from QuicSpdyClientBase(and " + "descendents) will be automatically converted to lower case.") + +QUICHE_FLAG(int32_t, // allow-non-std-int + quic_bbr2_default_probe_bw_base_duration_ms, 2000, + "The default minimum duration for BBRv2-native probes, in milliseconds.") + 
+QUICHE_FLAG(int32_t, // allow-non-std-int + quic_bbr2_default_probe_bw_max_rand_duration_ms, 1000, + "The default upper bound of the random amount of BBRv2-native " + "probes, in milliseconds.") + +QUICHE_FLAG(int32_t, // allow-non-std-int + quic_bbr2_default_probe_rtt_period_ms, 10000, + "The default period for entering PROBE_RTT, in milliseconds.") + +QUICHE_FLAG(double, quic_bbr2_default_loss_threshold, 0.02, + "The default loss threshold for QUIC BBRv2, should be a value " + "between 0 and 1.") + QUICHE_FLAG(bool, http2_reloadable_flag_http2_testonly_default_false, false, "A testonly reloadable flag that will always default to false.") diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_storage_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_storage_impl.h index 90c59db2426d..3a43ec7316e1 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_storage_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_storage_impl.h @@ -36,6 +36,8 @@ class QuicMemSliceStorageImpl { QuicMemSliceSpan ToSpan() { return QuicMemSliceSpan(QuicMemSliceSpanImpl(buffer_)); } + void Append(QuicMemSliceImpl mem_slice) { buffer_.move(mem_slice.single_slice_buffer()); } + private: Envoy::Buffer::OwnedImpl buffer_; }; diff --git a/source/extensions/quic_listeners/quiche/platform/quic_text_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_text_utils_impl.h index 42bb24e6828a..e39508adbb29 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_text_utils_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_text_utils_impl.h @@ -69,6 +69,10 @@ class QuicTextUtilsImpl { return std::any_of(data.begin(), data.end(), absl::ascii_isupper); } + static bool IsAllDigits(QuicStringPieceImpl data) { + return std::all_of(data.begin(), data.end(), absl::ascii_isdigit); + } + static std::vector Split(QuicStringPieceImpl data, char delim) { return absl::StrSplit(data, 
delim); } diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_containers_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_containers_impl.h index 57d953c939d3..35d08c6183bf 100644 --- a/source/extensions/quic_listeners/quiche/platform/spdy_containers_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/spdy_containers_impl.h @@ -37,4 +37,6 @@ inline size_t SpdyHashStringPairImpl(SpdyStringPieceImpl a, SpdyStringPieceImpl return absl::Hash>()(std::make_pair(a, b)); } +template +using SpdySmallMapImpl = absl::flat_hash_map; } // namespace spdy diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_map_util_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_map_util_impl.h new file mode 100644 index 000000000000..befd8c7f8c6f --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_map_util_impl.h @@ -0,0 +1,18 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include + +namespace spdy { + +template +bool SpdyContainsKeyImpl(const Collection& collection, const Key& key) { + return collection.find(key) != collection.end(); +} + +} // namespace spdy diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 313ef1d0e808..b588a5c069fa 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -30,6 +30,7 @@ envoy_cc_test( name = "envoy_quic_writer_test", srcs = ["envoy_quic_writer_test.cc"], external_deps = ["quiche_quic_platform"], + tags = ["nofips"], deps = [ "//source/common/network:io_socket_error_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_packet_writer_lib", diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index 1383cfbfbb17..09ef037677b0 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -34,6 +34,7 @@ envoy_cc_test( copts = ["-Wno-unused-parameter"], data = ["//test/extensions/transport_sockets/tls/test_data:certs"], external_deps = ["quiche_quic_platform"], + tags = ["nofips"], deps = [ ":quic_platform_epoll_clock_lib", "//source/common/memory:stats_lib", @@ -113,6 +114,7 @@ envoy_cc_test_library( "//bazel:linux": ["quic_epoll_clock.h"], "//conditions:default": [], }), + tags = ["nofips"], deps = [ "@com_googlesource_quiche//:quic_platform", "@com_googlesource_quiche//:quic_platform_epoll_lib", @@ -122,12 +124,14 @@ envoy_cc_test_library( envoy_cc_test_library( name = "quic_platform_epoll_impl_lib", hdrs = ["quic_epoll_impl.h"], + tags = ["nofips"], deps = ["@com_googlesource_quiche//:epoll_server_lib"], ) envoy_cc_test_library( name = "quic_platform_expect_bug_impl_lib", hdrs = ["quic_expect_bug_impl.h"], + tags = ["nofips"], deps = [ "@com_googlesource_quiche//:quic_platform_base", "@com_googlesource_quiche//:quic_platform_mock_log", @@ -137,6 
+141,7 @@ envoy_cc_test_library( envoy_cc_test_library( name = "quic_platform_mock_log_impl_lib", hdrs = ["quic_mock_log_impl.h"], + tags = ["nofips"], deps = ["@com_googlesource_quiche//:quic_platform_base"], ) @@ -144,6 +149,7 @@ envoy_cc_test_library( name = "quic_platform_port_utils_impl_lib", srcs = ["quic_port_utils_impl.cc"], hdrs = ["quic_port_utils_impl.h"], + tags = ["nofips"], deps = [ "//source/common/network:utility_lib", "//test/test_common:environment_lib", @@ -153,6 +159,7 @@ envoy_cc_test_library( envoy_cc_test_library( name = "quic_platform_test_mem_slice_vector_impl_lib", hdrs = ["quic_test_mem_slice_vector_impl.h"], + tags = ["nofips"], deps = [ "//include/envoy/buffer:buffer_interface", "@com_googlesource_quiche//:quic_platform_mem_slice_span", @@ -162,17 +169,20 @@ envoy_cc_test_library( envoy_cc_test_library( name = "quic_platform_sleep_impl_lib", hdrs = ["quic_sleep_impl.h"], + tags = ["nofips"], deps = ["@com_googlesource_quiche//:quic_core_time_lib"], ) envoy_cc_test_library( name = "quic_platform_system_event_loop_impl_lib", hdrs = ["quic_system_event_loop_impl.h"], + tags = ["nofips"], ) envoy_cc_test_library( name = "quic_platform_thread_impl_lib", hdrs = ["quic_thread_impl.h"], + tags = ["nofips"], deps = [ "//include/envoy/thread:thread_interface", "//source/common/common:assert_lib", @@ -183,6 +193,7 @@ envoy_cc_test_library( envoy_cc_test_library( name = "quic_platform_test_impl_lib", hdrs = ["quic_test_impl.h"], + tags = ["nofips"], deps = ["//source/common/common:assert_lib"], ) @@ -190,6 +201,7 @@ envoy_cc_test_library( name = "quic_platform_test_output_impl_lib", srcs = ["quic_test_output_impl.cc"], hdrs = ["quic_test_output_impl.h"], + tags = ["nofips"], deps = [ "//source/common/filesystem:filesystem_lib", "@com_googlesource_quiche//:quic_platform_base", @@ -217,6 +229,7 @@ envoy_cc_test_library( envoy_cc_test( name = "envoy_quic_clock_test", srcs = ["envoy_quic_clock_test.cc"], + tags = ["nofips"], deps = [ 
"//source/extensions/quic_listeners/quiche/platform:envoy_quic_clock_lib", "//test/test_common:simulated_time_system_lib", From 29f199c8625208a42483a1fa2de622c0fa105f49 Mon Sep 17 00:00:00 2001 From: Jyoti Mahapatra <49211422+jyotimahapatra@users.noreply.github.com> Date: Thu, 29 Aug 2019 11:30:50 -0700 Subject: [PATCH 15/31] tools: deprecated field check in Route Checker tool (#8058) We need a way to run the deprecated field check on the RouteConfiguration. Today the schema check tool validates the bootstrap config. This change will help achieve similar functionality on routes served from rds. Risk Level: Low Testing: Manual testing Docs Changes: included Release Notes: included Signed-off-by: Jyoti Mahapatra --- .../install/tools/route_table_check_tool.rst | 10 +++ docs/root/intro/version_history.rst | 1 + test/tools/router_check/router.cc | 9 ++- test/tools/router_check/router.h | 10 ++- test/tools/router_check/router_check.cc | 5 +- .../test/config/HeaderMatchedRouting.yaml | 4 +- .../router_check/test/config/TestRoutes.yaml | 63 ++++++++++++++----- 7 files changed, 83 insertions(+), 19 deletions(-) diff --git a/docs/root/install/tools/route_table_check_tool.rst b/docs/root/install/tools/route_table_check_tool.rst index ac0b523eec09..8acb2ac34a9d 100644 --- a/docs/root/install/tools/route_table_check_tool.rst +++ b/docs/root/install/tools/route_table_check_tool.rst @@ -40,6 +40,16 @@ Usage -p, --useproto Use Proto test file schema + -f, --fail-under + Represents a percent value for route test coverage under which the run should fail. + + --covall + Enables comprehensive code coverage percent calculation taking into account all the possible + asserts. + + --disable-deprecation-check + Disables the deprecation check for RouteConfiguration proto. + -h, --help Displays usage information and exits. 
diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index c3598a5c7c7a..9667356cc3d8 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -45,6 +45,7 @@ Version history * router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. * router check tool: add coverage reporting & enforcement. * router check tool: add comprehensive coverage reporting. +* router check tool: add deprecated field check. * tls: added verification of IP address SAN fields in certificates against configured SANs in the certificate validation context. * tracing: added tags for gRPC response status and meesage. diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 6922d26cea2f..5eeadc7a1bb1 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -63,7 +63,8 @@ ToolConfig::ToolConfig(std::unique_ptr headers, int ran : headers_(std::move(headers)), random_value_(random_value) {} // static -RouterCheckTool RouterCheckTool::create(const std::string& router_config_file) { +RouterCheckTool RouterCheckTool::create(const std::string& router_config_file, + const bool disableDeprecationCheck) { // TODO(hennna): Allow users to load a full config and extract the route configuration from it. 
envoy::api::v2::RouteConfiguration route_config; auto stats = std::make_unique(); @@ -72,6 +73,9 @@ RouterCheckTool RouterCheckTool::create(const std::string& router_config_file) { auto factory_context = std::make_unique>(); auto config = std::make_unique(route_config, *factory_context, false); + if (!disableDeprecationCheck) { + MessageUtil::checkForDeprecation(route_config, &factory_context->runtime_loader_); + } return RouterCheckTool(std::move(factory_context), std::move(config), std::move(stats), std::move(api), Coverage(route_config)); @@ -439,6 +443,8 @@ Options::Options(int argc, char** argv) { TCLAP::CmdLine cmd("router_check_tool", ' ', "none", true); TCLAP::SwitchArg is_proto("p", "useproto", "Use Proto test file schema", cmd, false); TCLAP::SwitchArg is_detailed("d", "details", "Show detailed test execution results", cmd, false); + TCLAP::SwitchArg disable_deprecation_check("", "disable-deprecation-check", + "Disable deprecated fields check", cmd, false); TCLAP::ValueArg fail_under("f", "fail-under", "Fail if test coverage is under a specified amount", false, 0.0, "float", cmd); @@ -461,6 +467,7 @@ Options::Options(int argc, char** argv) { is_detailed_ = is_detailed.getValue(); fail_under_ = fail_under.getValue(); comprehensive_coverage_ = comprehensive_coverage.getValue(); + disable_deprecation_check_ = disable_deprecation_check.getValue(); if (is_proto_) { config_path_ = config_path.getValue(); diff --git a/test/tools/router_check/router.h b/test/tools/router_check/router.h index 7c81835da7ae..e2156afa1072 100644 --- a/test/tools/router_check/router.h +++ b/test/tools/router_check/router.h @@ -65,10 +65,12 @@ class RouterCheckTool : Logger::Loggable { public: /** * @param router_config_file v2 router config file. + * @param disableDeprecationCheck flag to disable the RouteConfig deprecated field check * @return RouterCheckTool a RouterCheckTool instance with member variables set by the router * config file. 
* */ - static RouterCheckTool create(const std::string& router_config_file); + static RouterCheckTool create(const std::string& router_config_file, + const bool disableDeprecationCheck); /** * TODO(tonya11en): Use a YAML format for the expected routes. This will require a proto. @@ -198,6 +200,11 @@ class Options { */ bool isDetailed() const { return is_detailed_; } + /** + * @return true if the deprecated field check for RouteConfiguration is disabled. + */ + bool disableDeprecationCheck() const { return disable_deprecation_check_; } + private: std::string test_path_; std::string config_path_; @@ -207,5 +214,6 @@ class Options { bool comprehensive_coverage_; bool is_proto_; bool is_detailed_; + bool disable_deprecation_check_; }; } // namespace Envoy diff --git a/test/tools/router_check/router_check.cc b/test/tools/router_check/router_check.cc index e17f3eecc827..f3856e285d8c 100644 --- a/test/tools/router_check/router_check.cc +++ b/test/tools/router_check/router_check.cc @@ -10,8 +10,9 @@ int main(int argc, char* argv[]) { const bool enforce_coverage = options.failUnder() != 0.0; try { Envoy::RouterCheckTool checktool = - options.isProto() ? Envoy::RouterCheckTool::create(options.configPath()) - : Envoy::RouterCheckTool::create(options.unlabelledConfigPath()); + options.isProto() ? 
Envoy::RouterCheckTool::create(options.configPath(), + options.disableDeprecationCheck()) + : Envoy::RouterCheckTool::create(options.unlabelledConfigPath(), true); if (options.isDetailed()) { checktool.setShowDetails(); diff --git a/test/tools/router_check/test/config/HeaderMatchedRouting.yaml b/test/tools/router_check/test/config/HeaderMatchedRouting.yaml index f28c96a50a03..5f891bea08d5 100644 --- a/test/tools/router_check/test/config/HeaderMatchedRouting.yaml +++ b/test/tools/router_check/test/config/HeaderMatchedRouting.yaml @@ -30,7 +30,9 @@ virtual_hosts: prefix: / headers: - name: test_header_pattern - regex_match: ^user=test-\d+$ + safe_regex_match: + google_re2: {} + regex: ^user=test-\d+$ route: cluster: local_service_with_header_pattern_set_regex - match: diff --git a/test/tools/router_check/test/config/TestRoutes.yaml b/test/tools/router_check/test/config/TestRoutes.yaml index 34b665fb9fd5..862b6a4da8d5 100644 --- a/test/tools/router_check/test/config/TestRoutes.yaml +++ b/test/tools/router_check/test/config/TestRoutes.yaml @@ -104,26 +104,61 @@ virtual_hosts: timeout: seconds: 30 virtual_clusters: - - pattern: ^/rides$ - method: POST + - headers: + - name: :path + safe_regex_match: + google_re2: {} + regex: ^/rides$ + - name: :method + exact_match: POST name: ride_request - - pattern: ^/rides/\d+$ - method: PUT + - headers: + - name: :path + safe_regex_match: + google_re2: {} + regex: ^/rides/\d+$ + - name: :method + exact_match: PUT name: update_ride - - pattern: ^/users/\d+/chargeaccounts$ - method: POST + - headers: + - name: :path + safe_regex_match: + google_re2: {} + regex: ^/users/\d+/chargeaccounts$ + - name: :method + exact_match: POST name: cc_add - - pattern: ^/users/\d+/chargeaccounts/(?!validate)\w+$ - method: PUT + - headers: + - name: :path + safe_regex_match: + google_re2: {} + regex: ^/users/\d+/chargeaccounts/[^validate]\w+$ + - name: :method + exact_match: PUT name: cc_add - - pattern: ^/users$ - method: POST + - headers: + - name: 
:path + safe_regex_match: + google_re2: {} + regex: ^/users$ + - name: :method + exact_match: POST name: create_user_login - - pattern: ^/users/\d+$ - method: PUT + - headers: + - name: :path + safe_regex_match: + google_re2: {} + regex: ^/users/\d+$ + - name: :method + exact_match: PUT name: update_user - - pattern: ^/users/\d+/location$ - method: POST + - headers: + - name: :path + safe_regex_match: + google_re2: {} + regex: ^/users/\d+/location$ + - name: :method + exact_match: POST name: ulu internal_only_headers: - x-lyft-user-id From 6c6e18e5881936405cba72e936ecd6a5b3fe0852 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 30 Aug 2019 16:05:12 +0700 Subject: [PATCH 16/31] tracing: Add support for sending data in Zipkin v2 format (#6985) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Description: This patch supports sending a list of spans as JSON v2 and protobuf message over HTTP to Zipkin collector. [Sending protobuf](https://github.com/openzipkin/zipkin-api/blob/0.2.1/zipkin.proto) is considered to be more efficient than JSON, even compared to the v2's JSON (https://github.com/openzipkin/zipkin/pull/2589#issuecomment-491642768). This is an effort to rework https://github.com/envoyproxy/envoy/pull/6798. The approach is by serializing the v1 model to both v2 JSON and protobuf. Risk Level: Low, since the default is still HTTP-JSON v1 based on https://github.com/openzipkin/zipkin-api/blob/0.2.2/zipkin-api.yaml. Testing: Unit testing, manual integration test with real Zipkin collector server. 
Docs Changes: Added Release Notes: Added Fixes: #4839 Signed-off-by: Dhi Aurrahman Signed-off-by: José Carlos Chávez --- api/bazel/repositories.bzl | 25 ++ api/bazel/repository_locations.bzl | 8 + api/envoy/config/trace/v2/trace.proto | 28 +- docs/root/intro/deprecated.rst | 3 + docs/root/intro/version_history.rst | 1 + source/common/http/headers.h | 1 + source/extensions/tracers/zipkin/BUILD | 1 + .../extensions/tracers/zipkin/span_buffer.cc | 217 +++++++++- .../extensions/tracers/zipkin/span_buffer.h | 97 ++++- source/extensions/tracers/zipkin/tracer.cc | 6 +- source/extensions/tracers/zipkin/tracer.h | 10 +- .../tracers/zipkin/tracer_interface.h | 21 + source/extensions/tracers/zipkin/util.cc | 14 +- source/extensions/tracers/zipkin/util.h | 24 ++ .../tracers/zipkin/zipkin_core_constants.h | 3 + .../tracers/zipkin/zipkin_core_types.cc | 3 + .../tracers/zipkin/zipkin_core_types.h | 28 +- .../tracers/zipkin/zipkin_tracer_impl.cc | 69 +-- .../tracers/zipkin/zipkin_tracer_impl.h | 33 +- test/extensions/tracers/zipkin/BUILD | 1 + test/extensions/tracers/zipkin/config_test.cc | 3 +- .../tracers/zipkin/span_buffer_test.cc | 407 ++++++++++++++---- test/extensions/tracers/zipkin/tracer_test.cc | 2 +- .../tracers/zipkin/zipkin_tracer_impl_test.cc | 153 ++++--- 24 files changed, 925 insertions(+), 233 deletions(-) diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index d76337dc1200..7af054b20f4f 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -38,6 +38,11 @@ def api_dependencies(): locations = REPOSITORY_LOCATIONS, build_file_content = KAFKASOURCE_BUILD_CONTENT, ) + envoy_http_archive( + name = "com_github_openzipkin_zipkinapi", + locations = REPOSITORY_LOCATIONS, + build_file_content = ZIPKINAPI_BUILD_CONTENT, + ) GOGOPROTO_BUILD_CONTENT = """ load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") @@ -153,3 +158,23 @@ filegroup( ) """ + +ZIPKINAPI_BUILD_CONTENT = """ + 
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +api_proto_library( + name = "zipkin", + srcs = [ + "zipkin-jsonv2.proto", + "zipkin.proto", + ], + visibility = ["//visibility:public"], +) + +api_go_proto_library( + name = "zipkin", + proto = ":zipkin", +) +""" diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 2febf71148b7..8d688fd02e22 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -21,6 +21,9 @@ KAFKA_SOURCE_SHA = "ae7a1696c0a0302b43c5b21e515c37e6ecd365941f68a510a7e442eebddf UDPA_GIT_SHA = "4cbdcb9931ca743a915a7c5fda51b2ee793ed157" # Aug 22, 2019 UDPA_SHA256 = "6291d0c0e3a4d5f08057ea7a00ed0b0ec3dd4e5a3b1cf20f803774680b5a806f" +ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 +ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" + REPOSITORY_LOCATIONS = dict( bazel_skylib = dict( sha256 = BAZEL_SKYLIB_SHA256, @@ -62,4 +65,9 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "kafka-2.2.0-rc2/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/2.2.0-rc2.zip"], ), + com_github_openzipkin_zipkinapi = dict( + sha256 = ZIPKINAPI_SHA256, + strip_prefix = "zipkin-api-" + ZIPKINAPI_RELEASE, + urls = ["https://github.com/openzipkin/zipkin-api/archive/" + ZIPKINAPI_RELEASE + ".tar.gz"], + ), ) diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index 9da6ef4313d8..65c027cd73fe 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -65,6 +65,7 @@ message LightstepConfig { string access_token_file = 2 [(validate.rules).string.min_bytes = 1]; } +// Configuration for the Zipkin tracer. message ZipkinConfig { // The cluster manager cluster that hosts the Zipkin collectors. 
Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster @@ -80,9 +81,34 @@ message ZipkinConfig { // trace instance. The default value is false, which will result in a 64 bit trace id being used. bool trace_id_128bit = 3; - // Determines whether client and server spans will shared the same span id. + // Determines whether client and server spans will share the same span context. // The default value is true. google.protobuf.BoolValue shared_span_context = 4; + + // Available Zipkin collector endpoint versions. + enum CollectorEndpointVersion { + // Zipkin API v1, JSON over HTTP. + // [#comment: The default implementation of Zipkin client before this field is added was only v1 + // and the way user configure this was by not explicitly specifying the version. Consequently, + // before this is added, the corresponding Zipkin collector expected to receive v1 payload. + // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when + // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, + // since in Zipkin realm this v1 version is considered to be not preferable anymore.] + HTTP_JSON_V1 = 0 [deprecated = true]; + + // Zipkin API v2, JSON over HTTP. + HTTP_JSON = 1; + + // Zipkin API v2, protobuf over HTTP. + HTTP_PROTO = 2; + + // [#not-implemented-hide:] + GRPC = 3; + } + + // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be + // used. + CollectorEndpointVersion collector_endpoint_version = 5; } // DynamicOtConfig is used to dynamically load a tracer from a shared library diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst index fb7832739383..57fd2dd5dbc3 100644 --- a/docs/root/intro/deprecated.rst +++ b/docs/root/intro/deprecated.rst @@ -31,6 +31,9 @@ Version 1.12.0 (pending) and `present_match` fields. 
* The :option:`--allow-unknown-fields` command-line option, use :option:`--allow-unknown-static-fields` instead. +* The use of HTTP_JSON_V1 :ref:`Zipkin collector endpoint version + ` or not explicitly + specifying it is deprecated, use HTTP_JSON or HTTP_PROTO instead. Version 1.11.0 (July 11, 2019) ============================== diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 9667356cc3d8..c4e6c2f2ac52 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -45,6 +45,7 @@ Version history * router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. * router check tool: add coverage reporting & enforcement. * router check tool: add comprehensive coverage reporting. +* tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP. * router check tool: add deprecated field check. * tls: added verification of IP address SAN fields in certificates against configured SANs in the certificate validation context. 
diff --git a/source/common/http/headers.h b/source/common/http/headers.h index b711fb4b142a..b0bcb7cc48ce 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -178,6 +178,7 @@ class HeaderValues { const std::string GrpcWebText{"application/grpc-web-text"}; const std::string GrpcWebTextProto{"application/grpc-web-text+proto"}; const std::string Json{"application/json"}; + const std::string Protobuf{"application/x-protobuf"}; const std::string FormUrlEncoded{"application/x-www-form-urlencoded"}; } ContentTypeValues; diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD index 77937e6dbbbf..652496900174 100644 --- a/source/extensions/tracers/zipkin/BUILD +++ b/source/extensions/tracers/zipkin/BUILD @@ -57,6 +57,7 @@ envoy_cc_library( "//source/common/singleton:const_singleton", "//source/common/tracing:http_tracer_lib", "//source/extensions/tracers:well_known_names", + "@com_github_openzipkin_zipkinapi//:zipkin_cc", ], ) diff --git a/source/extensions/tracers/zipkin/span_buffer.cc b/source/extensions/tracers/zipkin/span_buffer.cc index 387d851a9f91..66bb96a9463b 100644 --- a/source/extensions/tracers/zipkin/span_buffer.cc +++ b/source/extensions/tracers/zipkin/span_buffer.cc @@ -1,35 +1,224 @@ #include "extensions/tracers/zipkin/span_buffer.h" +#include "common/protobuf/protobuf.h" + +#include "extensions/tracers/zipkin/util.h" +#include "extensions/tracers/zipkin/zipkin_core_constants.h" + +#include "absl/strings/str_join.h" + namespace Envoy { namespace Extensions { namespace Tracers { namespace Zipkin { -// TODO(fabolive): Need to avoid the copy to improve performance. 
-bool SpanBuffer::addSpan(const Span& span) { - if (span_buffer_.size() == span_buffer_.capacity()) { - // Buffer full +SpanBuffer::SpanBuffer( + const envoy::config::trace::v2::ZipkinConfig::CollectorEndpointVersion& version, + const bool shared_span_context) + : serializer_{makeSerializer(version, shared_span_context)} {} + +SpanBuffer::SpanBuffer( + const envoy::config::trace::v2::ZipkinConfig::CollectorEndpointVersion& version, + const bool shared_span_context, uint64_t size) + : serializer_{makeSerializer(version, shared_span_context)} { + allocateBuffer(size); +} + +bool SpanBuffer::addSpan(Span&& span) { + const auto& annotations = span.annotations(); + if (span_buffer_.size() == span_buffer_.capacity() || annotations.empty() || + annotations.end() == + std::find_if(annotations.begin(), annotations.end(), [](const auto& annotation) { + return annotation.value() == ZipkinCoreConstants::get().CLIENT_SEND || + annotation.value() == ZipkinCoreConstants::get().SERVER_RECV; + })) { + + // Buffer full or invalid span. 
return false; } + span_buffer_.push_back(std::move(span)); return true; } -std::string SpanBuffer::toStringifiedJsonArray() { - std::string stringified_json_array = "["; +SerializerPtr SpanBuffer::makeSerializer( + const envoy::config::trace::v2::ZipkinConfig::CollectorEndpointVersion& version, + const bool shared_span_context) { + switch (version) { + case envoy::config::trace::v2::ZipkinConfig::HTTP_JSON_V1: + return std::make_unique(); + case envoy::config::trace::v2::ZipkinConfig::HTTP_JSON: + return std::make_unique(shared_span_context); + case envoy::config::trace::v2::ZipkinConfig::HTTP_PROTO: + return std::make_unique(shared_span_context); + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +std::string JsonV1Serializer::serialize(const std::vector& zipkin_spans) { + const std::string serialized_elements = + absl::StrJoin(zipkin_spans, ",", [](std::string* element, Span zipkin_span) { + absl::StrAppend(element, zipkin_span.toJson()); + }); + return absl::StrCat("[", serialized_elements, "]"); +} + +JsonV2Serializer::JsonV2Serializer(const bool shared_span_context) + : shared_span_context_{shared_span_context} {} + +std::string JsonV2Serializer::serialize(const std::vector& zipkin_spans) { + const std::string serialized_elements = + absl::StrJoin(zipkin_spans, ",", [this](std::string* out, const Span& zipkin_span) { + absl::StrAppend(out, + absl::StrJoin(toListOfSpans(zipkin_span), ",", + [](std::string* element, const zipkin::jsonv2::Span& span) { + std::string entry; + Protobuf::util::MessageToJsonString(span, &entry); + absl::StrAppend(element, entry); + })); + }); + return absl::StrCat("[", serialized_elements, "]"); +} - if (pendingSpans()) { - stringified_json_array += span_buffer_[0].toJson(); - const uint64_t size = span_buffer_.size(); - for (uint64_t i = 1; i < size; i++) { - stringified_json_array += ","; - stringified_json_array += span_buffer_[i].toJson(); +const std::vector +JsonV2Serializer::toListOfSpans(const Span& zipkin_span) const { + 
std::vector spans; + spans.reserve(zipkin_span.annotations().size()); + for (const auto& annotation : zipkin_span.annotations()) { + zipkin::jsonv2::Span span; + + if (annotation.value() == ZipkinCoreConstants::get().CLIENT_SEND) { + span.set_kind(ZipkinCoreConstants::get().KIND_CLIENT); + } else if (annotation.value() == ZipkinCoreConstants::get().SERVER_RECV) { + span.set_shared(shared_span_context_ && zipkin_span.annotations().size() > 1); + span.set_kind(ZipkinCoreConstants::get().KIND_SERVER); + } else { + continue; + } + + if (annotation.isSetEndpoint()) { + span.set_timestamp(annotation.timestamp()); + span.mutable_local_endpoint()->MergeFrom(toProtoEndpoint(annotation.endpoint())); + } + + span.set_trace_id(zipkin_span.traceIdAsHexString()); + if (zipkin_span.isSetParentId()) { + span.set_parent_id(zipkin_span.parentIdAsHexString()); + } + + span.set_id(zipkin_span.idAsHexString()); + span.set_name(zipkin_span.name()); + + if (zipkin_span.isSetDuration()) { + span.set_duration(zipkin_span.duration()); + } + + auto& tags = *span.mutable_tags(); + for (const auto& binary_annotation : zipkin_span.binaryAnnotations()) { + tags[binary_annotation.key()] = binary_annotation.value(); } + + spans.push_back(std::move(span)); + } + return spans; +} + +const zipkin::jsonv2::Endpoint +JsonV2Serializer::toProtoEndpoint(const Endpoint& zipkin_endpoint) const { + zipkin::jsonv2::Endpoint endpoint; + Network::Address::InstanceConstSharedPtr address = zipkin_endpoint.address(); + if (address) { + if (address->ip()->version() == Network::Address::IpVersion::v4) { + endpoint.set_ipv4(address->ip()->addressAsString()); + } else { + endpoint.set_ipv6(address->ip()->addressAsString()); + } + endpoint.set_port(address->ip()->port()); + } + + const std::string& service_name = zipkin_endpoint.serviceName(); + if (!service_name.empty()) { + endpoint.set_service_name(service_name); + } + + return endpoint; +} + +ProtobufSerializer::ProtobufSerializer(const bool shared_span_context) + 
: shared_span_context_{shared_span_context} {} + +std::string ProtobufSerializer::serialize(const std::vector& zipkin_spans) { + zipkin::proto3::ListOfSpans spans; + for (const Span& zipkin_span : zipkin_spans) { + spans.MergeFrom(toListOfSpans(zipkin_span)); + } + std::string serialized; + spans.SerializeToString(&serialized); + return serialized; +} + +const zipkin::proto3::ListOfSpans ProtobufSerializer::toListOfSpans(const Span& zipkin_span) const { + zipkin::proto3::ListOfSpans spans; + for (const auto& annotation : zipkin_span.annotations()) { + zipkin::proto3::Span span; + if (annotation.value() == ZipkinCoreConstants::get().CLIENT_SEND) { + span.set_kind(zipkin::proto3::Span::CLIENT); + } else if (annotation.value() == ZipkinCoreConstants::get().SERVER_RECV) { + span.set_shared(shared_span_context_ && zipkin_span.annotations().size() > 1); + span.set_kind(zipkin::proto3::Span::SERVER); + } else { + continue; + } + + if (annotation.isSetEndpoint()) { + span.set_timestamp(annotation.timestamp()); + span.mutable_local_endpoint()->MergeFrom(toProtoEndpoint(annotation.endpoint())); + } + + span.set_trace_id(zipkin_span.traceIdAsByteString()); + if (zipkin_span.isSetParentId()) { + span.set_parent_id(zipkin_span.parentIdAsByteString()); + } + + span.set_id(zipkin_span.idAsByteString()); + span.set_name(zipkin_span.name()); + + if (zipkin_span.isSetDuration()) { + span.set_duration(zipkin_span.duration()); + } + + auto& tags = *span.mutable_tags(); + for (const auto& binary_annotation : zipkin_span.binaryAnnotations()) { + tags[binary_annotation.key()] = binary_annotation.value(); + } + + auto* mutable_span = spans.add_spans(); + mutable_span->MergeFrom(span); + } + return spans; +} + +const zipkin::proto3::Endpoint +ProtobufSerializer::toProtoEndpoint(const Endpoint& zipkin_endpoint) const { + zipkin::proto3::Endpoint endpoint; + Network::Address::InstanceConstSharedPtr address = zipkin_endpoint.address(); + if (address) { + if (address->ip()->version() == 
Network::Address::IpVersion::v4) { + endpoint.set_ipv4(Util::toByteString(address->ip()->ipv4()->address())); + } else { + endpoint.set_ipv6(Util::toByteString(address->ip()->ipv6()->address())); + } + endpoint.set_port(address->ip()->port()); + } + + const std::string& service_name = zipkin_endpoint.serviceName(); + if (!service_name.empty()) { + endpoint.set_service_name(service_name); } - stringified_json_array += "]"; - return stringified_json_array; + return endpoint; } } // namespace Zipkin diff --git a/source/extensions/tracers/zipkin/span_buffer.h b/source/extensions/tracers/zipkin/span_buffer.h index a67479c644a4..a5718600129e 100644 --- a/source/extensions/tracers/zipkin/span_buffer.h +++ b/source/extensions/tracers/zipkin/span_buffer.h @@ -1,7 +1,13 @@ #pragma once +#include "envoy/config/trace/v2/trace.pb.h" + +#include "extensions/tracers/zipkin/tracer_interface.h" #include "extensions/tracers/zipkin/zipkin_core_types.h" +#include "zipkin-jsonv2.pb.h" +#include "zipkin.pb.h" + namespace Envoy { namespace Extensions { namespace Tracers { @@ -16,15 +22,26 @@ class SpanBuffer { /** * Constructor that creates an empty buffer. Space needs to be allocated by invoking * the method allocateBuffer(size). + * + * @param version The selected Zipkin collector version. @see + * api/envoy/config/trace/v2/trace.proto. + * @param shared_span_context To determine whether client and server spans will share the same + * span context. */ - SpanBuffer() = default; + SpanBuffer(const envoy::config::trace::v2::ZipkinConfig::CollectorEndpointVersion& version, + bool shared_span_context); /** * Constructor that initializes a buffer with the given size. * + * @param version The selected Zipkin collector version. @see + * api/envoy/config/trace/v2/trace.proto. + * @param shared_span_context To determine whether client and server spans will share the same + * span context. * @param size The desired buffer size. 
*/ - SpanBuffer(uint64_t size) { allocateBuffer(size); } + SpanBuffer(const envoy::config::trace::v2::ZipkinConfig::CollectorEndpointVersion& version, + bool shared_span_context, uint64_t size); /** * Allocates space for an empty buffer or resizes a previously-allocated one. @@ -40,7 +57,7 @@ class SpanBuffer { * * @return true if the span was successfully added, or false if the buffer was full. */ - bool addSpan(const Span& span); + bool addSpan(Span&& span); /** * Empties the buffer. This method is supposed to be called when all buffered spans @@ -54,14 +71,82 @@ class SpanBuffer { uint64_t pendingSpans() { return span_buffer_.size(); } /** - * @return the contents of the buffer as a stringified array of JSONs, where - * each JSON in the array corresponds to one Zipkin span. + * Serializes std::vector span_buffer_ to std::string as payload for the reporter when the + * reporter does spans flushing. This function does only serialization and does not clear + * span_buffer_. + * + * @return std::string the contents of the buffer, a collection of serialized pending Zipkin + * spans. */ - std::string toStringifiedJsonArray(); + std::string serialize() const { return serializer_->serialize(span_buffer_); } private: + SerializerPtr + makeSerializer(const envoy::config::trace::v2::ZipkinConfig::CollectorEndpointVersion& version, + bool shared_span_context); + // We use a pre-allocated vector to improve performance std::vector span_buffer_; + SerializerPtr serializer_; +}; + +using SpanBufferPtr = std::unique_ptr; + +/** + * JsonV1Serializer implements Zipkin::Serializer that serializes list of Zipkin spans into JSON + * Zipkin v1 array. + */ +class JsonV1Serializer : public Serializer { +public: + JsonV1Serializer() = default; + + /** + * Serialize list of Zipkin spans into Zipkin v1 JSON array. + * @return std::string serialized pending spans as Zipkin v1 JSON array. 
+ */ + std::string serialize(const std::vector& pending_spans) override; +}; + +/** + * JsonV2Serializer implements Zipkin::Serializer that serializes list of Zipkin spans into JSON + * Zipkin v2 array. + */ +class JsonV2Serializer : public Serializer { +public: + JsonV2Serializer(bool shared_span_context); + + /** + * Serialize list of Zipkin spans into Zipkin v2 JSON array. + * @return std::string serialized pending spans as Zipkin v2 JSON array. + */ + std::string serialize(const std::vector& pending_spans) override; + +private: + const std::vector toListOfSpans(const Span& zipkin_span) const; + const zipkin::jsonv2::Endpoint toProtoEndpoint(const Endpoint& zipkin_endpoint) const; + + const bool shared_span_context_; +}; + +/** + * ProtobufSerializer implements Zipkin::Serializer that serializes list of Zipkin spans into + * stringified (SerializeToString) protobuf message. + */ +class ProtobufSerializer : public Serializer { +public: + ProtobufSerializer(bool shared_span_context); + + /** + * Serialize list of Zipkin spans into Zipkin v2 zipkin::proto3::ListOfSpans. + * @return std::string serialized pending spans as Zipkin zipkin::proto3::ListOfSpans. 
+ */ + std::string serialize(const std::vector& pending_spans) override; + +private: + const zipkin::proto3::ListOfSpans toListOfSpans(const Span& zipkin_span) const; + const zipkin::proto3::Endpoint toProtoEndpoint(const Endpoint& zipkin_endpoint) const; + + const bool shared_span_context_; }; } // namespace Zipkin diff --git a/source/extensions/tracers/zipkin/tracer.cc b/source/extensions/tracers/zipkin/tracer.cc index 8b21bef8f23a..aff1d659c12d 100644 --- a/source/extensions/tracers/zipkin/tracer.cc +++ b/source/extensions/tracers/zipkin/tracer.cc @@ -28,7 +28,7 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span } // Create an all-new span, with no parent id - SpanPtr span_ptr(new Span(time_source_)); + SpanPtr span_ptr = std::make_unique(time_source_); span_ptr->setName(span_name); uint64_t random_number = random_generator_.random(); span_ptr->setId(random_number); @@ -56,8 +56,8 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span } SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span_name, - SystemTime timestamp, SpanContext& previous_context) { - SpanPtr span_ptr(new Span(time_source_)); + SystemTime timestamp, const SpanContext& previous_context) { + SpanPtr span_ptr = std::make_unique(time_source_); Annotation annotation; uint64_t timestamp_micro; diff --git a/source/extensions/tracers/zipkin/tracer.h b/source/extensions/tracers/zipkin/tracer.h index 190b68631b63..d51e0645844a 100644 --- a/source/extensions/tracers/zipkin/tracer.h +++ b/source/extensions/tracers/zipkin/tracer.h @@ -33,7 +33,7 @@ class Reporter { * * @param span The span that needs action. */ - virtual void reportSpan(const Span& span) PURE; + virtual void reportSpan(Span&& span) PURE; }; using ReporterPtr = std::unique_ptr; @@ -72,6 +72,7 @@ class Tracer : public TracerInterface { * @param config The tracing configuration * @param span_name Name of the new span. 
* @param start_time The time indicating the beginning of the span. + * @return SpanPtr The root span. */ SpanPtr startSpan(const Tracing::Config&, const std::string& span_name, SystemTime timestamp); @@ -82,12 +83,15 @@ class Tracer : public TracerInterface { * @param span_name Name of the new span. * @param start_time The time indicating the beginning of the span. * @param previous_context The context of the span preceding the one to be created. + * @return SpanPtr The child span. */ SpanPtr startSpan(const Tracing::Config&, const std::string& span_name, SystemTime timestamp, - SpanContext& previous_context); + const SpanContext& previous_context); /** * TracerInterface::reportSpan. + * + * @param span The span to be reported. */ void reportSpan(Span&& span) override; @@ -103,6 +107,8 @@ class Tracer : public TracerInterface { /** * Associates a Reporter object with this Tracer. + * + * @param The span reporter. */ void setReporter(ReporterPtr reporter); diff --git a/source/extensions/tracers/zipkin/tracer_interface.h b/source/extensions/tracers/zipkin/tracer_interface.h index 4c55ab90980b..c56e130e2868 100644 --- a/source/extensions/tracers/zipkin/tracer_interface.h +++ b/source/extensions/tracers/zipkin/tracer_interface.h @@ -1,5 +1,9 @@ #pragma once +#include +#include +#include + #include "envoy/common/pure.h" namespace Envoy { @@ -31,6 +35,23 @@ class TracerInterface { virtual void reportSpan(Span&& span) PURE; }; +/** + * Buffered pending spans serializer. + */ +class Serializer { +public: + virtual ~Serializer() = default; + + /** + * Serialize buffered pending spans. + * + * @return std::string serialized buffered pending spans. 
+ */ + virtual std::string serialize(const std::vector& spans) PURE; +}; + +using SerializerPtr = std::unique_ptr; + } // namespace Zipkin } // namespace Tracers } // namespace Extensions diff --git a/source/extensions/tracers/zipkin/util.cc b/source/extensions/tracers/zipkin/util.cc index d18eff673b04..18eea42daf87 100644 --- a/source/extensions/tracers/zipkin/util.cc +++ b/source/extensions/tracers/zipkin/util.cc @@ -7,6 +7,7 @@ #include "common/common/hex.h" #include "common/common/utility.h" +#include "absl/strings/str_join.h" #include "rapidjson/document.h" #include "rapidjson/stringbuffer.h" #include "rapidjson/writer.h" @@ -33,18 +34,7 @@ void Util::mergeJsons(std::string& target, const std::string& source, void Util::addArrayToJson(std::string& target, const std::vector& json_array, const std::string& field_name) { - std::string stringified_json_array = "["; - - if (!json_array.empty()) { - stringified_json_array += json_array[0]; - for (auto it = json_array.begin() + 1; it != json_array.end(); it++) { - stringified_json_array += ","; - stringified_json_array += *it; - } - } - stringified_json_array += "]"; - - mergeJsons(target, stringified_json_array, field_name); + mergeJsons(target, absl::StrCat("[", absl::StrJoin(json_array, ","), "]"), field_name); } uint64_t Util::generateRandom64(TimeSource& time_source) { diff --git a/source/extensions/tracers/zipkin/util.h b/source/extensions/tracers/zipkin/util.h index ce86f73080e9..8b30a155ae04 100644 --- a/source/extensions/tracers/zipkin/util.h +++ b/source/extensions/tracers/zipkin/util.h @@ -5,6 +5,8 @@ #include "envoy/common/time.h" +#include "common/common/byte_order.h" + namespace Envoy { namespace Extensions { namespace Tracers { @@ -48,6 +50,28 @@ class Util { * Returns a randomly-generated 64-bit integer number. */ static uint64_t generateRandom64(TimeSource& time_source); + + /** + * Returns byte string representation of a number. + * + * @param value Number that will be represented in byte string. 
+ * @return std::string byte string representation of a number. + */ + template static std::string toByteString(Type value) { + return std::string(reinterpret_cast(&value), sizeof(Type)); + } + + /** + * Returns big endian byte string representation of a number. + * + * @param value Number that will be represented in byte string. + * @param flip indicates to flip order or not. + * @return std::string byte string representation of a number. + */ + template static std::string toBigEndianByteString(Type value) { + auto bytes = toEndianness(value); + return std::string(reinterpret_cast(&bytes), sizeof(Type)); + } }; } // namespace Zipkin diff --git a/source/extensions/tracers/zipkin/zipkin_core_constants.h b/source/extensions/tracers/zipkin/zipkin_core_constants.h index 7384df786a19..0180aeb8f22c 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_constants.h +++ b/source/extensions/tracers/zipkin/zipkin_core_constants.h @@ -13,6 +13,9 @@ namespace Zipkin { class ZipkinCoreConstantValues { public: + const std::string KIND_CLIENT = "CLIENT"; + const std::string KIND_SERVER = "SERVER"; + const std::string CLIENT_SEND = "cs"; const std::string CLIENT_RECV = "cr"; const std::string SERVER_SEND = "ss"; diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.cc b/source/extensions/tracers/zipkin/zipkin_core_types.cc index 1730e3cd75cd..16c9d688a437 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_types.cc +++ b/source/extensions/tracers/zipkin/zipkin_core_types.cc @@ -241,6 +241,9 @@ void Span::finish() { cr.setTimestamp(stop_timestamp); cr.setValue(ZipkinCoreConstants::get().CLIENT_RECV); annotations_.push_back(std::move(cr)); + } + + if (monotonic_start_time_) { const int64_t monotonic_stop_time = std::chrono::duration_cast( time_source_.monotonicTime().time_since_epoch()) .count(); diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.h b/source/extensions/tracers/zipkin/zipkin_core_types.h index 8c9ff909241f..9de9f4871620 100644 --- 
a/source/extensions/tracers/zipkin/zipkin_core_types.h +++ b/source/extensions/tracers/zipkin/zipkin_core_types.h @@ -6,11 +6,13 @@ #include "envoy/common/time.h" #include "envoy/network/address.h" +#include "common/common/assert.h" #include "common/common/hex.h" #include "extensions/tracers/zipkin/tracer_interface.h" #include "extensions/tracers/zipkin/util.h" +#include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -443,6 +445,11 @@ class Span : public ZipkinBase { */ const std::string idAsHexString() const { return Hex::uint64ToHex(id_); } + /** + * @return the span's id as a byte string. + */ + const std::string idAsByteString() const { return Util::toByteString(id_); } + /** * @return the span's name. */ @@ -460,6 +467,14 @@ class Span : public ZipkinBase { return parent_id_ ? Hex::uint64ToHex(parent_id_.value()) : EMPTY_HEX_STRING_; } + /** + * @return the span's parent id as a byte string. + */ + const std::string parentIdAsByteString() const { + ASSERT(parent_id_); + return Util::toByteString(parent_id_.value()); + } + /** * @return whether or not the debug attribute is set */ @@ -490,10 +505,21 @@ class Span : public ZipkinBase { */ const std::string traceIdAsHexString() const { return trace_id_high_.has_value() - ? Hex::uint64ToHex(trace_id_high_.value()) + Hex::uint64ToHex(trace_id_) + ? absl::StrCat(Hex::uint64ToHex(trace_id_high_.value()), Hex::uint64ToHex(trace_id_)) : Hex::uint64ToHex(trace_id_); } + /** + * @return the span's trace id as a byte string. + */ + const std::string traceIdAsByteString() const { + // https://github.com/openzipkin/zipkin-api/blob/v0.2.1/zipkin.proto#L60-L61. + return trace_id_high_.has_value() + ? absl::StrCat(Util::toBigEndianByteString(trace_id_high_.value()), + Util::toBigEndianByteString(trace_id_)) + : Util::toBigEndianByteString(trace_id_); + } + /** * @return the span's start time (monotonic, used to calculate duration). 
*/ diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index fbe84ff78662..3b269f742c01 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -56,9 +56,9 @@ void ZipkinSpan::setSampled(bool sampled) { span_.setSampled(sampled); } Tracing::SpanPtr ZipkinSpan::spawnChild(const Tracing::Config& config, const std::string& name, SystemTime start_time) { - SpanContext context(span_); - return Tracing::SpanPtr{ - new ZipkinSpan(*tracer_.startSpan(config, name, start_time, context), tracer_)}; + SpanContext previous_context(span_); + return std::make_unique( + *tracer_.startSpan(config, name, start_time, previous_context), tracer_); } Driver::TlsTracer::TlsTracer(TracerPtr&& tracer, Driver& driver) @@ -76,23 +76,26 @@ Driver::Driver(const envoy::config::trace::v2::ZipkinConfig& zipkin_config, Config::Utility::checkCluster(TracerNames::get().Zipkin, zipkin_config.collector_cluster(), cm_); cluster_ = cm_.get(zipkin_config.collector_cluster())->info(); - std::string collector_endpoint = ZipkinCoreConstants::get().DEFAULT_COLLECTOR_ENDPOINT; + CollectorInfo collector; if (!zipkin_config.collector_endpoint().empty()) { - collector_endpoint = zipkin_config.collector_endpoint(); + collector.endpoint_ = zipkin_config.collector_endpoint(); } - + // The current default version of collector_endpoint_version is HTTP_JSON_V1. 
+ collector.version_ = zipkin_config.collector_endpoint_version(); const bool trace_id_128bit = zipkin_config.trace_id_128bit(); const bool shared_span_context = PROTOBUF_GET_WRAPPED_OR_DEFAULT( zipkin_config, shared_span_context, ZipkinCoreConstants::get().DEFAULT_SHARED_SPAN_CONTEXT); + collector.shared_span_context_ = shared_span_context; - tls_->set([this, collector_endpoint, &random_generator, trace_id_128bit, shared_span_context]( + tls_->set([this, collector, &random_generator, trace_id_128bit, shared_span_context]( Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { - TracerPtr tracer(new Tracer(local_info_.clusterName(), local_info_.address(), random_generator, - trace_id_128bit, shared_span_context, time_source_)); + TracerPtr tracer = + std::make_unique(local_info_.clusterName(), local_info_.address(), random_generator, + trace_id_128bit, shared_span_context, time_source_); tracer->setReporter( - ReporterImpl::NewInstance(std::ref(*this), std::ref(dispatcher), collector_endpoint)); - return ThreadLocal::ThreadLocalObjectSharedPtr{new TlsTracer(std::move(tracer), *this)}; + ReporterImpl::NewInstance(std::ref(*this), std::ref(dispatcher), collector)); + return std::make_shared(std::move(tracer), *this); }); } @@ -117,16 +120,18 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, Http::HeaderMa } } catch (const ExtractorException& e) { - return Tracing::SpanPtr(new Tracing::NullSpan()); + return std::make_unique(); } - ZipkinSpanPtr active_span(new ZipkinSpan(*new_zipkin_span, tracer)); - return active_span; + // Return the active Zipkin span. 
+ return std::make_unique(*new_zipkin_span, tracer); } ReporterImpl::ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher, - const std::string& collector_endpoint) - : driver_(driver), collector_endpoint_(collector_endpoint) { + const CollectorInfo& collector) + : driver_(driver), + collector_(collector), span_buffer_{std::make_unique( + collector.version_, collector.shared_span_context_)} { flush_timer_ = dispatcher.createTimer([this]() -> void { driver_.tracerStats().timer_flushed_.inc(); flushSpans(); @@ -135,24 +140,23 @@ ReporterImpl::ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher, const uint64_t min_flush_spans = driver_.runtime().snapshot().getInteger("tracing.zipkin.min_flush_spans", 5U); - span_buffer_.allocateBuffer(min_flush_spans); + span_buffer_->allocateBuffer(min_flush_spans); enableTimer(); } ReporterPtr ReporterImpl::NewInstance(Driver& driver, Event::Dispatcher& dispatcher, - const std::string& collector_endpoint) { - return ReporterPtr(new ReporterImpl(driver, dispatcher, collector_endpoint)); + const CollectorInfo& collector) { + return std::make_unique(driver, dispatcher, collector); } -// TODO(fabolive): Need to avoid the copy to improve performance. 
-void ReporterImpl::reportSpan(const Span& span) { - span_buffer_.addSpan(span); +void ReporterImpl::reportSpan(Span&& span) { + span_buffer_->addSpan(std::move(span)); const uint64_t min_flush_spans = driver_.runtime().snapshot().getInteger("tracing.zipkin.min_flush_spans", 5U); - if (span_buffer_.pendingSpans() == min_flush_spans) { + if (span_buffer_->pendingSpans() == min_flush_spans) { flushSpans(); } } @@ -164,18 +168,19 @@ void ReporterImpl::enableTimer() { } void ReporterImpl::flushSpans() { - if (span_buffer_.pendingSpans()) { - driver_.tracerStats().spans_sent_.add(span_buffer_.pendingSpans()); - - const std::string request_body = span_buffer_.toStringifiedJsonArray(); - Http::MessagePtr message(new Http::RequestMessageImpl()); + if (span_buffer_->pendingSpans()) { + driver_.tracerStats().spans_sent_.add(span_buffer_->pendingSpans()); + const std::string request_body = span_buffer_->serialize(); + Http::MessagePtr message = std::make_unique(); message->headers().insertMethod().value().setReference(Http::Headers::get().MethodValues.Post); - message->headers().insertPath().value(collector_endpoint_); + message->headers().insertPath().value(collector_.endpoint_); message->headers().insertHost().value(driver_.cluster()->name()); message->headers().insertContentType().value().setReference( - Http::Headers::get().ContentTypeValues.Json); + collector_.version_ == envoy::config::trace::v2::ZipkinConfig::HTTP_PROTO + ? 
Http::Headers::get().ContentTypeValues.Protobuf + : Http::Headers::get().ContentTypeValues.Json); - Buffer::InstancePtr body(new Buffer::OwnedImpl()); + Buffer::InstancePtr body = std::make_unique(); body->add(request_body); message->body() = std::move(body); @@ -186,7 +191,7 @@ void ReporterImpl::flushSpans() { .send(std::move(message), *this, Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(timeout))); - span_buffer_.clear(); + span_buffer_->clear(); } } diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index 048b37cd18dc..4cecd015d6a3 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -11,6 +11,7 @@ #include "extensions/tracers/zipkin/span_buffer.h" #include "extensions/tracers/zipkin/tracer.h" +#include "extensions/tracers/zipkin/zipkin_core_constants.h" namespace Envoy { namespace Extensions { @@ -137,6 +138,23 @@ class Driver : public Tracing::Driver { TimeSource& time_source_; }; +/** + * Information about the Zipkin collector. + */ +struct CollectorInfo { + // The Zipkin collector endpoint/path to receive the collected trace data. e.g. /api/v1/spans if + // HTTP_JSON_V1 or /api/v2/spans otherwise. + std::string endpoint_{ZipkinCoreConstants::get().DEFAULT_COLLECTOR_ENDPOINT}; + + // The version of the collector. This is related to endpoint's supported payload specification and + // transport. Currently it defaults to envoy::config::trace::v2::ZipkinConfig::HTTP_JSON_V1. In + // the future, we will throw when collector_endpoint_version is not specified. + envoy::config::trace::v2::ZipkinConfig::CollectorEndpointVersion version_{ + envoy::config::trace::v2::ZipkinConfig::HTTP_JSON_V1}; + + bool shared_span_context_{ZipkinCoreConstants::get().DEFAULT_SHARED_SPAN_CONTEXT}; +}; + /** * This class derives from the abstract Zipkin::Reporter. 
* It buffers spans and relies on Http::AsyncClient to send spans to @@ -158,12 +176,11 @@ class ReporterImpl : public Reporter, Http::AsyncClient::Callbacks { * * @param driver ZipkinDriver to be associated with the reporter. * @param dispatcher Controls the timer used to flush buffered spans. - * @param collector_endpoint String representing the Zipkin endpoint to be used + * @param collector holds the endpoint version and path information. * when making HTTP POST requests carrying spans. This value comes from the * Zipkin-related tracing configuration. */ - ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher, - const std::string& collector_endpoint); + ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher, const CollectorInfo& collector); /** * Implementation of Zipkin::Reporter::reportSpan(). @@ -172,7 +189,7 @@ class ReporterImpl : public Reporter, Http::AsyncClient::Callbacks { * * @param span The span to be buffered. */ - void reportSpan(const Span& span) override; + void reportSpan(Span&& span) override; // Http::AsyncClient::Callbacks. // The callbacks below record Zipkin-span-related stats. @@ -184,14 +201,14 @@ class ReporterImpl : public Reporter, Http::AsyncClient::Callbacks { * * @param driver ZipkinDriver to be associated with the reporter. * @param dispatcher Controls the timer used to flush buffered spans. - * @param collector_endpoint String representing the Zipkin endpoint to be used + * @param collector holds the endpoint version and path information. * when making HTTP POST requests carrying spans. This value comes from the * Zipkin-related tracing configuration. * * @return Pointer to the newly-created ZipkinReporter. 
*/ static ReporterPtr NewInstance(Driver& driver, Event::Dispatcher& dispatcher, - const std::string& collector_endpoint); + const CollectorInfo& collector); private: /** @@ -206,8 +223,8 @@ class ReporterImpl : public Reporter, Http::AsyncClient::Callbacks { Driver& driver_; Event::TimerPtr flush_timer_; - SpanBuffer span_buffer_; - const std::string collector_endpoint_; + const CollectorInfo collector_; + SpanBufferPtr span_buffer_; }; } // namespace Zipkin } // namespace Tracers diff --git a/test/extensions/tracers/zipkin/BUILD b/test/extensions/tracers/zipkin/BUILD index 9f98e8c3ba15..a481ea737220 100644 --- a/test/extensions/tracers/zipkin/BUILD +++ b/test/extensions/tracers/zipkin/BUILD @@ -31,6 +31,7 @@ envoy_extension_cc_test( "//source/common/common:utility_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/common/protobuf:utility_lib", "//source/common/runtime:runtime_lib", "//source/extensions/tracers/zipkin:zipkin_lib", "//test/mocks:common_lib", diff --git a/test/extensions/tracers/zipkin/config_test.cc b/test/extensions/tracers/zipkin/config_test.cc index b62865dd2601..8b211fa74d10 100644 --- a/test/extensions/tracers/zipkin/config_test.cc +++ b/test/extensions/tracers/zipkin/config_test.cc @@ -49,7 +49,8 @@ TEST(ZipkinTracerConfigTest, ZipkinHttpTracerWithTypedConfig) { typed_config: "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig collector_cluster: fake_cluster - collector_endpoint: /api/v1/spans + collector_endpoint: /api/v2/spans + collector_endpoint_version: HTTP_PROTO )EOF"; envoy::config::trace::v2::Tracing configuration; diff --git a/test/extensions/tracers/zipkin/span_buffer_test.cc b/test/extensions/tracers/zipkin/span_buffer_test.cc index 320b6a909bb0..1ef6e88e6485 100644 --- a/test/extensions/tracers/zipkin/span_buffer_test.cc +++ b/test/extensions/tracers/zipkin/span_buffer_test.cc @@ -1,3 +1,5 @@ +#include "common/network/utility.h" + #include 
"extensions/tracers/zipkin/span_buffer.h" #include "test/test_common/test_time.h" @@ -10,104 +12,341 @@ namespace Tracers { namespace Zipkin { namespace { -TEST(ZipkinSpanBufferTest, defaultConstructorEndToEnd) { +enum class IpType { V4, V6 }; + +Endpoint createEndpoint(const IpType ip_type) { + Endpoint endpoint; + endpoint.setAddress(ip_type == IpType::V6 + ? Envoy::Network::Utility::parseInternetAddress( + "2001:db8:85a3::8a2e:370:4444", 7334, true) + : Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 8080, false)); + endpoint.setServiceName("service1"); + return endpoint; +} + +Annotation createAnnotation(const absl::string_view value, const IpType ip_type) { + Annotation annotation; + annotation.setValue(value.data()); + annotation.setTimestamp(1566058071601051); + annotation.setEndpoint(createEndpoint(ip_type)); + return annotation; +} + +BinaryAnnotation createTag() { + BinaryAnnotation tag; + tag.setKey("component"); + tag.setValue("proxy"); + return tag; +} + +Span createSpan(const std::vector& annotation_values, const IpType ip_type) { + DangerousDeprecatedTestTime test_time; + Span span(test_time.timeSystem()); + span.setId(1); + span.setTraceId(1); + span.setDuration(100); + std::vector annotations; + annotations.reserve(annotation_values.size()); + for (absl::string_view value : annotation_values) { + annotations.push_back(createAnnotation(value, ip_type)); + } + span.setAnnotations(annotations); + span.setBinaryAnnotations({createTag()}); + return span; +} + +void expectSerializedBuffer(SpanBuffer& buffer, const bool delay_allocation, + const std::vector& expected_list) { DangerousDeprecatedTestTime test_time; - SpanBuffer buffer; EXPECT_EQ(0ULL, buffer.pendingSpans()); - EXPECT_EQ("[]", buffer.toStringifiedJsonArray()); + EXPECT_EQ("[]", buffer.serialize()); + + if (delay_allocation) { + EXPECT_FALSE(buffer.addSpan(createSpan({"cs", "sr"}, IpType::V4))); + buffer.allocateBuffer(expected_list.size() + 1); + } + + // Add span after allocation, 
but missing required annotations should be false. EXPECT_FALSE(buffer.addSpan(Span(test_time.timeSystem()))); + EXPECT_FALSE(buffer.addSpan(createSpan({"aa"}, IpType::V4))); - buffer.allocateBuffer(2); - EXPECT_EQ(0ULL, buffer.pendingSpans()); - EXPECT_EQ("[]", buffer.toStringifiedJsonArray()); - - buffer.addSpan(Span(test_time.timeSystem())); - EXPECT_EQ(1ULL, buffer.pendingSpans()); - std::string expected_json_array_string = "[{" - R"("traceId":"0000000000000000",)" - R"("name":"",)" - R"("id":"0000000000000000",)" - R"("annotations":[],)" - R"("binaryAnnotations":[])" - "}]"; - EXPECT_EQ(expected_json_array_string, buffer.toStringifiedJsonArray()); + for (uint64_t i = 0; i < expected_list.size(); i++) { + buffer.addSpan(createSpan({"cs", "sr"}, IpType::V4)); + EXPECT_EQ(i + 1, buffer.pendingSpans()); + EXPECT_EQ(expected_list.at(i), buffer.serialize()); + } - buffer.clear(); - EXPECT_EQ(0ULL, buffer.pendingSpans()); - EXPECT_EQ("[]", buffer.toStringifiedJsonArray()); - - buffer.addSpan(Span(test_time.timeSystem())); - buffer.addSpan(Span(test_time.timeSystem())); - expected_json_array_string = "[" - "{" - R"("traceId":"0000000000000000",)" - R"("name":"",)" - R"("id":"0000000000000000",)" - R"("annotations":[],)" - R"("binaryAnnotations":[])" - "}," - "{" - R"("traceId":"0000000000000000",)" - R"("name":"",)" - R"("id":"0000000000000000",)" - R"("annotations":[],)" - R"("binaryAnnotations":[])" - "}" - "]"; - EXPECT_EQ(2ULL, buffer.pendingSpans()); - EXPECT_EQ(expected_json_array_string, buffer.toStringifiedJsonArray()); + // Add a valid span. Valid means can be serialized to v2. + EXPECT_TRUE(buffer.addSpan(createSpan({"cs"}, IpType::V4))); + // While the span is valid, however the buffer is full. 
+ EXPECT_FALSE(buffer.addSpan(createSpan({"cs", "sr"}, IpType::V4))); buffer.clear(); EXPECT_EQ(0ULL, buffer.pendingSpans()); - EXPECT_EQ("[]", buffer.toStringifiedJsonArray()); + EXPECT_EQ("[]", buffer.serialize()); } -TEST(ZipkinSpanBufferTest, sizeConstructorEndtoEnd) { - DangerousDeprecatedTestTime test_time; - SpanBuffer buffer(2); +template std::string serializedMessageToJson(const std::string& serialized) { + Type message; + message.ParseFromString(serialized); + std::string json; + Protobuf::util::MessageToJsonString(message, &json); + return json; +} - EXPECT_EQ(0ULL, buffer.pendingSpans()); - EXPECT_EQ("[]", buffer.toStringifiedJsonArray()); - - buffer.addSpan(Span(test_time.timeSystem())); - EXPECT_EQ(1ULL, buffer.pendingSpans()); - std::string expected_json_array_string = "[{" - R"("traceId":"0000000000000000",)" - R"("name":"",)" - R"("id":"0000000000000000",)" - R"("annotations":[],)" - R"("binaryAnnotations":[])" - "}]"; - EXPECT_EQ(expected_json_array_string, buffer.toStringifiedJsonArray()); +TEST(ZipkinSpanBufferTest, ConstructBuffer) { + const std::string expected1 = R"([{"traceId":"0000000000000001",)" + R"("name":"",)" + R"("id":"0000000000000001",)" + R"("duration":100,)" + R"("annotations":[{"timestamp":1566058071601051,)" + R"("value":"cs",)" + R"("endpoint":{"ipv4":"1.2.3.4",)" + R"("port":8080,)" + R"("serviceName":"service1"}},)" + R"({"timestamp":1566058071601051,)" + R"("value":"sr",)" + R"("endpoint":{"ipv4":"1.2.3.4",)" + R"("port":8080,)" + R"("serviceName":"service1"}}],)" + R"("binaryAnnotations":[{"key":"component",)" + R"("value":"proxy"}]}])"; - buffer.clear(); - EXPECT_EQ(0ULL, buffer.pendingSpans()); - EXPECT_EQ("[]", buffer.toStringifiedJsonArray()); - - buffer.addSpan(Span(test_time.timeSystem())); - buffer.addSpan(Span(test_time.timeSystem())); - expected_json_array_string = "[" - "{" - R"("traceId":"0000000000000000",)" - R"("name":"",)" - R"("id":"0000000000000000",)" - R"("annotations":[],)" - R"("binaryAnnotations":[])" 
- "}," - "{" - R"("traceId":"0000000000000000",)" - R"("name":"",)" - R"("id":"0000000000000000",)" - R"("annotations":[],)" - R"("binaryAnnotations":[])" - "}]"; - EXPECT_EQ(2ULL, buffer.pendingSpans()); - EXPECT_EQ(expected_json_array_string, buffer.toStringifiedJsonArray()); + const std::string expected2 = R"([{"traceId":"0000000000000001",)" + R"("name":"",)" + R"("id":"0000000000000001",)" + R"("duration":100,)" + R"("annotations":[{"timestamp":1566058071601051,)" + R"("value":"cs",)" + R"("endpoint":{"ipv4":"1.2.3.4",)" + R"("port":8080,)" + R"("serviceName":"service1"}},)" + R"({"timestamp":1566058071601051,)" + R"("value":"sr",)" + R"("endpoint":{"ipv4":"1.2.3.4",)" + R"("port":8080,)" + R"("serviceName":"service1"}}],)" + R"("binaryAnnotations":[{"key":"component",)" + R"("value":"proxy"}]},)" + R"({"traceId":"0000000000000001",)" + R"("name":"",)" + R"("id":"0000000000000001",)" + R"("duration":100,)" + R"("annotations":[{"timestamp":1566058071601051,)" + R"("value":"cs",)" + R"("endpoint":{"ipv4":"1.2.3.4",)" + R"("port":8080,)" + R"("serviceName":"service1"}},)" + R"({"timestamp":1566058071601051,)" + R"("value":"sr",)" + R"("endpoint":{"ipv4":"1.2.3.4",)" + R"("port":8080,)" + R"("serviceName":"service1"}}],)" + R"("binaryAnnotations":[{"key":"component",)" + R"("value":"proxy"}]}])"; + const bool shared = true; + const bool delay_allocation = true; - buffer.clear(); - EXPECT_EQ(0ULL, buffer.pendingSpans()); - EXPECT_EQ("[]", buffer.toStringifiedJsonArray()); + SpanBuffer buffer1(envoy::config::trace::v2::ZipkinConfig::HTTP_JSON_V1, shared); + expectSerializedBuffer(buffer1, delay_allocation, {expected1, expected2}); + + // Prepare 3 slots, since we will add one more inside the `expectSerializedBuffer` function. 
+ SpanBuffer buffer2(envoy::config::trace::v2::ZipkinConfig::HTTP_JSON_V1, shared, 3); + expectSerializedBuffer(buffer2, !delay_allocation, {expected1, expected2}); +} + +TEST(ZipkinSpanBufferTest, SerializeSpan) { + const bool shared = true; + SpanBuffer buffer1(envoy::config::trace::v2::ZipkinConfig::HTTP_JSON, shared, 2); + buffer1.addSpan(createSpan({"cs"}, IpType::V4)); + EXPECT_EQ("[{" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"})" + "}]", + buffer1.serialize()); + + SpanBuffer buffer1_v6(envoy::config::trace::v2::ZipkinConfig::HTTP_JSON, shared, 2); + buffer1_v6.addSpan(createSpan({"cs"}, IpType::V6)); + EXPECT_EQ("[{" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv6":"2001:db8:85a3::8a2e:370:4444",)" + R"("port":7334},)" + R"("tags":{)" + R"("component":"proxy"})" + "}]", + buffer1_v6.serialize()); + + SpanBuffer buffer2(envoy::config::trace::v2::ZipkinConfig::HTTP_JSON, shared, 2); + buffer2.addSpan(createSpan({"cs", "sr"}, IpType::V4)); + EXPECT_EQ("[{" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"}},)" + R"({)" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"SERVER",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + 
R"("tags":{)" + R"("component":"proxy"},)" + R"("shared":true)" + "}]", + buffer2.serialize()); + + SpanBuffer buffer3(envoy::config::trace::v2::ZipkinConfig::HTTP_JSON, !shared, 2); + buffer3.addSpan(createSpan({"cs", "sr"}, IpType::V4)); + EXPECT_EQ("[{" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"}},)" + R"({)" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"SERVER",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"})" + "}]", + buffer3.serialize()); + + SpanBuffer buffer4(envoy::config::trace::v2::ZipkinConfig::HTTP_PROTO, shared, 2); + buffer4.addSpan(createSpan({"cs"}, IpType::V4)); + EXPECT_EQ("{" + R"("spans":[{)" + R"("traceId":"AAAAAAAAAAE=",)" + R"("id":"AQAAAAAAAAA=",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"AQIDBA==",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"})" + "}]}", + serializedMessageToJson(buffer4.serialize())); + + SpanBuffer buffer4_v6(envoy::config::trace::v2::ZipkinConfig::HTTP_PROTO, shared, 2); + buffer4_v6.addSpan(createSpan({"cs"}, IpType::V6)); + EXPECT_EQ("{" + R"("spans":[{)" + R"("traceId":"AAAAAAAAAAE=",)" + R"("id":"AQAAAAAAAAA=",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv6":"IAENuIWjAAAAAIouA3BERA==",)" + R"("port":7334},)" + R"("tags":{)" + R"("component":"proxy"})" + "}]}", + serializedMessageToJson(buffer4_v6.serialize())); + + 
SpanBuffer buffer5(envoy::config::trace::v2::ZipkinConfig::HTTP_PROTO, shared, 2); + buffer5.addSpan(createSpan({"cs", "sr"}, IpType::V4)); + EXPECT_EQ("{" + R"("spans":[{)" + R"("traceId":"AAAAAAAAAAE=",)" + R"("id":"AQAAAAAAAAA=",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"AQIDBA==",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"}},)" + R"({)" + R"("traceId":"AAAAAAAAAAE=",)" + R"("id":"AQAAAAAAAAA=",)" + R"("kind":"SERVER",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"AQIDBA==",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"},)" + R"("shared":true)" + "}]}", + serializedMessageToJson(buffer5.serialize())); + + SpanBuffer buffer6(envoy::config::trace::v2::ZipkinConfig::HTTP_PROTO, !shared, 2); + buffer6.addSpan(createSpan({"cs", "sr"}, IpType::V4)); + EXPECT_EQ("{" + R"("spans":[{)" + R"("traceId":"AAAAAAAAAAE=",)" + R"("id":"AQAAAAAAAAA=",)" + R"("kind":"CLIENT",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"AQIDBA==",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"}},)" + R"({)" + R"("traceId":"AAAAAAAAAAE=",)" + R"("id":"AQAAAAAAAAA=",)" + R"("kind":"SERVER",)" + R"("timestamp":"1566058071601051",)" + R"("duration":"100",)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"AQIDBA==",)" + R"("port":8080},)" + R"("tags":{)" + R"("component":"proxy"})" + "}]}", + serializedMessageToJson(buffer6.serialize())); } } // namespace diff --git a/test/extensions/tracers/zipkin/tracer_test.cc b/test/extensions/tracers/zipkin/tracer_test.cc index 04fdbe3e07b4..bc5f9b9e32a9 100644 --- a/test/extensions/tracers/zipkin/tracer_test.cc +++ b/test/extensions/tracers/zipkin/tracer_test.cc @@ -28,7 +28,7 @@ 
namespace { class TestReporterImpl : public Reporter { public: TestReporterImpl(int value) : value_(value) {} - void reportSpan(const Span& span) override { reported_spans_.push_back(span); } + void reportSpan(Span&& span) override { reported_spans_.push_back(span); } int getValue() { return value_; } std::vector& reportedSpans() { return reported_spans_; } diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index b8d18baca67e..5566ed107e27 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -56,19 +56,71 @@ class ZipkinDriverTest : public testing::Test { random_, time_source_); } - void setupValidDriver() { + void setupValidDriver(const std::string& version) { EXPECT_CALL(cm_, get(Eq("fake_cluster"))).WillRepeatedly(Return(&cm_.thread_local_cluster_)); - const std::string yaml_string = R"EOF( + const std::string yaml_string = fmt::format(R"EOF( collector_cluster: fake_cluster collector_endpoint: /api/v1/spans - )EOF"; + collector_endpoint_version: {} + )EOF", + version); envoy::config::trace::v2::ZipkinConfig zipkin_config; TestUtility::loadFromYaml(yaml_string, zipkin_config); setup(zipkin_config, true); } + void expectValidFlushSeveralSpans(const std::string& version, const std::string& content_type) { + setupValidDriver(version); + + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback; + const absl::optional timeout(std::chrono::seconds(5)); + + EXPECT_CALL(cm_.async_client_, + send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout))) + .WillOnce( + Invoke([&](Http::MessagePtr& message, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + callback = &callbacks; + + EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); + EXPECT_EQ("fake_cluster", 
message->headers().Host()->value().getStringView()); + EXPECT_EQ(content_type, message->headers().ContentType()->value().getStringView()); + + return &request; + })); + + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.min_flush_spans", 5)) + .Times(2) + .WillRepeatedly(Return(2)); + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.request_timeout", 5000U)) + .WillOnce(Return(5000U)); + + Tracing::SpanPtr first_span = driver_->startSpan( + config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); + first_span->finishSpan(); + + Tracing::SpanPtr second_span = driver_->startSpan( + config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); + second_span->finishSpan(); + + Http::MessagePtr msg(new Http::ResponseMessageImpl( + Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "202"}}})); + + callback->onSuccess(std::move(msg)); + + EXPECT_EQ(2U, stats_.counter("tracing.zipkin.spans_sent").value()); + EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_failed").value()); + + callback->onFailure(Http::AsyncClient::FailureReason::Reset); + + EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_failed").value()); + } + // TODO(#4160): Currently time_system_ is initialized from DangerousDeprecatedTestTime, which uses // real time, not mock-time. 
When that is switched to use mock-time instead, I think // generateRandom64() may not be as random as we want, and we'll need to inject entropy @@ -133,58 +185,23 @@ TEST_F(ZipkinDriverTest, InitializeDriver) { } TEST_F(ZipkinDriverTest, FlushSeveralSpans) { - setupValidDriver(); - - Http::MockAsyncClientRequest request(&cm_.async_client_); - Http::AsyncClient::Callbacks* callback; - const absl::optional timeout(std::chrono::seconds(5)); - - EXPECT_CALL(cm_.async_client_, - send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout))) - .WillOnce( - Invoke([&](Http::MessagePtr& message, Http::AsyncClient::Callbacks& callbacks, - const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callback = &callbacks; - - EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/json", - message->headers().ContentType()->value().getStringView()); - - return &request; - })); - - EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.min_flush_spans", 5)) - .Times(2) - .WillRepeatedly(Return(2)); - EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.request_timeout", 5000U)) - .WillOnce(Return(5000U)); - - Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_, - start_time_, {Tracing::Reason::Sampling, true}); - first_span->finishSpan(); - - Tracing::SpanPtr second_span = driver_->startSpan(config_, request_headers_, operation_name_, - start_time_, {Tracing::Reason::Sampling, true}); - second_span->finishSpan(); - - Http::MessagePtr msg(new Http::ResponseMessageImpl( - Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "202"}}})); - - callback->onSuccess(std::move(msg)); + expectValidFlushSeveralSpans("HTTP_JSON_V1", "application/json"); +} - EXPECT_EQ(2U, stats_.counter("tracing.zipkin.spans_sent").value()); - EXPECT_EQ(1U, 
stats_.counter("tracing.zipkin.reports_sent").value()); - EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_dropped").value()); - EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_failed").value()); +TEST_F(ZipkinDriverTest, FlushSeveralSpansHttpJsonV1) { + expectValidFlushSeveralSpans("HTTP_JSON_V1", "application/json"); +} - callback->onFailure(Http::AsyncClient::FailureReason::Reset); +TEST_F(ZipkinDriverTest, FlushSeveralSpansHttpJson) { + expectValidFlushSeveralSpans("HTTP_JSON", "application/json"); +} - EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_failed").value()); +TEST_F(ZipkinDriverTest, FlushSeveralSpansHttpProto) { + expectValidFlushSeveralSpans("HTTP_PROTO", "application/x-protobuf"); } TEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); Http::MockAsyncClientRequest request(&cm_.async_client_); Http::AsyncClient::Callbacks* callback; @@ -226,7 +243,7 @@ TEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) { } TEST_F(ZipkinDriverTest, FlushSpansTimer) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); const absl::optional timeout(std::chrono::seconds(5)); EXPECT_CALL(cm_.async_client_, @@ -253,7 +270,7 @@ TEST_F(ZipkinDriverTest, FlushSpansTimer) { } TEST_F(ZipkinDriverTest, NoB3ContextSampledTrue) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); @@ -267,7 +284,7 @@ TEST_F(ZipkinDriverTest, NoB3ContextSampledTrue) { } TEST_F(ZipkinDriverTest, NoB3ContextSampledFalse) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); @@ -281,7 +298,7 @@ TEST_F(ZipkinDriverTest, NoB3ContextSampledFalse) { } TEST_F(ZipkinDriverTest, 
PropagateB3NoSampleDecisionSampleTrue) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, Hex::uint64ToHex(generateRandom64())); @@ -297,7 +314,7 @@ TEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleTrue) { } TEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleFalse) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, Hex::uint64ToHex(generateRandom64())); @@ -313,7 +330,7 @@ TEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleFalse) { } TEST_F(ZipkinDriverTest, PropagateB3NotSampled) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); @@ -335,7 +352,7 @@ TEST_F(ZipkinDriverTest, PropagateB3NotSampled) { } TEST_F(ZipkinDriverTest, PropagateB3NotSampledWithFalse) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); @@ -357,7 +374,7 @@ TEST_F(ZipkinDriverTest, PropagateB3NotSampledWithFalse) { } TEST_F(ZipkinDriverTest, PropagateB3SampledWithTrue) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); @@ -379,7 +396,7 @@ TEST_F(ZipkinDriverTest, PropagateB3SampledWithTrue) { } TEST_F(ZipkinDriverTest, PropagateB3SampleFalse) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, Hex::uint64ToHex(generateRandom64())); @@ -396,7 +413,7 @@ TEST_F(ZipkinDriverTest, PropagateB3SampleFalse) { } 
TEST_F(ZipkinDriverTest, ZipkinSpanTest) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); // ==== // Test effective setTag() @@ -476,7 +493,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersTest) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); const std::string trace_id = Hex::uint64ToHex(generateRandom64()); const std::string span_id = Hex::uint64ToHex(generateRandom64()); @@ -500,7 +517,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersTest) { } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersEmptyParentSpanTest) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); // Root span so have same trace and span id const std::string id = Hex::uint64ToHex(generateRandom64()); @@ -521,7 +538,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersEmptyParentSpanTest) { } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3Headers128TraceIdTest) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); const uint64_t trace_id_high = generateRandom64(); const uint64_t trace_id_low = generateRandom64(); @@ -549,7 +566,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3Headers128TraceIdTest) { } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidTraceIdB3HeadersTest) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, std::string("xyz")); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, @@ -563,7 +580,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidTraceIdB3HeadersTest) { } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidSpanIdB3HeadersTest) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, Hex::uint64ToHex(generateRandom64())); @@ -577,7 +594,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidSpanIdB3HeadersTest) { } TEST_F(ZipkinDriverTest, 
ZipkinSpanContextFromInvalidParentIdB3HeadersTest) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, Hex::uint64ToHex(generateRandom64())); @@ -592,7 +609,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanContextFromInvalidParentIdB3HeadersTest) { } TEST_F(ZipkinDriverTest, ExplicitlySetSampledFalse) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); @@ -609,7 +626,7 @@ TEST_F(ZipkinDriverTest, ExplicitlySetSampledFalse) { } TEST_F(ZipkinDriverTest, ExplicitlySetSampledTrue) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, false}); @@ -626,7 +643,7 @@ TEST_F(ZipkinDriverTest, ExplicitlySetSampledTrue) { } TEST_F(ZipkinDriverTest, DuplicatedHeader) { - setupValidDriver(); + setupValidDriver("HTTP_JSON_V1"); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, Hex::uint64ToHex(generateRandom64())); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, From 0b0aa3f81e6e58327966f8ffa49932c5999a8306 Mon Sep 17 00:00:00 2001 From: Jyoti Mahapatra <49211422+jyotimahapatra@users.noreply.github.com> Date: Fri, 30 Aug 2019 09:00:00 -0700 Subject: [PATCH 17/31] Route Checker tool Fix code coverage bug in proto based schema (#8101) Signed-off-by: Jyoti Mahapatra --- test/tools/router_check/router.cc | 36 +++++------ .../ComprehensiveRoutes.golden.proto.json | 63 +++++++++++++++++++ .../test/config/ComprehensiveRoutes.yaml | 9 ++- .../test/config/Weighted.golden.proto.pb_text | 4 +- test/tools/router_check/test/route_tests.sh | 6 ++ test/tools/router_check/validation.proto | 13 ++-- 6 files changed, 103 insertions(+), 28 deletions(-) create mode 100644 
test/tools/router_check/test/config/ComprehensiveRoutes.golden.proto.json diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 5eeadc7a1bb1..fbd9eaed7031 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -224,13 +224,13 @@ bool RouterCheckTool::compareCluster(ToolConfig& tool_config, const std::string& bool RouterCheckTool::compareCluster( ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) { - if (expected.cluster_name().empty()) { + if (!expected.has_cluster_name()) { return true; } if (tool_config.route_ == nullptr) { - return compareResults("", expected.cluster_name(), "cluster_name"); + return compareResults("", expected.cluster_name().value(), "cluster_name"); } - return compareCluster(tool_config, expected.cluster_name()); + return compareCluster(tool_config, expected.cluster_name().value()); } bool RouterCheckTool::compareVirtualCluster(ToolConfig& tool_config, const std::string& expected) { @@ -251,13 +251,13 @@ bool RouterCheckTool::compareVirtualCluster(ToolConfig& tool_config, const std:: bool RouterCheckTool::compareVirtualCluster( ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) { - if (expected.virtual_cluster_name().empty()) { + if (!expected.has_virtual_cluster_name()) { return true; } if (tool_config.route_ == nullptr) { - return compareResults("", expected.virtual_cluster_name(), "virtual_cluster_name"); + return compareResults("", expected.virtual_cluster_name().value(), "virtual_cluster_name"); } - return compareVirtualCluster(tool_config, expected.virtual_cluster_name()); + return compareVirtualCluster(tool_config, expected.virtual_cluster_name().value()); } bool RouterCheckTool::compareVirtualHost(ToolConfig& tool_config, const std::string& expected) { @@ -275,13 +275,13 @@ bool RouterCheckTool::compareVirtualHost(ToolConfig& tool_config, const std::str bool 
RouterCheckTool::compareVirtualHost( ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) { - if (expected.virtual_host_name().empty()) { + if (!expected.has_virtual_host_name()) { return true; } if (tool_config.route_ == nullptr) { - return compareResults("", expected.virtual_host_name(), "virtual_host_name"); + return compareResults("", expected.virtual_host_name().value(), "virtual_host_name"); } - return compareVirtualHost(tool_config, expected.virtual_host_name()); + return compareVirtualHost(tool_config, expected.virtual_host_name().value()); } bool RouterCheckTool::compareRewritePath(ToolConfig& tool_config, const std::string& expected) { @@ -306,13 +306,13 @@ bool RouterCheckTool::compareRewritePath(ToolConfig& tool_config, const std::str bool RouterCheckTool::compareRewritePath( ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) { - if (expected.path_rewrite().empty()) { + if (!expected.has_path_rewrite()) { return true; } if (tool_config.route_ == nullptr) { - return compareResults("", expected.path_rewrite(), "path_rewrite"); + return compareResults("", expected.path_rewrite().value(), "path_rewrite"); } - return compareRewritePath(tool_config, expected.path_rewrite()); + return compareRewritePath(tool_config, expected.path_rewrite().value()); } bool RouterCheckTool::compareRewriteHost(ToolConfig& tool_config, const std::string& expected) { @@ -337,13 +337,13 @@ bool RouterCheckTool::compareRewriteHost(ToolConfig& tool_config, const std::str bool RouterCheckTool::compareRewriteHost( ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) { - if (expected.host_rewrite().empty()) { + if (!expected.has_host_rewrite()) { return true; } if (tool_config.route_ == nullptr) { - return compareResults("", expected.host_rewrite(), "host_rewrite"); + return compareResults("", expected.host_rewrite().value(), "host_rewrite"); } - return 
compareRewriteHost(tool_config, expected.host_rewrite()); + return compareRewriteHost(tool_config, expected.host_rewrite().value()); } bool RouterCheckTool::compareRedirectPath(ToolConfig& tool_config, const std::string& expected) { @@ -361,13 +361,13 @@ bool RouterCheckTool::compareRedirectPath(ToolConfig& tool_config, const std::st bool RouterCheckTool::compareRedirectPath( ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected) { - if (expected.path_redirect().empty()) { + if (!expected.has_path_redirect()) { return true; } if (tool_config.route_ == nullptr) { - return compareResults("", expected.path_redirect(), "path_redirect"); + return compareResults("", expected.path_redirect().value(), "path_redirect"); } - return compareRedirectPath(tool_config, expected.path_redirect()); + return compareRedirectPath(tool_config, expected.path_redirect().value()); } bool RouterCheckTool::compareHeaderField( diff --git a/test/tools/router_check/test/config/ComprehensiveRoutes.golden.proto.json b/test/tools/router_check/test/config/ComprehensiveRoutes.golden.proto.json new file mode 100644 index 000000000000..ebc21381c347 --- /dev/null +++ b/test/tools/router_check/test/config/ComprehensiveRoutes.golden.proto.json @@ -0,0 +1,63 @@ +{ + "tests": [ + { + "test_name": "Test 1", + "input": { + "authority": "www.lyft.com", + "path": "/new_endpoint", + "method": "GET" + }, + "validate": { + "cluster_name": "www2", + "virtual_cluster_name": "other", + "virtual_host_name": "www2_host", + "path_rewrite": "/api/new_endpoint", + "host_rewrite": "www.lyft.com", + "path_redirect": "" + } + }, + { + "test_name": "Test 2", + "input": { + "authority": "www.lyft.com", + "path": "/", + "method": "GET" + }, + "validate": { + "cluster_name": "root_www2", + "virtual_cluster_name": "other", + "virtual_host_name": "www2_host", + "path_rewrite": "/", + "host_rewrite": "www.lyft.com", + "path_redirect": "" + } + }, + { + "test_name": "Test 3", + "input": { + 
"authority": "www.lyft.com", + "path": "/foobar", + "method": "GET" + }, + "validate": { + "cluster_name": "www2", + "virtual_cluster_name": "other", + "virtual_host_name": "www2_host", + "path_rewrite": "/foobar", + "host_rewrite": "www.lyft.com", + "path_redirect": "" + } + }, + { + "test_name": "Test 4", + "input": { + "authority": "www.lyft.com", + "path": "/users/123", + "method": "PUT" + }, + "validate": { + "virtual_cluster_name": "update_user" + } + } + ] +} diff --git a/test/tools/router_check/test/config/ComprehensiveRoutes.yaml b/test/tools/router_check/test/config/ComprehensiveRoutes.yaml index 0613410256ca..6efad99a099e 100644 --- a/test/tools/router_check/test/config/ComprehensiveRoutes.yaml +++ b/test/tools/router_check/test/config/ComprehensiveRoutes.yaml @@ -17,6 +17,11 @@ virtual_hosts: route: cluster: www2 virtual_clusters: - - pattern: ^/users/\d+$ - method: PUT + - headers: + - name: :path + safe_regex_match: + google_re2: {} + regex: ^/users/\d+$ + - name: :method + exact_match: PUT name: update_user diff --git a/test/tools/router_check/test/config/Weighted.golden.proto.pb_text b/test/tools/router_check/test/config/Weighted.golden.proto.pb_text index c2ab5d18c377..fa7dedcb27c5 100644 --- a/test/tools/router_check/test/config/Weighted.golden.proto.pb_text +++ b/test/tools/router_check/test/config/Weighted.golden.proto.pb_text @@ -8,7 +8,7 @@ tests { method: "GET" } validate: { - path_redirect: "" + path_redirect: { value: "" } } } @@ -21,6 +21,6 @@ tests { random_value: 115 } validate: { - cluster_name: "cluster1" + cluster_name: { value: "cluster1" } } } \ No newline at end of file diff --git a/test/tools/router_check/test/route_tests.sh b/test/tools/router_check/test/route_tests.sh index 0b06cb7425bf..1e85bea3f598 100755 --- a/test/tools/router_check/test/route_tests.sh +++ b/test/tools/router_check/test/route_tests.sh @@ -24,6 +24,12 @@ if [[ "${COVERAGE_OUTPUT}" != *"Current route coverage: "* ]] ; then exit 1 fi 
+COMP_COVERAGE_CMD="${PATH_BIN} -c ${PATH_CONFIG}/ComprehensiveRoutes.yaml -t ${PATH_CONFIG}/ComprehensiveRoutes.golden.proto.json --details --useproto -f " +COVERAGE_OUTPUT=$($COMP_COVERAGE_CMD "100" "--covall" 2>&1) || echo "${COVERAGE_OUTPUT:-no-output}" +if [[ "${COVERAGE_OUTPUT}" != *"Current route coverage: 100%"* ]] ; then + exit 1 +fi + COMP_COVERAGE_CMD="${PATH_BIN} ${PATH_CONFIG}/ComprehensiveRoutes.yaml ${PATH_CONFIG}/ComprehensiveRoutes.golden.json --details -f " COVERAGE_OUTPUT=$($COMP_COVERAGE_CMD "100" "--covall" 2>&1) || echo "${COVERAGE_OUTPUT:-no-output}" if [[ "${COVERAGE_OUTPUT}" != *"Current route coverage: 100%"* ]] ; then diff --git a/test/tools/router_check/validation.proto b/test/tools/router_check/validation.proto index 9c86153cd3e9..4ff3ab35f434 100644 --- a/test/tools/router_check/validation.proto +++ b/test/tools/router_check/validation.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.RouterCheckToolSchema; import "envoy/api/v2/core/base.proto"; +import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; // [#protodoc-title: RouterCheckTool Validation] @@ -78,22 +79,22 @@ message ValidationInput { // For example, to test that no cluster match is expected use {“cluster_name”: “”}. message ValidationAssert { // Match the cluster name. - string cluster_name = 1; + google.protobuf.StringValue cluster_name = 1; // Match the virtual cluster name. - string virtual_cluster_name = 2; + google.protobuf.StringValue virtual_cluster_name = 2; // Match the virtual host name. - string virtual_host_name = 3; + google.protobuf.StringValue virtual_host_name = 3; // Match the host header field after rewrite. - string host_rewrite = 4; + google.protobuf.StringValue host_rewrite = 4; // Match the path header field after rewrite. - string path_rewrite = 5; + google.protobuf.StringValue path_rewrite = 5; // Match the returned redirect path. 
- string path_redirect = 6; + google.protobuf.StringValue path_redirect = 6; // Match the listed header fields. // Examples header fields include the “:path”, “cookie”, and “date” fields. From 7960564746700312b4fec21711c4387794f5ba06 Mon Sep 17 00:00:00 2001 From: Xin Date: Fri, 30 Aug 2019 13:28:27 -0400 Subject: [PATCH 18/31] [hcm] Add scoped RDS routing into HCM (#7762) Description: add Scoped RDS routing logic into HCM. Changes include: * in ActiveStream constructor latch a ScopedConfig impl to the activeStream if SRDS is enabled * in the beginning of ActiveStream::decodeHeaders(headers, end_stream), get routeConfig from latched ScopedConfig impl. This PR is the 3rd in the srds impl PR chain: [#7704, #7451, this]. Risk Level: Medium Testing: unit test and integration tests. Release Notes: Add scoped RDS routing support into HCM. Signed-off-by: Xin Zhuang --- api/envoy/api/v2/srds.proto | 55 ++- .../intro/arch_overview/http/http_routing.rst | 29 ++ docs/root/intro/version_history.rst | 5 +- include/envoy/stream_info/stream_info.h | 2 + source/common/http/BUILD | 1 + source/common/http/conn_manager_impl.cc | 57 ++- source/common/http/conn_manager_impl.h | 7 +- source/common/router/scoped_rds.cc | 2 +- test/common/grpc/grpc_client_integration.h | 26 +- test/common/http/BUILD | 15 +- .../http/conn_manager_impl_fuzz_test.cc | 25 +- test/common/http/conn_manager_impl_test.cc | 282 ++++++++++++++- test/common/router/scoped_rds_test.cc | 2 +- .../scoped_rds_integration_test.cc | 340 +++++++++++++++--- test/mocks/router/mocks.h | 1 + test/test_common/utility.cc | 6 +- test/test_common/utility.h | 4 +- 17 files changed, 719 insertions(+), 140 deletions(-) diff --git a/api/envoy/api/v2/srds.proto b/api/envoy/api/v2/srds.proto index 9038cb1e3257..617fdf9ac644 100644 --- a/api/envoy/api/v2/srds.proto +++ b/api/envoy/api/v2/srds.proto @@ -2,36 +2,27 @@ syntax = "proto3"; package envoy.api.v2; -option java_outer_classname = "SrdsProto"; -option java_package = 
"io.envoyproxy.envoy.api.v2"; -option java_multiple_files = true; -option java_generic_services = true; - import "envoy/api/v2/discovery.proto"; - +import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; - import "validate/validate.proto"; -import "gogoproto/gogo.proto"; +option java_outer_classname = "SrdsProto"; +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_multiple_files = true; +option java_generic_services = true; option (gogoproto.equal_all) = true; // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` // -// .. attention:: -// -// The Scoped RDS API is not yet fully implemented and *should not* be enabled in -// :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.HttpConnectionManager`. -// -// TODO(AndresGuedez): Update :ref:`arch_overview_http_routing` with scoped routing overview and -// configuration details. - // The Scoped Routes Discovery Service (SRDS) API distributes -// :ref:`ScopedRouteConfiguration` resources. Each -// ScopedRouteConfiguration resource represents a "routing scope" containing a mapping that allows -// the HTTP connection manager to dynamically assign a routing table (specified via -// a :ref:`RouteConfiguration` message) to each HTTP request. +// :ref:`ScopedRouteConfiguration` +// resources. Each ScopedRouteConfiguration resource represents a "routing +// scope" containing a mapping that allows the HTTP connection manager to +// dynamically assign a routing table (specified via a +// :ref:`RouteConfiguration` message) to each +// HTTP request. // [#proto-status: experimental] service ScopedRoutesDiscoveryService { rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { @@ -52,9 +43,9 @@ service ScopedRoutesDiscoveryService { // :ref:`Key` to a // :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). 
// -// The HTTP connection manager builds up a table consisting of these Key to RouteConfiguration -// mappings, and looks up the RouteConfiguration to use per request according to the algorithm -// specified in the +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the // :ref:`scope_key_builder` // assigned to the HttpConnectionManager. // @@ -104,8 +95,8 @@ service ScopedRoutesDiscoveryService { // Host: foo.com // X-Route-Selector: vip=172.10.10.20 // -// would result in the routing table defined by the `route-config1` RouteConfiguration being -// assigned to the HTTP request/stream. +// would result in the routing table defined by the `route-config1` +// RouteConfiguration being assigned to the HTTP request/stream. // // [#comment:next free field: 4] // [#proto-status: experimental] @@ -115,8 +106,9 @@ message ScopedRouteConfiguration { // Specifies a key which is matched against the output of the // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP request and is dependent - // on the order of the fragments contained in the Key. + // specified in the HttpConnectionManager. The matching is done per HTTP + // request and is dependent on the order of the fragments contained in the + // Key. message Key { message Fragment { oneof type { @@ -127,14 +119,15 @@ message ScopedRouteConfiguration { } } - // The ordered set of fragments to match against. The order must match the fragments in the - // corresponding + // The ordered set of fragments to match against. The order must match the + // fragments in the corresponding // :ref:`scope_key_builder`. 
repeated Fragment fragments = 1 [(validate.rules).repeated .min_items = 1]; } - // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an RDS server to - // fetch the :ref:`envoy_api_msg_RouteConfiguration` associated with this scope. + // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated + // with this scope. string route_configuration_name = 2 [(validate.rules).string.min_bytes = 1]; // The key to match against. diff --git a/docs/root/intro/arch_overview/http/http_routing.rst b/docs/root/intro/arch_overview/http/http_routing.rst index 6a191be26821..574efa611ecf 100644 --- a/docs/root/intro/arch_overview/http/http_routing.rst +++ b/docs/root/intro/arch_overview/http/http_routing.rst @@ -50,6 +50,35 @@ request. The router filter supports the following features: * :ref:`Hash policy ` based routing. * :ref:`Absolute urls ` are supported for non-tls forward proxies. +.. _arch_overview_http_routing_route_scope: + +Route Scope +-------------- + +Scoped routing enables Envoy to put constraints on search space of domains and route rules. +A :ref:`Route Scope` associates a key with a :ref:`route table `. +For each request, a scope key is computed dynamically by the HTTP connection manager to pick the :ref:`route table`. + +The Scoped RDS (SRDS) API contains a set of :ref:`Scopes ` resources, each defining independent routing configuration, +along with a :ref:`ScopeKeyBuilder ` +defining the key construction algorithm used by Envoy to look up the scope corresponding to each request. + +For example, for the following scoped route configuration, Envoy will look into the "addr" header value, split the header value by ";" first, and use the first value for key 'x-foo-key' as the scope key. 
+If the "addr" header value is "foo=1;x-foo-key=127.0.0.1;x-bar-key=1.1.1.1", then "127.0.0.1" will be computed as the scope key to look up for corresponding route configuration. + +.. code-block:: yaml + + name: scope_by_addr + fragments: + - header_value_extractor: + name: Addr + element_separator: ; + element: + key: x-foo-key + separator: = + +.. _arch_overview_http_routing_route_table: + Route table ----------- diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index c4e6c2f2ac52..653bc4f65d6d 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -43,16 +43,17 @@ Version history ` for more information. * rbac: added conditions to the policy, see :ref:`condition `. * router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. +* router: :ref:`Scoped routing ` is supported. * router check tool: add coverage reporting & enforcement. * router check tool: add comprehensive coverage reporting. -* tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP. * router check tool: add deprecated field check. * tls: added verification of IP address SAN fields in certificates against configured SANs in the +* tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP. certificate validation context. * tracing: added tags for gRPC response status and meesage. +* upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change. * upstream: added network filter chains to upstream connections, see :ref:`filters`. * upstream: use p2c to select hosts for least-requests load balancers if all host weights are the same, even in cases where weights are not equal to 1. -* upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change. 
* zookeeper: parse responses and emit latency stats. 1.11.1 (August 13, 2019) diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 1558503ccfe5..442d8a1699a1 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -109,6 +109,8 @@ struct ResponseCodeDetailValues { // The request was rejected because it attempted an unsupported upgrade. const std::string UpgradeFailed = "upgrade_failed"; + // The request was rejected by the HCM because there was no route configuration found. + const std::string RouteConfigurationNotFound = "route_configuration_not_found"; // The request was rejected by the router filter because there was no route found. const std::string RouteNotFound = "route_not_found"; // A direct response was generated by the router filter. diff --git a/source/common/http/BUILD b/source/common/http/BUILD index a41fdf2bb925..df01a16a57db 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -164,6 +164,7 @@ envoy_cc_library( "//include/envoy/router:rds_interface", "//include/envoy/router:scopes_interface", "//include/envoy/runtime:runtime_interface", + "//include/envoy/server:admin_interface", "//include/envoy/server:overload_manager_interface", "//include/envoy/ssl:connection_interface", "//include/envoy/stats:stats_interface", diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index c830114f5107..102373f9cadf 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -12,6 +12,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/network/drain_decision.h" #include "envoy/router/router.h" +#include "envoy/server/admin.h" #include "envoy/ssl/connection.h" #include "envoy/stats/scope.h" #include "envoy/tracing/http_tracer.h" @@ -431,12 +432,27 @@ void ConnectionManagerImpl::chargeTracingStats(const Tracing::Reason& tracing_re 
ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager) : connection_manager_(connection_manager), - snapped_route_config_(connection_manager.config_.routeConfigProvider()->config()), stream_id_(connection_manager.random_generator_.random()), request_response_timespan_(new Stats::Timespan( connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSource())), stream_info_(connection_manager_.codec_->protocol(), connection_manager_.timeSource()), upstream_options_(std::make_shared()) { + // For Server::Admin, no routeConfigProvider or SRDS route provider is used. + ASSERT(dynamic_cast(&connection_manager_.config_) != nullptr || + ((connection_manager.config_.routeConfigProvider() == nullptr && + connection_manager.config_.scopedRouteConfigProvider() != nullptr) || + (connection_manager.config_.routeConfigProvider() != nullptr && + connection_manager.config_.scopedRouteConfigProvider() == nullptr)), + "Either routeConfigProvider or scopedRouteConfigProvider should be set in " + "ConnectionManagerImpl."); + if (connection_manager.config_.routeConfigProvider() != nullptr) { + snapped_route_config_ = connection_manager.config_.routeConfigProvider()->config(); + } else if (connection_manager.config_.scopedRouteConfigProvider() != nullptr) { + snapped_scoped_routes_config_ = + connection_manager_.config_.scopedRouteConfigProvider()->config(); + ASSERT(snapped_scoped_routes_config_ != nullptr, + "Scoped rds provider returns null for scoped routes config."); + } ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); @@ -613,6 +629,17 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); request_headers_ = std::move(headers); + // For Admin thread, we don't use routeConfigProvider or SRDS route provider. 
+ if (dynamic_cast(&connection_manager_.config_) == nullptr && + connection_manager_.config_.scopedRouteConfigProvider() != nullptr) { + ASSERT(snapped_route_config_ == nullptr, + "Route config already latched to the active stream when scoped RDS is enabled."); + // We need to snap snapped_route_config_ here as it's used in mutateRequestHeaders later. + if (!snapScopedRouteConfig()) { + return; + } + } + if (Http::Headers::get().MethodValues.Head == request_headers_->Method()->value().getStringView()) { is_head_request_ = true; @@ -1220,10 +1247,36 @@ void ConnectionManagerImpl::startDrainSequence() { drain_timer_->enableTimer(config_.drainTimeout()); } +bool ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() { + ASSERT(request_headers_ != nullptr, + "Try to snap scoped route config when there is no request headers."); + + snapped_route_config_ = snapped_scoped_routes_config_->getRouteConfig(*request_headers_); + // NOTE: if a RDS subscription hasn't got a RouteConfiguration back, a Router::NullConfigImpl is + // returned, in that case we let it pass. + if (snapped_route_config_ == nullptr) { + ENVOY_STREAM_LOG(trace, "can't find SRDS scope.", *this); + // Stop decoding now. + maybeEndDecode(true); + sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Http::Code::NotFound, + "route scope not found", nullptr, is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().RouteConfigurationNotFound); + return false; + } + return true; +} + void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { Router::RouteConstSharedPtr route; if (request_headers_ != nullptr) { - route = snapped_route_config_->route(*request_headers_, stream_id_); + if (dynamic_cast(&connection_manager_.config_) == nullptr && + connection_manager_.config_.scopedRouteConfigProvider() != nullptr) { + // NOTE: re-select scope as well in case the scope key header has been changed by a filter. 
+ snapScopedRouteConfig(); + } + if (snapped_route_config_ != nullptr) { + route = snapped_route_config_->route(*request_headers_, stream_id_); + } } stream_info_.route_entry_ = route ? route->routeEntry() : nullptr; cached_route_ = std::move(route); diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 84dde922406e..661871ea991a 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -496,6 +496,11 @@ class ConnectionManagerImpl : Logger::Loggable, void traceRequest(); + // Updates the snapped_route_config_ if scope found, or ends the stream by + // sending local reply. + // Returns true if scoped route config snapped, false otherwise. + bool snapScopedRouteConfig(); + void refreshCachedRoute(); // Pass on watermark callbacks to watermark subscribers. This boils down to passing watermark @@ -585,7 +590,7 @@ class ConnectionManagerImpl : Logger::Loggable, ConnectionManagerImpl& connection_manager_; Router::ConfigConstSharedPtr snapped_route_config_; - Router::ScopedConfigConstSharedPtr snapped_scoped_route_config_; + Router::ScopedConfigConstSharedPtr snapped_scoped_routes_config_; Tracing::SpanPtr active_span_; const uint64_t stream_id_; StreamEncoder* response_encoder_{}; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index 989e284668c4..9c710540930f 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -316,7 +316,7 @@ void ScopedRdsConfigSubscription::onConfigUpdate( *to_remove_repeated.Add() = scoped_route.first; } onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); -} // namespace Router +} ScopedRdsConfigProvider::ScopedRdsConfigProvider( ScopedRdsConfigSubscriptionSharedPtr&& subscription) diff --git a/test/common/grpc/grpc_client_integration.h b/test/common/grpc/grpc_client_integration.h index ff6d4d3a7b53..bdfc0c6ae1ba 100644 --- a/test/common/grpc/grpc_client_integration.h +++ 
b/test/common/grpc/grpc_client_integration.h @@ -43,7 +43,6 @@ class GrpcClientIntegrationParamTest : public BaseGrpcClientIntegrationParamTest, public testing::TestWithParam> { public: - ~GrpcClientIntegrationParamTest() override = default; static std::string protocolTestParamsToString( const ::testing::TestParamInfo>& p) { return fmt::format("{}_{}", @@ -54,10 +53,26 @@ class GrpcClientIntegrationParamTest ClientType clientType() const override { return std::get<1>(GetParam()); } }; +class DeltaSotwGrpcClientIntegrationParamTest + : public BaseGrpcClientIntegrationParamTest, + public testing::TestWithParam> { +public: + static std::string protocolTestParamsToString( + const ::testing::TestParamInfo>& + p) { + return fmt::format("{}_{}", + std::get<0>(p.param) == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6", + std::get<1>(p.param) == ClientType::GoogleGrpc ? "GoogleGrpc" : "EnvoyGrpc", + std::get<2>(p.param) ? "Delta" : "StateOfTheWorld"); + } + Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); } + ClientType clientType() const override { return std::get<1>(GetParam()); } + bool isDelta() { return std::get<2>(GetParam()); } +}; + class DeltaSotwIntegrationParamTest : public testing::TestWithParam> { public: - ~DeltaSotwIntegrationParamTest() override = default; static std::string protocolTestParamsToString( const ::testing::TestParamInfo>& p) { return fmt::format("{}_{}_{}", @@ -84,10 +99,17 @@ class DeltaSotwIntegrationParamTest #define GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc)) +#define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc), \ + testing::Bool()) #else #define GRPC_CLIENT_INTEGRATION_PARAMS \ 
testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc)) +#define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::Values(Grpc::ClientType::EnvoyGrpc), testing::Bool()) #endif // ENVOY_GOOGLE_GRPC #define DELTA_INTEGRATION_PARAMS \ diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 7982b15c462f..1766f89aa0d7 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -129,17 +129,6 @@ envoy_cc_test_library( ], ) -envoy_cc_test_library( - name = "conn_manager_impl_common_lib", - hdrs = ["conn_manager_impl_common.h"], - deps = [ - "//include/envoy/common:time_interface", - "//include/envoy/config:config_provider_interface", - "//include/envoy/router:rds_interface", - "//test/mocks/router:router_mocks", - ], -) - envoy_proto_library( name = "conn_manager_impl_fuzz_proto", srcs = ["conn_manager_impl_fuzz.proto"], @@ -153,7 +142,6 @@ envoy_cc_fuzz_test( srcs = ["conn_manager_impl_fuzz_test.cc"], corpus = "conn_manager_impl_corpus", deps = [ - ":conn_manager_impl_common_lib", ":conn_manager_impl_fuzz_proto_cc", "//source/common/common:empty_string", "//source/common/http:conn_manager_lib", @@ -167,6 +155,7 @@ envoy_cc_fuzz_test( "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", + "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/tracing:tracing_mocks", @@ -180,7 +169,6 @@ envoy_cc_test( name = "conn_manager_impl_test", srcs = ["conn_manager_impl_test.cc"], deps = [ - ":conn_manager_impl_common_lib", "//include/envoy/access_log:access_log_interface", "//include/envoy/buffer:buffer_interface", "//include/envoy/event:dispatcher_interface", @@ -207,6 +195,7 @@ envoy_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", 
"//test/mocks/network:network_mocks", + "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", "//test/mocks/ssl:ssl_mocks", diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index df9a0c2b4b35..60a097dec870 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -21,7 +21,6 @@ #include "common/network/utility.h" #include "common/stats/symbol_table_creator.h" -#include "test/common/http/conn_manager_impl_common.h" #include "test/common/http/conn_manager_impl_fuzz.pb.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" @@ -30,6 +29,7 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" +#include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/tracing/mocks.h" @@ -47,13 +47,15 @@ namespace Http { class FuzzConfig : public ConnectionManagerConfig { public: FuzzConfig() - : route_config_provider_(time_system_), scoped_route_config_provider_(time_system_), - stats_{{ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), + : stats_{{ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_}, tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_stats_))} { + ON_CALL(route_config_provider_, lastUpdated()).WillByDefault(Return(time_system_.systemTime())); + ON_CALL(scoped_route_config_provider_, lastUpdated()) + .WillByDefault(Return(time_system_.systemTime())); access_logs_.emplace_back(std::make_shared>()); } @@ -86,9 +88,17 @@ class FuzzConfig : public ConnectionManagerConfig { std::chrono::milliseconds streamIdleTimeout() const override { return stream_idle_timeout_; } 
std::chrono::milliseconds requestTimeout() const override { return request_timeout_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } - Router::RouteConfigProvider* routeConfigProvider() override { return &route_config_provider_; } + Router::RouteConfigProvider* routeConfigProvider() override { + if (use_srds_) { + return nullptr; + } + return &route_config_provider_; + } Config::ConfigProvider* scopedRouteConfigProvider() override { - return &scoped_route_config_provider_; + if (use_srds_) { + return &scoped_route_config_provider_; + } + return nullptr; } const std::string& serverName() override { return server_name_; } HttpConnectionManagerProto::ServerHeaderTransformation serverHeaderTransformation() override { @@ -124,8 +134,9 @@ class FuzzConfig : public ConnectionManagerConfig { NiceMock filter_factory_; Event::SimulatedTimeSystem time_system_; SlowDateProviderImpl date_provider_{time_system_}; - ConnectionManagerImplHelper::RouteConfigProvider route_config_provider_; - ConnectionManagerImplHelper::ScopedRouteConfigProvider scoped_route_config_provider_; + bool use_srds_{}; + Router::MockRouteConfigProvider route_config_provider_; + Router::MockScopedRouteConfigProvider scoped_route_config_provider_; std::string server_name_; HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{ HttpConnectionManagerProto::OVERWRITE}; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 496b2af6c7ea..ebf2c71d59ad 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -26,13 +26,13 @@ #include "extensions/access_loggers/file/file_access_log_impl.h" -#include "test/common/http/conn_manager_impl_common.h" #include "test/mocks/access_log/mocks.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/common.h" #include "test/mocks/http/mocks.h" #include "test/mocks/local_info/mocks.h" #include 
"test/mocks/network/mocks.h" +#include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/server/mocks.h" #include "test/mocks/ssl/mocks.h" @@ -69,9 +69,7 @@ namespace Http { class HttpConnectionManagerImplTest : public testing::Test, public ConnectionManagerConfig { public: HttpConnectionManagerImplTest() - : route_config_provider_(test_time_.timeSystem()), - scoped_route_config_provider_(test_time_.timeSystem()), - http_context_(fake_stats_.symbolTable()), access_log_path_("dummy_path"), + : http_context_(fake_stats_.symbolTable()), access_log_path_("dummy_path"), access_logs_{ AccessLog::InstanceSharedPtr{new Extensions::AccessLoggers::File::FileAccessLog( access_log_path_, {}, AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(), @@ -86,6 +84,10 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan http_context_.setTracer(tracer_); + ON_CALL(route_config_provider_, lastUpdated()) + .WillByDefault(Return(test_time_.timeSystem().systemTime())); + ON_CALL(scoped_route_config_provider_, lastUpdated()) + .WillByDefault(Return(test_time_.timeSystem().systemTime())); // response_encoder_ is not a NiceMock on purpose. This prevents complaining about this // method only. 
EXPECT_CALL(response_encoder_, getStream()).Times(AtLeast(0)); @@ -95,7 +97,8 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); } - void setup(bool ssl, const std::string& server_name, bool tracing = true) { + void setup(bool ssl, const std::string& server_name, bool tracing = true, bool use_srds = false) { + use_srds_ = use_srds; if (ssl) { ssl_connection_ = std::make_unique(); } @@ -271,9 +274,18 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan std::chrono::milliseconds streamIdleTimeout() const override { return stream_idle_timeout_; } std::chrono::milliseconds requestTimeout() const override { return request_timeout_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } - Router::RouteConfigProvider* routeConfigProvider() override { return &route_config_provider_; } + bool use_srds_{}; + Router::RouteConfigProvider* routeConfigProvider() override { + if (use_srds_) { + return nullptr; + } + return &route_config_provider_; + } Config::ConfigProvider* scopedRouteConfigProvider() override { - return &scoped_route_config_provider_; + if (use_srds_) { + return &scoped_route_config_provider_; + } + return nullptr; } const std::string& serverName() override { return server_name_; } HttpConnectionManagerProto::ServerHeaderTransformation serverHeaderTransformation() override { @@ -302,8 +314,9 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan bool shouldMergeSlashes() const override { return merge_slashes_; } DangerousDeprecatedTestTime test_time_; - ConnectionManagerImplHelper::RouteConfigProvider route_config_provider_; - ConnectionManagerImplHelper::ScopedRouteConfigProvider scoped_route_config_provider_; + NiceMock route_config_provider_; + std::shared_ptr route_config_{new NiceMock()}; + NiceMock scoped_route_config_provider_; NiceMock tracer_; 
Stats::IsolatedStoreImpl fake_stats_; Http::ContextImpl http_context_; @@ -1890,7 +1903,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledByDefault) { setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); })); @@ -1903,7 +1916,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledIfSetToZero) { request_timeout_ = std::chrono::milliseconds(0); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); })); @@ -1916,7 +1929,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutValidlyConfigured) { request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)); @@ -1932,7 +1945,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408 setup(false, ""); std::string response_body; - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); EXPECT_CALL(*request_timer, 
disableTimer()).Times(AtLeast(1)); @@ -1959,7 +1972,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); EXPECT_CALL(*request_timer, disableTimer()).Times(0); @@ -1982,7 +1995,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2058,7 +2071,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { })); EXPECT_CALL(response_encoder_, encodeHeaders(_, _)); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2084,7 +2097,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnConnectionTermin setup(false, ""); Event::MockTimer* request_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); HeaderMapPtr headers{ new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, 
{":method", "GET"}}}; @@ -4274,7 +4287,7 @@ TEST_F(HttpConnectionManagerImplTest, OverlyLongHeadersRejected) { std::string response_code; std::string response_body; - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); HeaderMapPtr headers{ new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4299,7 +4312,7 @@ TEST_F(HttpConnectionManagerImplTest, OverlyLongHeadersAcceptedIfConfigured) { max_request_headers_kb_ = 62; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).Times(1).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); HeaderMapPtr headers{ new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4493,5 +4506,236 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { } } +// SRDS no scope found. 
+TEST_F(HttpConnectionManagerImplTest, TestSRDSRouteNotFound) { + setup(false, "", true, true); + + EXPECT_CALL(*static_cast( + scopedRouteConfigProvider()->config().get()), + getRouteConfig(_)) + .WillOnce(Return(nullptr)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{ + new TestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + })); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) + .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { + EXPECT_EQ("404", headers.Status()->value().getStringView()); + })); + + std::string response_body; + EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + EXPECT_EQ(response_body, "route scope not found"); +} + +// SRDS updating scopes affects routing. 
+TEST_F(HttpConnectionManagerImplTest, TestSRDSUpdate) { + setup(false, "", true, true); + + EXPECT_CALL(*static_cast( + scopedRouteConfigProvider()->config().get()), + getRouteConfig(_)) + .Times(3) + .WillOnce(Return(nullptr)) + .WillOnce(Return(route_config_)) + .WillOnce(Return(route_config_)); // refreshCachedRoute + EXPECT_CALL(*codec_, dispatch(_)) + .Times(2) // Once for no scoped routes, once for scoped routing + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { + StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{ + new TestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + })); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) + .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { + EXPECT_EQ("404", headers.Status()->value().getStringView()); + })); + + std::string response_body; + EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + EXPECT_EQ(response_body, "route scope not found"); + + // Now route config provider returns something. + setupFilterChain(1, 0); // Recreate the chain for second stream. 
+ const std::string fake_cluster1_name = "fake_cluster1"; + std::shared_ptr route1 = std::make_shared>(); + EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name)); + std::shared_ptr fake_cluster1 = + std::make_shared>(); + EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get())); + EXPECT_CALL(*route_config_, route(_, _)).WillOnce(Return(route1)); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + Buffer::OwnedImpl fake_input2("1234"); + conn_manager_->onData(fake_input2, false); +} + +// SRDS Scope header update cause cross-scope reroute. +TEST_F(HttpConnectionManagerImplTest, TestSRDSCrossScopeReroute) { + setup(false, "", true, true); + + std::shared_ptr route_config1 = + std::make_shared>(); + std::shared_ptr route_config2 = + std::make_shared>(); + std::shared_ptr route1 = std::make_shared>(); + std::shared_ptr route2 = std::make_shared>(); + EXPECT_CALL(*route_config1, route(_, _)).WillRepeatedly(Return(route1)); + EXPECT_CALL(*route_config2, route(_, _)).WillRepeatedly(Return(route2)); + EXPECT_CALL(*static_cast( + scopedRouteConfigProvider()->config().get()), + getRouteConfig(_)) + // 1. Snap scoped route config; + // 2. refreshCachedRoute (both in decodeHeaders(headers,end_stream); + // 3. then refreshCachedRoute triggered by decoder_filters_[1]->callbacks_->route(). 
+ .Times(3) + .WillRepeatedly(Invoke([&](const HeaderMap& headers) -> Router::ConfigConstSharedPtr { + auto& test_headers = static_cast(headers); + if (test_headers.get_("scope_key") == "foo") { + return route_config1; + } + return route_config2; + })); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{new TestHeaderMapImpl{ + {":authority", "host"}, {":method", "GET"}, {"scope_key", "foo"}, {":path", "/foo"}}}; + decoder->decodeHeaders(std::move(headers), false); + data.drain(4); + })); + setupFilterChain(2, 0); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus { + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); + auto& test_headers = static_cast(headers); + // Clear cached route and change scope key to "bar". + decoder_filters_[0]->callbacks_->clearRouteCache(); + test_headers.remove("scope_key"); + test_headers.addCopy("scope_key", "bar"); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus { + auto& test_headers = static_cast(headers); + EXPECT_EQ(test_headers.get_("scope_key"), "bar"); + // Route now switched to route2 as header "scope_key" has changed. + EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route()); + EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + return FilterHeadersStatus::StopIteration; + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +// SRDS scoped RouteConfiguration found and route found. 
+TEST_F(HttpConnectionManagerImplTest, TestSRDSRouteFound) { + setup(false, "", true, true); + setupFilterChain(1, 0); + + const std::string fake_cluster1_name = "fake_cluster1"; + std::shared_ptr route1 = std::make_shared>(); + EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name)); + std::shared_ptr fake_cluster1 = + std::make_shared>(); + EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get())); + EXPECT_CALL(*scopedRouteConfigProvider()->config(), getRouteConfig(_)) + // 1. decodeHeaders() snaping route config. + // 2. refreshCachedRoute() later in the same decodeHeaders(). + .Times(2); + EXPECT_CALL( + *static_cast( + scopedRouteConfigProvider()->config()->route_config_.get()), + route(_, _)) + .WillOnce(Return(route1)); + StreamDecoder* decoder = nullptr; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{ + new TestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + })); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +class HttpConnectionManagerImplDeathTest : public HttpConnectionManagerImplTest { +public: + Router::RouteConfigProvider* routeConfigProvider() override { + return route_config_provider2_.get(); + } + Config::ConfigProvider* scopedRouteConfigProvider() override { + 
return scoped_route_config_provider2_.get(); + } + + std::shared_ptr route_config_provider2_; + std::shared_ptr scoped_route_config_provider2_; +}; + +// HCM config can only have either RouteConfigProvider or ScopedRoutesConfigProvider. +TEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) { + setup(false, ""); + + Buffer::OwnedImpl fake_input("1234"); + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + conn_manager_->newStream(response_encoder_); + })); + // Either RDS or SRDS should be set. + EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false), + "Either routeConfigProvider or scopedRouteConfigProvider should be set in " + "ConnectionManagerImpl."); + + route_config_provider2_ = std::make_shared>(); + + // Only route config provider valid. + EXPECT_NO_THROW(conn_manager_->onData(fake_input, false)); + + scoped_route_config_provider2_ = + std::make_shared>(); + // Can't have RDS and SRDS provider in the same time. + EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false), + "Either routeConfigProvider or scopedRouteConfigProvider should be set in " + "ConnectionManagerImpl."); + + route_config_provider2_.reset(); + // Only scoped route config provider valid. + EXPECT_NO_THROW(conn_manager_->onData(fake_input, false)); + +#if !defined(NDEBUG) + EXPECT_CALL(*scoped_route_config_provider2_, getConfig()).WillRepeatedly(Return(nullptr)); + // ASSERT failure when SRDS provider returns a nullptr. 
+ EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false), + "Scoped rds provider returns null for scoped routes config."); +#endif // !defined(NDEBUG) +} + } // namespace Http } // namespace Envoy diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index 00936537bc7f..8ee6cfe8aecf 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -247,7 +247,7 @@ route_configuration_name: foo_routes } // Tests that multiple uniquely named non-conflict resources are allowed in config updates. -TEST_F(ScopedRdsTest, MultipleResourcesStow) { +TEST_F(ScopedRdsTest, MultipleResourcesSotw) { setup(); const std::string config_yaml = R"EOF( diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index e14e87d08fa7..1cefe11ac397 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -4,6 +4,7 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" +#include "test/test_common/printers.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -12,12 +13,12 @@ namespace Envoy { namespace { class ScopedRdsIntegrationTest : public HttpIntegrationTest, - public Grpc::GrpcClientIntegrationParamTest { + public Grpc::DeltaSotwGrpcClientIntegrationParamTest { protected: struct FakeUpstreamInfo { FakeHttpConnectionPtr connection_; FakeUpstream* upstream_{}; - FakeStreamPtr stream_; + absl::flat_hash_map stream_by_resource_name_; }; ScopedRdsIntegrationTest() @@ -29,7 +30,15 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, } void initialize() override { + // Setup two upstream hosts, one for each cluster. + setUpstreamCount(2); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { + // Add the static cluster to serve SRDS. 
+ auto* cluster_1 = bootstrap.mutable_static_resources()->add_clusters(); + cluster_1->MergeFrom(bootstrap.static_resources().clusters()[0]); + cluster_1->set_name("cluster_1"); + // Add the static cluster to serve SRDS. auto* scoped_rds_cluster = bootstrap.mutable_static_resources()->add_clusters(); scoped_rds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); @@ -50,15 +59,16 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, fragments: - header_value_extractor: name: Addr + element_separator: ; element: key: x-foo-key - separator: ; + separator: = )EOF"; envoy::config::filter::network::http_connection_manager::v2::ScopedRoutes::ScopeKeyBuilder scope_key_builder; TestUtility::loadFromYaml(scope_key_builder_config_yaml, scope_key_builder); auto* scoped_routes = http_connection_manager.mutable_scoped_routes(); - scoped_routes->set_name("foo-scoped-routes"); + scoped_routes->set_name(srds_config_name_); *scoped_routes->mutable_scope_key_builder() = scope_key_builder; envoy::api::v2::core::ApiConfigSource* rds_api_config_source = @@ -72,7 +82,11 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, scoped_routes->mutable_scoped_rds() ->mutable_scoped_rds_config_source() ->mutable_api_config_source(); - srds_api_config_source->set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); + if (isDelta()) { + srds_api_config_source->set_api_type(envoy::api::v2::core::ApiConfigSource::DELTA_GRPC); + } else { + srds_api_config_source->set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); + } grpc_service = srds_api_config_source->add_grpc_services(); setGrpcService(*grpc_service, "srds_cluster", getScopedRdsFakeUpstream().localAddress()); }); @@ -80,6 +94,41 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, HttpIntegrationTest::initialize(); } + // TODO(stevenzzzz): move these utility methods to base classes to share with other tests. + // Helper that verifies if given headers are in the response header map. 
+ void verifyResponse(IntegrationStreamDecoderPtr response, const std::string& response_code, + const Http::TestHeaderMapImpl& expected_headers, + const std::string& expected_body) { + EXPECT_TRUE(response->complete()); + EXPECT_EQ(response_code, response->headers().Status()->value().getStringView()); + expected_headers.iterate( + [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { + auto response_headers = static_cast(context); + const Http::HeaderEntry* entry = response_headers->get( + Http::LowerCaseString{std::string(header.key().getStringView())}); + EXPECT_NE(entry, nullptr); + EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }, + const_cast(static_cast(&response->headers()))); + EXPECT_EQ(response->body(), expected_body); + } + + // Helper that sends a request to Envoy, and verifies if Envoy response headers and body size is + // the same as the expected headers map. + void sendRequestAndVerifyResponse(const Http::TestHeaderMapImpl& request_headers, + const int request_size, + const Http::TestHeaderMapImpl& response_headers, + const int response_size, const int backend_idx) { + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = sendRequestAndWaitForResponse(request_headers, request_size, response_headers, + response_size, backend_idx); + verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(request_size, upstream_request_->bodyLength()); + cleanupUpstreamAndDownstream(); + } + void createUpstreams() override { HttpIntegrationTest::createUpstreams(); // Create the SRDS upstream. 
@@ -108,38 +157,87 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, resetFakeUpstreamInfo(&scoped_rds_upstream_info_); } - FakeUpstream& getRdsFakeUpstream() const { return *fake_upstreams_[2]; } + FakeUpstream& getRdsFakeUpstream() const { return *fake_upstreams_[3]; } - FakeUpstream& getScopedRdsFakeUpstream() const { return *fake_upstreams_[1]; } + FakeUpstream& getScopedRdsFakeUpstream() const { return *fake_upstreams_[2]; } - void createStream(FakeUpstreamInfo* upstream_info, FakeUpstream& upstream) { - upstream_info->upstream_ = &upstream; - AssertionResult result = - upstream_info->upstream_->waitForHttpConnection(*dispatcher_, upstream_info->connection_); - RELEASE_ASSERT(result, result.message()); - result = upstream_info->connection_->waitForNewStream(*dispatcher_, upstream_info->stream_); + void createStream(FakeUpstreamInfo* upstream_info, FakeUpstream& upstream, + const std::string& resource_name) { + if (upstream_info->upstream_ == nullptr) { + // bind upstream if not yet. 
+ upstream_info->upstream_ = &upstream; + AssertionResult result = + upstream_info->upstream_->waitForHttpConnection(*dispatcher_, upstream_info->connection_); + RELEASE_ASSERT(result, result.message()); + } + if (!upstream_info->stream_by_resource_name_.try_emplace(resource_name, nullptr).second) { + RELEASE_ASSERT(false, + fmt::format("stream with resource name '{}' already exists!", resource_name)); + } + auto result = upstream_info->connection_->waitForNewStream( + *dispatcher_, upstream_info->stream_by_resource_name_[resource_name]); RELEASE_ASSERT(result, result.message()); - upstream_info->stream_->startGrpcStream(); + upstream_info->stream_by_resource_name_[resource_name]->startGrpcStream(); } - void createRdsStream() { createStream(&rds_upstream_info_, getRdsFakeUpstream()); } + void createRdsStream(const std::string& resource_name) { + createStream(&rds_upstream_info_, getRdsFakeUpstream(), resource_name); + } void createScopedRdsStream() { - createStream(&scoped_rds_upstream_info_, getScopedRdsFakeUpstream()); + createStream(&scoped_rds_upstream_info_, getScopedRdsFakeUpstream(), srds_config_name_); } void sendRdsResponse(const std::string& route_config, const std::string& version) { envoy::api::v2::DiscoveryResponse response; response.set_version_info(version); response.set_type_url(Config::TypeUrl::get().RouteConfiguration); - response.add_resources()->PackFrom( - TestUtility::parseYaml(route_config)); - rds_upstream_info_.stream_->sendGrpcMessage(response); + auto route_configuration = + TestUtility::parseYaml(route_config); + response.add_resources()->PackFrom(route_configuration); + ASSERT(rds_upstream_info_.stream_by_resource_name_[route_configuration.name()] != nullptr); + rds_upstream_info_.stream_by_resource_name_[route_configuration.name()]->sendGrpcMessage( + response); + } + + void sendSrdsResponse(const std::vector& sotw_list, + const std::vector& to_add_list, + const std::vector& to_delete_list, + const std::string& version) { + if 
(isDelta()) { + sendDeltaScopedRdsResponse(to_add_list, to_delete_list, version); + } else { + sendSotwScopedRdsResponse(sotw_list, version); + } + } + + void sendDeltaScopedRdsResponse(const std::vector& to_add_list, + const std::vector& to_delete_list, + const std::string& version) { + ASSERT(scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_] != nullptr); + + envoy::api::v2::DeltaDiscoveryResponse response; + response.set_system_version_info(version); + response.set_type_url(Config::TypeUrl::get().ScopedRouteConfiguration); + + for (const auto& scope_name : to_delete_list) { + *response.add_removed_resources() = scope_name; + } + for (const auto& resource_proto : to_add_list) { + envoy::api::v2::ScopedRouteConfiguration scoped_route_proto; + TestUtility::loadFromYaml(resource_proto, scoped_route_proto); + auto resource = response.add_resources(); + resource->set_name(scoped_route_proto.name()); + resource->set_version(version); + resource->mutable_resource()->PackFrom(scoped_route_proto); + } + scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_]->sendGrpcMessage( + response); } - void sendScopedRdsResponse(const std::vector& resource_protos, - const std::string& version) { - ASSERT(scoped_rds_upstream_info_.stream_ != nullptr); + void sendSotwScopedRdsResponse(const std::vector& resource_protos, + const std::string& version) { + ASSERT(scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_] != nullptr); envoy::api::v2::DiscoveryResponse response; response.set_version_info(version); @@ -150,33 +248,29 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, TestUtility::loadFromYaml(resource_proto, scoped_route_proto); response.add_resources()->PackFrom(scoped_route_proto); } - - scoped_rds_upstream_info_.stream_->sendGrpcMessage(response); + scoped_rds_upstream_info_.stream_by_resource_name_[srds_config_name_]->sendGrpcMessage( + response); } + const std::string srds_config_name_{"foo-scoped-routes"}; 
FakeUpstreamInfo scoped_rds_upstream_info_; FakeUpstreamInfo rds_upstream_info_; }; INSTANTIATE_TEST_SUITE_P(IpVersionsAndGrpcTypes, ScopedRdsIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); // Test that a SRDS DiscoveryResponse is successfully processed. TEST_P(ScopedRdsIntegrationTest, BasicSuccess) { - const std::string scope_route1 = R"EOF( -name: foo_scope1 -route_configuration_name: foo_route1 + const std::string scope_tmpl = R"EOF( +name: {} +route_configuration_name: {} key: fragments: - - string_key: x-foo-key -)EOF"; - const std::string scope_route2 = R"EOF( -name: foo_scope2 -route_configuration_name: foo_route1 -key: - fragments: - - string_key: x-bar-key + - string_key: {} )EOF"; + const std::string scope_route1 = fmt::format(scope_tmpl, "foo_scope1", "foo_route1", "foo-route"); + const std::string scope_route2 = fmt::format(scope_tmpl, "foo_scope2", "foo_route1", "bar-route"); const std::string route_config_tmpl = R"EOF( name: {} @@ -190,35 +284,127 @@ route_configuration_name: foo_route1 on_server_init_function_ = [&]() { createScopedRdsStream(); - sendScopedRdsResponse({scope_route1, scope_route2}, "1"); - createRdsStream(); - sendRdsResponse(fmt::format(route_config_tmpl, "foo_route1", "cluster_foo_1"), "1"); - sendRdsResponse(fmt::format(route_config_tmpl, "foo_route1", "cluster_foo_2"), "2"); + sendSrdsResponse({scope_route1, scope_route2}, {scope_route1, scope_route2}, {}, "1"); + createRdsStream("foo_route1"); + // CreateRdsStream waits for connection which is fired by RDS subscription. + sendRdsResponse(fmt::format(route_config_tmpl, "foo_route1", "cluster_0"), "1"); }; initialize(); - test_server_->waitForCounterGe("http.config_test.scoped_rds.foo-scoped-routes.update_attempt", 2); + registerTestServerPorts({"http"}); + + // No scope key matches "xyz-route". 
+ codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=xyz-route"}}); + response->waitForEndStream(); + verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, "route scope not found"); + cleanupUpstreamAndDownstream(); + + // Test "foo-route" and 'bar-route' both gets routed to cluster_0. + test_server_->waitForCounterGe("http.config_test.rds.foo_route1.update_success", 1); + for (const std::string& scope_key : std::vector{"foo-route", "bar-route"}) { + sendRequestAndVerifyResponse( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", fmt::format("x-foo-key={}", scope_key)}}, + 456, Http::TestHeaderMapImpl{{":status", "200"}, {"service", scope_key}}, 123, + /*cluster_0*/ 0); + } + test_server_->waitForCounterGe("http.config_test.scoped_rds.foo-scoped-routes.update_attempt", + // update_attempt only increase after a response + isDelta() ? 1 : 2); test_server_->waitForCounterGe("http.config_test.scoped_rds.foo-scoped-routes.update_success", 1); // The version gauge should be set to xxHash64("1"). test_server_->waitForGaugeEq("http.config_test.scoped_rds.foo-scoped-routes.version", 13237225503670494420UL); - const std::string scope_route3 = R"EOF( -name: foo_scope3 -route_configuration_name: foo_route1 -key: - fragments: - - string_key: x-baz-key -)EOF"; - sendScopedRdsResponse({scope_route3}, "2"); + // Add a new scope scope_route3 with a brand new RouteConfiguration foo_route2. 
+ const std::string scope_route3 = fmt::format(scope_tmpl, "foo_scope3", "foo_route2", "baz-route"); + + sendSrdsResponse({scope_route1, scope_route2, scope_route3}, /*added*/ {scope_route3}, {}, "2"); + test_server_->waitForCounterGe("http.config_test.rds.foo_route1.update_attempt", 2); + sendRdsResponse(fmt::format(route_config_tmpl, "foo_route1", "cluster_1"), "3"); + test_server_->waitForCounterGe("http.config_test.rds.foo_route1.update_success", 2); + createRdsStream("foo_route2"); + test_server_->waitForCounterGe("http.config_test.rds.foo_route2.update_attempt", 1); + sendRdsResponse(fmt::format(route_config_tmpl, "foo_route2", "cluster_0"), "1"); + test_server_->waitForCounterGe("http.config_test.rds.foo_route2.update_success", 1); test_server_->waitForCounterGe("http.config_test.scoped_rds.foo-scoped-routes.update_success", 2); + // The version gauge should be set to xxHash64("2"). test_server_->waitForGaugeEq("http.config_test.scoped_rds.foo-scoped-routes.version", 6927017134761466251UL); - test_server_->waitForCounterGe("http.config_test.rds.foo_route1.update_attempt", 3); - sendRdsResponse(fmt::format(route_config_tmpl, "foo_route1", "cluster_foo_3"), "3"); - test_server_->waitForCounterGe("http.config_test.rds.foo_route1.update_success", 3); - // RDS updates won't affect SRDS. - test_server_->waitForGaugeEq("http.config_test.scoped_rds.foo-scoped-routes.version", - 6927017134761466251UL); + // After RDS update, requests within scope 'foo_scope1' or 'foo_scope2' get routed to + // 'cluster_1'. + for (const std::string& scope_key : std::vector{"foo-route", "bar-route"}) { + sendRequestAndVerifyResponse( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", fmt::format("x-foo-key={}", scope_key)}}, + 456, Http::TestHeaderMapImpl{{":status", "200"}, {"service", scope_key}}, 123, + /*cluster_1*/ 1); + } + // Now requests within scope 'foo_scope3' get routed to 'cluster_0'. 
+ sendRequestAndVerifyResponse( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", fmt::format("x-foo-key={}", "baz-route")}}, + 456, Http::TestHeaderMapImpl{{":status", "200"}, {"service", "bluh"}}, 123, + /*cluster_0*/ 0); + + // Delete foo_scope1 and requests within the scope gets 400s. + sendSrdsResponse({scope_route2, scope_route3}, {}, {"foo_scope1"}, "3"); + test_server_->waitForCounterGe("http.config_test.scoped_rds.foo-scoped-routes.update_success", 3); + codec_client_ = makeHttpConnection(lookupPort("http")); + response = codec_client_->makeHeaderOnlyRequest( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=foo-route"}}); + response->waitForEndStream(); + verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, "route scope not found"); + cleanupUpstreamAndDownstream(); + // Add a new scope foo_scope4. + const std::string& scope_route4 = + fmt::format(scope_tmpl, "foo_scope4", "foo_route4", "xyz-route"); + sendSrdsResponse({scope_route3, scope_route2, scope_route4}, {scope_route4}, {}, "4"); + test_server_->waitForCounterGe("http.config_test.scoped_rds.foo-scoped-routes.update_success", 4); + codec_client_ = makeHttpConnection(lookupPort("http")); + response = codec_client_->makeHeaderOnlyRequest( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=xyz-route"}}); + response->waitForEndStream(); + // Get 404 because RDS hasn't pushed route configuration "foo_route4" yet. + // But scope is found and the Router::NullConfigImpl is returned. + verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + cleanupUpstreamAndDownstream(); + + // RDS updated foo_route4, requests with scope key "xyz-route" now hit cluster_1. 
+ test_server_->waitForCounterGe("http.config_test.rds.foo_route4.update_attempt", 1); + createRdsStream("foo_route4"); + sendRdsResponse(fmt::format(route_config_tmpl, "foo_route4", "cluster_1"), "3"); + test_server_->waitForCounterGe("http.config_test.rds.foo_route4.update_success", 1); + sendRequestAndVerifyResponse( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=xyz-route"}}, + 456, Http::TestHeaderMapImpl{{":status", "200"}, {"service", "xyz-route"}}, 123, + /*cluster_1 */ 1); } // Test that a bad config update updates the corresponding stats. @@ -229,16 +415,56 @@ TEST_P(ScopedRdsIntegrationTest, ConfigUpdateFailure) { route_configuration_name: foo_route1 key: fragments: - - string_key: x-foo-key + - string_key: foo )EOF"; on_server_init_function_ = [this, &scope_route1]() { createScopedRdsStream(); - sendScopedRdsResponse({scope_route1}, "1"); + sendSrdsResponse({scope_route1}, {scope_route1}, {}, "1"); }; initialize(); test_server_->waitForCounterGe("http.config_test.scoped_rds.foo-scoped-routes.update_rejected", 1); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + codec_client_->makeHeaderOnlyRequest(Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=foo"}}); + response->waitForEndStream(); + verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, "route scope not found"); + cleanupUpstreamAndDownstream(); + + // SRDS update fixed the problem. 
+ const std::string scope_route2 = R"EOF( +name: foo_scope1 +route_configuration_name: foo_route1 +key: + fragments: + - string_key: foo +)EOF"; + sendSrdsResponse({scope_route2}, {scope_route2}, {}, "1"); + test_server_->waitForCounterGe("http.config_test.rds.foo_route1.update_attempt", 1); + createRdsStream("foo_route1"); + const std::string route_config_tmpl = R"EOF( + name: {} + virtual_hosts: + - name: integration + domains: ["*"] + routes: + - match: {{ prefix: "/" }} + route: {{ cluster: {} }} +)EOF"; + sendRdsResponse(fmt::format(route_config_tmpl, "foo_route1", "cluster_0"), "1"); + test_server_->waitForCounterGe("http.config_test.rds.foo_route1.update_success", 1); + sendRequestAndVerifyResponse( + Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/meh"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=foo"}}, + 456, Http::TestHeaderMapImpl{{":status", "200"}, {"service", "bluh"}}, 123, /*cluster_0*/ 0); } } // namespace diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 06e5875c032a..b4e7c4ff7700 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -391,6 +391,7 @@ class MockRouteConfigProvider : public RouteConfigProvider { MOCK_CONST_METHOD0(configInfo, absl::optional()); MOCK_CONST_METHOD0(lastUpdated, SystemTime()); MOCK_METHOD0(onConfigUpdate, void()); + MOCK_CONST_METHOD1(validateConfig, void(const envoy::api::v2::RouteConfiguration&)); std::shared_ptr> route_config_{new NiceMock()}; }; diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 8cf850b1a300..c236ac4ce7d4 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -423,9 +423,11 @@ void TestHeaderMapImpl::addCopy(const std::string& key, const std::string& value void TestHeaderMapImpl::remove(const std::string& key) { remove(LowerCaseString(key)); } -std::string TestHeaderMapImpl::get_(const std::string& key) { return get_(LowerCaseString(key)); } +std::string 
TestHeaderMapImpl::get_(const std::string& key) const { + return get_(LowerCaseString(key)); +} -std::string TestHeaderMapImpl::get_(const LowerCaseString& key) { +std::string TestHeaderMapImpl::get_(const LowerCaseString& key) const { const HeaderEntry* header = get(key); if (!header) { return EMPTY_STRING; diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 4ff0ac413097..b4f048396ac6 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -637,8 +637,8 @@ class TestHeaderMapImpl : public HeaderMapImpl { using HeaderMapImpl::remove; void addCopy(const std::string& key, const std::string& value); void remove(const std::string& key); - std::string get_(const std::string& key); - std::string get_(const LowerCaseString& key); + std::string get_(const std::string& key) const; + std::string get_(const LowerCaseString& key) const; bool has(const std::string& key); bool has(const LowerCaseString& key); }; From 678bf8c2300de4987d20312ac1c4cdeeb3cad2fb Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 30 Aug 2019 17:20:56 -0400 Subject: [PATCH 19/31] owners: add @asraa and @lambdai to OWNERS. (#8110) * @asraa is joining Envoy OSS security team. * @lambdai is joining Friends of Envoy as v2 xDS point. Signed-off-by: Harvey Tuch --- OWNERS.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/OWNERS.md b/OWNERS.md index 8f5898ff1070..317dad396752 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -39,6 +39,7 @@ routing PRs, questions, etc. to the right place. * All maintainers * Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com) * Yan Avlasov ([yanavlasov](https://github.com/yanavlasov)) (yavlasov@google.com) +* Asra Ali ([asraa](https://github.com/asraa)) (asraa@google.com) # Emeritus maintainers @@ -60,3 +61,5 @@ matter expert reviews. Feel free to loop them in as needed. * Bazel/build. 
* Daniel Hochman ([danielhochman](https://github.com/danielhochman)) (dhochman@lyft.com) * Redis, Python, configuration/operational questions. +* Yuchen Dai ([lambdai](https://github.com/lambdai)) (lambdai@google.com) + * v2 xDS, listeners, filter chain discovery service. From dad0f2e239b39448c7e1f6504ddbb4229cf01abe Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 2 Sep 2019 10:50:31 -0400 Subject: [PATCH 20/31] protobuf: recursively validate unknown fields. (#8094) This PR unifies the recursive traversal of deprecated fields with that of unknown fields. It doesn't deal with moving to a validator visitor model for deprecation; this would be a nice cleanup that we track at https://github.com/envoyproxy/envoy/issues/8092. Risk level: Low Testing: New nested unknown field test added. Fixes #7980 Signed-off-by: Harvey Tuch --- source/common/protobuf/utility.cc | 41 +++++++++++++----------- source/common/protobuf/utility.h | 13 +++----- test/common/protobuf/utility_test.cc | 48 +++++++++++++++++++--------- test/tools/router_check/router.cc | 5 ++- 4 files changed, 64 insertions(+), 43 deletions(-) diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 4df4964b768f..b52a716f321f 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -88,21 +88,6 @@ ProtoValidationException::ProtoValidationException(const std::string& validation ENVOY_LOG_MISC(debug, "Proto validation error; throwing {}", what()); } -void MessageUtil::checkUnknownFields(const Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor) { - const auto& unknown_fields = message.GetReflection()->GetUnknownFields(message); - // If there are no unknown fields, we're done here. - if (unknown_fields.empty()) { - return; - } - std::string error_msg; - for (int n = 0; n < unknown_fields.field_count(); ++n) { - error_msg += absl::StrCat(n > 0 ? 
", " : "", unknown_fields.field(n).number()); - } - validation_visitor.onUnknownField("type " + message.GetTypeName() + " with unknown field set {" + - error_msg + "}"); -} - void MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor) { Protobuf::util::JsonParseOptions options; @@ -159,7 +144,7 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa if (absl::EndsWith(path, FileExtensions::get().ProtoBinary)) { // Attempt to parse the binary format. if (message.ParseFromString(contents)) { - MessageUtil::checkUnknownFields(message, validation_visitor); + MessageUtil::checkForUnexpectedFields(message, validation_visitor); return; } throw EnvoyException("Unable to parse file \"" + path + "\" as a binary protobuf (type " + @@ -180,7 +165,23 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa } } -void MessageUtil::checkForDeprecation(const Protobuf::Message& message, Runtime::Loader* runtime) { +void MessageUtil::checkForUnexpectedFields(const Protobuf::Message& message, + ProtobufMessage::ValidationVisitor& validation_visitor, + Runtime::Loader* runtime) { + // Reject unknown fields. + const auto& unknown_fields = message.GetReflection()->GetUnknownFields(message); + if (!unknown_fields.empty()) { + std::string error_msg; + for (int n = 0; n < unknown_fields.field_count(); ++n) { + error_msg += absl::StrCat(n > 0 ? ", " : "", unknown_fields.field(n).number()); + } + // We use the validation visitor but have hard coded behavior below for deprecated fields. + // TODO(htuch): Unify the deprecated and unknown visitor handling behind the validation + // visitor pattern. https://github.com/envoyproxy/envoy/issues/8092. 
+ validation_visitor.onUnknownField("type " + message.GetTypeName() + + " with unknown field set {" + error_msg + "}"); + } + const Protobuf::Descriptor* descriptor = message.GetDescriptor(); const Protobuf::Reflection* reflection = message.GetReflection(); for (int i = 0; i < descriptor->field_count(); ++i) { @@ -231,10 +232,12 @@ void MessageUtil::checkForDeprecation(const Protobuf::Message& message, Runtime: if (field->is_repeated()) { const int size = reflection->FieldSize(message, field); for (int j = 0; j < size; ++j) { - checkForDeprecation(reflection->GetRepeatedMessage(message, field, j), runtime); + checkForUnexpectedFields(reflection->GetRepeatedMessage(message, field, j), + validation_visitor, runtime); } } else { - checkForDeprecation(reflection->GetMessage(message, field), runtime); + checkForUnexpectedFields(reflection->GetMessage(message, field), validation_visitor, + runtime); } } } diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index d0451893d479..00ae4bceb66c 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -206,9 +206,6 @@ class MessageUtil { return HashUtil::xxHash64(text); } - static void checkUnknownFields(const Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor); - static void loadFromJson(const std::string& json, Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor); static void loadFromJson(const std::string& json, ProtobufWkt::Struct& message); @@ -225,8 +222,9 @@ class MessageUtil { * in disallowed_features in runtime_features.h */ static void - checkForDeprecation(const Protobuf::Message& message, - Runtime::Loader* loader = Runtime::LoaderSingleton::getExisting()); + checkForUnexpectedFields(const Protobuf::Message& message, + ProtobufMessage::ValidationVisitor& validation_visitor, + Runtime::Loader* loader = Runtime::LoaderSingleton::getExisting()); /** * Validate protoc-gen-validate constraints on a 
given protobuf. @@ -238,9 +236,8 @@ class MessageUtil { template static void validate(const MessageType& message, ProtobufMessage::ValidationVisitor& validation_visitor) { - // Log warnings or throw errors if deprecated fields are in use. - checkForDeprecation(message); - checkUnknownFields(message, validation_visitor); + // Log warnings or throw errors if deprecated fields or unknown fields are in use. + checkForUnexpectedFields(message, validation_visitor); std::string err; if (!Validate(message, &err)) { diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 4371e4ecc9e8..539dfb6a28f6 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1,8 +1,10 @@ #include +#include "envoy/api/v2/cds.pb.validate.h" #include "envoy/config/bootstrap/v2/bootstrap.pb.h" #include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" +#include "common/protobuf/message_validator_impl.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/runtime/runtime_impl.h" @@ -145,6 +147,19 @@ TEST_F(ProtobufUtilityTest, DowncastAndValidateUnknownFields) { "unknown field set {1}) has unknown fields"); } +// Validated exception thrown when downcastAndValidate observes a nested unknown field. 
+TEST_F(ProtobufUtilityTest, DowncastAndValidateUnknownFieldsNested) { + envoy::config::bootstrap::v2::Bootstrap bootstrap; + auto* cluster = bootstrap.mutable_static_resources()->add_clusters(); + cluster->GetReflection()->MutableUnknownFields(cluster)->AddVarint(1, 0); + EXPECT_THROW_WITH_MESSAGE(TestUtility::validate(*cluster), EnvoyException, + "Protobuf message (type envoy.api.v2.Cluster with " + "unknown field set {1}) has unknown fields"); + EXPECT_THROW_WITH_MESSAGE(TestUtility::validate(bootstrap), EnvoyException, + "Protobuf message (type envoy.api.v2.Cluster with " + "unknown field set {1}) has unknown fields"); +} + TEST_F(ProtobufUtilityTest, LoadBinaryProtoFromFile) { envoy::config::bootstrap::v2::Bootstrap bootstrap; bootstrap.mutable_cluster_manager() @@ -494,20 +509,24 @@ class DeprecatedFieldsTest : public testing::Test { NiceMock validation_visitor_; }; +void checkForDeprecation(const Protobuf::Message& message) { + MessageUtil::checkForUnexpectedFields(message, ProtobufMessage::getStrictValidationVisitor()); +} + TEST_F(DeprecatedFieldsTest, NoCrashIfRuntimeMissing) { loader_.reset(); envoy::test::deprecation_test::Base base; base.set_not_deprecated("foo"); // Fatal checks for a non-deprecated field should cause no problem. - MessageUtil::checkForDeprecation(base); + checkForDeprecation(base); } TEST_F(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) { envoy::test::deprecation_test::Base base; base.set_not_deprecated("foo"); // Fatal checks for a non-deprecated field should cause no problem. - MessageUtil::checkForDeprecation(base); + checkForDeprecation(base); EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); } @@ -517,7 +536,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecated)) // Non-fatal checks for a deprecated field should log rather than throw an exception. 
EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", - MessageUtil::checkForDeprecation(base)); + checkForDeprecation(base)); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } @@ -526,7 +545,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowed)) envoy::test::deprecation_test::Base base; base.set_is_deprecated_fatal("foo"); EXPECT_THROW_WITH_REGEX( - MessageUtil::checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), ProtoValidationException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); } @@ -537,7 +556,7 @@ TEST_F(DeprecatedFieldsTest, // Make sure this is set up right. EXPECT_THROW_WITH_REGEX( - MessageUtil::checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), ProtoValidationException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); // The config will be rejected, so the feature will not be used. EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); @@ -549,7 +568,7 @@ TEST_F(DeprecatedFieldsTest, // Now the same deprecation check should only trigger a warning. EXPECT_LOG_CONTAINS( "warning", "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'", - MessageUtil::checkForDeprecation(base)); + checkForDeprecation(base)); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } @@ -559,7 +578,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) { EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", - MessageUtil::checkForDeprecation(base)); + checkForDeprecation(base)); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); // Now create a new snapshot with this feature disallowed. 
@@ -567,7 +586,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) { {{"envoy.deprecated_features.deprecated.proto:is_deprecated", " false"}}); EXPECT_THROW_WITH_REGEX( - MessageUtil::checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), ProtoValidationException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'"); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } @@ -582,7 +601,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MixOfFatalAndWarnings)) { EXPECT_LOG_CONTAINS( "warning", "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", { EXPECT_THROW_WITH_REGEX( - MessageUtil::checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), ProtoValidationException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); }); } @@ -593,7 +612,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MessageDeprecated)) { base.mutable_deprecated_message(); EXPECT_LOG_CONTAINS( "warning", "Using deprecated option 'envoy.test.deprecation_test.Base.deprecated_message'", - MessageUtil::checkForDeprecation(base)); + checkForDeprecation(base)); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } @@ -601,15 +620,14 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(InnerMessageDeprecated)) { envoy::test::deprecation_test::Base base; base.mutable_not_deprecated_message()->set_inner_not_deprecated("foo"); // Checks for a non-deprecated field shouldn't trigger warnings - EXPECT_LOG_NOT_CONTAINS("warning", "Using deprecated option", - MessageUtil::checkForDeprecation(base)); + EXPECT_LOG_NOT_CONTAINS("warning", "Using deprecated option", checkForDeprecation(base)); base.mutable_not_deprecated_message()->set_inner_deprecated("bar"); // Checks for a deprecated sub-message should result in a warning. 
EXPECT_LOG_CONTAINS( "warning", "Using deprecated option 'envoy.test.deprecation_test.Base.InnerMessage.inner_deprecated'", - MessageUtil::checkForDeprecation(base)); + checkForDeprecation(base)); } // Check that repeated sub-messages get validated. @@ -623,7 +641,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(SubMessageDeprecated)) { EXPECT_LOG_CONTAINS("warning", "Using deprecated option " "'envoy.test.deprecation_test.Base.InnerMessage.inner_deprecated'", - MessageUtil::checkForDeprecation(base)); + checkForDeprecation(base)); } // Check that deprecated repeated messages trigger @@ -635,7 +653,7 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RepeatedMessageDeprecated)) EXPECT_LOG_CONTAINS("warning", "Using deprecated option " "'envoy.test.deprecation_test.Base.deprecated_repeated_message'", - MessageUtil::checkForDeprecation(base)); + checkForDeprecation(base)); } class TimestampUtilTest : public testing::Test, public ::testing::WithParamInterface {}; diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index fbd9eaed7031..e8e2143f4b8f 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -6,6 +6,7 @@ #include #include "common/network/utility.h" +#include "common/protobuf/message_validator_impl.h" #include "common/protobuf/utility.h" #include "common/stream_info/stream_info_impl.h" @@ -74,7 +75,9 @@ RouterCheckTool RouterCheckTool::create(const std::string& router_config_file, auto factory_context = std::make_unique>(); auto config = std::make_unique(route_config, *factory_context, false); if (!disableDeprecationCheck) { - MessageUtil::checkForDeprecation(route_config, &factory_context->runtime_loader_); + MessageUtil::checkForUnexpectedFields(route_config, + ProtobufMessage::getStrictValidationVisitor(), + &factory_context->runtime_loader_); } return RouterCheckTool(std::move(factory_context), std::move(config), std::move(stats), From 0eab93b59eefd3d4ce45f5934186ffdec9e9ff37 
Mon Sep 17 00:00:00 2001 From: Cynthia Coan Date: Mon, 2 Sep 2019 20:20:07 -0600 Subject: [PATCH 21/31] Fuzz reuse (#8119) This PR allows the envoy_cc_fuzz_test rule to be used when pulling in envoy. which can be useful when you're writing filters for envoy, and want to reuse the fuzzing architecture envoy has already built. other rules already allow for this (see envoy_cc_test in this same file for example). Risk Level: Low Testing: Testing the Old Rule Still Works It is possible to test the old rules still work (even without specifying a repository), by simply choosing your favorite fuzz test, and choosing to run bazel test on it. For example: bazel test //test/common/router:header_parser_fuzz_test. Any envoy_cc_fuzz_test rule should do. Testing New Rules Work I've done testing inside my own repository, but if you want to create your own test rule you can probably do the following in envoy-filter-example: Checkout envoy-filter-example, and update the envoy submodule to this pr. Follow the directions in: test/fuzz/README.md to define a envoy_cc_fuzz_test rule. Make sure to add a line for: repository = "@envoy" which is the new argument being added. You should be able to run the fuzz test. Signed-off-by: Cynthia Coan --- bazel/envoy_test.bzl | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index e6985f92a1f3..73fdff21dc2d 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -63,8 +63,15 @@ def _envoy_test_linkopts(): }) + envoy_select_force_libcpp([], ["-lstdc++fs", "-latomic"]) # Envoy C++ fuzz test targets. These are not included in coverage runs. 
-def envoy_cc_fuzz_test(name, corpus, deps = [], tags = [], **kwargs): - if not (corpus.startswith("//") or corpus.startswith(":")): +def envoy_cc_fuzz_test( + name, + corpus, + repository = "", + size = "medium", + deps = [], + tags = [], + **kwargs): + if not (corpus.startswith("//") or corpus.startswith(":") or corpus.startswith("@")): corpus_name = name + "_corpus" corpus = native.glob([corpus + "/**"]) native.filegroup( @@ -81,7 +88,11 @@ def envoy_cc_fuzz_test(name, corpus, deps = [], tags = [], **kwargs): test_lib_name = name + "_lib" envoy_cc_test_library( name = test_lib_name, - deps = deps + ["//test/fuzz:fuzz_runner_lib", "//bazel:dynamic_stdlib"], + deps = deps + [ + repository + "//test/fuzz:fuzz_runner_lib", + repository + "//bazel:dynamic_stdlib", + ], + repository = repository, **kwargs ) native.cc_test( @@ -93,12 +104,13 @@ def envoy_cc_fuzz_test(name, corpus, deps = [], tags = [], **kwargs): data = [corpus_name], # No fuzzing on macOS. deps = select({ - "@envoy//bazel:apple": ["//test:dummy_main"], + "@envoy//bazel:apple": [repository + "//test:dummy_main"], "//conditions:default": [ ":" + test_lib_name, - "//test/fuzz:main", + repository + "//test/fuzz:main", ], }), + size = size, tags = tags, ) From 0b026cfa01287e55fa7b2adafcaba3a106d1f609 Mon Sep 17 00:00:00 2001 From: John Millikin Date: Tue, 3 Sep 2019 11:21:05 +0900 Subject: [PATCH 22/31] Set INCLUDE_DIRECTORIES so libcurl can find local urlapi.h (#8113) Fixes https://github.com/envoyproxy/envoy/issues/8112 Signed-off-by: John Millikin --- bazel/foreign_cc/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 82fcae17b6a6..1a96306c001b 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -75,6 +75,7 @@ envoy_cmake_external( "CURL_HIDDEN_SYMBOLS": "off", "CMAKE_USE_LIBSSH2": "off", "CMAKE_INSTALL_LIBDIR": "lib", + "INCLUDE_DIRECTORIES": "include/curl", }, lib_source = "@com_github_curl//:all", static_libraries = 
select({ From 911f3b0a08b7a8d724b42caab0b1f7398ab6efc2 Mon Sep 17 00:00:00 2001 From: Xin Date: Mon, 2 Sep 2019 22:26:10 -0400 Subject: [PATCH 23/31] cleanup: move test utility methods in ScopedRdsIntegrationTest to base class HttpIntegrationTest (#8108) Fixes #8050 Risk Level: LOW [refactor only] Signed-off-by: Xin Zhuang --- test/integration/http_integration.cc | 34 ++++++++++++++++++ test/integration/http_integration.h | 14 ++++++++ .../scoped_rds_integration_test.cc | 35 ------------------- 3 files changed, 48 insertions(+), 35 deletions(-) diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 62d2eb45ba41..62090c09d7c9 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -304,6 +304,40 @@ void HttpIntegrationTest::cleanupUpstreamAndDownstream() { } } +void HttpIntegrationTest::sendRequestAndVerifyResponse( + const Http::TestHeaderMapImpl& request_headers, const int request_size, + const Http::TestHeaderMapImpl& response_headers, const int response_size, + const int backend_idx) { + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = sendRequestAndWaitForResponse(request_headers, request_size, response_headers, + response_size, backend_idx); + verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(request_size, upstream_request_->bodyLength()); + cleanupUpstreamAndDownstream(); +} + +void HttpIntegrationTest::verifyResponse(IntegrationStreamDecoderPtr response, + const std::string& response_code, + const Http::TestHeaderMapImpl& expected_headers, + const std::string& expected_body) { + EXPECT_TRUE(response->complete()); + EXPECT_EQ(response_code, response->headers().Status()->value().getStringView()); + expected_headers.iterate( + [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { + auto response_headers = static_cast(context); + 
const Http::HeaderEntry* entry = + response_headers->get(Http::LowerCaseString{std::string(header.key().getStringView())}); + EXPECT_NE(entry, nullptr); + EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }, + const_cast(static_cast(&response->headers()))); + + EXPECT_EQ(response->body(), expected_body); +} + uint64_t HttpIntegrationTest::waitForNextUpstreamRequest(const std::vector& upstream_indices) { uint64_t upstream_with_request; diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index bbeda044911e..1a2193556e65 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -138,6 +138,20 @@ class HttpIntegrationTest : public BaseIntegrationTest { // Close |codec_client_| and |fake_upstream_connection_| cleanly. void cleanupUpstreamAndDownstream(); + // Verifies the response_headers contains the expected_headers, and response body matches given + // body string. + void verifyResponse(IntegrationStreamDecoderPtr response, const std::string& response_code, + const Http::TestHeaderMapImpl& expected_headers, + const std::string& expected_body); + + // Helper that sends a request to Envoy, and verifies if Envoy response headers and body size is + // the same as the expected headers map. + // Requires the "http" port has been registered. + void sendRequestAndVerifyResponse(const Http::TestHeaderMapImpl& request_headers, + const int request_size, + const Http::TestHeaderMapImpl& response_headers, + const int response_size, const int backend_idx); + // Check for completion of upstream_request_, and a simple "200" response. 
void checkSimpleRequestSuccess(uint64_t expected_request_size, uint64_t expected_response_size, IntegrationStreamDecoder* response); diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index 1cefe11ac397..4bcc2a38997b 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -94,41 +94,6 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, HttpIntegrationTest::initialize(); } - // TODO(stevenzzzz): move these utility methods to base classes to share with other tests. - // Helper that verifies if given headers are in the response header map. - void verifyResponse(IntegrationStreamDecoderPtr response, const std::string& response_code, - const Http::TestHeaderMapImpl& expected_headers, - const std::string& expected_body) { - EXPECT_TRUE(response->complete()); - EXPECT_EQ(response_code, response->headers().Status()->value().getStringView()); - expected_headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - auto response_headers = static_cast(context); - const Http::HeaderEntry* entry = response_headers->get( - Http::LowerCaseString{std::string(header.key().getStringView())}); - EXPECT_NE(entry, nullptr); - EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - const_cast(static_cast(&response->headers()))); - EXPECT_EQ(response->body(), expected_body); - } - - // Helper that sends a request to Envoy, and verifies if Envoy response headers and body size is - // the same as the expected headers map. 
- void sendRequestAndVerifyResponse(const Http::TestHeaderMapImpl& request_headers, - const int request_size, - const Http::TestHeaderMapImpl& response_headers, - const int response_size, const int backend_idx) { - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = sendRequestAndWaitForResponse(request_headers, request_size, response_headers, - response_size, backend_idx); - verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); - EXPECT_TRUE(upstream_request_->complete()); - EXPECT_EQ(request_size, upstream_request_->bodyLength()); - cleanupUpstreamAndDownstream(); - } - void createUpstreams() override { HttpIntegrationTest::createUpstreams(); // Create the SRDS upstream. From 1339ed2d4994392be93a3e2f9949afd40451e13a Mon Sep 17 00:00:00 2001 From: Andres Guedez <34292400+AndresGuedez@users.noreply.github.com> Date: Mon, 2 Sep 2019 22:27:50 -0400 Subject: [PATCH 24/31] upstream: fix invalid access of ClusterMap iterator during warming cluster modification (#8106) Risk Level: Medium Testing: New unit test added. Fix verified via --config=asan. Signed-off-by: Andres Guedez --- .../common/upstream/cluster_manager_impl.cc | 19 ++++- source/common/upstream/cluster_manager_impl.h | 6 ++ .../upstream/cluster_manager_impl_test.cc | 72 +++++++++++++++++++ test/common/upstream/utility.h | 18 ++--- 4 files changed, 104 insertions(+), 11 deletions(-) diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index cad4bd02d679..8a1b32cb90d8 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -487,9 +487,22 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::api::v2::Cluster& clust if (existing_active_cluster != active_clusters_.end() || existing_warming_cluster != warming_clusters_.end()) { - // The following init manager remove call is a NOP in the case we are already initialized. 
It's - // just kept here to avoid additional logic. - init_helper_.removeCluster(*existing_active_cluster->second->cluster_); + if (existing_active_cluster != active_clusters_.end()) { + // The following init manager remove call is a NOP in the case we are already initialized. + // It's just kept here to avoid additional logic. + init_helper_.removeCluster(*existing_active_cluster->second->cluster_); + } else { + // Validate that warming clusters are not added to the init_helper_. + // NOTE: This loop is compiled out in optimized builds. + for (const std::list& cluster_list : + {std::cref(init_helper_.primary_init_clusters_), + std::cref(init_helper_.secondary_init_clusters_)}) { + ASSERT(!std::any_of(cluster_list.begin(), cluster_list.end(), + [&existing_warming_cluster](Cluster* cluster) { + return existing_warming_cluster->second->cluster_.get() == cluster; + })); + } + } cm_stats_.cluster_modified_.inc(); } else { cm_stats_.cluster_added_.inc(); diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index e45e9752372f..cb45bb14aca7 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -90,6 +90,9 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { Singleton::Manager& singleton_manager_; }; +// For friend declaration in ClusterManagerInitHelper. +class ClusterManagerImpl; + /** * This is a helper class used during cluster management initialization. Dealing with primary * clusters, secondary clusters, and CDS, is quite complicated, so this makes it easier to test. @@ -129,6 +132,9 @@ class ClusterManagerInitHelper : Logger::Loggable { State state() const { return state_; } private: + // To enable invariant assertions on the cluster lists. 
+ friend ClusterManagerImpl; + void initializeSecondaryClusters(); void maybeFinishInitialize(); void onClusterInit(Cluster& cluster); diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index f9632b12da76..6fbc2e5afea2 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -1234,6 +1234,78 @@ TEST_F(ClusterManagerImplTest, RemoveWarmingCluster) { EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); } +TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { + time_system_.setSystemTime(std::chrono::milliseconds(1234567891234)); + create(defaultConfig()); + + InSequence s; + ReadyWatcher initialized; + EXPECT_CALL(initialized, ready()); + cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); + + // Add a "fake_cluster" in warming state. + std::shared_ptr cluster1 = + std::make_shared>(); + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) + .WillOnce(Return(std::make_pair(cluster1, nullptr))); + EXPECT_CALL(*cluster1, initializePhase()).Times(0); + EXPECT_CALL(*cluster1, initialize(_)); + EXPECT_TRUE( + cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version1")); + checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); + EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); + checkConfigDump(R"EOF( + dynamic_warming_clusters: + - version_info: "version1" + cluster: + name: "fake_cluster" + type: STATIC + connect_timeout: 0.25s + hosts: + - socket_address: + address: "127.0.0.1" + port_value: 11001 + last_updated: + seconds: 1234567891 + nanos: 234000000 + )EOF"); + + // Update the warming cluster that was just added. 
+ std::shared_ptr cluster2 = + std::make_shared>(); + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) + .WillOnce(Return(std::make_pair(cluster2, nullptr))); + EXPECT_CALL(*cluster2, initializePhase()).Times(0); + EXPECT_CALL(*cluster2, initialize(_)); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster( + parseClusterFromV2Json(fmt::sprintf(kDefaultStaticClusterTmpl, "fake_cluster", + R"EOF( +"socket_address": { + "address": "127.0.0.1", + "port_value": 11002 +})EOF")), + "version2")); + checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); + checkConfigDump(R"EOF( + dynamic_warming_clusters: + - version_info: "version2" + cluster: + name: "fake_cluster" + type: STATIC + connect_timeout: 0.25s + hosts: + - socket_address: + address: "127.0.0.1" + port_value: 11002 + last_updated: + seconds: 1234567891 + nanos: 234000000 + )EOF"); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster2.get())); +} + // Verify that shutting down the cluster manager destroys warming clusters. 
TEST_F(ClusterManagerImplTest, ShutdownWithWarming) { create(defaultConfig()); diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index b5da2071d09b..b41b9cbfd280 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -15,8 +15,7 @@ namespace Envoy { namespace Upstream { namespace { -inline std::string defaultStaticClusterJson(const std::string& name) { - return fmt::sprintf(R"EOF( +constexpr static const char* kDefaultStaticClusterTmpl = R"EOF( { "name": "%s", "connect_timeout": "0.250s", @@ -24,15 +23,18 @@ inline std::string defaultStaticClusterJson(const std::string& name) { "lb_policy": "round_robin", "hosts": [ { - "socket_address": { - "address": "127.0.0.1", - "port_value": 11001 - } + %s, } ] } - )EOF", - name); + )EOF"; + +inline std::string defaultStaticClusterJson(const std::string& name) { + return fmt::sprintf(kDefaultStaticClusterTmpl, name, R"EOF( +"socket_address": { + "address": "127.0.0.1", + "port_value": 11001 +})EOF"); } inline envoy::config::bootstrap::v2::Bootstrap From b28edcae3da369ed9969796f89118c1f898a27d9 Mon Sep 17 00:00:00 2001 From: jaychenatr <54647402+jaychenatr@users.noreply.github.com> Date: Mon, 2 Sep 2019 19:29:05 -0700 Subject: [PATCH 25/31] api:Add a flag to disable overprovisioning in ClusterLoadAssignment (#8080) * api:Add a flag to disable overprovisioning in ClusterLoadAssignment Signed-off-by: Jie Chen * api:Add [#next-major-version and [#not-implemented-hide to the comment for field of disable_overprovisioning in ClusterLoadAssignment Signed-off-by: Jie Chen * api:Refine comments for the new added bool flag as suggested. 
Signed-off-by: Jie Chen --- api/envoy/api/v2/eds.proto | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index d680ef7ea5aa..1719ad6d2c34 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -117,6 +117,17 @@ message ClusterLoadAssignment { // are considered stale and should be marked unhealthy. // Defaults to 0 which means endpoints never go stale. google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration.gt.seconds = 0]; + + // The flag to disable overprovisioning. If it is set to true, + // :ref:`overprovisioning factor + // ` will be ignored + // and Envoy will not perform graceful failover between priority levels or + // localities as endpoints become unhealthy. Otherwise Envoy will perform + // graceful failover as :ref:`overprovisioning factor + // ` suggests. + // [#next-major-version: Unify with overprovisioning config as a single message.] + // [#not-implemented-hide:] + bool disable_overprovisioning = 5; } // Load balancing policy settings. From 085d72b490c124a02849812798f5513a8df9ae72 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 3 Sep 2019 14:49:12 -0400 Subject: [PATCH 26/31] api: clone v2[alpha] to v3alpha. (#8125) This patch establishes a v3alpha baseline API, by doing a simple copy of v2[alpha] dirs and some sed-style heuristic fixups of BUILD dependencies and proto package namespaces. The objective is provide a baseline which we can compare the output from tooling described in #8083 in later PRs, providing smaller visual diffs. The core philosophy of the API migration is that every step will be captured in a script (at least until the last manual steps), api/migration/v3alpha.sh. This script will capture deterministic migration steps, allowing v2[alpha] to continue to be updated until we finalize v3. There is likely to be significant changes, e.g. 
in addition to the work scoped for v3, we might want to reduce the amount of API churn by referring back to v2 protos where it makes sense. This will be done via tooling in later PRs. Part of #8083. Risk level: Low Testing: build @envoy_api//... Signed-off-by: Harvey Tuch --- api/envoy/admin/v3alpha/BUILD | 76 + api/envoy/admin/v3alpha/certs.proto | 57 + api/envoy/admin/v3alpha/clusters.proto | 143 ++ api/envoy/admin/v3alpha/config_dump.proto | 266 ++++ api/envoy/admin/v3alpha/listeners.proto | 28 + api/envoy/admin/v3alpha/memory.proto | 37 + api/envoy/admin/v3alpha/metrics.proto | 26 + api/envoy/admin/v3alpha/mutex_stats.proto | 28 + api/envoy/admin/v3alpha/server_info.proto | 137 ++ api/envoy/admin/v3alpha/tap.proto | 20 + api/envoy/api/v3alpha/BUILD | 171 ++ api/envoy/api/v3alpha/README.md | 9 + api/envoy/api/v3alpha/auth/BUILD | 35 + api/envoy/api/v3alpha/auth/cert.proto | 407 +++++ api/envoy/api/v3alpha/cds.proto | 660 ++++++++ api/envoy/api/v3alpha/cluster/BUILD | 48 + .../api/v3alpha/cluster/circuit_breaker.proto | 70 + api/envoy/api/v3alpha/cluster/filter.proto | 29 + .../v3alpha/cluster/outlier_detection.proto | 117 ++ api/envoy/api/v3alpha/core/BUILD | 136 ++ api/envoy/api/v3alpha/core/address.proto | 121 ++ api/envoy/api/v3alpha/core/base.proto | 292 ++++ .../api/v3alpha/core/config_source.proto | 126 ++ api/envoy/api/v3alpha/core/grpc_service.proto | 173 ++ api/envoy/api/v3alpha/core/health_check.proto | 271 ++++ api/envoy/api/v3alpha/core/http_uri.proto | 54 + api/envoy/api/v3alpha/core/protocol.proto | 157 ++ api/envoy/api/v3alpha/discovery.proto | 230 +++ api/envoy/api/v3alpha/eds.proto | 135 ++ api/envoy/api/v3alpha/endpoint/BUILD | 49 + api/envoy/api/v3alpha/endpoint/endpoint.proto | 129 ++ .../api/v3alpha/endpoint/load_report.proto | 148 ++ api/envoy/api/v3alpha/lds.proto | 206 +++ api/envoy/api/v3alpha/listener/BUILD | 41 + api/envoy/api/v3alpha/listener/listener.proto | 210 +++ .../listener/udp_listener_config.proto | 31 + 
api/envoy/api/v3alpha/ratelimit/BUILD | 14 + .../api/v3alpha/ratelimit/ratelimit.proto | 66 + api/envoy/api/v3alpha/rds.proto | 135 ++ api/envoy/api/v3alpha/route/BUILD | 28 + api/envoy/api/v3alpha/route/route.proto | 1404 +++++++++++++++++ api/envoy/api/v3alpha/srds.proto | 135 ++ api/envoy/config/accesslog/v3alpha/BUILD | 22 + api/envoy/config/accesslog/v3alpha/als.proto | 64 + api/envoy/config/accesslog/v3alpha/file.proto | 32 + api/envoy/config/bootstrap/v3alpha/BUILD | 40 + .../config/bootstrap/v3alpha/bootstrap.proto | 318 ++++ .../dynamic_forward_proxy/v3alpha/BUILD | 11 + .../v3alpha/cluster.proto | 24 + .../dynamic_forward_proxy/v3alpha/BUILD | 12 + .../v3alpha/dns_cache.proto | 69 + api/envoy/config/common/tap/v3alpha/BUILD | 13 + .../config/common/tap/v3alpha/common.proto | 51 + .../config/filter/accesslog/v3alpha/BUILD | 28 + .../filter/accesslog/v3alpha/accesslog.proto | 248 +++ api/envoy/config/filter/fault/v3alpha/BUILD | 13 + .../config/filter/fault/v3alpha/fault.proto | 84 + .../http/adaptive_concurrency/v3alpha/BUILD | 11 + .../v3alpha/adaptive_concurrency.proto | 11 + .../config/filter/http/buffer/v3alpha/BUILD | 8 + .../filter/http/buffer/v3alpha/buffer.proto | 36 + .../config/filter/http/csrf/v3alpha/BUILD | 12 + .../filter/http/csrf/v3alpha/csrf.proto | 51 + .../http/dynamic_forward_proxy/v3alpha/BUILD | 11 + .../v3alpha/dynamic_forward_proxy.proto | 24 + .../filter/http/ext_authz/v3alpha/BUILD | 15 + .../http/ext_authz/v3alpha/ext_authz.proto | 209 +++ .../config/filter/http/fault/v3alpha/BUILD | 13 + .../filter/http/fault/v3alpha/fault.proto | 115 ++ .../config/filter/http/gzip/v3alpha/BUILD | 8 + .../filter/http/gzip/v3alpha/gzip.proto | 75 + .../http/header_to_metadata/v3alpha/BUILD | 9 + .../v3alpha/header_to_metadata.proto | 92 ++ .../filter/http/health_check/v3alpha/BUILD | 21 + .../health_check/v3alpha/health_check.proto | 44 + .../filter/http/ip_tagging/v3alpha/BUILD | 9 + .../http/ip_tagging/v3alpha/ip_tagging.proto | 53 + 
.../filter/http/jwt_authn/v3alpha/BUILD | 23 + .../filter/http/jwt_authn/v3alpha/README.md | 66 + .../http/jwt_authn/v3alpha/config.proto | 467 ++++++ .../config/filter/http/lua/v3alpha/BUILD | 8 + .../config/filter/http/lua/v3alpha/lua.proto | 21 + .../filter/http/rate_limit/v3alpha/BUILD | 11 + .../http/rate_limit/v3alpha/rate_limit.proto | 60 + .../config/filter/http/rbac/v3alpha/BUILD | 9 + .../filter/http/rbac/v3alpha/rbac.proto | 38 + .../config/filter/http/router/v3alpha/BUILD | 15 + .../filter/http/router/v3alpha/router.proto | 67 + .../config/filter/http/squash/v3alpha/BUILD | 8 + .../filter/http/squash/v3alpha/squash.proto | 55 + .../config/filter/http/tap/v3alpha/BUILD | 11 + .../config/filter/http/tap/v3alpha/tap.proto | 21 + .../filter/http/transcoder/v3alpha/BUILD | 13 + .../http/transcoder/v3alpha/transcoder.proto | 123 ++ .../network/client_ssl_auth/v3alpha/BUILD | 9 + .../v3alpha/client_ssl_auth.proto | 41 + .../filter/network/ext_authz/v3alpha/BUILD | 9 + .../network/ext_authz/v3alpha/ext_authz.proto | 35 + .../http_connection_manager/v3alpha/BUILD | 31 + .../v3alpha/http_connection_manager.proto | 599 +++++++ .../filter/network/mongo_proxy/v3alpha/BUILD | 9 + .../mongo_proxy/v3alpha/mongo_proxy.proto | 36 + .../filter/network/rate_limit/v3alpha/BUILD | 12 + .../rate_limit/v3alpha/rate_limit.proto | 47 + .../config/filter/network/rbac/v3alpha/BUILD | 9 + .../filter/network/rbac/v3alpha/rbac.proto | 52 + .../filter/network/redis_proxy/v3alpha/BUILD | 12 + .../redis_proxy/v3alpha/redis_proxy.proto | 236 +++ .../filter/network/tcp_proxy/v3alpha/BUILD | 23 + .../network/tcp_proxy/v3alpha/tcp_proxy.proto | 146 ++ .../config/grpc_credential/v3alpha/BUILD | 27 + .../grpc_credential/v3alpha/aws_iam.proto | 29 + .../v3alpha/file_based_metadata.proto | 28 + .../config/health_checker/redis/v3alpha/BUILD | 8 + .../health_checker/redis/v3alpha/redis.proto | 19 + api/envoy/config/metrics/v3alpha/BUILD | 43 + .../metrics/v3alpha/metrics_service.proto | 21 + 
api/envoy/config/metrics/v3alpha/stats.proto | 331 ++++ api/envoy/config/overload/v3alpha/BUILD | 14 + .../config/overload/v3alpha/overload.proto | 78 + api/envoy/config/ratelimit/v3alpha/BUILD | 20 + api/envoy/config/ratelimit/v3alpha/rls.proto | 26 + api/envoy/config/rbac/v3alpha/BUILD | 36 + api/envoy/config/rbac/v3alpha/rbac.proto | 215 +++ .../resource_monitor/fixed_heap/v3alpha/BUILD | 9 + .../fixed_heap/v3alpha/fixed_heap.proto | 19 + .../injected_resource/v3alpha/BUILD | 9 + .../v3alpha/injected_resource.proto | 20 + api/envoy/config/trace/v3alpha/BUILD | 24 + api/envoy/config/trace/v3alpha/trace.proto | 204 +++ .../transport_socket/alts/v3alpha/BUILD | 11 + .../transport_socket/alts/v3alpha/alts.proto | 24 + .../config/transport_socket/tap/v3alpha/BUILD | 12 + .../transport_socket/tap/v3alpha/tap.proto | 26 + api/envoy/data/accesslog/v3alpha/BUILD | 24 + .../data/accesslog/v3alpha/accesslog.proto | 356 +++++ api/envoy/data/cluster/v3alpha/BUILD | 11 + .../v3alpha/outlier_detection_event.proto | 102 ++ api/envoy/data/core/v3alpha/BUILD | 15 + .../core/v3alpha/health_check_event.proto | 85 + api/envoy/data/tap/v3alpha/BUILD | 37 + api/envoy/data/tap/v3alpha/common.proto | 31 + api/envoy/data/tap/v3alpha/http.proto | 60 + api/envoy/data/tap/v3alpha/transport.proto | 97 ++ api/envoy/data/tap/v3alpha/wrapper.proto | 34 + api/envoy/service/accesslog/v3alpha/BUILD | 23 + api/envoy/service/accesslog/v3alpha/als.proto | 71 + api/envoy/service/auth/v3alpha/BUILD | 28 + .../auth/v3alpha/attribute_context.proto | 154 ++ .../service/auth/v3alpha/external_auth.proto | 77 + api/envoy/service/discovery/v3alpha/BUILD | 75 + api/envoy/service/discovery/v3alpha/ads.proto | 38 + api/envoy/service/discovery/v3alpha/hds.proto | 127 ++ .../service/discovery/v3alpha/rtds.proto | 50 + api/envoy/service/discovery/v3alpha/sds.proto | 34 + api/envoy/service/load_stats/v3alpha/BUILD | 22 + .../service/load_stats/v3alpha/lrs.proto | 82 + api/envoy/service/metrics/v3alpha/BUILD | 24 + 
.../metrics/v3alpha/metrics_service.proto | 41 + api/envoy/service/ratelimit/v3alpha/BUILD | 24 + api/envoy/service/ratelimit/v3alpha/rls.proto | 95 ++ api/envoy/service/tap/v3alpha/BUILD | 36 + api/envoy/service/tap/v3alpha/common.proto | 200 +++ api/envoy/service/tap/v3alpha/tap.proto | 50 + api/envoy/service/tap/v3alpha/tapds.proto | 44 + api/envoy/service/trace/v3alpha/BUILD | 23 + .../service/trace/v3alpha/trace_service.proto | 46 + api/migration/v3alpha.sh | 6 + tools/api/clone.sh | 62 + 169 files changed, 14370 insertions(+) create mode 100644 api/envoy/admin/v3alpha/BUILD create mode 100644 api/envoy/admin/v3alpha/certs.proto create mode 100644 api/envoy/admin/v3alpha/clusters.proto create mode 100644 api/envoy/admin/v3alpha/config_dump.proto create mode 100644 api/envoy/admin/v3alpha/listeners.proto create mode 100644 api/envoy/admin/v3alpha/memory.proto create mode 100644 api/envoy/admin/v3alpha/metrics.proto create mode 100644 api/envoy/admin/v3alpha/mutex_stats.proto create mode 100644 api/envoy/admin/v3alpha/server_info.proto create mode 100644 api/envoy/admin/v3alpha/tap.proto create mode 100644 api/envoy/api/v3alpha/BUILD create mode 100644 api/envoy/api/v3alpha/README.md create mode 100644 api/envoy/api/v3alpha/auth/BUILD create mode 100644 api/envoy/api/v3alpha/auth/cert.proto create mode 100644 api/envoy/api/v3alpha/cds.proto create mode 100644 api/envoy/api/v3alpha/cluster/BUILD create mode 100644 api/envoy/api/v3alpha/cluster/circuit_breaker.proto create mode 100644 api/envoy/api/v3alpha/cluster/filter.proto create mode 100644 api/envoy/api/v3alpha/cluster/outlier_detection.proto create mode 100644 api/envoy/api/v3alpha/core/BUILD create mode 100644 api/envoy/api/v3alpha/core/address.proto create mode 100644 api/envoy/api/v3alpha/core/base.proto create mode 100644 api/envoy/api/v3alpha/core/config_source.proto create mode 100644 api/envoy/api/v3alpha/core/grpc_service.proto create mode 100644 api/envoy/api/v3alpha/core/health_check.proto create 
mode 100644 api/envoy/api/v3alpha/core/http_uri.proto create mode 100644 api/envoy/api/v3alpha/core/protocol.proto create mode 100644 api/envoy/api/v3alpha/discovery.proto create mode 100644 api/envoy/api/v3alpha/eds.proto create mode 100644 api/envoy/api/v3alpha/endpoint/BUILD create mode 100644 api/envoy/api/v3alpha/endpoint/endpoint.proto create mode 100644 api/envoy/api/v3alpha/endpoint/load_report.proto create mode 100644 api/envoy/api/v3alpha/lds.proto create mode 100644 api/envoy/api/v3alpha/listener/BUILD create mode 100644 api/envoy/api/v3alpha/listener/listener.proto create mode 100644 api/envoy/api/v3alpha/listener/udp_listener_config.proto create mode 100644 api/envoy/api/v3alpha/ratelimit/BUILD create mode 100644 api/envoy/api/v3alpha/ratelimit/ratelimit.proto create mode 100644 api/envoy/api/v3alpha/rds.proto create mode 100644 api/envoy/api/v3alpha/route/BUILD create mode 100644 api/envoy/api/v3alpha/route/route.proto create mode 100644 api/envoy/api/v3alpha/srds.proto create mode 100644 api/envoy/config/accesslog/v3alpha/BUILD create mode 100644 api/envoy/config/accesslog/v3alpha/als.proto create mode 100644 api/envoy/config/accesslog/v3alpha/file.proto create mode 100644 api/envoy/config/bootstrap/v3alpha/BUILD create mode 100644 api/envoy/config/bootstrap/v3alpha/bootstrap.proto create mode 100644 api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD create mode 100644 api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto create mode 100644 api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD create mode 100644 api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto create mode 100644 api/envoy/config/common/tap/v3alpha/BUILD create mode 100644 api/envoy/config/common/tap/v3alpha/common.proto create mode 100644 api/envoy/config/filter/accesslog/v3alpha/BUILD create mode 100644 api/envoy/config/filter/accesslog/v3alpha/accesslog.proto create mode 100644 api/envoy/config/filter/fault/v3alpha/BUILD create 
mode 100644 api/envoy/config/filter/fault/v3alpha/fault.proto create mode 100644 api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto create mode 100644 api/envoy/config/filter/http/buffer/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/buffer/v3alpha/buffer.proto create mode 100644 api/envoy/config/filter/http/csrf/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/csrf/v3alpha/csrf.proto create mode 100644 api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto create mode 100644 api/envoy/config/filter/http/ext_authz/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto create mode 100644 api/envoy/config/filter/http/fault/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/fault/v3alpha/fault.proto create mode 100644 api/envoy/config/filter/http/gzip/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/gzip/v3alpha/gzip.proto create mode 100644 api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto create mode 100644 api/envoy/config/filter/http/health_check/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/health_check/v3alpha/health_check.proto create mode 100644 api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto create mode 100644 api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/jwt_authn/v3alpha/README.md create mode 100644 api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto create mode 100644 api/envoy/config/filter/http/lua/v3alpha/BUILD create mode 100644 
api/envoy/config/filter/http/lua/v3alpha/lua.proto create mode 100644 api/envoy/config/filter/http/rate_limit/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto create mode 100644 api/envoy/config/filter/http/rbac/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/rbac/v3alpha/rbac.proto create mode 100644 api/envoy/config/filter/http/router/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/router/v3alpha/router.proto create mode 100644 api/envoy/config/filter/http/squash/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/squash/v3alpha/squash.proto create mode 100644 api/envoy/config/filter/http/tap/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/tap/v3alpha/tap.proto create mode 100644 api/envoy/config/filter/http/transcoder/v3alpha/BUILD create mode 100644 api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto create mode 100644 api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto create mode 100644 api/envoy/config/filter/network/ext_authz/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto create mode 100644 api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto create mode 100644 api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto create mode 100644 api/envoy/config/filter/network/rate_limit/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto create mode 100644 api/envoy/config/filter/network/rbac/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/rbac/v3alpha/rbac.proto create mode 100644 
api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto create mode 100644 api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD create mode 100644 api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto create mode 100644 api/envoy/config/grpc_credential/v3alpha/BUILD create mode 100644 api/envoy/config/grpc_credential/v3alpha/aws_iam.proto create mode 100644 api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto create mode 100644 api/envoy/config/health_checker/redis/v3alpha/BUILD create mode 100644 api/envoy/config/health_checker/redis/v3alpha/redis.proto create mode 100644 api/envoy/config/metrics/v3alpha/BUILD create mode 100644 api/envoy/config/metrics/v3alpha/metrics_service.proto create mode 100644 api/envoy/config/metrics/v3alpha/stats.proto create mode 100644 api/envoy/config/overload/v3alpha/BUILD create mode 100644 api/envoy/config/overload/v3alpha/overload.proto create mode 100644 api/envoy/config/ratelimit/v3alpha/BUILD create mode 100644 api/envoy/config/ratelimit/v3alpha/rls.proto create mode 100644 api/envoy/config/rbac/v3alpha/BUILD create mode 100644 api/envoy/config/rbac/v3alpha/rbac.proto create mode 100644 api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD create mode 100644 api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto create mode 100644 api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD create mode 100644 api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto create mode 100644 api/envoy/config/trace/v3alpha/BUILD create mode 100644 api/envoy/config/trace/v3alpha/trace.proto create mode 100644 api/envoy/config/transport_socket/alts/v3alpha/BUILD create mode 100644 api/envoy/config/transport_socket/alts/v3alpha/alts.proto create mode 100644 api/envoy/config/transport_socket/tap/v3alpha/BUILD create mode 100644 
api/envoy/config/transport_socket/tap/v3alpha/tap.proto create mode 100644 api/envoy/data/accesslog/v3alpha/BUILD create mode 100644 api/envoy/data/accesslog/v3alpha/accesslog.proto create mode 100644 api/envoy/data/cluster/v3alpha/BUILD create mode 100644 api/envoy/data/cluster/v3alpha/outlier_detection_event.proto create mode 100644 api/envoy/data/core/v3alpha/BUILD create mode 100644 api/envoy/data/core/v3alpha/health_check_event.proto create mode 100644 api/envoy/data/tap/v3alpha/BUILD create mode 100644 api/envoy/data/tap/v3alpha/common.proto create mode 100644 api/envoy/data/tap/v3alpha/http.proto create mode 100644 api/envoy/data/tap/v3alpha/transport.proto create mode 100644 api/envoy/data/tap/v3alpha/wrapper.proto create mode 100644 api/envoy/service/accesslog/v3alpha/BUILD create mode 100644 api/envoy/service/accesslog/v3alpha/als.proto create mode 100644 api/envoy/service/auth/v3alpha/BUILD create mode 100644 api/envoy/service/auth/v3alpha/attribute_context.proto create mode 100644 api/envoy/service/auth/v3alpha/external_auth.proto create mode 100644 api/envoy/service/discovery/v3alpha/BUILD create mode 100644 api/envoy/service/discovery/v3alpha/ads.proto create mode 100644 api/envoy/service/discovery/v3alpha/hds.proto create mode 100644 api/envoy/service/discovery/v3alpha/rtds.proto create mode 100644 api/envoy/service/discovery/v3alpha/sds.proto create mode 100644 api/envoy/service/load_stats/v3alpha/BUILD create mode 100644 api/envoy/service/load_stats/v3alpha/lrs.proto create mode 100644 api/envoy/service/metrics/v3alpha/BUILD create mode 100644 api/envoy/service/metrics/v3alpha/metrics_service.proto create mode 100644 api/envoy/service/ratelimit/v3alpha/BUILD create mode 100644 api/envoy/service/ratelimit/v3alpha/rls.proto create mode 100644 api/envoy/service/tap/v3alpha/BUILD create mode 100644 api/envoy/service/tap/v3alpha/common.proto create mode 100644 api/envoy/service/tap/v3alpha/tap.proto create mode 100644 
api/envoy/service/tap/v3alpha/tapds.proto create mode 100644 api/envoy/service/trace/v3alpha/BUILD create mode 100644 api/envoy/service/trace/v3alpha/trace_service.proto create mode 100755 api/migration/v3alpha.sh create mode 100755 tools/api/clone.sh diff --git a/api/envoy/admin/v3alpha/BUILD b/api/envoy/admin/v3alpha/BUILD new file mode 100644 index 000000000000..71b0790b69a9 --- /dev/null +++ b/api/envoy/admin/v3alpha/BUILD @@ -0,0 +1,76 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "config_dump", + srcs = ["config_dump.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha:cds", + "//envoy/api/v3alpha:lds", + "//envoy/api/v3alpha:rds", + "//envoy/api/v3alpha:srds", + "//envoy/api/v3alpha/auth:cert", + "//envoy/config/bootstrap/v3alpha:bootstrap", + ], +) + +api_proto_library_internal( + name = "clusters", + srcs = ["clusters.proto"], + visibility = ["//visibility:public"], + deps = [ + ":metrics", + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:health_check", + "//envoy/type:percent", + ], +) + +api_proto_library_internal( + name = "listeners", + srcs = ["listeners.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha/core:address", + ], +) + +api_proto_library_internal( + name = "metrics", + srcs = ["metrics.proto"], + visibility = ["//visibility:public"], +) + +api_proto_library_internal( + name = "memory", + srcs = ["memory.proto"], + visibility = ["//visibility:public"], +) + +api_proto_library_internal( + name = "mutex_stats", + srcs = ["mutex_stats.proto"], + visibility = ["//visibility:public"], +) + +api_proto_library_internal( + name = "certs", + srcs = ["certs.proto"], + visibility = ["//visibility:public"], +) + +api_proto_library_internal( + name = "server_info", + srcs = ["server_info.proto"], + visibility = ["//visibility:public"], +) + +api_proto_library_internal( 
+ name = "tap", + srcs = ["tap.proto"], + deps = [ + "//envoy/service/tap/v3alpha:common", + ], +) diff --git a/api/envoy/admin/v3alpha/certs.proto b/api/envoy/admin/v3alpha/certs.proto new file mode 100644 index 000000000000..e34fd36d992b --- /dev/null +++ b/api/envoy/admin/v3alpha/certs.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "CertsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +import "google/protobuf/timestamp.proto"; + +// [#protodoc-title: Certificates] + +// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to +// display certificate information. See :ref:`/certs ` for more +// information. +message Certificates { + // List of certificates known to an Envoy. + repeated Certificate certificates = 1; +} + +message Certificate { + + // Details of CA certificate. + repeated CertificateDetails ca_cert = 1; + + // Details of Certificate Chain + repeated CertificateDetails cert_chain = 2; +} + +message CertificateDetails { + // Path of the certificate. + string path = 1; + + // Certificate Serial Number. + string serial_number = 2; + + // List of Subject Alternate names. + repeated SubjectAlternateName subject_alt_names = 3; + + // Minimum of days until expiration of certificate and it's chain. + uint64 days_until_expiration = 4; + + // Indicates the time from which the certificate is valid. + google.protobuf.Timestamp valid_from = 5; + + // Indicates the time at which the certificate expires. + google.protobuf.Timestamp expiration_time = 6; +} + +message SubjectAlternateName { + + // Subject Alternate Name. 
+ oneof name { + string dns = 1; + string uri = 2; + } +} diff --git a/api/envoy/admin/v3alpha/clusters.proto b/api/envoy/admin/v3alpha/clusters.proto new file mode 100644 index 000000000000..093448d9f82c --- /dev/null +++ b/api/envoy/admin/v3alpha/clusters.proto @@ -0,0 +1,143 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "ClustersProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +import "envoy/admin/v3alpha/metrics.proto"; +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/health_check.proto"; +import "envoy/type/percent.proto"; + +// [#protodoc-title: Clusters] + +// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. +// See :ref:`/clusters ` for more information. +message Clusters { + // Mapping from cluster name to each cluster's status. + repeated ClusterStatus cluster_statuses = 1; +} + +// Details an individual cluster's current status. +message ClusterStatus { + // Name of the cluster. + string name = 1; + + // Denotes whether this cluster was added via API or configured statically. + bool added_via_api = 2; + + // The success rate threshold used in the last interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all errors: externally and locally generated were used to calculate the threshold. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*, only externally generated errors were used to calculate the threshold. + // The threshold is used to eject hosts based on their success rate. See + // :ref:`Cluster outlier detection ` documentation for details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. 
The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + envoy.type.Percent success_rate_ejection_threshold = 3; + + // Mapping from host address to the host's current status. + repeated HostStatus host_statuses = 4; + + // The success rate threshold used in the last interval when only locally originated failures were + // taken into account and externally originated errors were treated as success. + // This field should be interpretted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*. The threshold is used to eject hosts based on their success rate. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + envoy.type.Percent local_origin_success_rate_ejection_threshold = 5; +} + +// Current state of a particular host. +message HostStatus { + // Address of this host. + envoy.api.v3alpha.core.Address address = 1; + + // List of stats specific to this host. + repeated SimpleMetric stats = 2; + + // The host's current health status. + HostHealthStatus health_status = 3; + + // Request success rate for this host over the last calculated interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all errors: externally and locally generated were used in success rate + // calculation. If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*, only externally generated errors were used in success rate calculation. 
+ // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + envoy.type.Percent success_rate = 4; + + // The host's weight. If not configured, the value defaults to 1. + uint32 weight = 5; + + // The hostname of the host, if applicable. + string hostname = 6; + + // The host's priority. If not configured, the value defaults to 0 (highest priority). + uint32 priority = 7; + + // Request success rate for this host over the last calculated + // interval when only locally originated errors are taken into account and externally originated + // errors were treated as success. + // This field should be interpretted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + envoy.type.Percent local_origin_success_rate = 8; +} + +// Health status for a host. +message HostHealthStatus { + // The host is currently failing active health checks. + bool failed_active_health_check = 1; + + // The host is currently considered an outlier and has been ejected. + bool failed_outlier_check = 2; + + // The host is currently being marked as degraded through active health checking. + bool failed_active_degraded_check = 4; + + // The host has been removed from service discovery, but is being stabilized due to active + // health checking. + bool pending_dynamic_removal = 5; + + // The host has not yet been health checked. + bool pending_active_hc = 6; + + // Health status as reported by EDS. 
Note: only HEALTHY and UNHEALTHY are currently supported + // here. + // TODO(mrice32): pipe through remaining EDS health status possibilities. + envoy.api.v3alpha.core.HealthStatus eds_health_status = 3; +} diff --git a/api/envoy/admin/v3alpha/config_dump.proto b/api/envoy/admin/v3alpha/config_dump.proto new file mode 100644 index 000000000000..e909b1adfa72 --- /dev/null +++ b/api/envoy/admin/v3alpha/config_dump.proto @@ -0,0 +1,266 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "ConfigDumpProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +import "envoy/api/v3alpha/auth/cert.proto"; +import "envoy/api/v3alpha/cds.proto"; +import "envoy/api/v3alpha/lds.proto"; +import "envoy/api/v3alpha/rds.proto"; +import "envoy/api/v3alpha/srds.proto"; +import "envoy/config/bootstrap/v3alpha/bootstrap.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +import "gogoproto/gogo.proto"; + +// [#protodoc-title: ConfigDump] + +// The :ref:`/config_dump ` admin endpoint uses this wrapper +// message to maintain and serve arbitrary configuration information from any component in Envoy. +message ConfigDump { + // This list is serialized and dumped in its entirety at the + // :ref:`/config_dump ` endpoint. + // + // The following configurations are currently supported and will be dumped in the order given + // below: + // + // * *bootstrap*: :ref:`BootstrapConfigDump ` + // * *clusters*: :ref:`ClustersConfigDump ` + // * *listeners*: :ref:`ListenersConfigDump ` + // * *routes*: :ref:`RoutesConfigDump ` + repeated google.protobuf.Any configs = 1; +} + +// This message describes the bootstrap configuration that Envoy was started with. This includes +// any CLI overrides that were merged. 
Bootstrap configuration information can be used to recreate +// the static portions of an Envoy configuration by reusing the output as the bootstrap +// configuration for another Envoy. +message BootstrapConfigDump { + envoy.config.bootstrap.v3alpha.Bootstrap bootstrap = 1; + + // The timestamp when the BootstrapConfig was last updated. + google.protobuf.Timestamp last_updated = 2; +} + +// Envoy's listener manager fills this message with all currently known listeners. Listener +// configuration information can be used to recreate an Envoy configuration by populating all +// listeners as static listeners or by returning them in a LDS response. +message ListenersConfigDump { + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + string version_info = 1; + + // Describes a statically loaded listener. + message StaticListener { + // The listener config. + envoy.api.v3alpha.Listener listener = 1; + + // The timestamp when the Listener was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + // Describes a dynamically loaded cluster via the LDS API. + message DynamicListener { + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the listener was loaded. In the future, discrete per-listener versions may be supported + // by the API. + string version_info = 1; + + // The listener config. + envoy.api.v3alpha.Listener listener = 2; + + // The timestamp when the Listener was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded listener configs. + repeated StaticListener static_listeners = 2; + + // The dynamically loaded active listeners. These are listeners that are available to service + // data plane traffic. + repeated DynamicListener dynamic_active_listeners = 3; + + // The dynamically loaded warming listeners. 
These are listeners that are currently undergoing + // warming in preparation to service data plane traffic. Note that if attempting to recreate an + // Envoy configuration from a configuration dump, the warming listeners should generally be + // discarded. + repeated DynamicListener dynamic_warming_listeners = 4; + + // The dynamically loaded draining listeners. These are listeners that are currently undergoing + // draining in preparation to stop servicing data plane traffic. Note that if attempting to + // recreate an Envoy configuration from a configuration dump, the draining listeners should + // generally be discarded. + repeated DynamicListener dynamic_draining_listeners = 5; +} + +// Envoy's cluster manager fills this message with all currently known clusters. Cluster +// configuration information can be used to recreate an Envoy configuration by populating all +// clusters as static clusters or by returning them in a CDS response. +message ClustersConfigDump { + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + string version_info = 1; + + // Describes a statically loaded cluster. + message StaticCluster { + // The cluster config. + envoy.api.v3alpha.Cluster cluster = 1; + + // The timestamp when the Cluster was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + // Describes a dynamically loaded cluster via the CDS API. + message DynamicCluster { + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + // the API. + string version_info = 1; + + // The cluster config. + envoy.api.v3alpha.Cluster cluster = 2; + + // The timestamp when the Cluster was last updated. 
+ google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded cluster configs. + repeated StaticCluster static_clusters = 2; + + // The dynamically loaded active clusters. These are clusters that are available to service + // data plane traffic. + repeated DynamicCluster dynamic_active_clusters = 3; + + // The dynamically loaded warming clusters. These are clusters that are currently undergoing + // warming in preparation to service data plane traffic. Note that if attempting to recreate an + // Envoy configuration from a configuration dump, the warming clusters should generally be + // discarded. + repeated DynamicCluster dynamic_warming_clusters = 4; +} + +// Envoy's RDS implementation fills this message with all currently loaded routes, as described by +// their RouteConfiguration objects. Static routes configured in the bootstrap configuration are +// separated from those configured dynamically via RDS. Route configuration information can be used +// to recreate an Envoy configuration by populating all routes as static routes or by returning them +// in RDS responses. +message RoutesConfigDump { + message StaticRouteConfig { + // The route config. + envoy.api.v3alpha.RouteConfiguration route_config = 1; + + // The timestamp when the Route was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicRouteConfig { + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the route configuration was loaded. + string version_info = 1; + + // The route config. + envoy.api.v3alpha.RouteConfiguration route_config = 2; + + // The timestamp when the Route was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded route configs. + repeated StaticRouteConfig static_route_configs = 2; + + // The dynamically loaded route configs. 
+ repeated DynamicRouteConfig dynamic_route_configs = 3; +} + +// Envoy's scoped RDS implementation fills this message with all currently loaded route +// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both +// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the +// dynamically obtained scopes via the SRDS API. +message ScopedRoutesConfigDump { + message InlineScopedRouteConfigs { + // The name assigned to the scoped route configurations. + string name = 1; + + // The scoped route configurations. + repeated envoy.api.v3alpha.ScopedRouteConfiguration scoped_route_configs = 2; + + // The timestamp when the scoped route config set was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + message DynamicScopedRouteConfigs { + // The name assigned to the scoped route configurations. + string name = 1; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the scoped routes configuration was loaded. + string version_info = 2; + + // The scoped route configurations. + repeated envoy.api.v3alpha.ScopedRouteConfiguration scoped_route_configs = 3; + + // The timestamp when the scoped route config set was last updated. + google.protobuf.Timestamp last_updated = 4; + } + + // The statically loaded scoped route configs. + repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; + + // The dynamically loaded scoped route configs. + repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; +} + +// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. +message SecretsConfigDump { + // DynamicSecret contains secret information fetched via SDS. + message DynamicSecret { + // The name assigned to the secret. + string name = 1; + + // This is the per-resource version information. 
+ string version_info = 2; + + // The timestamp when the secret was last updated. + google.protobuf.Timestamp last_updated = 3; + + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + envoy.api.v3alpha.auth.Secret secret = 4; + } + + // StaticSecret specifies statically loaded secret in bootstrap. + message StaticSecret { + // The name assigned to the secret. + string name = 1; + + // The timestamp when the secret was last updated. + google.protobuf.Timestamp last_updated = 2; + + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + envoy.api.v3alpha.auth.Secret secret = 3; + } + + // The statically loaded secrets. + repeated StaticSecret static_secrets = 1; + + // The dynamically loaded active secrets. These are secrets that are available to service + // clusters or listeners. + repeated DynamicSecret dynamic_active_secrets = 2; + + // The dynamically loaded warming secrets. These are secrets that are currently undergoing + // warming in preparation to service clusters or listeners. + repeated DynamicSecret dynamic_warming_secrets = 3; +} diff --git a/api/envoy/admin/v3alpha/listeners.proto b/api/envoy/admin/v3alpha/listeners.proto new file mode 100644 index 000000000000..5e4d121b1fb2 --- /dev/null +++ b/api/envoy/admin/v3alpha/listeners.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "ListenersProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +import "envoy/api/v3alpha/core/address.proto"; + +// [#protodoc-title: Listeners] + +// Admin endpoint uses this wrapper for `/listeners` to display listener status information. +// See :ref:`/listeners ` for more information. +message Listeners { + // List of listener statuses. 
+ repeated ListenerStatus listener_statuses = 1; +} + +// Details an individual listener's current status. +message ListenerStatus { + // Name of the listener + string name = 1; + + // The actual local address that the listener is listening on. If a listener was configured + // to listen on port 0, then this address has the port that was allocated by the OS. + envoy.api.v3alpha.core.Address local_address = 2; +} diff --git a/api/envoy/admin/v3alpha/memory.proto b/api/envoy/admin/v3alpha/memory.proto new file mode 100644 index 000000000000..4c17be034e47 --- /dev/null +++ b/api/envoy/admin/v3alpha/memory.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "MemoryProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +// [#protodoc-title: Memory] + +// Proto representation of the internal memory consumption of an Envoy instance. These represent +// values extracted from an internal TCMalloc instance. For more information, see the section of the +// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). +message Memory { + + // The number of bytes allocated by the heap for Envoy. This is an alias for + // `generic.current_allocated_bytes`. + uint64 allocated = 1; + + // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for + // `generic.heap_size`. + uint64 heap_size = 2; + + // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards + // virtual memory usage, and depending on the OS, typically do not count towards physical memory + // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. + uint64 pageheap_unmapped = 3; + + // The number of bytes in free, mapped pages in the page heap. 
These bytes always count towards + // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also + // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. + uint64 pageheap_free = 4; + + // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias + // for `tcmalloc.current_total_thread_cache_bytes`. + uint64 total_thread_cache = 5; +} diff --git a/api/envoy/admin/v3alpha/metrics.proto b/api/envoy/admin/v3alpha/metrics.proto new file mode 100644 index 000000000000..5a52ff2648b4 --- /dev/null +++ b/api/envoy/admin/v3alpha/metrics.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "MetricsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +// [#protodoc-title: Metrics] + +// Proto representation of an Envoy Counter or Gauge value. +message SimpleMetric { + enum Type { + COUNTER = 0; + GAUGE = 1; + } + + // Type of the metric represented. + Type type = 1; + + // Current metric value. + uint64 value = 2; + + // Name of the metric. + string name = 3; +} diff --git a/api/envoy/admin/v3alpha/mutex_stats.proto b/api/envoy/admin/v3alpha/mutex_stats.proto new file mode 100644 index 000000000000..72350dea8d77 --- /dev/null +++ b/api/envoy/admin/v3alpha/mutex_stats.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "MutexStatsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +// [#protodoc-title: MutexStats] + +// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run +// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` +// [docs](https://abseil.io/about/design/mutex#extra-features). 
+// +// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not +// correspond to core clock frequency. For more information, see the `CycleClock` +// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). +message MutexStats { + + // The number of individual mutex contentions which have occurred since startup. + uint64 num_contentions = 1; + + // The length of the current contention wait cycle. + uint64 current_wait_cycles = 2; + + // The lifetime total of all contention wait cycles. + uint64 lifetime_wait_cycles = 3; +} diff --git a/api/envoy/admin/v3alpha/server_info.proto b/api/envoy/admin/v3alpha/server_info.proto new file mode 100644 index 000000000000..a259a87f1651 --- /dev/null +++ b/api/envoy/admin/v3alpha/server_info.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "ServerInfoProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +import "google/protobuf/duration.proto"; + +// [#protodoc-title: Server State] + +// Proto representation of the value returned by /server_info, containing +// server version/server status information. +message ServerInfo { + // Server version. + string version = 1; + + enum State { + // Server is live and serving traffic. + LIVE = 0; + // Server is draining listeners in response to external health checks failing. + DRAINING = 1; + // Server has not yet completed cluster manager initialization. + PRE_INITIALIZING = 2; + // Server is running the cluster manager initialization callbacks (e.g., RDS). + INITIALIZING = 3; + } + + // State of the server. + State state = 2; + + // Uptime since current epoch was started. + google.protobuf.Duration uptime_current_epoch = 3; + + // Uptime since the start of the first epoch. + google.protobuf.Duration uptime_all_epochs = 4; + + // Hot restart version. 
+ string hot_restart_version = 5; + + // Command line options the server is currently running with. + CommandLineOptions command_line_options = 6; +} + +message CommandLineOptions { + // See :option:`--base-id` for details. + uint64 base_id = 1; + + // See :option:`--concurrency` for details. + uint32 concurrency = 2; + + // See :option:`--config-path` for details. + string config_path = 3; + + // See :option:`--config-yaml` for details. + string config_yaml = 4; + + // See :option:`--allow-unknown-static-fields` for details. + bool allow_unknown_static_fields = 5; + + // See :option:`--reject-unknown-dynamic-fields` for details. + bool reject_unknown_dynamic_fields = 26; + + // See :option:`--admin-address-path` for details. + string admin_address_path = 6; + + enum IpVersion { + v4 = 0; + v6 = 1; + } + + // See :option:`--local-address-ip-version` for details. + IpVersion local_address_ip_version = 7; + + // See :option:`--log-level` for details. + string log_level = 8; + + // See :option:`--component-log-level` for details. + string component_log_level = 9; + + // See :option:`--log-format` for details. + string log_format = 10; + + // See :option:`--log-path` for details. + string log_path = 11; + + reserved 12; + + // See :option:`--service-cluster` for details. + string service_cluster = 13; + + // See :option:`--service-node` for details. + string service_node = 14; + + // See :option:`--service-zone` for details. + string service_zone = 15; + + // See :option:`--file-flush-interval-msec` for details. + google.protobuf.Duration file_flush_interval = 16; + + // See :option:`--drain-time-s` for details. + google.protobuf.Duration drain_time = 17; + + // See :option:`--parent-shutdown-time-s` for details. + google.protobuf.Duration parent_shutdown_time = 18; + + enum Mode { + // Validate configs and then serve traffic normally. + Serve = 0; + + // Validate configs and exit. 
+ Validate = 1; + + // Completely load and initialize the config, and then exit without running the listener loop. + InitOnly = 2; + } + + // See :option:`--mode` for details. + Mode mode = 19; + + // max_stats and max_obj_name_len are now unused and have no effect. + uint64 max_stats = 20 [deprecated = true]; + uint64 max_obj_name_len = 21 [deprecated = true]; + + // See :option:`--disable-hot-restart` for details. + bool disable_hot_restart = 22; + + // See :option:`--enable-mutex-tracing` for details. + bool enable_mutex_tracing = 23; + + // See :option:`--restart-epoch` for details. + uint32 restart_epoch = 24; + + // See :option:`--cpuset-threads` for details. + bool cpuset_threads = 25; +} diff --git a/api/envoy/admin/v3alpha/tap.proto b/api/envoy/admin/v3alpha/tap.proto new file mode 100644 index 000000000000..b6fd6a85f567 --- /dev/null +++ b/api/envoy/admin/v3alpha/tap.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +import "envoy/service/tap/v3alpha/common.proto"; +import "validate/validate.proto"; + +package envoy.admin.v3alpha; + +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.admin.v3alpha"; + +// The /tap admin request body that is used to configure an active tap session. +message TapRequest { + // The opaque configuration ID used to match the configuration to a loaded extension. + // A tap extension configures a similar opaque ID that is used to match. + string config_id = 1 [(validate.rules).string.min_bytes = 1]; + + // The tap configuration to load. 
+ service.tap.v3alpha.TapConfig tap_config = 2 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/api/v3alpha/BUILD b/api/envoy/api/v3alpha/BUILD new file mode 100644 index 000000000000..0e2892e87e69 --- /dev/null +++ b/api/envoy/api/v3alpha/BUILD @@ -0,0 +1,171 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +# Friends of core API packages - filters, services, service configs. +# Package //envoy/api/v3alpha contains xDS and discovery definitions that should +# be in //envoy/service/discovery, but remain here for backwards compatibility. +package_group( + name = "friends", + packages = [ + "//envoy/admin/...", + "//envoy/api/v3alpha", + "//envoy/config/...", + "//envoy/data/...", + "//envoy/service/...", + ], +) + +api_proto_library_internal( + name = "discovery", + srcs = ["discovery.proto"], + visibility = [":friends"], + deps = ["//envoy/api/v3alpha/core:base"], +) + +api_go_proto_library( + name = "discovery", + proto = ":discovery", + deps = ["//envoy/api/v3alpha/core:base_go_proto"], +) + +api_proto_library_internal( + name = "eds", + srcs = ["eds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:health_check", + "//envoy/api/v3alpha/endpoint", + "//envoy/type:percent", + ], +) + +api_go_grpc_library( + name = "eds", + proto = ":eds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:health_check_go_proto", + "//envoy/api/v3alpha/endpoint:endpoint_go_proto", + "//envoy/type:percent_go_proto", + ], +) + +api_proto_library_internal( + name = "cds", + srcs = ["cds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + ":eds", + "//envoy/api/v3alpha/auth:cert", 
+ "//envoy/api/v3alpha/cluster:circuit_breaker", + "//envoy/api/v3alpha/cluster:filter", + "//envoy/api/v3alpha/cluster:outlier_detection", + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:config_source", + "//envoy/api/v3alpha/core:health_check", + "//envoy/api/v3alpha/core:protocol", + "//envoy/api/v3alpha/endpoint", + "//envoy/type:percent", + ], +) + +api_go_grpc_library( + name = "cds", + proto = ":cds", + deps = [ + ":discovery_go_proto", + ":eds_go_grpc", + "//envoy/api/v3alpha/auth:cert_go_proto", + "//envoy/api/v3alpha/cluster:circuit_breaker_go_proto", + "//envoy/api/v3alpha/cluster:filter_go_proto", + "//envoy/api/v3alpha/cluster:outlier_detection_go_proto", + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:config_source_go_proto", + "//envoy/api/v3alpha/core:health_check_go_proto", + "//envoy/api/v3alpha/core:protocol_go_proto", + "//envoy/api/v3alpha/endpoint:endpoint_go_proto", + "//envoy/type:percent_go_proto", + ], +) + +api_proto_library_internal( + name = "lds", + srcs = ["lds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/listener", + "//envoy/api/v3alpha/listener:udp_listener_config", + ], +) + +api_go_grpc_library( + name = "lds", + proto = ":lds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/listener:listener_go_proto", + "//envoy/api/v3alpha/listener:udp_listener_config_go_proto", + ], +) + +api_proto_library_internal( + name = "rds", + srcs = ["rds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:config_source", + "//envoy/api/v3alpha/route", + ], +) + +api_go_grpc_library( + name = "rds", + 
proto = ":rds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:config_source_go_proto", + "//envoy/api/v3alpha/route:route_go_proto", + ], +) + +api_proto_library_internal( + name = "srds", + srcs = ["srds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/route", + ], +) + +api_go_grpc_library( + name = "srds", + proto = ":srds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + ], +) diff --git a/api/envoy/api/v3alpha/README.md b/api/envoy/api/v3alpha/README.md new file mode 100644 index 000000000000..984be690a103 --- /dev/null +++ b/api/envoy/api/v3alpha/README.md @@ -0,0 +1,9 @@ +Protocol buffer definitions for xDS and top-level resource API messages. + +Package group `//envoy/api/v3alpha:friends` enumerates all consumers of the shared +API messages. That includes package envoy.api.v3alpha itself, which contains several +xDS definitions. Default visibility for all shared definitions should be set to +`//envoy/api/v3alpha:friends`. + +Additionally, packages envoy.api.v3alpha.core and envoy.api.v3alpha.auth are also +consumed throughout the subpackages of `//envoy/api/v3alpha`. 
diff --git a/api/envoy/api/v3alpha/auth/BUILD b/api/envoy/api/v3alpha/auth/BUILD new file mode 100644 index 000000000000..f206a35f97f2 --- /dev/null +++ b/api/envoy/api/v3alpha/auth/BUILD @@ -0,0 +1,35 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +package_group( + name = "friends", + includes = [ + "//envoy/api/v3alpha:friends", + ], + packages = [ + "//envoy/api/v3alpha/cluster", + "//envoy/api/v3alpha/endpoint", + "//envoy/api/v3alpha/listener", + "//envoy/api/v3alpha/route", + ], +) + +api_proto_library_internal( + name = "cert", + srcs = ["cert.proto"], + visibility = [":friends"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:config_source", + ], +) + +api_go_proto_library( + name = "cert", + proto = ":cert", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:config_source_go_proto", + ], +) diff --git a/api/envoy/api/v3alpha/auth/cert.proto b/api/envoy/api/v3alpha/auth/cert.proto new file mode 100644 index 000000000000..925453074ac1 --- /dev/null +++ b/api/envoy/api/v3alpha/auth/cert.proto @@ -0,0 +1,407 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.auth; + +option java_outer_classname = "CertProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.auth"; +option go_package = "auth"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/core/config_source.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + enum TlsProtocol { + // Envoy will choose the optimal TLS version. 
+ TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_0``. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum.defined_only = true]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and + // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum.defined_only = true]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. 
code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string.min_bytes = 1]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } +} + +message TlsCertificate { + // The TLS certificate chain. + core.DataSource certificate_chain = 1; + + // The TLS private key. + core.DataSource private_key = 2; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + core.DataSource password = 3; + + // [#not-implemented-hide:] + core.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated core.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. 
This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated core.DataSource keys = 1 [(validate.rules).repeated .min_items = 1]; +} + +message CertificateValidationContext { + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`verify_subject_alt_name + // `) is also + // specified. 
+ // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + core.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey \ + // | openssl pkey -pubin -outform DER \ + // | openssl dgst -sha256 -binary \ + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated .items.string = {min_bytes: 44, max_bytes: 44}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated .items.string = {min_bytes: 64, max_bytes: 95}]; + + // An optional list of Subject Alternative Names. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified values. + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated string verify_subject_alt_name = 4; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. 
+ core.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; +} + +// TLS context shared by both client and server TLS contexts. +message CommonTlsContext { + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated .max_items = 1]; + + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message.required = true]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message.required = true]; + }; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. 
This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; + + reserved 5; +} + +message UpstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string.max_bytes = 255]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +message DownstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. 
+ google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // [#not-implemented-hide:] + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + } +} + +// [#proto-status: experimental] +message SdsSecretConfig { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via SDS. + // When only name is specified, then secret will be loaded from static resources [V2-API-DIFF]. + string name = 1; + core.ConfigSource sds_config = 2; +} + +// [#proto-status: experimental] +message Secret { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + oneof type { + TlsCertificate tls_certificate = 2; + TlsSessionTicketKeys session_ticket_keys = 3; + CertificateValidationContext validation_context = 4; + } +} diff --git a/api/envoy/api/v3alpha/cds.proto b/api/envoy/api/v3alpha/cds.proto new file mode 100644 index 000000000000..50b7adaf996e --- /dev/null +++ b/api/envoy/api/v3alpha/cds.proto @@ -0,0 +1,660 @@ +syntax = "proto3"; + +package envoy.api.v3alpha; + +option java_outer_classname = "CdsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha"; + +option java_generic_services = true; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/auth/cert.proto"; +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/core/config_source.proto"; +import "envoy/api/v3alpha/discovery.proto"; +import "envoy/api/v3alpha/core/health_check.proto"; +import "envoy/api/v3alpha/core/protocol.proto"; +import 
"envoy/api/v3alpha/cluster/circuit_breaker.proto"; +import "envoy/api/v3alpha/cluster/filter.proto"; +import "envoy/api/v3alpha/cluster/outlier_detection.proto"; +import "envoy/api/v3alpha/eds.proto"; +import "envoy/type/percent.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; + +// Return list of all clusters this proxy will load balance to. +service ClusterDiscoveryService { + rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } + + rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:clusters" + body: "*" + }; + } +} + +// [#protodoc-title: Clusters] + +// Configuration for a single upstream cluster. +// [#comment:next free field: 41] +message Cluster { + // Supplies the name of the cluster which must be unique across all clusters. + // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + // Refer to :ref:`service discovery type ` + // for an explanation on each type. + enum DiscoveryType { + // Refer to the :ref:`static discovery type` + // for an explanation. 
+ STATIC = 0; + + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. + STRICT_DNS = 1; + + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + LOGICAL_DNS = 2; + + // Refer to the :ref:`service discovery type` + // for an explanation. + EDS = 3; + + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + ORIGINAL_DST = 4; + } + + // Extended cluster type. + message CustomClusterType { + // The type of the cluster to instantiate. The name must match a supported cluster type. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + // Configuration for the source of EDS updates for this Cluster. + core.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + } + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). 
+ google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // Refer to :ref:`load balancer type ` architecture + // overview section for information on each type. + enum LbPolicy { + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + ROUND_ROBIN = 0; + + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + LEAST_REQUEST = 1; + + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + RING_HASH = 2; + + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + RANDOM = 3; + + // Refer to the :ref:`original destination load balancing + // policy` + // for an explanation. + // + // .. attention:: + // + // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. + // + ORIGINAL_DST_LB = 4 [deprecated = true]; + + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + MAGLEV = 5; + + // This load balancer type must be specified if the configured cluster provides a cluster + // specific load balancer. Consult the configured cluster's documentation for whether to set + // this option or not. + CLUSTER_PROVIDED = 6; + } + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum.defined_only = true]; + + // If the service discovery type is + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS`, + // then hosts is required. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`load_assignment` field instead. + // + repeated core.Address hosts = 7; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes :ref:`hosts` field. + // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` + // once load_assignment is implemented.] + // + // .. 
attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // Setting this overrides :ref:`hosts` values. + // + ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + cluster.CircuitBreakers circuit_breakers = 10; + + // The TLS configuration for connections to the upstream cluster. If no TLS + // configuration is specified, TLS will not be used for new connections. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + auth.UpstreamTlsContext tls_context = 11; + + reserved 12; + + // Additional options when handling HTTP requests. These options will be applicable to both + // HTTP1 and HTTP2 requests. + core.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. 
As an aside this allows HTTP/2 + // connections to happen over plain text. + core.Http2ProtocolOptions http2_protocol_options = 14; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map extension_protocol_options = 35; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map typed_extension_protocol_options = 36; + + reserved 15; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. If this setting is not specified, the value defaults to 5000ms. For + // cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + bool respect_dns_ttl = 39; + + // When V4_ONLY is selected, the DNS resolver will only perform a lookup for + // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + // only perform a lookup for addresses in the IPv6 family. If AUTO is + // specified, the DNS resolver will first perform a lookup for addresses in + // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. 
+ // For cluster types other than + // :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS`, + // this setting is + // ignored. + enum DnsLookupFamily { + AUTO = 0; + V4_ONLY = 1; + V6_ONLY = 2; + } + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum.defined_only = true]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + repeated core.Address dns_resolvers = 18; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + cluster.OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. 
+ google.protobuf.Duration cleanup_interval = 20 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.BindConfig upstream_bind_config = 21; + + // Optionally divide the endpoints in this cluster into subsets defined by + // endpoint metadata and selected by route and weighted cluster metadata. + message LbSubsetConfig { + + // If NO_FALLBACK is selected, a result + // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + // any cluster endpoint may be returned (subject to policy, health checks, + // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + enum LbSubsetFallbackPolicy { + NO_FALLBACK = 0; + ANY_ENDPOINT = 1; + DEFAULT_SUBSET = 2; + } + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum.defined_only = true]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + google.protobuf.Struct default_subset = 2; + + // Specifications for subsets. + message LbSubsetSelector { + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + // The behavior used when no endpoint subset matches the selected route's + // metadata. 
+ LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum.defined_only = true]; + + // Allows to override top level fallback policy per selector. + enum LbSubsetSelectorFallbackPolicy { + // If NOT_DEFINED top level config fallback policy is used instead. + NOT_DEFINED = 0; + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. + NO_FALLBACK = 1; + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned + // (subject to policy, health checks, etc). + ANY_ENDPOINT = 2; + // If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + DEFAULT_SUBSET = 3; + } + } + + // For each entry, LbEndpoint.Metadata's + // *envoy.lb* namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. + repeated LbSubsetSelector subset_selectors = 3; + + // If true, routing to subsets will take into account the localities and locality weights of the + // endpoints when making the routing decision. + // + // There are some potential pitfalls associated with enabling this feature, as the resulting + // traffic split after applying both a subset match and locality weights might be undesirable. + // + // Consider for example a situation in which you have 50/50 split across two localities X/Y + // which have 100 hosts each without subsetting. 
If the subset LB results in X having only 1 + // host selected but Y having 100, then a lot more load is being dumped on the single host in X + // than originally anticipated in the load balancing assignment delivered via EDS. + bool locality_weight_aware = 4; + + // When used with locality_weight_aware, scales the weight of each locality by the ratio + // of hosts in the subset vs hosts in the original subset. This aims to even out the load + // going to an individual locality if said locality is disproportionally affected by the + // subset predicate. + bool scale_locality_weight = 5; + + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. + // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. + bool panic_mode_any = 6; + + // If true, metadata specified for a metadata key will be matched against the corresponding + // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + // and any of the elements in the list matches the criteria. + bool list_as_any = 7; + } + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Specific configuration for the LeastRequest load balancing policy. + message LeastRequestLbConfig { + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32.gte = 2]; + } + + // Specific configuration for the :ref:`RingHash` + // load balancing policy. + message RingHashLbConfig { + // Minimum hash ring size. 
The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64.lte = 8388608]; + + reserved 2; + + // The hash function used to hash hosts onto the ketama ring. + enum HashFunction { + // Use `xxHash `_, this is the default hash function. + XX_HASH = 0; + // Use `MurmurHash2 `_, this is compatible with + // std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + MURMUR_HASH_2 = 1; + } + + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction hash_function = 3 [(validate.rules).enum.defined_only = true]; + + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. + google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64.lte = 8388608]; + } + + // Specific configuration for the + // :ref:`Original Destination ` + // load balancing policy. + message OriginalDstLbConfig { + // When true, :ref:`x-envoy-original-dst-host + // ` can be used to override destination + // address. + // + // .. attention:: + // + // This header isn't sanitized by default, so enabling this feature allows HTTP clients to + // route traffic to arbitrary hosts and/or ports, which may have serious security + // consequences. + bool use_http_header = 1; + } + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // have additional configuration options. 
+ // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } + + // Common configuration for all load balancer implementations. + message CommonLbConfig { + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. + envoy.type.Percent healthy_panic_threshold = 1; + // Configuration for :ref:`zone aware routing + // `. + message ZoneAwareLbConfig { + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + envoy.type.Percent routing_enabled = 1; + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. 
+ google.protobuf.UInt64Value min_cluster_size = 2; + } + // Configuration for :ref:`locality weighted load balancing + // ` + message LocalityWeightedLbConfig { + } + oneof locality_config_specifier { + ZoneAwareLbConfig zone_aware_lb_config = 2; + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } + // If set, all health check/weight/metadata updates that happen within this duration will be + // merged and delivered in one shot when the duration expires. The start of the duration is when + // the first update happens. This is useful for big clusters, with potentially noisy deploys + // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + // cluster). Please always keep in mind that the use of sandbox technologies may change this + // behavior. + // + // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + // window to 0. + // + // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + // because merging those updates isn't currently safe. See + // https://github.com/envoyproxy/envoy/pull/3941. + google.protobuf.Duration update_merge_window = 4; + + // If set to true, Envoy will not consider new hosts when computing load balancing weights until + // they have been health checked for the first time. This will have no effect unless + // active health checking is also configured. + // + // Ignoring a host means that for any load balancing calculations that adjust weights based + // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and + // panic mode) Envoy will exclude these hosts in the denominator. 
+ // + // For example, with hosts in two priorities P0 and P1, where P0 looks like + // {healthy, unhealthy (new), unhealthy (new)} + // and where P1 looks like + // {healthy, healthy} + // all traffic will still hit P0, as 1 / (3 - 2) = 1. + // + // Enabling this will allow scaling up the number of hosts for a given cluster without entering + // panic mode or triggering priority spillover, assuming the hosts pass the first health check. + // + // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not + // contribute to the calculation when deciding whether panic mode is enabled or not. + bool ignore_new_hosts_until_first_hc = 5; + + // If set to `true`, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. + bool close_connections_on_host_set_change = 6; + } + + // Common configuration for all load balancer implementations. + CommonLbConfig common_lb_config = 27; + + // Optional custom transport socket implementation to use for upstream connections. + core.TransportSocket transport_socket = 24; + + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as *envoy.router*. + core.Metadata metadata = 25; + + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. 
+ USE_DOWNSTREAM_PROTOCOL = 1; + } + + // Determines how Envoy selects the protocol used to speak to upstream hosts. + ClusterProtocolSelection protocol_selection = 26; + + // Optional options for upstream connections. + envoy.api.v3alpha.UpstreamConnectionOptions upstream_connection_options = 30; + + // If an upstream host becomes unhealthy (as determined by the configured health checks + // or outlier detection), immediately close all connections to the failed host. + // + // .. note:: + // + // This is currently only supported for connections created by tcp_proxy. + // + // .. note:: + // + // The current implementation of this feature closes all connections immediately when + // the unhealthy status is detected. If there are a large number of connections open + // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + // time exclusively closing these connections, and not processing any other traffic. + bool close_connections_on_host_health_failure = 31; + + // If this cluster uses EDS or STRICT_DNS to configure its hosts, immediately drain + // connections from any hosts that are removed from service discovery. + // + // This only affects behavior for hosts that are being actively health checked. + // If this flag is not set to true, Envoy will wait until the hosts fail active health + // checking before removing it from the cluster. + bool drain_connections_on_host_removal = 32; + + // An optional list of network filters that make up the filter chain for + // outgoing connections made by the cluster. Order matters as the filters are + // processed sequentially as connection events happen. + repeated cluster.Filter filters = 40; +} + +// An extensible structure containing the address Envoy should bind to when +// establishing upstream connections. +message UpstreamBindConfig { + // The address Envoy should bind to when establishing upstream connections. 
+ core.Address source_address = 1; +} + +message UpstreamConnectionOptions { + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + core.TcpKeepalive tcp_keepalive = 1; +} diff --git a/api/envoy/api/v3alpha/cluster/BUILD b/api/envoy/api/v3alpha/cluster/BUILD new file mode 100644 index 000000000000..942701221a37 --- /dev/null +++ b/api/envoy/api/v3alpha/cluster/BUILD @@ -0,0 +1,48 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "circuit_breaker", + srcs = ["circuit_breaker.proto"], + visibility = [ + "//envoy/api/v3alpha:__pkg__", + ], + deps = [ + "//envoy/api/v3alpha/core:base", + ], +) + +api_go_proto_library( + name = "circuit_breaker", + proto = ":circuit_breaker", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + ], +) + +api_proto_library_internal( + name = "outlier_detection", + srcs = ["outlier_detection.proto"], + visibility = [ + "//envoy/api/v3alpha:__pkg__", + ], +) + +api_go_proto_library( + name = "outlier_detection", + proto = ":outlier_detection", +) + +api_proto_library_internal( + name = "filter", + srcs = ["filter.proto"], + visibility = [ + "//envoy/api/v3alpha:__pkg__", + ], +) + +api_go_proto_library( + name = "filter", + proto = ":filter", +) diff --git a/api/envoy/api/v3alpha/cluster/circuit_breaker.proto b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto new file mode 100644 index 000000000000..39f4f77c5ddd --- /dev/null +++ b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.cluster; + +option java_outer_classname = "CircuitBreakerProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster"; +option go_package = "cluster"; +option csharp_namespace = "Envoy.Api.V2.ClusterNS"; +option ruby_package = "Envoy.Api.V2.ClusterNS"; + +import 
"envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/wrappers.proto"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Circuit breakers] + +// :ref:`Circuit breaking` settings can be +// specified individually for each defined priority. +message CircuitBreakers { + + // A Thresholds defines CircuitBreaker settings for a + // :ref:`RoutingPriority`. + message Thresholds { + // The :ref:`RoutingPriority` + // the specified CircuitBreaker settings apply to. + // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once + // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] + core.RoutingPriority priority = 1; + + // The maximum number of connections that Envoy will make to the upstream + // cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_connections = 2; + + // The maximum number of pending requests that Envoy will allow to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_pending_requests = 3; + + // The maximum number of parallel requests that Envoy will make to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_requests = 4; + + // The maximum number of parallel retries that Envoy will allow to the + // upstream cluster. If not specified, the default is 3. + google.protobuf.UInt32Value max_retries = 5; + + // If track_remaining is true, then stats will be published that expose + // the number of resources remaining until the circuit breakers open. If + // not specified, the default is false. + bool track_remaining = 6; + + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. 
+ google.protobuf.UInt32Value max_connection_pools = 7; + } + + // If multiple :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no Thresholds is defined for a given + // :ref:`RoutingPriority`, the default values + // are used. + repeated Thresholds thresholds = 1; +} diff --git a/api/envoy/api/v3alpha/cluster/filter.proto b/api/envoy/api/v3alpha/cluster/filter.proto new file mode 100644 index 000000000000..ab1a3d13747b --- /dev/null +++ b/api/envoy/api/v3alpha/cluster/filter.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.cluster; + +option java_outer_classname = "FilterProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster"; +option csharp_namespace = "Envoy.Api.V2.ClusterNS"; +option ruby_package = "Envoy.Api.V2.ClusterNS"; + +import "google/protobuf/any.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Upstream filters] +// +// Upstream filters apply to the connections to the upstream cluster hosts. +message Filter { + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ google.protobuf.Any typed_config = 2; +} diff --git a/api/envoy/api/v3alpha/cluster/outlier_detection.proto b/api/envoy/api/v3alpha/cluster/outlier_detection.proto new file mode 100644 index 000000000000..6600b566d1bd --- /dev/null +++ b/api/envoy/api/v3alpha/cluster/outlier_detection.proto @@ -0,0 +1,117 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.cluster; + +option java_outer_classname = "OutlierDetectionProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster"; +option csharp_namespace = "Envoy.Api.V2.ClusterNS"; +option ruby_package = "Envoy.Api.V2.ClusterNS"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Outlier detection] + +// See the :ref:`architecture overview ` for +// more information on outlier detection. +message OutlierDetection { + // The number of consecutive 5xx responses or local origin errors that are mapped + // to 5xx error codes before a consecutive 5xx ejection + // occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_5xx = 1; + + // The time interval between ejection analysis sweeps. This can result in + // both new ejections as well as hosts being returned to service. Defaults + // to 10000ms or 10s. + google.protobuf.Duration interval = 2 [(validate.rules).duration.gt = {}]; + + // The base time that a host is ejected for. The real time is equal to the + // base time multiplied by the number of times the host has been ejected. + // Defaults to 30000ms or 30s. + google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration.gt = {}]; + + // The maximum % of an upstream cluster that can be ejected due to outlier + // detection. Defaults to 10% but will eject at least one host regardless of the value. 
+ google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32.lte = 100]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive 5xx. This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32.lte = 100]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32.lte = 100]; + + // The number of hosts in a cluster that must have enough request volume to + // detect success rate outliers. If the number of hosts is less than this + // setting, outlier detection via success rate statistics is not performed + // for any host in the cluster. Defaults to 5. + google.protobuf.UInt32Value success_rate_minimum_hosts = 7; + + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this host + // in success rate based outlier detection. If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that host. Defaults to 100. + google.protobuf.UInt32Value success_rate_request_volume = 8; + + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. 
+ google.protobuf.UInt32Value success_rate_stdev_factor = 9; + + // The number of consecutive gateway failures (502, 503, 504 status codes) + // before a consecutive gateway failure ejection occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_gateway_failure = 10; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive gateway failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 + [(validate.rules).uint32.lte = 100]; + + // Determines whether to distinguish local origin failures from external errors. If set to true + // the following configuration parameters are taken into account: + // :ref:`consecutive_local_origin_failure`, + // :ref:`enforcing_consecutive_local_origin_failure` + // and + // :ref:`enforcing_local_origin_success_rate`. + // Defaults to false. + bool split_external_local_origin_errors = 12; + + // The number of consecutive locally originated failures before ejection + // occurs. Defaults to 5. Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value consecutive_local_origin_failure = 13; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive locally originated failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 + [(validate.rules).uint32.lte = 100]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics for locally originated errors. + // This setting can be used to disable ejection or to ramp it up slowly. 
Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 + [(validate.rules).uint32.lte = 100]; +} diff --git a/api/envoy/api/v3alpha/core/BUILD b/api/envoy/api/v3alpha/core/BUILD new file mode 100644 index 000000000000..cfc6bd83ca78 --- /dev/null +++ b/api/envoy/api/v3alpha/core/BUILD @@ -0,0 +1,136 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +package_group( + name = "friends", + includes = [ + "//envoy/api/v3alpha:friends", + ], + packages = [ + "//envoy/api/v3alpha/auth", + "//envoy/api/v3alpha/cluster", + "//envoy/api/v3alpha/endpoint", + "//envoy/api/v3alpha/listener", + "//envoy/api/v3alpha/route", + ], +) + +api_proto_library_internal( + name = "address", + srcs = ["address.proto"], + visibility = [ + ":friends", + ], + deps = [":base"], +) + +api_go_proto_library( + name = "address", + proto = ":address", + deps = [":base_go_proto"], +) + +api_proto_library_internal( + name = "base", + srcs = ["base.proto"], + visibility = [ + ":friends", + ], + deps = [ + ":http_uri", + "//envoy/type:percent", + ], +) + +api_go_proto_library( + name = "base", + proto = ":base", + deps = [ + ":http_uri_go_proto", + "//envoy/type:percent_go_proto", + ], +) + +api_proto_library_internal( + name = "health_check", + srcs = ["health_check.proto"], + visibility = [ + ":friends", + ], + deps = [ + ":base", + "//envoy/type:range", + ], +) + +api_go_proto_library( + name = "health_check", + proto = ":health_check", + deps = [ + ":base_go_proto", + "//envoy/type:range_go_proto", + ], +) + +api_proto_library_internal( + name = "config_source", + srcs = ["config_source.proto"], + visibility = [ + ":friends", + ], + deps = [ + ":base", + ":grpc_service", + ], +) + +api_go_proto_library( + name = "config_source", + proto = 
":config_source", + deps = [ + ":base_go_proto", + ":grpc_service_go_proto", + ], +) + +api_go_proto_library( + name = "http_uri", + proto = ":http_uri", +) + +api_proto_library_internal( + name = "http_uri", + srcs = ["http_uri.proto"], + visibility = [ + ":friends", + ], +) + +api_proto_library_internal( + name = "grpc_service", + srcs = ["grpc_service.proto"], + visibility = [ + ":friends", + ], + deps = [":base"], +) + +api_go_proto_library( + name = "grpc_service", + proto = ":grpc_service", + deps = [":base_go_proto"], +) + +api_proto_library_internal( + name = "protocol", + srcs = ["protocol.proto"], + visibility = [ + ":friends", + ], +) + +api_go_proto_library( + name = "protocol", + proto = ":protocol", +) diff --git a/api/envoy/api/v3alpha/core/address.proto b/api/envoy/api/v3alpha/core/address.proto new file mode 100644 index 000000000000..5cec89b11d8e --- /dev/null +++ b/api/envoy/api/v3alpha/core/address.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.core; + +option java_outer_classname = "AddressProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; + +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Network addresses] + +message Pipe { + // Unix Domain Socket path. On Linux, paths starting with '@' will use the + // abstract namespace. The starting '@' is replaced by a null byte by Envoy. + // Paths starting with '@' will result in an error in environments other than + // Linux. + string path = 1 [(validate.rules).string.min_bytes = 1]; +} + +message SocketAddress { + enum Protocol { + option (gogoproto.goproto_enum_prefix) = false; + TCP = 0; + // [#not-implemented-hide:] + UDP = 1; + } + Protocol protocol = 1 [(validate.rules).enum.defined_only = true]; + // The address for this socket. 
:ref:`Listeners ` will bind + // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` + // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: + // It is possible to distinguish a Listener address via the prefix/suffix matching + // in :ref:`FilterChainMatch `.] When used + // within an upstream :ref:`BindConfig `, the address + // controls the source address of outbound connections. For :ref:`clusters + // `, the cluster type determines whether the + // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS + // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized + // via :ref:`resolver_name `. + string address = 2 [(validate.rules).string.min_bytes = 1]; + oneof port_specifier { + option (validate.required) = true; + uint32 port_value = 3 [(validate.rules).uint32.lte = 65535]; + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + string named_port = 4; + } + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + string resolver_name = 5; + + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. + bool ipv4_compat = 6; +} + +message TcpKeepalive { + // Maximum number of keepalive probes to send without response before deciding + // the connection is dead. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 9.) 
+ google.protobuf.UInt32Value keepalive_probes = 1; + // The number of seconds a connection needs to be idle before keep-alive probes + // start being sent. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 7200s (ie 2 hours.) + google.protobuf.UInt32Value keepalive_time = 2; + // The number of seconds between keep-alive probes. Default is to use the OS + // level configuration (unless overridden, Linux defaults to 75s.) + google.protobuf.UInt32Value keepalive_interval = 3; +} + +message BindConfig { + // The address to bind to when creating a socket. + SocketAddress source_address = 1 [(validate.rules).message.required = true]; + + // Whether to set the *IP_FREEBIND* option when creating the socket. When this + // flag is set to true, allows the :ref:`source_address + // ` to be an IP address + // that is not configured on the system running Envoy. When this flag is set + // to false, the option *IP_FREEBIND* is disabled on the socket. When this + // flag is not set (default), the socket is not modified, i.e. the option is + // neither enabled nor disabled. + google.protobuf.BoolValue freebind = 2; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated SocketOption socket_options = 3; +} + +// Addresses specify either a logical or physical address and port, which are +// used to tell Envoy where to bind/listen, connect to upstream and find +// management servers. +message Address { + oneof address { + option (validate.required) = true; + + SocketAddress socket_address = 1; + Pipe pipe = 2; + } +} + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. +message CidrRange { + // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. + string address_prefix = 1 [(validate.rules).string.min_bytes = 1]; + // Length of prefix, e.g. 0, 32. 
+ google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32.lte = 128]; +} diff --git a/api/envoy/api/v3alpha/core/base.proto b/api/envoy/api/v3alpha/core/base.proto new file mode 100644 index 000000000000..0661d99ec546 --- /dev/null +++ b/api/envoy/api/v3alpha/core/base.proto @@ -0,0 +1,292 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.core; + +option java_outer_classname = "BaseProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; +option go_package = "core"; + +import "envoy/api/v3alpha/core/http_uri.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +import "envoy/type/percent.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: Common types] + +// Identifies location of where either Envoy runs or where upstream hosts run. +message Locality { + // Region this :ref:`zone ` belongs to. + string region = 1; + + // Defines the local service zone where Envoy is running. Though optional, it + // should be set if discovery service routing is used and the discovery + // service exposes :ref:`zone data `, + // either in this message or via :option:`--service-zone`. The meaning of zone + // is context dependent, e.g. `Availability Zone (AZ) + // `_ + // on AWS, `Zone `_ on + // GCP, etc. + string zone = 2; + + // When used for locality of upstream hosts, this field further splits zone + // into smaller chunks of sub-zones so they can be load balanced + // independently. + string sub_zone = 3; +} + +// Identifies a specific Envoy instance. The node identifier is presented to the +// management server, which may use this identifier to distinguish per Envoy +// configuration for serving. +message Node { + // An opaque node identifier for the Envoy node. 
This also provides the local + // service node name. It should be set if any of the following features are + // used: :ref:`statsd `, :ref:`CDS + // `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-node`. + string id = 1; + + // Defines the local service cluster name where Envoy is running. Though + // optional, it should be set if any of the following features are used: + // :ref:`statsd `, :ref:`health check cluster + // verification `, + // :ref:`runtime override directory `, + // :ref:`user agent addition + // `, + // :ref:`HTTP global rate limiting `, + // :ref:`CDS `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-cluster`. + string cluster = 2; + + // Opaque metadata extending the node identifier. Envoy will pass this + // directly to the management server. + google.protobuf.Struct metadata = 3; + + // Locality specifying where the Envoy instance is running. + Locality locality = 4; + + // This is motivated by informing a management server during canary which + // version of Envoy is being tested in a heterogeneous fleet. This will be set + // by Envoy in management server RPCs. + string build_version = 5; +} + +// Metadata provides additional inputs to filters based on matched listeners, +// filter chains, routes and endpoints. It is structured as a map, usually from +// filter name (in reverse DNS format) to metadata specific to the filter. Metadata +// key-values for a filter are merged as connection and request handling occurs, +// with later values for the same key overriding earlier values. +// +// An example use of metadata is providing additional values to +// http_connection_manager in the envoy.http_connection_manager.access_log +// namespace. +// +// Another example use of metadata is to per service config info in cluster metadata, which may get +// consumed by multiple filters. +// +// For load balancing, Metadata provides a means to subset cluster endpoints. 
+// Endpoints have a Metadata object associated and routes contain a Metadata +// object to match against. There are some well defined metadata used today for +// this purpose: +// +// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an +// endpoint and is also used during header processing +// (x-envoy-upstream-canary) and for stats purposes. +message Metadata { + // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + // namespace is reserved for Envoy's built-in filters. + map filter_metadata = 1; +} + +// Runtime derived uint32 with a default when not specified. +message RuntimeUInt32 { + // Default value if runtime value is not available. + uint32 default_value = 2; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 3 [(validate.rules).string.min_bytes = 1]; +} + +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +enum RoutingPriority { + DEFAULT = 0; + HIGH = 1; +} + +// HTTP request method. +enum RequestMethod { + option (gogoproto.goproto_enum_prefix) = false; + METHOD_UNSPECIFIED = 0; + GET = 1; + HEAD = 2; + POST = 3; + PUT = 4; + DELETE = 5; + CONNECT = 6; + OPTIONS = 7; + TRACE = 8; + PATCH = 9; +} + +// Header name/value pair. +message HeaderValue { + // Header name. + string key = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 16384}]; + + // Header value. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown header values are replaced with the empty string instead of `-`. 
+ string value = 2 [(validate.rules).string.max_bytes = 16384]; +} + +// Header name/value pair plus option to control append behavior. +message HeaderValueOption { + // Header name/value pair that this option applies to. + HeaderValue header = 1 [(validate.rules).message.required = true]; + + // Should the value be appended? If true (default), the value is appended to + // existing values. + google.protobuf.BoolValue append = 2; +} + +// Wrapper for a set of headers. +message HeaderMap { + repeated HeaderValue headers = 1; +} + +// Data source consisting of either a file or an inline value. +message DataSource { + oneof specifier { + option (validate.required) = true; + + // Local filesystem data source. + string filename = 1 [(validate.rules).string.min_bytes = 1]; + + // Bytes inlined in the configuration. + bytes inline_bytes = 2 [(validate.rules).bytes.min_len = 1]; + + // String inlined in the configuration. + string inline_string = 3 [(validate.rules).string.min_bytes = 1]; + } +} + +// The message specifies how to fetch data from remote and how to verify it. +message RemoteDataSource { + // The HTTP URI to fetch the remote data. + HttpUri http_uri = 1 [(validate.rules).message.required = true]; + + // SHA256 string for verifying data. + string sha256 = 2 [(validate.rules).string.min_bytes = 1]; +} + +// Async data source which support async data fetch. +message AsyncDataSource { + oneof specifier { + option (validate.required) = true; + + // Local async data source. + DataSource local = 1; + + // Remote async data source. + RemoteDataSource remote = 2; + } +} + +// Configuration for transport socket in :ref:`listeners ` and +// :ref:`clusters `. If the configuration is +// empty, a default transport socket implementation and configuration will be +// chosen based on the platform and existence of tls_context. +message TransportSocket { + // The name of the transport socket to instantiate. The name must match a supported transport + // socket implementation. 
+ string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Implementation specific configuration which depends on the implementation being instantiated. + // See the supported transport socket implementations for further documentation. + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } +} + +// Generic socket option message. This would be used to set socket options that +// might not exist in upstream kernels or precompiled Envoy binaries. +message SocketOption { + // An optional name to give this socket option for debugging, etc. + // Uniqueness is not required and no special meaning is assumed. + string description = 1; + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + int64 level = 2; + // The numeric name as passed to setsockopt + int64 name = 3; + oneof value { + option (validate.required) = true; + + // Because many sockopts take an int value. + int64 int_value = 4; + // Otherwise it's a byte buffer. + bytes buf_value = 5; + } + enum SocketState { + option (gogoproto.goproto_enum_prefix) = false; + // Socket options are applied after socket creation but before binding the socket to a port + STATE_PREBIND = 0; + // Socket options are applied after binding the socket to a port but before calling listen() + STATE_BOUND = 1; + // Socket options are applied after calling listen() + STATE_LISTENING = 2; + } + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. + SocketState state = 6 [(validate.rules).enum.defined_only = true]; +} + +// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not +// specified via a runtime key. +message RuntimeFractionalPercent { + // Default value if the runtime value's for the numerator/denominator keys are not available. 
+ envoy.type.FractionalPercent default_value = 1 [(validate.rules).message.required = true]; + + // Runtime key for a YAML representation of a FractionalPercent. + string runtime_key = 2; +} + +// Identifies a specific ControlPlane instance that Envoy is connected to. +message ControlPlane { + // An opaque control plane identifier that uniquely identifies an instance + // of control plane. This can be used to identify which control plane instance, + // the Envoy is connected to. + string identifier = 1; +} + +// Identifies the direction of the traffic relative to the local Envoy. +enum TrafficDirection { + // Default option is unspecified. + UNSPECIFIED = 0; + + // The transport is used for incoming traffic. + INBOUND = 1; + + // The transport is used for outgoing traffic. + OUTBOUND = 2; +} diff --git a/api/envoy/api/v3alpha/core/config_source.proto b/api/envoy/api/v3alpha/core/config_source.proto new file mode 100644 index 000000000000..58ef7daeb0a1 --- /dev/null +++ b/api/envoy/api/v3alpha/core/config_source.proto @@ -0,0 +1,126 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.core; + +option java_outer_classname = "ConfigSourceProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; + +import "envoy/api/v3alpha/core/grpc_service.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Configuration sources] + +// API configuration source. This identifies the API type and cluster that Envoy +// will use to fetch an xDS API. +message ApiConfigSource { + // APIs may be fetched via either REST or gRPC. + enum ApiType { + // Ideally this would be 'reserved 0' but one can't reserve the default + // value. Instead we throw an exception if this is ever used. + UNSUPPORTED_REST_LEGACY = 0 [deprecated = true]; + // REST-JSON v2 API. 
The `canonical JSON encoding + // `_ for + // the v2 protos is used. + REST = 1; + // gRPC v2 API. + GRPC = 2; + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + // with every update, the xDS server only sends what has changed since the last update. + // + // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. + // Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate. + DELTA_GRPC = 3; + } + ApiType api_type = 1 [(validate.rules).enum.defined_only = true]; + // Cluster names should be used only with REST. If > 1 + // cluster is defined, clusters will be cycled through if any kind of failure + // occurs. + // + // .. note:: + // + // The cluster with name ``cluster_name`` must be statically defined and its + // type must not be ``EDS``. + repeated string cluster_names = 2; + + // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + // services will be cycled through if any kind of failure occurs. + repeated GrpcService grpc_services = 4; + + // For REST APIs, the delay between successive polls. + google.protobuf.Duration refresh_delay = 3 [(gogoproto.stdduration) = true]; + + // For REST APIs, the request timeout. If not set, a default value of 1s will be used. + google.protobuf.Duration request_timeout = 5 + [(validate.rules).duration.gt.seconds = 0, (gogoproto.stdduration) = true]; + + // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be + // rate limited. + RateLimitSettings rate_limit_settings = 6; + + // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + bool set_node_on_first_message_only = 7; +} + +// Aggregated Discovery Service (ADS) options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that ADS is to be used. 
+message AggregatedConfigSource { +} + +// Rate Limit settings to be applied for discovery requests made by Envoy. +message RateLimitSettings { + // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a + // default value of 100 will be used. + google.protobuf.UInt32Value max_tokens = 1; + + // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens + // per second will be used. + google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double.gt = 0.0]; +} + +// Configuration for :ref:`listeners `, :ref:`clusters +// `, :ref:`routes +// `, :ref:`endpoints +// ` etc. may either be sourced from the +// filesystem or from an xDS API source. Filesystem configs are watched with +// inotify for updates. +message ConfigSource { + oneof config_source_specifier { + option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // Envoy will only watch the file path for *moves.* This is because in general only moves + // are atomic. The same method of swapping files as is demonstrated in the + // :ref:`runtime documentation ` can be used here also. + string path = 1; + // API configuration source. + ApiConfigSource api_config_source = 2; + // When set, ADS will be used to fetch resources. The ADS API configuration + // source in the bootstrap configuration is used. + AggregatedConfigSource ads = 3; + } + + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 
0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + google.protobuf.Duration initial_fetch_timeout = 4; +} diff --git a/api/envoy/api/v3alpha/core/grpc_service.proto b/api/envoy/api/v3alpha/core/grpc_service.proto new file mode 100644 index 000000000000..931a4cb8926f --- /dev/null +++ b/api/envoy/api/v3alpha/core/grpc_service.proto @@ -0,0 +1,173 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.core; + +option java_outer_classname = "GrpcServiceProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; + +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/empty.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: gRPC services] + +// gRPC service configuration. This is used by :ref:`ApiConfigSource +// ` and filter configurations. +message GrpcService { + message EnvoyGrpc { + // The name of the upstream gRPC cluster. SSL credentials will be supplied + // in the :ref:`Cluster ` :ref:`tls_context + // `. + string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + } + + // [#proto-status: draft] + message GoogleGrpc { + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + string target_uri = 1 [(validate.rules).string.min_bytes = 1]; + + // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. + message SslCredentials { + // PEM encoded server root certificates. + DataSource root_certs = 1; + + // PEM encoded client private key. + DataSource private_key = 2; + + // PEM encoded client certificate chain. + DataSource cert_chain = 3; + } + + // Local channel credentials. 
Only UDS is supported for now. + // See https://github.com/grpc/grpc/pull/15909. + message GoogleLocalCredentials { + } + + // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call + // credential types. + message ChannelCredentials { + oneof credential_specifier { + option (validate.required) = true; + SslCredentials ssl_credentials = 1; + + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_default = 2; + + GoogleLocalCredentials local_credentials = 3; + } + } + + ChannelCredentials channel_credentials = 2; + + message CallCredentials { + message ServiceAccountJWTAccessCredentials { + string json_key = 1; + uint64 token_lifetime_seconds = 2; + } + + message GoogleIAMCredentials { + string authorization_token = 1; + string authority_selector = 2; + } + + message MetadataCredentialsFromPlugin { + string name = 1; + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + oneof credential_specifier { + option (validate.required) = true; + + // Access token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. + string access_token = 1; + + // Google Compute Engine credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_compute_engine = 2; + + // Google refresh token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. + string google_refresh_token = 3; + + // Service Account JWT Access credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. + ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; + + // Google IAM credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. + GoogleIAMCredentials google_iam = 5; + + // Custom authenticator credentials. 
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. + // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. + MetadataCredentialsFromPlugin from_plugin = 6; + } + } + + // A set of call credentials that can be composed with `channel credentials + // `_. + repeated CallCredentials call_credentials = 3; + + // The human readable prefix to use when emitting statistics for the gRPC + // service. + // + // .. csv-table:: + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // streams_total, Counter, Total number of streams opened + // streams_closed_, Counter, Total streams closed with + string stat_prefix = 4 [(validate.rules).string.min_bytes = 1]; + + // The name of the Google gRPC credentials factory to use. This must have been registered with + // Envoy. If this is empty, a default credentials factory will be used that sets up channel + // credentials based on other configuration parameters. + string credentials_factory_name = 5; + + // Additional configuration for site-specific customizations of the Google + // gRPC library. + google.protobuf.Struct config = 6; + } + + oneof target_specifier { + option (validate.required) = true; + + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + EnvoyGrpc envoy_grpc = 1; + + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + GoogleGrpc google_grpc = 2; + } + + // The timeout for the gRPC request. This is the timeout for a specific + // request. + google.protobuf.Duration timeout = 3; + + // Field 4 reserved due to moving credentials inside the GoogleGrpc message + reserved 4; + + // Additional metadata to include in streams initiated to the GrpcService. + // This can be used for scenarios in which additional ad hoc authorization + // headers (e.g. 
`x-foo-bar: baz-key`) are to be injected. + repeated HeaderValue initial_metadata = 5; +} diff --git a/api/envoy/api/v3alpha/core/health_check.proto b/api/envoy/api/v3alpha/core/health_check.proto new file mode 100644 index 000000000000..1a9508fb6490 --- /dev/null +++ b/api/envoy/api/v3alpha/core/health_check.proto @@ -0,0 +1,271 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.core; + +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/type/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Health check] +// * Health checking :ref:`architecture overview `. +// * If health checking is configured for a cluster, additional statistics are emitted. They are +// documented :ref:`here `. + +message HealthCheck { + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [ + (validate.rules).duration = { + required: true, + gt: {seconds: 0} + }, + (gogoproto.stdduration) = true + ]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [ + (validate.rules).duration = { + required: true, + gt: {seconds: 0} + }, + (gogoproto.stdduration) = true + ]; + + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + + // An optional jitter amount in milliseconds. 
If specified, during every + // interval Envoy will add interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add interval_ms * + // interval_jitter_percent / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + uint32 interval_jitter_percent = 18; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for *http* health checking if a host responds with 503 + // this threshold is ignored and the host is considered unhealthy immediately. + google.protobuf.UInt32Value unhealthy_threshold = 4; + + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. + google.protobuf.UInt32Value healthy_threshold = 5; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + + // Describes the encoding of the payload bytes in the payload. + message Payload { + oneof payload { + option (validate.required) = true; + + // Hex encoded payload. E.g., "000000FF". + string text = 1 [(validate.rules).string.min_bytes = 1]; + + // [#not-implemented-hide:] Binary payload. + bytes binary = 2; + } + } + + // [#comment:next free field: 10] + message HttpHealthCheck { + // The value of the host header in the HTTP health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. + string host = 1; + + // Specifies the HTTP path that will be requested during health checking. For example + // */healthcheck*. 
+ string path = 2 [(validate.rules).string.min_bytes = 1]; + + // [#not-implemented-hide:] HTTP specific payload. + Payload send = 3; + + // [#not-implemented-hide:] HTTP specific response. + Payload receive = 4; + + // An optional service name parameter which is used to validate the identity of + // the health checked cluster. See the :ref:`architecture overview + // ` for more information. + string service_name = 5; + + // Specifies a list of HTTP headers that should be added to each request that is sent to the + // health checked cluster. For more information, including details on header value syntax, see + // the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each request that is sent to the + // health checked cluster. + repeated string request_headers_to_remove = 8; + + // If set, health checks will be made using http/2. + bool use_http2 = 7; + + // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + // semantics of :ref:`Int64Range `. + repeated envoy.type.Int64Range expected_statuses = 9; + } + + message TcpHealthCheck { + // Empty payloads imply a connect-only health check. + Payload send = 1; + + // When checking the response, “fuzzy” matching is performed such that each + // binary block must be found, and in the order specified, but not + // necessarily contiguous. + repeated Payload receive = 2; + } + + message RedisHealthCheck { + // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. 
This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. + string key = 1; + } + + // `grpc.health.v1.Health + // `_-based + // healthcheck. See `gRPC doc `_ + // for details. + message GrpcHealthCheck { + // An optional service name parameter which will be sent to gRPC service in + // `grpc.health.v1.HealthCheckRequest + // `_. + // message. See `gRPC health-checking overview + // `_ for more information. + string service_name = 1; + + // The value of the :authority header in the gRPC health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. + string authority = 2; + } + + // Custom health check. + message CustomHealthCheck { + // The registered name of the custom health checker. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A custom health checker specific configuration which depends on the custom health checker + // being instantiated. See :api:`envoy/config/health_checker` for reference. + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + oneof health_checker { + option (validate.required) = true; + + // HTTP health check. + HttpHealthCheck http_health_check = 8; + + // TCP health check. + TcpHealthCheck tcp_health_check = 9; + + // gRPC health check. + GrpcHealthCheck grpc_health_check = 11; + + // Custom health check. + CustomHealthCheck custom_health_check = 13; + } + + reserved 10; // redis_health_check is deprecated by :ref:`custom_health_check + // ` + reserved "redis_health_check"; + + // The "no traffic interval" is a special health check interval that is used when a cluster has + // never had traffic routed to it. This lower interval allows cluster information to be kept up to + // date, without sending a potentially large amount of active health checking traffic for no + // reason. 
Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. Note that this interval takes precedence over + // any other. + // + // The default value for "no traffic interval" is 60 seconds. + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration.gt = {}]; + + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as + // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + // standard health check interval that is defined. + // + // The default value for "unhealthy interval" is the same as "interval". + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration.gt = {}]; + + // The "unhealthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as unhealthy. For subsequent health checks + // Envoy will shift back to using either "unhealthy interval" if present or the standard health + // check interval that is defined. + // + // The default value for "unhealthy edge interval" is the same as "unhealthy interval". + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration.gt = {}]; + + // The "healthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as healthy. For subsequent health checks + // Envoy will shift back to using the standard health check interval that is defined. + // + // The default value for "healthy edge interval" is the same as the default interval. + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration.gt = {}]; + + // Specifies the path to the :ref:`health check event log `. + // If empty, no event log will be written. + string event_log_path = 17; + + // If set to true, health check failure events will always be logged. 
If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + bool always_log_health_check_failures = 19; +} + +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as *HEALTHY*. + UNKNOWN = 0; + + // Healthy. + HEALTHY = 1; + + // Unhealthy. + UNHEALTHY = 2; + + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as *UNHEALTHY*. + DRAINING = 3; + + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // *UNHEALTHY*. + TIMEOUT = 4; + + // Degraded. + DEGRADED = 5; +} diff --git a/api/envoy/api/v3alpha/core/http_uri.proto b/api/envoy/api/v3alpha/core/http_uri.proto new file mode 100644 index 000000000000..b47533f2066a --- /dev/null +++ b/api/envoy/api/v3alpha/core/http_uri.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.core; + +option java_outer_classname = "HttpUriProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; + +import "google/protobuf/duration.proto"; +import "gogoproto/gogo.proto"; + +import "validate/validate.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: HTTP Service URI ] + +// Envoy external URI descriptor +message HttpUri { + // The HTTP server URI. It should be a full FQDN with protocol, host and path. + // + // Example: + // + // .. code-block:: yaml + // + // uri: https://www.googleapis.com/oauth2/v1/certs + // + string uri = 1 [(validate.rules).string.min_bytes = 1]; + + // Specify how `uri` is to be fetched. Today, this requires an explicit + // cluster, but in the future we may support dynamic cluster creation or + // inline DNS resolution. See `issue + // `_. + oneof http_upstream_type { + option (validate.required) = true; + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. 
+ // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // + string cluster = 2 [(validate.rules).string.min_bytes = 1]; + } + + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. + google.protobuf.Duration timeout = 3 [ + (validate.rules).duration.gte = {}, + (validate.rules).duration.required = true, + (gogoproto.stdduration) = true + ]; +} diff --git a/api/envoy/api/v3alpha/core/protocol.proto b/api/envoy/api/v3alpha/core/protocol.proto new file mode 100644 index 000000000000..63d0f6920d83 --- /dev/null +++ b/api/envoy/api/v3alpha/core/protocol.proto @@ -0,0 +1,157 @@ +// [#protodoc-title: Protocol options] + +syntax = "proto3"; + +package envoy.api.v3alpha.core; + +option java_outer_classname = "ProtocolProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Protocol options] + +// [#not-implemented-hide:] +message TcpProtocolOptions { +} + +message HttpProtocolOptions { + // The idle timeout for upstream connection pool connections. The idle timeout is defined as the + // period in which there are no active requests. If not set, there is no idle timeout. When the + // idle timeout is reached the connection will be closed. Note that request based timeouts mean + // that HTTP/2 PINGs will not keep the connection alive. + google.protobuf.Duration idle_timeout = 1 [(gogoproto.stdduration) = true]; +} + +message Http1ProtocolOptions { + // Handle HTTP requests with absolute URLs in the requests. These requests + // are generally sent by clients to forward/explicit proxies. This allows clients to configure + // envoy as their HTTP proxy. 
In Unix, for example, this is typically done by setting the + // *http_proxy* environment variable. + google.protobuf.BoolValue allow_absolute_url = 1; + + // Handle incoming HTTP/1.0 and HTTP 0.9 requests. + // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + // style connect logic, dechunking, and handling lack of client host iff + // *default_host_for_http_10* is configured. + bool accept_http_10 = 2; + + // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as + // Envoy does not otherwise support HTTP/1.0 without a Host header. + // This is a no-op if *accept_http_10* is not true. + string default_host_for_http_10 = 3; +} + +// [#comment:next free field: 13] +message Http2ProtocolOptions { + // `Maximum table size `_ + // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values + // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header + // compression. + google.protobuf.UInt32Value hpack_table_size = 1; + + // `Maximum concurrent streams `_ + // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) + // and defaults to 2147483647. + google.protobuf.UInt32Value max_concurrent_streams = 2 + [(validate.rules).uint32 = {gte: 1, lte: 2147483647}]; + + // `Initial stream-level flow-control window + // `_ size. Valid values range from 65535 + // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 + // (256 * 1024 * 1024). + // + // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default + // window size now, so it's also the minimum. + + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the codec buffers. 
+ google.protobuf.UInt32Value initial_stream_window_size = 3 + [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + + // Similar to *initial_stream_window_size*, but for connection-level flow-control + // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. + google.protobuf.UInt32Value initial_connection_window_size = 4 + [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + + // Allows proxying Websocket and other upgrades over H2 connect. + bool allow_connect = 5; + + // [#not-implemented-hide:] Hiding until envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows metadata. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more + // information. + bool allow_metadata = 6; + + // Limit the number of pending outbound downstream frames of all types (frames that are waiting to + // be written into the socket). Exceeding this limit triggers flood mitigation and connection is + // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due + // to flood mitigation. The default limit is 10000. + // [#comment:TODO: implement same limits for upstream outbound frames as well.] + google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, + // preventing high memory utilization when receiving continuous stream of these frames. Exceeding + // this limit triggers flood mitigation and connection is terminated. The + // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood + // mitigation. The default limit is 1000. + // [#comment:TODO: implement same limits for upstream outbound frames as well.] 
+ google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an + // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but + // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` + // stat tracks the number of connections terminated due to flood mitigation. + // Setting this to 0 will terminate connection upon receiving first frame with an empty payload + // and no end stream flag. The default limit is 1. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; + + // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number + // of PRIORITY frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // max_inbound_priority_frames_per_stream * (1 + inbound_streams) + // + // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 100. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; + + // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number + // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // 1 + 2 * (inbound_streams + + // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) + // + // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 10. 
+ // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, + // but more complex implementations that try to estimate available bandwidth require at least 2. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 + [(validate.rules).uint32 = {gte: 1}]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // See [RFC7540, sec. 8.1](https://tools.ietf.org/html/rfc7540#section-8.1) for details. + bool stream_error_on_invalid_http_messaging = 12; +} + +// [#not-implemented-hide:] +message GrpcProtocolOptions { + Http2ProtocolOptions http2_protocol_options = 1; +} diff --git a/api/envoy/api/v3alpha/discovery.proto b/api/envoy/api/v3alpha/discovery.proto new file mode 100644 index 000000000000..87433f0dca27 --- /dev/null +++ b/api/envoy/api/v3alpha/discovery.proto @@ -0,0 +1,230 @@ +syntax = "proto3"; + +package envoy.api.v3alpha; + +option java_outer_classname = "DiscoveryProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/any.proto"; +import "google/rpc/status.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: Common discovery API components] + +// A DiscoveryRequest requests a set of versioned resources of the same type for +// a given Envoy node on some API. 
+message DiscoveryRequest { + // The version_info provided in the request messages will be the version_info + // received with the most recent successfully processed response or empty on + // the first request. It is expected that no new request is sent after a + // response is received until the Envoy instance is ready to ACK/NACK the new + // configuration. ACK/NACK takes place by returning the new API config version + // as applied or the previous API config version respectively. Each type_url + // (see below) has an independent version associated with it. + string version_info = 1; + + // The node making the request. + core.Node node = 2; + + // List of resources to subscribe to, e.g. list of cluster names or a route + // configuration name. If this is empty, all resources for the API are + // returned. LDS/CDS may have empty resource_names, which will cause all + // resources for the Envoy instance to be returned. The LDS and CDS responses + // will then imply a number of resources that need to be fetched via EDS/RDS, + // which will be explicitly enumerated in resource_names. + repeated string resource_names = 3; + + // Type of the resource that is being requested, e.g. + // "type.googleapis.com/envoy.api.v3alpha.ClusterLoadAssignment". This is implicit + // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is + // required for ADS. + string type_url = 4; + + // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above + // discussion on version_info and the DiscoveryResponse nonce comment. This + // may be empty if no nonce is available, e.g. at startup or for non-stream + // xDS implementations. + string response_nonce = 5; + + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The *message* field in *error_details* provides the Envoy + // internal exception related to the failure. 
It is only intended for consumption during manual + // debugging, the string provided is not guaranteed to be stable across Envoy versions. + google.rpc.Status error_detail = 6; +} + +message DiscoveryResponse { + // The version of the response data. + string version_info = 1; + + // The response resources. These resources are typed and depend on the API being called. + repeated google.protobuf.Any resources = 2; + + // [#not-implemented-hide:] + // Canary is used to support two Envoy command line flags: + // + // * --terminate-on-canary-transition-failure. When set, Envoy is able to + // terminate if it detects that configuration is stuck at canary. Consider + // this example sequence of updates: + // - Management server applies a canary config successfully. + // - Management server rolls back to a production config. + // - Envoy rejects the new production config. + // Since there is no sensible way to continue receiving configuration + // updates, Envoy will then terminate and apply production config from a + // clean slate. + // * --dry-run-canary. When set, a canary response will never be applied, only + // validated via a dry run. + bool canary = 3; + + // Type URL for resources. Identifies the xDS API when muxing over ADS. + // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). + string type_url = 4; + + // For gRPC based subscriptions, the nonce provides a way to explicitly ack a + // specific DiscoveryResponse in a following DiscoveryRequest. Additional + // messages may have been sent by Envoy to the management server for the + // previous version on the stream prior to this DiscoveryResponse, that were + // unprocessed at response send time. The nonce allows the management server + // to ignore any further DiscoveryRequests for the previous version until a + // DiscoveryRequest bearing the nonce. The nonce is optional and is not + // required for non-stream based xDS implementations. 
+ string nonce = 5; + + // [#not-implemented-hide:] + // The control plane instance that sent the response. + core.ControlPlane control_plane = 6; +} + +// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC +// endpoint for Delta xDS. +// +// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full +// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a +// diff to the state of a xDS client. +// In Delta XDS there are per-resource versions, which allow tracking state at +// the resource granularity. +// An xDS Delta session is always in the context of a gRPC bidirectional +// stream. This allows the xDS server to keep track of the state of xDS clients +// connected to it. +// +// In Delta xDS the nonce field is required and used to pair +// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. +// Optionally, a response message level system_version_info is present for +// debugging purposes only. +// +// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest +// can be either or both of: [1] informing the server of what resources the +// client has gained/lost interest in (using resource_names_subscribe and +// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from +// the server (using response_nonce, with presence of error_detail making it a NACK). +// Additionally, the first message (for a given type_url) of a reconnected gRPC stream +// has a third role: informing the server of the resources (and their versions) +// that the client already possesses, using the initial_resource_versions field. +// +// As with state-of-the-world, when multiple resource types are multiplexed (ADS), +// all requests/acknowledgments/updates are logically walled off by type_url: +// a Cluster ACK exists in a completely separate world from a prior Route NACK. 
+// In particular, initial_resource_versions being sent at the "start" of every +// gRPC stream actually entails a message for each type_url, each with its own +// initial_resource_versions. +message DeltaDiscoveryRequest { + // The node making the request. + core.Node node = 1; + + // Type of the resource that is being requested, e.g. + // "type.googleapis.com/envoy.api.v3alpha.ClusterLoadAssignment". + string type_url = 2; + + // DeltaDiscoveryRequests allow the client to add or remove individual + // resources to the set of tracked resources in the context of a stream. + // All resource names in the resource_names_subscribe list are added to the + // set of tracked resources and all resource names in the resource_names_unsubscribe + // list are removed from the set of tracked resources. + // + // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or + // resource_names_unsubscribe list simply means that no resources are to be + // added or removed to the resource list. + // *Like* state-of-the-world xDS, the server must send updates for all tracked + // resources, but can also send updates for resources the client has not subscribed to. + // + // NOTE: the server must respond with all resources listed in resource_names_subscribe, + // even if it believes the client has the most recent version of them. The reason: + // the client may have dropped them, but then regained interest before it had a chance + // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. + // + // These two fields can be set in any DeltaDiscoveryRequest, including ACKs + // and initial_resource_versions. + // + // A list of Resource names to add to the list of tracked resources. + repeated string resource_names_subscribe = 3; + + // A list of Resource names to remove from the list of tracked resources. 
+ repeated string resource_names_unsubscribe = 4; + + // Informs the server of the versions of the resources the xDS client knows of, to enable the + // client to continue the same logical xDS session even in the face of gRPC stream reconnection. + // It will not be populated: [1] in the very first stream of a session, since the client will + // not yet have any resources, [2] in any message after the first in a stream (for a given + // type_url), since the server will already be correctly tracking the client's state. + // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) + // The map's keys are names of xDS resources known to the xDS client. + // The map's values are opaque resource versions. + map initial_resource_versions = 5; + + // When the DeltaDiscoveryRequest is a ACK or NACK message in response + // to a previous DeltaDiscoveryResponse, the response_nonce must be the + // nonce in the DeltaDiscoveryResponse. + // Otherwise response_nonce must be omitted. + string response_nonce = 6; + + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The *message* field in *error_details* + // provides the Envoy internal exception related to the failure. + google.rpc.Status error_detail = 7; +} + +message DeltaDiscoveryResponse { + // The version of the response data (used for debugging). + string system_version_info = 1; + + // The response resources. These are typed resources, whose types must match + // the type_url field. + repeated Resource resources = 2; + + // field id 3 IS available! + + // Type URL for resources. Identifies the xDS API when muxing over ADS. + // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + string type_url = 4; + + // Resources names of resources that have be deleted and to be removed from the xDS Client. + // Removed resources for missing resources can be ignored. 
+ repeated string removed_resources = 6; + + // The nonce provides a way for DeltaDiscoveryRequests to uniquely + // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. + string nonce = 5; +} + +message Resource { + // The resource's name, to distinguish it from others of the same type of resource. + string name = 3; + + // [#not-implemented-hide:] + // The aliases are a list of other names that this resource can go by. + repeated string aliases = 4; + + // The resource level version. It allows xDS to track the state of individual + // resources. + string version = 1; + + // The resource being tracked. + google.protobuf.Any resource = 2; +} diff --git a/api/envoy/api/v3alpha/eds.proto b/api/envoy/api/v3alpha/eds.proto new file mode 100644 index 000000000000..7ba8592eb793 --- /dev/null +++ b/api/envoy/api/v3alpha/eds.proto @@ -0,0 +1,135 @@ +syntax = "proto3"; + +package envoy.api.v3alpha; + +option java_outer_classname = "EdsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha"; + +option java_generic_services = true; + +import "envoy/api/v3alpha/discovery.proto"; +import "envoy/api/v3alpha/endpoint/endpoint.proto"; +import "envoy/type/percent.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/wrappers.proto"; +import "google/protobuf/duration.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: EDS] +// Endpoint discovery :ref:`architecture overview ` + +service EndpointDiscoveryService { + // The resource_names field in DiscoveryRequest specifies a list of clusters + // to subscribe to updates for. 
+ rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc DeltaEndpoints(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } + + rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:endpoints" + body: "*" + }; + } +} + +// Each route from RDS will map to a single cluster or traffic split across +// clusters using weights expressed in the RDS WeightedCluster. +// +// With EDS, each cluster is treated independently from a LB perspective, with +// LB taking place between the Localities within a cluster and at a finer +// granularity between the hosts within a locality. The percentage of traffic +// for each endpoint is determined by both its load_balancing_weight, and the +// load_balancing_weight of its locality. First, a locality will be selected, +// then an endpoint within that locality will be chose based on its weight. +message ClusterLoadAssignment { + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + + // List of endpoints to load balance to. + repeated endpoint.LocalityLbEndpoints endpoints = 2; + + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + map named_endpoints = 5; + + // Load balancing policy settings. + message Policy { + reserved 1; + + message DropOverload { + // Identifier for the policy specifying the drop. + string category = 1 [(validate.rules).string.min_bytes = 1]; + + // Percentage of traffic that should be dropped for the category. + envoy.type.FractionalPercent drop_percentage = 2; + } + // Action to trim the overall incoming traffic to protect the upstream + // hosts. 
This action allows protection in case the hosts are unable to + // recover from an outage, or unable to autoscale or unable to handle + // incoming traffic volume for any reason. + // + // At the client each category is applied one after the other to generate + // the 'actual' drop percentage on all outgoing traffic. For example: + // + // .. code-block:: json + // + // { "drop_overloads": [ + // { "category": "throttle", "drop_percentage": 60 } + // { "category": "lb", "drop_percentage": 50 } + // ]} + // + // The actual drop percentages applied to the traffic at the clients will be + // "throttle"_drop = 60% + // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. + // actual_outgoing_load = 20% // remaining after applying all categories. + repeated DropOverload drop_overloads = 2; + + // Priority levels and localities are considered overprovisioned with this + // factor (in percentage). This means that we don't consider a priority + // level or locality unhealthy until the percentage of healthy hosts + // multiplied by the overprovisioning factor drops below 100. + // With the default value 140(1.4), Envoy doesn't consider a priority level + // or a locality unhealthy until their percentage of healthy hosts drops + // below 72%. For example: + // + // .. code-block:: json + // + // { "overprovisioning_factor": 100 } + // + // Read more at :ref:`priority levels ` and + // :ref:`localities `. + google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32.gt = 0]; + + // The max time until which the endpoints from this assignment can be used. + // If no new assignments are received before this time expires the endpoints + // are considered stale and should be marked unhealthy. + // Defaults to 0 which means endpoints never go stale. + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration.gt.seconds = 0]; + + // The flag to disable overprovisioning. 
If it is set to true, + // :ref:`overprovisioning factor + // ` will be ignored + // and Envoy will not perform graceful failover between priority levels or + // localities as endpoints become unhealthy. Otherwise Envoy will perform + // graceful failover as :ref:`overprovisioning factor + // ` suggests. + // [#next-major-version: Unify with overprovisioning config as a single message.] + // [#not-implemented-hide:] + bool disable_overprovisioning = 5; + } + + // Load balancing policy settings. + Policy policy = 4; +} diff --git a/api/envoy/api/v3alpha/endpoint/BUILD b/api/envoy/api/v3alpha/endpoint/BUILD new file mode 100644 index 000000000000..1630438b13f6 --- /dev/null +++ b/api/envoy/api/v3alpha/endpoint/BUILD @@ -0,0 +1,49 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "endpoint", + srcs = ["endpoint.proto"], + visibility = ["//envoy/api/v3alpha:friends"], + deps = [ + "//envoy/api/v3alpha/auth:cert", + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:config_source", + "//envoy/api/v3alpha/core:health_check", + "//envoy/api/v3alpha/core:protocol", + ], +) + +api_go_proto_library( + name = "endpoint", + proto = ":endpoint", + deps = [ + "//envoy/api/v3alpha/auth:cert_go_proto", + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:config_source_go_proto", + "//envoy/api/v3alpha/core:health_check_go_proto", + "//envoy/api/v3alpha/core:protocol_go_proto", + ], +) + +api_proto_library_internal( + name = "load_report", + srcs = ["load_report.proto"], + visibility = ["//envoy/api/v3alpha:friends"], + deps = [ + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + ], +) + +api_go_proto_library( + name = "load_report", + proto = ":load_report", + deps = [ + "//envoy/api/v3alpha/core:address_go_proto", + 
"//envoy/api/v3alpha/core:base_go_proto", + ], +) diff --git a/api/envoy/api/v3alpha/endpoint/endpoint.proto b/api/envoy/api/v3alpha/endpoint/endpoint.proto new file mode 100644 index 000000000000..4bb1b57e8710 --- /dev/null +++ b/api/envoy/api/v3alpha/endpoint/endpoint.proto @@ -0,0 +1,129 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.endpoint; + +option java_outer_classname = "EndpointProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.endpoint"; +option go_package = "endpoint"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/core/health_check.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Endpoints] + +// Upstream host identifier. +message Endpoint { + // The upstream host address. + // + // .. attention:: + // + // The form of host address depends on the given cluster type. For STATIC or EDS, + // it is expected to be a direct IP address (or something resolvable by the + // specified :ref:`resolver ` + // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, + // and will be resolved via DNS. + core.Address address = 1; + + // The optional health check configuration. + message HealthCheckConfig { + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + uint32 port_value = 1 [(validate.rules).uint32.lte = 65535]; + } + + // The optional health check configuration is used as configuration for the + // health checker to contact the health checked host. + // + // .. 
attention:: + // + // This takes into effect only for upstream clusters with + // :ref:`active health checking ` enabled. + HealthCheckConfig health_check_config = 2; +} + +// An Endpoint that Envoy can route traffic to. +message LbEndpoint { + // Upstream host identifier or a named reference. + oneof host_identifier { + Endpoint endpoint = 1; + string endpoint_name = 5; + } + + // Optional health status when known and supplied by EDS server. + core.HealthStatus health_status = 2; + + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as *envoy.lb*. An example boolean key-value pair + // is *canary*, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. + core.Metadata metadata = 3; + + // The optional load balancing weight of the upstream host; at least 1. + // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, each host is presumed to have equal + // weight in a locality. + google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; +} + +// A group of endpoints belonging to a Locality. +// One can have multiple LocalityLbEndpoints for a locality, but this is +// generally only done if the different groups need to have different load +// balancing weights or different priorities. +message LocalityLbEndpoints { + // Identifies location of where the upstream hosts run. 
+ core.Locality locality = 1; + + // The group of endpoints belonging to the locality specified. + repeated LbEndpoint lb_endpoints = 2; + + // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load + // balancing weight for a locality is divided by the sum of the weights of all + // localities at the same priority level to produce the effective percentage + // of traffic for the locality. + // + // Locality weights are only considered when :ref:`locality weighted load + // balancing ` is + // configured. These weights are ignored otherwise. If no weights are + // specified when locality weighted load balancing is enabled, the locality is + // assigned no load. + google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; + + // Optional: the priority for this LocalityLbEndpoints. If unspecified this will + // default to the highest priority (0). + // + // Under usual circumstances, Envoy will only select endpoints for the highest + // priority (0). In the event all endpoints for a particular priority are + // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the + // next highest priority group. + // + // Priorities should range from 0 (highest) to N (lowest) without skipping. + uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; + + // Optional: Per locality proximity value which indicates how close this + // locality is from the source locality. This value only provides ordering + // information (lower the value, closer it is to the source locality). + // This will be consumed by load balancing schemes that need proximity order + // to determine where to route the requests. 
+ // [#not-implemented-hide:] + google.protobuf.UInt32Value proximity = 6; +} diff --git a/api/envoy/api/v3alpha/endpoint/load_report.proto b/api/envoy/api/v3alpha/endpoint/load_report.proto new file mode 100644 index 000000000000..7e4aed3ba103 --- /dev/null +++ b/api/envoy/api/v3alpha/endpoint/load_report.proto @@ -0,0 +1,148 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.endpoint; + +option java_outer_classname = "LoadReportProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.endpoint"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// These are stats Envoy reports to GLB every so often. Report frequency is +// defined by +// :ref:`LoadStatsResponse.load_reporting_interval`. +// Stats per upstream region/zone and optionally per subzone. +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message UpstreamLocalityStats { + // Name of zone, region and optionally endpoint group these metrics were + // collected from. Zone and region names could be empty if unknown. + core.Locality locality = 1; + + // The total number of requests successfully completed by the endpoints in the + // locality. + uint64 total_successful_requests = 2; + + // The total number of unfinished requests + uint64 total_requests_in_progress = 3; + + // The total number of requests that failed due to errors at the endpoint, + // aggregated over all endpoints in the locality. + uint64 total_error_requests = 4; + + // The total number of requests that were issued by this Envoy since + // the last report. This information is aggregated over all the + // upstream endpoints in the locality. + uint64 total_issued_requests = 8; + + // Stats for multi-dimensional load balancing. 
+ repeated EndpointLoadMetricStats load_metric_stats = 5; + + // Endpoint granularity stats information for this locality. This information + // is populated if the Server requests it by setting + // :ref:`LoadStatsResponse.report_endpoint_granularity`. + repeated UpstreamEndpointStats upstream_endpoint_stats = 7; + + // [#not-implemented-hide:] The priority of the endpoint group these metrics + // were collected from. + uint32 priority = 6; +} + +message UpstreamEndpointStats { + // Upstream host address. + core.Address address = 1; + + // Opaque and implementation dependent metadata of the + // endpoint. Envoy will pass this directly to the management server. + google.protobuf.Struct metadata = 6; + + // The total number of requests successfully completed by the endpoints in the + // locality. These include non-5xx responses for HTTP, where errors + // originate at the client and the endpoint responded successfully. For gRPC, + // the grpc-status values are those not covered by total_error_requests below. + uint64 total_successful_requests = 2; + + // The total number of unfinished requests for this endpoint. + uint64 total_requests_in_progress = 3; + + // The total number of requests that failed due to errors at the endpoint. + // For HTTP these are responses with 5xx status codes and for gRPC the + // grpc-status values: + // + // - DeadlineExceeded + // - Unimplemented + // - Internal + // - Unavailable + // - Unknown + // - DataLoss + uint64 total_error_requests = 4; + + // The total number of requests that were issued to this endpoint + // since the last report. A single TCP connection, HTTP or gRPC + // request or stream is counted as one request. + uint64 total_issued_requests = 7; + + // Stats for multi-dimensional load balancing. + repeated EndpointLoadMetricStats load_metric_stats = 5; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message EndpointLoadMetricStats { + // Name of the metric; may be empty. 
+ string metric_name = 1; + + // Number of calls that finished and included this metric. + uint64 num_requests_finished_with_metric = 2; + + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. + double total_metric_value = 3; +} + +// Per cluster load stats. Envoy reports these stats a management server in a +// :ref:`LoadStatsRequest` +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +// Next ID: 7 +message ClusterStats { + // The name of the cluster. + string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + + // The eds_cluster_config service_name of the cluster. + // It's possible that two clusters send the same service_name to EDS, + // in that case, the management server is supposed to do aggregation on the load reports. + string cluster_service_name = 6; + + // Need at least one. + repeated UpstreamLocalityStats upstream_locality_stats = 2 + [(validate.rules).repeated .min_items = 1]; + + // Cluster-level stats such as total_successful_requests may be computed by + // summing upstream_locality_stats. In addition, below there are additional + // cluster-wide stats. + // + // The total number of dropped requests. This covers requests + // deliberately dropped by the drop_overload policy and circuit breaking. + uint64 total_dropped_requests = 3; + + message DroppedRequests { + // Identifier for the policy specifying the drop. + string category = 1 [(validate.rules).string.min_bytes = 1]; + // Total number of deliberately dropped requests for the category. + uint64 dropped_count = 2; + } + // Information about deliberately dropped requests for each category specified + // in the DropOverload policy. + repeated DroppedRequests dropped_requests = 5; + + // Period over which the actual load report occurred. This will be guaranteed to include every + // request reported. 
Due to system load and delays between the *LoadStatsRequest* sent from Envoy + // and the *LoadStatsResponse* message sent from the management server, this may be longer than + // the requested load reporting interval in the *LoadStatsResponse*. + google.protobuf.Duration load_report_interval = 4; +} diff --git a/api/envoy/api/v3alpha/lds.proto b/api/envoy/api/v3alpha/lds.proto new file mode 100644 index 000000000000..d9976d6c0e3c --- /dev/null +++ b/api/envoy/api/v3alpha/lds.proto @@ -0,0 +1,206 @@ +syntax = "proto3"; + +package envoy.api.v3alpha; + +option java_outer_classname = "LdsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha"; + +option java_generic_services = true; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/discovery.proto"; +import "envoy/api/v3alpha/listener/listener.proto"; +import "envoy/api/v3alpha/listener/udp_listener_config.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Listener] +// Listener :ref:`configuration overview ` + +// The Envoy instance initiates an RPC at startup to discover a list of +// listeners. Updates are delivered via streaming from the LDS server and +// consist of a complete update of all listeners. Existing connections will be +// allowed to drain from listeners that are no longer present. 
+service ListenerDiscoveryService { + rpc DeltaListeners(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } + + rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:listeners" + body: "*" + }; + } +} + +// [#comment:next free field: 19] +message Listener { + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + core.Address address = 2 [(validate.rules).message.required = true]; + + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + repeated listener.FilterChain filter_chains = 3; + + // If a connection is redirected using *iptables*, the port on which the proxy + // receives it might be different from the original destination address. When this flag is set to + // true, the listener hands off redirected connections to the listener associated with the + // original destination address. If there is no listener associated with the original destination + // address, the connection is handled by the listener that receives it. Defaults to false. + // + // .. attention:: + // + // This field is deprecated. Use :ref:`an original_dst ` + // :ref:`listener filter ` instead. 
+ // + // Note that hand off to another listener is *NOT* performed without this flag. Once + // :ref:`FilterChainMatch ` is implemented this flag + // will be removed, as filter chain matching can be used to select a filter chain based on the + // restored destination address. + google.protobuf.BoolValue use_original_dst = 4 [deprecated = true]; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // Listener metadata. + core.Metadata metadata = 6; + + // [#not-implemented-hide:] + message DeprecatedV1 { + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // [V2-API-DIFF] This is deprecated in v2, all Listeners will bind to their + // port. An additional filter chain must be created for every original + // destination port this listener may redirect to in v2, with the original + // port specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7; + + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // The type of draining to perform at a listener-wide level. 
+  DrainType drain_type = 8;
+
+  // Listener filters have the opportunity to manipulate and augment the connection metadata that
+  // is used in connection filter chain matching, for example. These filters are run before any in
+  // :ref:`filter_chains `. Order matters as the
+  // filters are processed sequentially right after a socket has been accepted by the listener, and
+  // before a connection is created.
+  // UDP Listener filters can be specified when the protocol in the listener socket address in
+  // :ref:`protocol ` is :ref:`UDP
+  // `.
+  // UDP listeners currently support a single filter.
+  repeated listener.ListenerFilter listener_filters = 9;
+
+  // The timeout to wait for all listener filters to complete operation. If the timeout is reached,
+  // the accepted socket is closed without a connection being created unless
+  // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the
+  // timeout. If not specified, a default timeout of 15s is used.
+  google.protobuf.Duration listener_filters_timeout = 15 [(gogoproto.stdduration) = true];
+
+  // Whether a connection should be created when listener filters timeout. Default is false.
+  //
+  // .. attention::
+  //
+  //   Some listener filters, such as :ref:`Proxy Protocol filter
+  //   `, should not be used with this option. It will cause
+  //   unexpected behavior when a connection is created.
+  bool continue_on_listener_filters_timeout = 17;
+
+  // Whether the listener should be set as a transparent socket.
+  // When this flag is set to true, connections can be redirected to the listener using an
+  // *iptables* *TPROXY* target, in which case the original source and destination addresses and
+  // ports are preserved on accepted connections. This flag should be used in combination with
+  // :ref:`an original_dst ` :ref:`listener filter
+  // ` to mark the connections' local addresses as
+  // "restored."
This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. + google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the *IP_FREEBIND* socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.SocketOption socket_options = 13; + + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. 
+ // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; + + reserved 14; + + // Specifies the intended direction of the traffic relative to the local Envoy. + core.TrafficDirection traffic_direction = 16; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:'UDP + // `, this field specifies the actual udp listener to create, + // i.e. :ref:`udp_listener_name + // ` = "raw_udp_listener" for + // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". + listener.UdpListenerConfig udp_listener_config = 18; +} diff --git a/api/envoy/api/v3alpha/listener/BUILD b/api/envoy/api/v3alpha/listener/BUILD new file mode 100644 index 000000000000..693ead54dde0 --- /dev/null +++ b/api/envoy/api/v3alpha/listener/BUILD @@ -0,0 +1,41 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "listener", + srcs = ["listener.proto"], + visibility = ["//envoy/api/v3alpha:friends"], + deps = [ + "//envoy/api/v3alpha/auth:cert", + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + ], +) + +api_go_proto_library( + name = "listener", + proto = ":listener", + deps = [ + "//envoy/api/v3alpha/auth:cert_go_proto", + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + ], +) + +api_proto_library_internal( + name = "udp_listener_config", + srcs = ["udp_listener_config.proto"], + visibility = ["//envoy/api/v3alpha:friends"], + deps = [ + "//envoy/api/v3alpha/core:base", + ], +) + +api_go_proto_library( + name = "udp_listener_config", + proto = ":udp_listener_config", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + ], +) diff --git 
a/api/envoy/api/v3alpha/listener/listener.proto b/api/envoy/api/v3alpha/listener/listener.proto new file mode 100644 index 000000000000..2aa7146a822c --- /dev/null +++ b/api/envoy/api/v3alpha/listener/listener.proto @@ -0,0 +1,210 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.listener; + +option java_outer_classname = "ListenerProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.listener"; +option go_package = "listener"; +option csharp_namespace = "Envoy.Api.V2.ListenerNS"; +option ruby_package = "Envoy::Api::V2::ListenerNS"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/auth/cert.proto"; +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +message Filter { + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 4; + } + + reserved 3; +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. +// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. 
ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI ``www.example.com`` the most specific match would be +// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter +// chain without ``server_names`` requirements). +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +message FilterChainMatch { + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {gte: 1, lte: 65535}]; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + repeated core.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + // Match a connection originating from the same host. + LOCAL = 1; + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + // Specifies the connection source IP match type. Can be any, local or external network. 
+ ConnectionSourceType source_type = 12 [(validate.rules).enum.defined_only = true]; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + repeated core.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + repeated uint32 source_ports = 7 [(validate.rules).repeated .items.uint32 = {gte: 1, lte: 65535}]; + + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` + // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. + // + // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string server_names = 11; + + // If non-empty, a transport protocol to consider when determining a filter chain match. + // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // * ``raw_buffer`` - default, used when no transport protocol is detected, + // * ``tls`` - set by :ref:`envoy.listener.tls_inspector ` + // when TLS protocol is detected. + string transport_protocol = 9; + + // If non-empty, a list of application protocols (e.g. 
+ ALPN for TLS protocol) to consider when
+  // determining a filter chain match. Those values will be compared against the application
+  // protocols of a new connection, when detected by one of the listener filters.
+  //
+  // Suggested values include:
+  //
+  // * ``http/1.1`` - set by :ref:`envoy.listener.tls_inspector
+  //   `,
+  // * ``h2`` - set by :ref:`envoy.listener.tls_inspector `
+  //
+  // .. attention::
+  //
+  //   Currently, only :ref:`TLS Inspector ` provides
+  //   application protocol detection based on the requested
+  //   `ALPN `_ values.
+  //
+  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,
+  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,
+  //   unless all connecting clients are known to use ALPN.
+  repeated string application_protocols = 10;
+
+  reserved 1;
+  reserved "sni_domains";
+}
+
+// A filter chain wraps a set of match criteria, an optional TLS context, a set of filters, and
+// various other parameters.
+message FilterChain {
+  // The criteria to use when matching a connection to this filter chain.
+  FilterChainMatch filter_chain_match = 1;
+
+  // The TLS context for this filter chain.
+  auth.DownstreamTlsContext tls_context = 2;
+
+  // A list of individual network filters that make up the filter chain for
+  // connections established with the listener. Order matters as the filters are
+  // processed sequentially as connection events happen. Note: If the filter
+  // list is empty, the connection will close by default.
+  repeated Filter filters = 3;
+
+  // Whether the listener should expect a PROXY protocol V1 header on new
+  // connections. If this option is enabled, the listener will assume that the
+  // remote address of the connection is the one specified in the header. Some
+  // load balancers including the AWS ELB support this option.
If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + google.protobuf.BoolValue use_proxy_proto = 4; + + // [#not-implemented-hide:] filter chain metadata. + core.Metadata metadata = 5; + + // See :ref:`base.TransportSocket` description. + core.TransportSocket transport_socket = 6; + + // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no + // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter + // chain is to be dynamically updated or removed via FCDS a unique name must be provided. + string name = 7; +} + +message ListenerFilter { + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Filter specific configuration which depends on the filter being instantiated. + // See the supported filters for further documentation. 
+ oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } +} diff --git a/api/envoy/api/v3alpha/listener/udp_listener_config.proto b/api/envoy/api/v3alpha/listener/udp_listener_config.proto new file mode 100644 index 000000000000..763a08a93ad3 --- /dev/null +++ b/api/envoy/api/v3alpha/listener/udp_listener_config.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.listener; + +option java_outer_classname = "UdpListenerConfigProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.listener"; +option go_package = "listener"; +option csharp_namespace = "Envoy.Api.V2.ListenerNS"; +option ruby_package = "Envoy::Api::V2::ListenerNS"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/any.proto"; + +// [#protodoc-title: Udp Listener Config] +// Listener :ref:`configuration overview ` + +message UdpListenerConfig { + // Used to look up UDP listener factory, matches "raw_udp_listener" or + // "quic_listener" to create a specific udp listener. + // If not specified, treat as "raw_udp_listener". + string udp_listener_name = 1; + + // Used to create a specific listener factory. To some factory, e.g. + // "raw_udp_listener", config is not needed. 
+ oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } +} diff --git a/api/envoy/api/v3alpha/ratelimit/BUILD b/api/envoy/api/v3alpha/ratelimit/BUILD new file mode 100644 index 000000000000..b08c1fc029a0 --- /dev/null +++ b/api/envoy/api/v3alpha/ratelimit/BUILD @@ -0,0 +1,14 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "ratelimit", + srcs = ["ratelimit.proto"], + visibility = ["//envoy/api/v3alpha:friends"], +) + +api_go_proto_library( + name = "ratelimit", + proto = ":ratelimit", +) diff --git a/api/envoy/api/v3alpha/ratelimit/ratelimit.proto b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto new file mode 100644 index 000000000000..c10bfef83b98 --- /dev/null +++ b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.ratelimit; + +option java_outer_classname = "RatelimitProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.ratelimit"; +option go_package = "ratelimit"; + +import "validate/validate.proto"; + +// [#protodoc-title: Common rate limit components] + +// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to +// determine the final rate limit key and overall allowed limit. Here are some examples of how +// they might be used for the domain "envoy". +// +// .. code-block:: cpp +// +// ["authenticated": "false"], ["remote_address": "10.0.0.1"] +// +// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The +// configuration supplies a default limit for the *remote_address* key. If there is a desire to +// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the +// configuration. +// +// .. 
code-block:: cpp +// +// ["authenticated": "false"], ["path": "/foo/bar"] +// +// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if +// configured that way in the service). +// +// .. code-block:: cpp +// +// ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"] +// +// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. +// Like (1) we can raise/block specific IP addresses if we want with an override configuration. +// +// .. code-block:: cpp +// +// ["authenticated": "true"], ["client_id": "foo"] +// +// What it does: Limits all traffic for an authenticated client "foo" +// +// .. code-block:: cpp +// +// ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] +// +// What it does: Limits traffic to a specific path for an authenticated client "foo" +// +// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. +// This enables building complex application scenarios with a generic backend. +message RateLimitDescriptor { + message Entry { + // Descriptor key. + string key = 1 [(validate.rules).string.min_bytes = 1]; + + // Descriptor value. + string value = 2 [(validate.rules).string.min_bytes = 1]; + } + + // Descriptor entries. 
+ repeated Entry entries = 1 [(validate.rules).repeated .min_items = 1]; +} diff --git a/api/envoy/api/v3alpha/rds.proto b/api/envoy/api/v3alpha/rds.proto new file mode 100644 index 000000000000..ed20b34e7cca --- /dev/null +++ b/api/envoy/api/v3alpha/rds.proto @@ -0,0 +1,135 @@ +syntax = "proto3"; + +package envoy.api.v3alpha; + +option java_outer_classname = "RdsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha"; + +option java_generic_services = true; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/core/config_source.proto"; +import "envoy/api/v3alpha/discovery.proto"; +import "envoy/api/v3alpha/route/route.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: HTTP route configuration] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The resource_names field in DiscoveryRequest specifies a route configuration. +// This allows an Envoy configuration with multiple HTTP listeners (and +// associated HTTP connection manager filters) to use different route +// configurations. Each listener will bind its HTTP connection manager filter to +// a route table via this identifier. +service RouteDiscoveryService { + rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } + + rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:routes" + body: "*" + }; + } +} + +// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for +// a given RouteConfiguration. 
If VHDS is configured a virtual host list update will be triggered +// during the processing of an HTTP request if a route for the request cannot be resolved. The +// :ref:`resource_names_subscribe ` +// field contains a list of virtual host names or aliases to track. The contents of an alias would +// be the contents of a *host* or *authority* header used to make an http request. An xDS server +// will match an alias to a virtual host based on the content of :ref:`domains' +// ` field. The *resource_names_unsubscribe* field contains +// a list of virtual host names that have been :ref:`unsubscribed ` +// from the routing table associated with the RouteConfiguration. +service VirtualHostDiscoveryService { + rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } +} + +// [#comment:next free field: 10] +message RouteConfiguration { + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` + // in :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v3alpha.Rds`. + string name = 1; + + // An array of virtual hosts that make up the route table. + repeated route.VirtualHost virtual_hosts = 2; + + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used + // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration + // taking precedence. + // [#not-implemented-hide:] + Vhds vhds = 9; + + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. If they are found on external requests they will be cleaned + // prior to filter invocation. 
See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + repeated string internal_only_headers = 3; + + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or + // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 4 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + repeated string response_headers_to_remove = 5; + + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or + // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each request + // routed by the HTTP connection manager. + repeated string request_headers_to_remove = 8; + + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. 
This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting default to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). + google.protobuf.BoolValue validate_clusters = 7; +} + +// [#not-implemented-hide:] +message Vhds { + // Configuration source specifier for VHDS. + envoy.api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/api/v3alpha/route/BUILD b/api/envoy/api/v3alpha/route/BUILD new file mode 100644 index 000000000000..0b660893c5d4 --- /dev/null +++ b/api/envoy/api/v3alpha/route/BUILD @@ -0,0 +1,28 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "route", + srcs = ["route.proto"], + visibility = ["//envoy/api/v3alpha:friends"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/type:percent", + "//envoy/type:range", + "//envoy/type/matcher:regex", + "//envoy/type/matcher:string", + ], +) + +api_go_proto_library( + name = "route", + proto = ":route", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/type:percent_go_proto", + "//envoy/type:range_go_proto", + "//envoy/type/matcher:regex_go_proto", + "//envoy/type/matcher:string_go_proto", + ], +) diff --git a/api/envoy/api/v3alpha/route/route.proto b/api/envoy/api/v3alpha/route/route.proto new file mode 100644 index 000000000000..963d94f1b022 --- /dev/null +++ b/api/envoy/api/v3alpha/route/route.proto @@ -0,0 +1,1404 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.route; + +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.route"; +option go_package = "route"; +option 
java_generic_services = true; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/type/matcher/regex.proto"; +import "envoy/type/matcher/string.proto"; +import "envoy/type/percent.proto"; +import "envoy/type/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: HTTP route] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The top level element in the routing configuration is a virtual host. Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. +// [#comment:next free field: 17] +message VirtualHost { + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. + // + // .. note:: + // + // The wildcard will not match the empty string. + // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. + // The longest wildcards match first. 
+ // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. + repeated string domains = 2 [(validate.rules).repeated .min_items = 1]; + + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + repeated Route routes = 3; + + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + TlsRequirementType require_tls = 4; + + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. + repeated VirtualCluster virtual_clusters = 5; + + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + repeated RateLimit rate_limits = 6; + + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 7 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each request + // handled by this virtual host. 
+ repeated string request_headers_to_remove = 13; + + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + repeated string response_headers_to_remove = 11; + + // Indicates that the virtual host has a CORS policy. + CorsPolicy cors = 8; + + reserved 9; + + // The per_filter_config field can be used to provide virtual host-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map per_filter_config = 12; + + // The per_filter_config field can be used to provide virtual host-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map typed_per_filter_config = 15; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the upstream request. Setting this option will cause it to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the upstream + // will see the attempt count as perceived by the second Envoy. Defaults to false. 
+ // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + bool include_request_attempt_count = 14; + + // Indicates the retry policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + RetryPolicy retry_policy = 16; + + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + HedgePolicy hedge_policy = 17; +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +// [#comment:next free field: 15] +message Route { + // Name for the route. + string name = 14; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message.required = true]; + + oneof action { + option (validate.required) = true; + + // Route request to some upstream cluster. + RouteAction route = 2; + + // Return a redirect. + RedirectAction redirect = 3; + + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + } + + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as *envoy.router*. + core.Metadata metadata = 4; + + // Decorator for the matched route. + Decorator decorator = 5; + + reserved 6; + + // The per_filter_config field can be used to provide route-specific + // configurations for filters. 
The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` for + // if and how it is utilized. + map per_filter_config = 8; + + // The per_filter_config field can be used to provide route-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` for + // if and how it is utilized. + map typed_per_filter_config = 13; + + // Specifies a set of headers that will be added to requests matching this + // route. Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_api_msg_route.VirtualHost` and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each request + // matching this route. + repeated string request_headers_to_remove = 12; + + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. + repeated core.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. 
+ repeated string response_headers_to_remove = 11; + + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing tracing = 15; +} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the +// weights. +// [#comment:next free field: 11] +message WeightedCluster { + message ClusterWeight { + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // An integer between 0 and :ref:`total_weight + // `. When a request matches the route, + // the choice of an upstream cluster is determined by its weight. The sum of weights across all + // entries in the clusters array must add up to the total_weight, which defaults to 100. + google.protobuf.UInt32Value weight = 2; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered for + // load balancing. Note that this will be merged with what's provided in :ref: + // `RouteAction.MetadataMatch `, with values + // here taking precedence. The filter name should be specified as *envoy.lb*. + core.Metadata metadata_match = 3; + + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and + // :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 4 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of HTTP headers that should be removed from each request when + // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + repeated string request_headers_to_remove = 9; + + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 5 + [(validate.rules).repeated .max_items = 1000]; + + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + repeated string response_headers_to_remove = 6; + + reserved 7; + + // The per_filter_config field can be used to provide weighted cluster-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map per_filter_config = 8; + + // The per_filter_config field can be used to provide weighted cluster-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. 
+ map typed_per_filter_config = 10; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + + // Specifies the total weight across all clusters. The sum of all cluster weights must equal this + // value, which must be greater than 0. Defaults to 100. + google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32.gte = 1]; + + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the *runtime_key_prefix* is + // specified, the router will look for weights associated with each upstream + // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where + // *cluster[i]* denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. + string runtime_key_prefix = 2; +} + +message RouteMatch { + oneof path_specifier { + option (validate.required) = true; + + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. + string prefix = 1; + + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + string path = 2; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. The regex grammar is defined `here + // `_. 
+ // + // Examples: + // + // * The regex */b[io]t* matches the path */bit* + // * The regex */b[io]t* matches the path */bot* + // * The regex */b[io]t* does not match the path */bite* + // * The regex */b[io]t* does not match the path */bit/bot* + // + // .. attention:: + // This field has been deprecated in favor of `safe_regex` as it is not safe for use with + // untrusted input in all cases. + string regex = 3 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message.required = true]; + } + + // Indicates that prefix/path matching should be case insensitive. The default + // is true. + google.protobuf.BoolValue case_sensitive = 4; + + reserved 5; + + // Indicates that the route should additionally match on a runtime key. Every time the route + // is considered for a match, it must also fall under the percentage of matches indicated by + // this field. For some fraction N/D, a random number in the range [0,D) is selected. 
If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the router continues to evaluate the remaining match criteria. A runtime_fraction + // route configuration can be used to roll out route changes in a gradual manner without full + // code/config deploys. Refer to the :ref:`traffic shifting + // ` docs for additional documentation. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + // integer with the assumption that the value is an integral percentage out of 100. For + // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. + core.RuntimeFractionalPercent runtime_fraction = 9; + + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + repeated HeaderMatcher headers = 6; + + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. + repeated QueryParameterMatcher query_parameters = 7; + + message GrpcRouteMatchOptions { + } + + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. 
+ GrpcRouteMatchOptions grpc = 8; +} + +// [#comment:next free field: 11] +message CorsPolicy { + // Specifies the origins that will be allowed to do CORS requests. + // + // An origin is allowed if either allow_origin or allow_origin_regex match. + // + // .. attention:: + // This field has been deprecated in favor of `allow_origin_string_match`. + repeated string allow_origin = 1 [deprecated = true]; + + // Specifies regex patterns that match allowed origins. + // + // An origin is allowed if either allow_origin or allow_origin_regex match. + // + // .. attention:: + // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for + // use with untrusted input in all cases. + repeated string allow_origin_regex = 8 + [(validate.rules).repeated .items.string.max_bytes = 1024, deprecated = true]; + + // Specifies string patterns that match allowed origins. An origin is allowed if any of the + // string matchers match. + repeated type.matcher.StringMatcher allow_origin_string_match = 11; + + // Specifies the content for the *access-control-allow-methods* header. + string allow_methods = 2; + + // Specifies the content for the *access-control-allow-headers* header. + string allow_headers = 3; + + // Specifies the content for the *access-control-expose-headers* header. + string expose_headers = 4; + + // Specifies the content for the *access-control-max-age* header. + string max_age = 5; + + // Specifies whether the resource allows credentials. + google.protobuf.BoolValue allow_credentials = 6; + + oneof enabled_specifier { + // Specifies if CORS is enabled. Defaults to true. Only effective on route. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`filter_enabled` field instead. + google.protobuf.BoolValue enabled = 7 [deprecated = true]; + + // Specifies if CORS is enabled. + // + // More information on how this can be controlled via runtime can be found + // :ref:`here `. + // + // .. 
note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + core.RuntimeFractionalPercent filter_enabled = 9; + } + + // Specifies if CORS policies are evaluated and tracked when filter is off but + // does not enforce any policies. + // + // More information on how this can be controlled via runtime can be found + // :ref:`here `. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + core.RuntimeFractionalPercent shadow_enabled = 10; +} + +// [#comment:next free field: 30] +message RouteAction { + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + string cluster_header = 2 [(validate.rules).string.min_bytes = 1]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + } + + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. 
+ ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum.defined_only = true]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what's set in this field will be considered + // for load balancing. If using :ref:`weighted_clusters + // `, metadata will be merged, with values + // provided there taking precedence. The filter name should be specified as *envoy.lb*. + core.Metadata metadata_match = 4; + + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // .. attention:: + // + // Pay careful attention to the use of trailing slashes in the + // :ref:`route's match ` prefix value. + // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single + // :ref:`Route `, as shown by the below config entries: + // + // .. code-block:: yaml + // + // - match: + // prefix: "/prefix/" + // route: + // prefix_rewrite: "/" + // - match: + // prefix: "/prefix" + // route: + // prefix_rewrite: "/" + // + // Having above entries in the config, requests to */prefix* will be stripped to */*, while + // requests to */prefix/etc* will be stripped to */etc*. + string prefix_rewrite = 5; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite = 6; + + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. 
This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; + + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + string auto_host_rewrite_header = 29; + } + + // Specifies the upstream timeout for the route. If not specified, the default is 15s. This + // spans between the point at which the entire downstream request (i.e. end-of-stream) has been + // processed and when the upstream response has been completely processed. A value of 0 will + // disable the route's timeout. + // + // .. note:: + // + // This timeout includes all retries. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration timeout = 8 [(gogoproto.stdduration) = true]; + + // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + // although the connection manager wide :ref:`stream_idle_timeout + // ` + // will still apply. A value of 0 will completely disable the route's idle timeout, even if a + // connection manager stream idle timeout is configured. + // + // The idle timeout is distinct to :ref:`timeout + // `, which provides an upper bound + // on the upstream response time; :ref:`idle_timeout + // ` instead bounds the amount + // of time the request's stream may be idle. + // + // After header decoding, the idle timeout will apply on downstream and + // upstream request events. 
Each time an encode/decode event for headers or + // data is processed for the stream, the timer will be reset. If the timeout + // fires, the stream is terminated with a 408 Request Timeout error code if no + // upstream response header has been received, otherwise a stream reset + // occurs. + google.protobuf.Duration idle_timeout = 24 [(gogoproto.stdduration) = true]; + + // Indicates that the route has a retry policy. Note that if this is set, + // it'll take precedence over the virtual host level retry policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + RetryPolicy retry_policy = 9; + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // If not specified, all requests to the target cluster will be mirrored. If + // specified, Envoy will lookup the runtime key to get the % of requests to + // mirror. Valid values are from 0 to 10000, allowing for increments of + // 0.01% of requests to be mirrored. If the runtime key is specified in the + // configuration but not present in runtime, 0 is the default and thus 0% of + // requests will be mirrored. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`runtime_fraction + // ` field instead. 
+ string runtime_key = 2 [deprecated = true]; + + // If both :ref:`runtime_key + // ` and this field are not + // specified, all requests to the target cluster will be mirrored. + // + // If specified, this field takes precedence over the `runtime_key` field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a :ref:`FractionalPercent ` proto represented + // as JSON/YAML and may also be represented as an integer with the assumption that the value + // is an integral percentage out of 100. For instance, a runtime key lookup returning the + // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is + // HUNDRED. This is behaviour is different to that of the deprecated `runtime_key` field, + // where the implicit denominator is 10000. + core.RuntimeFractionalPercent runtime_fraction = 3; + } + + // Indicates that the route has a request mirroring policy. + RequestMirrorPolicy request_mirror_policy = 10; + + // Optionally specifies the :ref:`routing priority `. + // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once + // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] + core.RoutingPriority priority = 11; + + reserved 12; + reserved 18; + reserved 19; + + // Specifies a set of rate limit configurations that could be applied to the + // route. + repeated RateLimit rate_limits = 13; + + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. 
+ google.protobuf.BoolValue include_vh_rate_limits = 14; + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. + message HashPolicy { + message Header { + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 [(validate.rules).string.min_bytes = 1]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. + message Cookie { + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2 [(gogoproto.stdduration) = true]; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + } + + message ConnectionProperties { + // Hash on source IP address. 
+ bool source_ip = 1; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + } + + // The flag that shortcircuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; + } + + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). If a hash policy has the "terminal" attribute set to true, and + // there is already a hash generated, the hash is returned immediately, + // ignoring the rest of the hash policy list. 
+ repeated HashPolicy hash_policy = 15; + + reserved 16; + reserved 22; + + // Indicates that the route has a CORS policy. + CorsPolicy cors = 17; + + reserved 21; + + // If present, and the request is a gRPC request, use the + // `grpc-timeout header `_, + // or its default value (infinity) instead of + // :ref:`timeout `, but limit the applied timeout + // to the maximum value specified here. If configured as 0, the maximum allowed timeout for + // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used + // and gRPC requests time out like any other requests using + // :ref:`timeout ` or its default. + // This can be used to prevent unexpected upstream request timeouts due to potentially long + // time gaps between gRPC request and response in gRPC streaming mode. + google.protobuf.Duration max_grpc_timeout = 23 [(gogoproto.stdduration) = true]; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + google.protobuf.Duration grpc_timeout_offset = 28 [(gogoproto.stdduration) = true]; + + // Allows enabling and disabling upgrades on a per-route basis. + // This overrides any enabled/disabled upgrade filter chain specified in the + // HttpConnectionManager + // :ref:upgrade_configs` + // ` + // but does not affect any custom filter chain specified there. + message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". 
+ // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. + string upgrade_type = 1; + // Determines if upgrades are available on this route. Defaults to true. + google.protobuf.BoolValue enabled = 2; + }; + repeated UpgradeConfig upgrade_configs = 25; + + // Configures :ref:`internal redirect ` behavior. + enum InternalRedirectAction { + PASS_THROUGH_INTERNAL_REDIRECT = 0; + HANDLE_INTERNAL_REDIRECT = 1; + } + InternalRedirectAction internal_redirect_action = 26; + + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy hedge_policy = 27; +} + +// HTTP retry :ref:`architecture overview `. +// [#comment:next free field: 9] +message RetryPolicy { + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + string retry_on = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + google.protobuf.UInt32Value num_retries = 2; + + // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The + // same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. + // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. 
+ google.protobuf.Duration per_try_timeout = 3 [(gogoproto.stdduration) = true]; + + message RetryPriority { + string name = 1 [(validate.rules).string.min_bytes = 1]; + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + // Specifies an implementation of a RetryPriority which is used to determine the + // distribution of load across priorities used for retries. Refer to + // :ref:`retry plugin configuration ` for more details. + RetryPriority retry_priority = 4; + + message RetryHostPredicate { + string name = 1 [(validate.rules).string.min_bytes = 1]; + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + // for retries. If any of the predicates reject the host, host selection will be reattempted. + // Refer to :ref:`retry plugin configuration ` for more + // details. + repeated RetryHostPredicate retry_host_predicate = 5; + + // The maximum number of times host selection will be reattempted before giving up, at which + // point the host that was last selected will be routed to. If unspecified, this will default to + // retrying once. + int64 host_selection_retry_max_attempts = 6; + + // HTTP status codes that should trigger a retry in addition to those specified by retry_on. + repeated uint32 retriable_status_codes = 7; + + message RetryBackOff { + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [ + (validate.rules).duration = { + required: true, + gt: {seconds: 0} + }, + (gogoproto.stdduration) = true + ]; + + // Specifies the maximum interval between retries. 
This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 + [(validate.rules).duration.gt = {seconds: 0}, (gogoproto.stdduration) = true]; + } + + // Specifies parameters that control retry back off. This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff retry_back_off = 8; +} + +// HTTP request hedging :ref:`architecture overview `. +message HedgePolicy { + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // Defaults to 1. + // [#not-implemented-hide:] + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32.gte = 1]; + + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // Defaults to 0. + // [#not-implemented-hide:] + envoy.type.FractionalPercent additional_request_chance = 2; + + // Indicates that a hedged request should be sent when the per-try timeout + // is hit. This will only occur if the retry policy also indicates that a + // timed out request should be retried. + // Once a timed out request is retried due to per try timeout, the router + // filter will ensure that it is not retried again even if the returned + // response headers would otherwise be retried according the specified + // :ref:`RetryPolicy `. + // Defaults to false. 
+ bool hedge_on_per_try_timeout = 3; +} + +message RedirectAction { + // When the scheme redirection take place, the following rules apply: + // 1. If the source URI scheme is `http` and the port is explicitly + // set to `:80`, the port will be removed after the redirection + // 2. If the source URI scheme is `https` and the port is explicitly + // set to `:443`, the port will be removed after the redirection + oneof scheme_rewrite_specifier { + // The scheme portion of the URL will be swapped with "https". + bool https_redirect = 4; + // The scheme portion of the URL will be swapped with this value. + string scheme_redirect = 7; + } + // The host portion of the URL will be swapped with this value. + string host_redirect = 1; + // The port value of the URL will be swapped with this value. + uint32 port_redirect = 8; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + string path_redirect = 2; + + // Indicates that during redirection, the matched prefix (or path) + // should be swapped with this value. This option allows redirect URLs be dynamically created + // based on the request. + // + // .. attention:: + // + // Pay attention to the use of trailing slashes as mentioned in + // :ref:`RouteAction's prefix_rewrite `. + string prefix_rewrite = 5; + } + + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum.defined_only = true]; + + // Indicates that during redirection, the query portion of the URL will + // be removed. 
Default value is false. + bool strip_query = 6; +} + +message DirectResponseAction { + // Specifies the HTTP response status to be returned. + uint32 status = 1 [(validate.rules).uint32 = {gte: 100, lt: 600}]; + + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. note:: + // + // Headers can be specified using *response_headers_to_add* in the enclosing + // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or + // :ref:`envoy_api_msg_route.VirtualHost`. + core.DataSource body = 2; +} + +message Decorator { + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + string operation = 1 [(validate.rules).string.min_bytes = 1]; +} + +message Tracing { + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + envoy.type.FractionalPercent client_sampling = 1; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. 
+ // Default: 100% + envoy.type.FractionalPercent random_sampling = 2; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + envoy.type.FractionalPercent overall_sampling = 3; +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect” statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. +message VirtualCluster { + // Specifies a regex pattern to use for matching requests. The entire path of the request + // must match the regex. The regex grammar used is defined `here + // `_. 
+ // + // Examples: + // + // * The regex */rides/\d+* matches the path */rides/0* + // * The regex */rides/\d+* matches the path */rides/123* + // * The regex */rides/\d+* does not match the path */rides/123/456* + // + // .. attention:: + // This field has been deprecated in favor of `headers` as it is not safe for use with + // untrusted input in all cases. + string pattern = 1 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + + // Specifies a list of header matchers to use for matching requests. Each specified header must + // match. The pseudo-headers `:path` and `:method` can be used to match the request path and + // method, respectively. + repeated HeaderMatcher headers = 4; + + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + string name = 2 [(validate.rules).string.min_bytes = 1]; + + // Optionally specifies the HTTP method to match on. For example GET, PUT, + // etc. + // + // .. attention:: + // This field has been deprecated in favor of `headers`. + core.RequestMethod method = 3 [deprecated = true]; +} + +// Global rate limiting :ref:`architecture overview `. +message RateLimit { + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32.lte = 10]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + + message Action { + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. 
+ message SourceCluster { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + } + + // The following descriptor entry is appended when a header contains a key that matches the + // *header_name*: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. + string header_name = 1 [(validate.rules).string.min_bytes = 1]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string.min_bytes = 1]; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + message HeaderValueMatch { + // The value to use in the descriptor entry. 
+ string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + google.protobuf.BoolValue expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated HeaderMatcher headers = 3 [(validate.rules).repeated .min_items = 1]; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. + RequestHeaders request_headers = 3; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + } + } + + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated .min_items = 1]; +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* +// header. 
Thus, if attempting to match on *Host*, match on *:authority* instead. +// +// .. attention:: +// +// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "exact_match": "POST" +// } +// +// .. attention:: +// In the absence of any header match specifier, match will default to :ref:`present_match +// `. i.e, a request that has the :ref:`name +// ` header will match, regardless of the header's +// value. +// +// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] +message HeaderMatcher { + // Specifies the name of the header in the request. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + reserved 2; // value deprecated by :ref:`exact_match + // ` + + reserved 3; // regex deprecated by :ref:`regex_match + // ` + + // Specifies how the header match will be performed to route the request. + oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 4; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. The regex grammar used in the value field is defined + // `here `_. + // + // Examples: + // + // * The regex *\d{3}* matches the value *123* + // * The regex *\d{3}* does not match the value *1234* + // * The regex *\d{3}* does not match the value *123.456* + // + // .. attention:: + // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use + // with untrusted input in all cases. 
+ string regex_match = 5 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. + type.matcher.RegexMatcher safe_regex_match = 11; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, + // "-1somestring" + envoy.type.Int64Range range_match = 6; + + // If specified, header match will be performed based on whether the header is in the + // request. + bool present_match = 7; + + // If specified, header match will be performed based on the prefix of the header value. + // Note: empty prefix is not allowed, please use present_match instead. + // + // Examples: + // + // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. + string prefix_match = 9 [(validate.rules).string.min_bytes = 1]; + + // If specified, header match will be performed based on the suffix of the header value. + // Note: empty suffix is not allowed, please use present_match instead. + // + // Examples: + // + // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. + string suffix_match = 10 [(validate.rules).string.min_bytes = 1]; + } + + // If specified, the match result will be inverted before checking. Defaults to false. 
+ // + // Examples: + // + // * The regex *\d{3}* does not match the value *1234*, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. + bool invert_match = 8; +} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. +message QueryParameterMatcher { + // Specifies the name of a key that must be present in the requested + // *path*'s query string. + string name = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 1024}]; + + // Specifies the value of the key. If the value is absent, a request + // that contains the key in its query string will match, whether the + // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") + // + // ..attention:: + // This field is deprecated. Use an `exact` match inside the `string_match` field. + string value = 3 [deprecated = true]; + + // Specifies whether the query parameter value is a regular expression. + // Defaults to false. The entire query parameter value (i.e., the part to + // the right of the equals sign in "key=value") must match the regex. + // E.g., the regex "\d+$" will match "123" but not "a123" or "123a". + // + // ..attention:: + // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. + google.protobuf.BoolValue regex = 4 [deprecated = true]; + + oneof query_parameter_match_specifier { + // Specifies whether a query parameter value should match against a string. + type.matcher.StringMatcher string_match = 5 [(validate.rules).message.required = true]; + + // Specifies whether a query parameter should be present. 
+ bool present_match = 6; + } +} diff --git a/api/envoy/api/v3alpha/srds.proto b/api/envoy/api/v3alpha/srds.proto new file mode 100644 index 000000000000..22ad6e675683 --- /dev/null +++ b/api/envoy/api/v3alpha/srds.proto @@ -0,0 +1,135 @@ +syntax = "proto3"; + +package envoy.api.v3alpha; + +import "envoy/api/v3alpha/discovery.proto"; +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "validate/validate.proto"; + +option java_outer_classname = "SrdsProto"; +option java_package = "io.envoyproxy.envoy.api.v3alpha"; +option java_multiple_files = true; +option java_generic_services = true; +option (gogoproto.equal_all) = true; + +// [#protodoc-title: HTTP scoped routing configuration] +// * Routing :ref:`architecture overview ` +// +// The Scoped Routes Discovery Service (SRDS) API distributes +// :ref:`ScopedRouteConfiguration` +// resources. Each ScopedRouteConfiguration resource represents a "routing +// scope" containing a mapping that allows the HTTP connection manager to +// dynamically assign a routing table (specified via a +// :ref:`RouteConfiguration` message) to each +// HTTP request. +// [#proto-status: experimental] +service ScopedRoutesDiscoveryService { + rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc DeltaScopedRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } + + rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:scoped-routes" + body: "*" + }; + } +} + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). 
+// +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. +// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... +// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: , +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the `route-config1` +// RouteConfiguration being assigned to the HTTP request/stream. +// +// [#comment:next free field: 4] +// [#proto-status: experimental] +message ScopedRouteConfiguration { + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies a key which is matched against the output of the + // :ref:`scope_key_builder` + // specified in the HttpConnectionManager. The matching is done per HTTP + // request and is dependent on the order of the fragments contained in the + // Key. + message Key { + message Fragment { + oneof type { + option (validate.required) = true; + + // A string to match against. 
+ string string_key = 1; + } + } + + // The ordered set of fragments to match against. The order must match the + // fragments in the corresponding + // :ref:`scope_key_builder`. + repeated Fragment fragments = 1 [(validate.rules).repeated .min_items = 1]; + } + + // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated + // with this scope. + string route_configuration_name = 2 [(validate.rules).string.min_bytes = 1]; + + // The key to match against. + Key key = 3 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/accesslog/v3alpha/BUILD b/api/envoy/config/accesslog/v3alpha/BUILD new file mode 100644 index 000000000000..4f5da73ee424 --- /dev/null +++ b/api/envoy/config/accesslog/v3alpha/BUILD @@ -0,0 +1,22 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "als", + srcs = ["als.proto"], + deps = [ + "//envoy/api/v3alpha/core:grpc_service", + ], +) + +api_proto_library_internal( + name = "file", + srcs = ["file.proto"], +) + +api_go_proto_library( + name = "als", + proto = ":als", + deps = ["//envoy/api/v3alpha/core:grpc_service_go_proto"], +) diff --git a/api/envoy/config/accesslog/v3alpha/als.proto b/api/envoy/config/accesslog/v3alpha/als.proto new file mode 100644 index 000000000000..a194d1449e4b --- /dev/null +++ b/api/envoy/config/accesslog/v3alpha/als.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package envoy.config.accesslog.v3alpha; + +option java_outer_classname = "AlsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/grpc_service.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// 
[#protodoc-title: gRPC Access Log Service (ALS)] + +// Configuration for the built-in *envoy.http_grpc_access_log* +// :ref:`AccessLog `. This configuration +// will populate :ref:`StreamAccessLogsMessage.http_logs +// `. +message HttpGrpcAccessLogConfig { + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + + // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers + // `. + repeated string additional_request_headers_to_log = 2; + + // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers + // `. + repeated string additional_response_headers_to_log = 3; + + // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers + // `. + repeated string additional_response_trailers_to_log = 4; +} + +// Configuration for the built-in *envoy.tcp_grpc_access_log* type. This configuration will +// populate *StreamAccessLogsMessage.tcp_logs*. +message TcpGrpcAccessLogConfig { + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; +} + +// Common configuration for gRPC access logs. +message CommonGrpcAccessLogConfig { + // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier + // `. This allows the + // access log server to differentiate between different access logs coming from the same Envoy. + string log_name = 1 [(validate.rules).string.min_bytes = 1]; + + // The gRPC service for the access log service. + envoy.api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + + // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time + // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to + // 1 second. + google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration.gt = {}]; + + // Soft size limit in bytes for access log entries buffer. 
Logger will buffer requests until + // this limit is hit, or every time flush interval is elapsed, whichever comes first. Setting it + // to zero effectively disables the batching. Defaults to 16384. + google.protobuf.UInt32Value buffer_size_bytes = 4; +} diff --git a/api/envoy/config/accesslog/v3alpha/file.proto b/api/envoy/config/accesslog/v3alpha/file.proto new file mode 100644 index 000000000000..b07658bc9275 --- /dev/null +++ b/api/envoy/config/accesslog/v3alpha/file.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.accesslog.v3alpha; + +option java_outer_classname = "FileProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha"; +option go_package = "v2"; + +import "validate/validate.proto"; +import "google/protobuf/struct.proto"; + +// [#protodoc-title: File access log] + +// Custom configuration for an :ref:`AccessLog +// ` that writes log entries directly to a +// file. Configures the built-in *envoy.file_access_log* AccessLog. +message FileAccessLog { + // A path to a local file to which to write the access log entries. + string path = 1 [(validate.rules).string.min_bytes = 1]; + + // Access log format. Envoy supports :ref:`custom access log formats + // ` as well as a :ref:`default format + // `. 
+ oneof access_log_format { + // Access log :ref:`format string` + string format = 2; + + // Access log :ref:`format dictionary` + google.protobuf.Struct json_format = 3; + } +} diff --git a/api/envoy/config/bootstrap/v3alpha/BUILD b/api/envoy/config/bootstrap/v3alpha/BUILD new file mode 100644 index 000000000000..d148021c741a --- /dev/null +++ b/api/envoy/config/bootstrap/v3alpha/BUILD @@ -0,0 +1,40 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "bootstrap", + srcs = ["bootstrap.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha:cds", + "//envoy/api/v3alpha:lds", + "//envoy/api/v3alpha/auth:cert", + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:config_source", + "//envoy/config/metrics/v3alpha:metrics_service", + "//envoy/config/metrics/v3alpha:stats", + "//envoy/config/overload/v3alpha:overload", + "//envoy/config/ratelimit/v3alpha:rls", + "//envoy/config/trace/v3alpha:trace", + ], +) + +api_go_proto_library( + name = "bootstrap", + proto = ":bootstrap", + deps = [ + "//envoy/api/v3alpha:cds_go_grpc", + "//envoy/api/v3alpha:lds_go_grpc", + "//envoy/api/v3alpha/auth:cert_go_proto", + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:config_source_go_proto", + "//envoy/config/metrics/v3alpha:metrics_service_go_proto", + "//envoy/config/metrics/v3alpha:stats_go_proto", + "//envoy/config/overload/v3alpha:overload_go_proto", + "//envoy/config/ratelimit/v3alpha:rls_go_grpc", + "//envoy/config/trace/v3alpha:trace_go_proto", + ], +) diff --git a/api/envoy/config/bootstrap/v3alpha/bootstrap.proto b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto new file mode 100644 index 000000000000..57157a4ae3f3 --- /dev/null +++ b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto @@ -0,0 +1,318 @@ +// 
[#protodoc-title: Bootstrap] +// This proto is supplied via the :option:`-c` CLI flag and acts as the root +// of the Envoy v2 configuration. See the :ref:`v2 configuration overview +// ` for more detail. + +syntax = "proto3"; + +package envoy.config.bootstrap.v3alpha; + +option java_outer_classname = "BootstrapProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.bootstrap.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/auth/cert.proto"; +import "envoy/api/v3alpha/core/config_source.proto"; +import "envoy/api/v3alpha/cds.proto"; +import "envoy/api/v3alpha/lds.proto"; +import "envoy/config/trace/v3alpha/trace.proto"; +import "envoy/config/metrics/v3alpha/stats.proto"; +import "envoy/config/overload/v3alpha/overload.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// Bootstrap :ref:`configuration overview `. +message Bootstrap { + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). + envoy.api.v3alpha.core.Node node = 1; + + message StaticResources { + // Static :ref:`Listeners `. These listeners are + // available regardless of LDS configuration. + repeated envoy.api.v3alpha.Listener listeners = 1; + + // If a network based configuration source is specified for :ref:`cds_config + // `, it's + // necessary to have some initial cluster definitions available to allow Envoy to know how to + // speak to the management server. These cluster definitions may not use :ref:`EDS + // ` (i.e. they should be static IP or DNS-based). 
+ repeated envoy.api.v3alpha.Cluster clusters = 2; + + // These static secrets can be used by :ref:`SdsSecretConfig + // ` + repeated envoy.api.v3alpha.auth.Secret secrets = 3; + } + // Statically specified resources. + StaticResources static_resources = 2; + + message DynamicResources { + // All :ref:`Listeners ` are provided by a single + // :ref:`LDS ` configuration source. + envoy.api.v3alpha.core.ConfigSource lds_config = 1; + + // All post-bootstrap :ref:`Cluster ` definitions are + // provided by a single :ref:`CDS ` + // configuration source. + envoy.api.v3alpha.core.ConfigSource cds_config = 2; + + // A single :ref:`ADS ` source may be optionally + // specified. This must have :ref:`api_type + // ` :ref:`GRPC + // `. Only + // :ref:`ConfigSources ` that have + // the :ref:`ads ` field set will be + // streamed on the ADS channel. + envoy.api.v3alpha.core.ApiConfigSource ads_config = 3; + + reserved 4; + } + // xDS configuration sources. + DynamicResources dynamic_resources = 3; + + // Configuration for the cluster manager which owns all upstream clusters + // within the server. + ClusterManager cluster_manager = 4; + + // Health discovery service config option. + // (:ref:`core.ApiConfigSource `) + envoy.api.v3alpha.core.ApiConfigSource hds_config = 14; + + // Optional file system path to search for startup flag files. + string flags_path = 5; + + // Optional set of stats sinks. + repeated envoy.config.metrics.v3alpha.StatsSink stats_sinks = 6; + + // Configuration for internal processing of stats. + envoy.config.metrics.v3alpha.StatsConfig stats_config = 13; + + // Optional duration between flushes to configured stats sinks. For + // performance reasons Envoy latches counters and only flushes counters and + // gauges at a periodic interval. If not specified the default is 5000ms (5 + // seconds). + // Duration must be at least 1ms and at most 5 min. 
+ google.protobuf.Duration stats_flush_interval = 7 [ + (validate.rules).duration = { + lt: {seconds: 300}, + gte: {nanos: 1000000} + }, + (gogoproto.stdduration) = true + ]; + + // Optional watchdog configuration. + Watchdog watchdog = 8; + + // Configuration for an external tracing provider. If not specified, no + // tracing will be performed. + envoy.config.trace.v3alpha.Tracing tracing = 9; + + reserved 10; + + // Configuration for the runtime configuration provider (deprecated). If not + // specified, a “null” provider will be used which will result in all defaults + // being used. + Runtime runtime = 11 [deprecated = true]; + + // Configuration for the runtime configuration provider. If not + // specified, a “null” provider will be used which will result in all defaults + // being used. + LayeredRuntime layered_runtime = 17; + + // Configuration for the local administration HTTP server. + Admin admin = 12; + + // Optional overload manager configuration. + envoy.config.overload.v3alpha.OverloadManager overload_manager = 15; + + // Enable :ref:`stats for event dispatcher `, defaults to false. + // Note that this records a value for each iteration of the event loop on every thread. This + // should normally be minimal overhead, but when using + // :ref:`statsd `, it will send each observed + // value over the wire individually because the statsd protocol doesn't have any way to represent + // a histogram summary. Be aware that this can be a very large volume of data. + bool enable_dispatcher_stats = 16; + + // Optional string which will be used in lieu of x-envoy in prefixing headers. + // + // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be + // transformed into x-foo-retry-on etc. + // + // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the + // headers Envoy will trust for core code and core extensions only. 
Be VERY careful making + // changes to this string, especially in multi-layer Envoy deployments or deployments using + // extensions which are not upstream. + string header_prefix = 18; +} + +// Administration interface :ref:`operations documentation +// `. +message Admin { + // The path to write the access log for the administration server. If no + // access log is desired specify ‘/dev/null’. This is only required if + // :ref:`address ` is set. + string access_log_path = 1; + + // The cpu profiler output path for the administration server. If no profile + // path is specified, the default is ‘/var/log/envoy/envoy.prof’. + string profile_path = 2; + + // The TCP address that the administration server will listen on. + // If not specified, Envoy will not start an administration server. + envoy.api.v3alpha.core.Address address = 3; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated envoy.api.v3alpha.core.SocketOption socket_options = 4; +} + +// Cluster manager :ref:`architecture overview `. +message ClusterManager { + // Name of the local cluster (i.e., the cluster that owns the Envoy running + // this configuration). In order to enable :ref:`zone aware routing + // ` this option must be set. + // If *local_cluster_name* is defined then :ref:`clusters + // ` must be defined in the :ref:`Bootstrap + // static cluster resources + // `. This is + // unrelated to the :option:`--service-cluster` option which does not `affect zone aware routing + // `_. + string local_cluster_name = 1; + + message OutlierDetection { + // Specifies the path to the outlier event log. + string event_log_path = 1; + } + // Optional global configuration for outlier detection. + OutlierDetection outlier_detection = 2; + + // Optional configuration used to bind newly established upstream connections. + // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. 
+ envoy.api.v3alpha.core.BindConfig upstream_bind_config = 3; + + // A management server endpoint to stream load stats to via + // *StreamLoadStats*. This must have :ref:`api_type + // ` :ref:`GRPC + // `. + envoy.api.v3alpha.core.ApiConfigSource load_stats_config = 4; +} + +// Envoy process watchdog configuration. When configured, this monitors for +// nonresponsive threads and kills the process after the configured thresholds. +message Watchdog { + // The duration after which Envoy counts a nonresponsive thread in the + // *server.watchdog_miss* statistic. If not specified the default is 200ms. + google.protobuf.Duration miss_timeout = 1; + + // The duration after which Envoy counts a nonresponsive thread in the + // *server.watchdog_mega_miss* statistic. If not specified the default is + // 1000ms. + google.protobuf.Duration megamiss_timeout = 2; + + // If a watched thread has been nonresponsive for this duration, assume a + // programming error and kill the entire Envoy process. Set to 0 to disable + // kill behavior. If not specified the default is 0 (disabled). + google.protobuf.Duration kill_timeout = 3; + + // If at least two watched threads have been nonresponsive for at least this + // duration assume a true deadlock and kill the entire Envoy process. Set to 0 + // to disable this behavior. If not specified the default is 0 (disabled). + google.protobuf.Duration multikill_timeout = 4; +} + +// Runtime :ref:`configuration overview ` (deprecated). +message Runtime { + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. Envoy + // will watch the location for changes and reload the file system tree when + // they happen. If this parameter is not set, there will be no disk based + // runtime. + string symlink_root = 1; + + // Specifies the subdirectory to load within the root directory. 
This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + string subdirectory = 2; + + // Specifies an optional subdirectory to load within the root directory. If + // specified and the directory exists, configuration values within this + // directory will override those found in the primary subdirectory. This is + // useful when Envoy is deployed across many different types of servers. + // Sometimes it is useful to have a per service cluster directory for runtime + // configuration. See below for exactly how the override directory is used. + string override_subdirectory = 3; + + // Static base runtime. This will be :ref:`overridden + // ` by other runtime layers, e.g. + // disk or admin. This follows the :ref:`runtime protobuf JSON representation + // encoding `. + google.protobuf.Struct base = 4; +} + +message RuntimeLayer { + // :ref:`Disk runtime ` layer. + message DiskLayer { + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. + // Envoy will watch the location for changes and reload the file system tree + // when they happen. See documentation on runtime :ref:`atomicity + // ` for further details on how reloads are + // treated. + string symlink_root = 1; + + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + string subdirectory = 3; + + // :ref:`Append ` the + // service cluster to the path under symlink root. + bool append_service_cluster = 2; + } + + // :ref:`Admin console runtime ` layer. + message AdminLayer { + } + + // :ref:`Runtime Discovery Service (RTDS) ` layer. 
+ message RtdsLayer { + // Resource to subscribe to at *rtds_config* for the RTDS layer. + string name = 1; + + // RTDS configuration source. + envoy.api.v3alpha.core.ConfigSource rtds_config = 2; + } + + // Descriptive name for the runtime layer. This is only used for the runtime + // :http:get:`/runtime` output. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + oneof layer_specifier { + // :ref:`Static runtime ` layer. + // This follows the :ref:`runtime protobuf JSON representation encoding + // `. Unlike static xDS resources, this static + // layer is overridable by later layers in the runtime virtual filesystem. + option (validate.required) = true; + + google.protobuf.Struct static_layer = 2; + DiskLayer disk_layer = 3; + AdminLayer admin_layer = 4; + RtdsLayer rtds_layer = 5; + } +} + +// Runtime :ref:`configuration overview `. +message LayeredRuntime { + // The :ref:`layers ` of the runtime. This is ordered + // such that later layers in the list overlay earlier entries. 
+ repeated RuntimeLayer layers = 1; +} diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD new file mode 100644 index 000000000000..50d0aa2354eb --- /dev/null +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD @@ -0,0 +1,11 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "cluster", + srcs = ["cluster.proto"], + deps = [ + "//envoy/config/common/dynamic_forward_proxy/v3alpha:dns_cache", + ], +) diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto new file mode 100644 index 000000000000..baed68d3b1ac --- /dev/null +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.config.cluster.dynamic_forward_proxy.v3alpha; + +option java_outer_classname = "DynamicForwardProxyClusterProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v3alpha"; +option go_package = "v2alpha"; + +import "envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Dynamic forward proxy cluster configuration] + +// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview +// ` for more information. +message ClusterConfig { + // The DNS cache configuration that the cluster will attach to. Note this configuration must + // match that of associated :ref:`dynamic forward proxy HTTP filter configuration + // `. 
+ common.dynamic_forward_proxy.v3alpha.DnsCacheConfig dns_cache_config = 1 + [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD new file mode 100644 index 000000000000..bdd23e86de9f --- /dev/null +++ b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD @@ -0,0 +1,12 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "dns_cache", + srcs = ["dns_cache.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha:cds", + ], +) diff --git a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto new file mode 100644 index 000000000000..7b8a67be4333 --- /dev/null +++ b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto @@ -0,0 +1,69 @@ +syntax = "proto3"; + +package envoy.config.common.dynamic_forward_proxy.v3alpha; + +option java_outer_classname = "DnsCacheProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v3alpha"; + +import "envoy/api/v3alpha/cds.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Dynamic forward proxy common configuration] + +// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview +// ` for more information. +message DnsCacheConfig { + // The name of the cache. Multiple named caches allow independent dynamic forward proxy + // configurations to operate within a single Envoy process using different configurations. All + // configurations with the same name *must* otherwise have the same settings when referenced + // from different configuration components. 
Configuration will fail to load if this is not + // the case. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // The DNS lookup family to use during resolution. + // + // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The + // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and + // then configures a host to have a primary and fall back address. With this, we could very + // likely build a "happy eyeballs" connection pool which would race the primary / fall back + // address and return the one that wins. This same method could potentially also be used for + // QUIC to TCP fall back.] + api.v3alpha.Cluster.DnsLookupFamily dns_lookup_family = 2 + [(validate.rules).enum.defined_only = true]; + + // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. + // + // .. note: + // + // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be + // added in a future change. + google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration.gt = {}]; + + // The TTL for hosts that are unused. Hosts that have not been used in the configured time + // interval will be purged. If not specified defaults to 5m. + // + // .. note: + // + // The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This + // means that if the configured TTL is shorter than the refresh rate the host may not be removed + // immediately. + // + // .. note: + // + // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. + google.protobuf.Duration host_ttl = 4 [(validate.rules).duration.gt = {}]; + + // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. + // + // .. 
note: + // + // The implementation is approximate and enforced independently on each worker thread, thus + // it is possible for the maximum hosts in the cache to go slightly above the configured + // value depending on timing. This is similar to how other circuit breakers work. + google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32.gt = 0]; +} diff --git a/api/envoy/config/common/tap/v3alpha/BUILD b/api/envoy/config/common/tap/v3alpha/BUILD new file mode 100644 index 000000000000..673a602800af --- /dev/null +++ b/api/envoy/config/common/tap/v3alpha/BUILD @@ -0,0 +1,13 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "common", + srcs = ["common.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha/core:config_source", + "//envoy/service/tap/v3alpha:common", + ], +) diff --git a/api/envoy/config/common/tap/v3alpha/common.proto b/api/envoy/config/common/tap/v3alpha/common.proto new file mode 100644 index 000000000000..4e5debddbc51 --- /dev/null +++ b/api/envoy/config/common/tap/v3alpha/common.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +import "envoy/service/tap/v3alpha/common.proto"; +import "envoy/api/v3alpha/core/config_source.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +package envoy.config.common.tap.v3alpha; + +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.common.tap.v3alpha"; + +// [#protodoc-title: Common tap extension configuration] + +// Common configuration for all tap extensions. +message CommonExtensionConfig { + + // [#not-implemented-hide:] + message TapDSConfig { + // Configuration for the source of TapDS updates for this Cluster. + envoy.api.v3alpha.core.ConfigSource config_source = 1 + [(validate.rules).message.required = true]; + + // Tap config to request from XDS server. 
+ string name = 2 [(validate.rules).string.min_bytes = 1]; + } + + oneof config_type { + option (validate.required) = true; + + // If specified, the tap filter will be configured via an admin handler. + AdminConfig admin_config = 1; + + // If specified, the tap filter will be configured via a static configuration that cannot be + // changed. + service.tap.v3alpha.TapConfig static_config = 2; + + // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. + TapDSConfig tapds_config = 3; + } +} + +// Configuration for the admin handler. See :ref:`here ` for +// more information. +message AdminConfig { + // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is + // matched to the configured filter opaque ID to determine which filter to configure. + string config_id = 1 [(validate.rules).string.min_bytes = 1]; +} diff --git a/api/envoy/config/filter/accesslog/v3alpha/BUILD b/api/envoy/config/filter/accesslog/v3alpha/BUILD new file mode 100644 index 000000000000..3f241bc5e10b --- /dev/null +++ b/api/envoy/config/filter/accesslog/v3alpha/BUILD @@ -0,0 +1,28 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "accesslog", + srcs = ["accesslog.proto"], + visibility = [ + "//envoy/config/filter/http/router/v3alpha:__pkg__", + "//envoy/config/filter/network/http_connection_manager/v3alpha:__pkg__", + "//envoy/config/filter/network/tcp_proxy/v3alpha:__pkg__", + ], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/route", + "//envoy/type:percent", + ], +) + +api_go_proto_library( + name = "accesslog", + proto = ":accesslog", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/route:route_go_proto", + "//envoy/type:percent_go_proto", + ], +) diff --git a/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto 
b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto new file mode 100644 index 000000000000..381e7bdf9a87 --- /dev/null +++ b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto @@ -0,0 +1,248 @@ +syntax = "proto3"; + +package envoy.config.filter.accesslog.v3alpha; + +option java_outer_classname = "AccesslogProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/route/route.proto"; +import "envoy/type/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Common access log types] + +message AccessLog { + // The name of the access log implementation to instantiate. The name must + // match a statically registered access log. Current built-in loggers include: + // + // #. "envoy.file_access_log" + // #. "envoy.http_grpc_access_log" + // #. "envoy.tcp_grpc_access_log" + string name = 1; + + // Filter which is used to determine if the access log needs to be written. + AccessLogFilter filter = 2; + + // Custom configuration that depends on the access log being instantiated. Built-in + // configurations include: + // + // #. "envoy.file_access_log": :ref:`FileAccessLog + // ` + // #. "envoy.http_grpc_access_log": :ref:`HttpGrpcAccessLogConfig + // ` + // #. "envoy.tcp_grpc_access_log": :ref:`TcpGrpcAccessLogConfig + // ` + oneof config_type { + google.protobuf.Struct config = 3; + + google.protobuf.Any typed_config = 4; + } +} + +message AccessLogFilter { + oneof filter_specifier { + option (validate.required) = true; + + // Status code filter. + StatusCodeFilter status_code_filter = 1; + + // Duration filter. + DurationFilter duration_filter = 2; + + // Not health check filter. + NotHealthCheckFilter not_health_check_filter = 3; + + // Traceable filter. 
+ TraceableFilter traceable_filter = 4; + + // Runtime filter. + RuntimeFilter runtime_filter = 5; + + // And filter. + AndFilter and_filter = 6; + + // Or filter. + OrFilter or_filter = 7; + + // Header filter. + HeaderFilter header_filter = 8; + + // Response flag filter. + ResponseFlagFilter response_flag_filter = 9; + + // gRPC status filter. + GrpcStatusFilter grpc_status_filter = 10; + + // Extension filter. + ExtensionFilter extension_filter = 11; + } +} + +// Filter on an integer comparison. +message ComparisonFilter { + enum Op { + // = + EQ = 0; + + // >= + GE = 1; + + // <= + LE = 2; + } + + // Comparison operator. + Op op = 1 [(validate.rules).enum.defined_only = true]; + + // Value to compare against. + envoy.api.v3alpha.core.RuntimeUInt32 value = 2; +} + +// Filters on HTTP response/status code. +message StatusCodeFilter { + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; +} + +// Filters on total request duration in milliseconds. +message DurationFilter { + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +message NotHealthCheckFilter { +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. +message TraceableFilter { +} + +// Filters for random sampling of requests. +message RuntimeFilter { + // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. + // If found in runtime, this value will replace the default numerator. + string runtime_key = 1 [(validate.rules).string.min_bytes = 1]; + + // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. 
+ envoy.type.FractionalPercent percent_sampled = 2; + + // By default, sampling pivots on the header + // :ref:`x-request-id` being present. If + // :ref:`x-request-id` is present, the filter will + // consistently sample across multiple hosts based on the runtime key value and the value + // extracted from :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based + // on the runtime key value alone. *use_independent_randomness* can be used for logging kill + // switches within complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to reason + // about from a probability perspective (i.e., setting to true will cause the filter to behave + // like an independent random variable when composed within logical operator filters). + bool use_independent_randomness = 3; +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. +message AndFilter { + repeated AccessLogFilter filters = 1 [(validate.rules).repeated .min_items = 2]; +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. +message OrFilter { + repeated AccessLogFilter filters = 2 [(validate.rules).repeated .min_items = 2]; +} + +// Filters requests based on the presence or value of a request header. +message HeaderFilter { + // Only requests with a header which matches the specified HeaderMatcher will pass the filter + // check. + envoy.api.v3alpha.route.HeaderMatcher header = 1 [(validate.rules).message.required = true]; +} + +// Filters requests that received responses with an Envoy response flag set. +// A list of the response flags can be found +// in the access log formatter :ref:`documentation`. 
+message ResponseFlagFilter { + // Only responses with the any of the flags listed in this field will be logged. + // This field is optional. If it is not specified, then any response flag will pass + // the filter check. + repeated string flags = 1 [(validate.rules).repeated .items.string = { + in: [ + "LH", + "UH", + "UT", + "LR", + "UR", + "UF", + "UC", + "UO", + "NR", + "DI", + "FI", + "RL", + "UAEX", + "RLSE", + "DC", + "URX", + "SI", + "IH" + ] + }]; +} + +// Filters gRPC requests based on their response status. If a gRPC status is not provided, the +// filter will infer the status from the HTTP status code. +message GrpcStatusFilter { + enum Status { + OK = 0; + CANCELED = 1; + UNKNOWN = 2; + INVALID_ARGUMENT = 3; + DEADLINE_EXCEEDED = 4; + NOT_FOUND = 5; + ALREADY_EXISTS = 6; + PERMISSION_DENIED = 7; + RESOURCE_EXHAUSTED = 8; + FAILED_PRECONDITION = 9; + ABORTED = 10; + OUT_OF_RANGE = 11; + UNIMPLEMENTED = 12; + INTERNAL = 13; + UNAVAILABLE = 14; + DATA_LOSS = 15; + UNAUTHENTICATED = 16; + } + + // Logs only responses that have any one of the gRPC statuses in this field. + repeated Status statuses = 1 [(validate.rules).repeated .items.enum.defined_only = true]; + + // If included and set to true, the filter will instead block all responses with a gRPC status or + // inferred gRPC status enumerated in statuses, and allow all other responses. + bool exclude = 2; +} + +// Extension filter is statically registered at runtime. +message ExtensionFilter { + // The name of the filter implementation to instantiate. The name must + // match a statically registered filter. + string name = 1; + + // Custom configuration that depends on the filter being instantiated. 
+ oneof config_type { + google.protobuf.Struct config = 2; + google.protobuf.Any typed_config = 3; + } +} diff --git a/api/envoy/config/filter/fault/v3alpha/BUILD b/api/envoy/config/filter/fault/v3alpha/BUILD new file mode 100644 index 000000000000..22e3bec56ca3 --- /dev/null +++ b/api/envoy/config/filter/fault/v3alpha/BUILD @@ -0,0 +1,13 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "fault", + srcs = ["fault.proto"], + visibility = [ + "//envoy/config/filter/http/fault/v3alpha:__pkg__", + "//envoy/config/filter/network/mongo_proxy/v3alpha:__pkg__", + ], + deps = ["//envoy/type:percent"], +) diff --git a/api/envoy/config/filter/fault/v3alpha/fault.proto b/api/envoy/config/filter/fault/v3alpha/fault.proto new file mode 100644 index 000000000000..b54a063e7665 --- /dev/null +++ b/api/envoy/config/filter/fault/v3alpha/fault.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package envoy.config.filter.fault.v3alpha; + +option java_outer_classname = "FaultProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.fault.v3alpha"; +option go_package = "v2"; + +import "envoy/type/percent.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Common fault injection types] + +// Delay specification is used to inject latency into the +// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. +message FaultDelay { + // Fault delays are controlled via an HTTP header (if applicable). See the + // :ref:`http fault filter ` documentation for + // more information. + message HeaderDelay { + } + + enum FaultDelayType { + // Unused and deprecated. + FIXED = 0; + } + + // Unused and deprecated. Will be removed in the next release. 
+ FaultDelayType type = 1 [deprecated = true]; + + reserved 2; + + oneof fault_delay_secifier { + option (validate.required) = true; + + // Add a fixed delay before forwarding the operation upstream. See + // https://developers.google.com/protocol-buffers/docs/proto3#json for + // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified + // delay will be injected before a new request/operation. For TCP + // connections, the proxying of the connection upstream will be delayed + // for the specified period. This is required if type is FIXED. + google.protobuf.Duration fixed_delay = 3 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Fault delays are controlled via an HTTP header (if applicable). + HeaderDelay header_delay = 5; + } + + // The percentage of operations/connections/requests on which the delay will be injected. + type.FractionalPercent percentage = 4; +} + +// Describes a rate limit to be applied. +message FaultRateLimit { + // Describes a fixed/constant rate limit. + message FixedLimit { + // The limit supplied in KiB/s. + uint64 limit_kbps = 1 [(validate.rules).uint64.gte = 1]; + } + + // Rate limits are controlled via an HTTP header (if applicable). See the + // :ref:`http fault filter ` documentation for + // more information. + message HeaderLimit { + } + + oneof limit_type { + option (validate.required) = true; + + // A fixed rate limit. + FixedLimit fixed_limit = 1; + + // Rate limits are controlled via an HTTP header (if applicable). + HeaderLimit header_limit = 3; + } + + // The percentage of operations/connections/requests on which the rate limit will be injected. 
+ type.FractionalPercent percentage = 2; +} diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD new file mode 100644 index 000000000000..aa2b0634739c --- /dev/null +++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD @@ -0,0 +1,11 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "adaptive_concurrency", + srcs = ["adaptive_concurrency.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + ], +) diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto new file mode 100644 index 000000000000..17bac55800ce --- /dev/null +++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package envoy.config.filter.http.adaptive_concurrency.v3alpha; + +option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v3alpha"; +option java_outer_classname = "AdaptiveConcurrencyProto"; +option java_multiple_files = true; +option go_package = "v2alpha"; + +message AdaptiveConcurrency { +} diff --git a/api/envoy/config/filter/http/buffer/v3alpha/BUILD b/api/envoy/config/filter/http/buffer/v3alpha/BUILD new file mode 100644 index 000000000000..e59429af9ace --- /dev/null +++ b/api/envoy/config/filter/http/buffer/v3alpha/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "buffer", + srcs = ["buffer.proto"], +) diff --git a/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto b/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto new file mode 100644 index 000000000000..a948493b2450 --- /dev/null +++ 
b/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.config.filter.http.buffer.v3alpha; + +option java_outer_classname = "BufferProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v3alpha"; +option go_package = "v2"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Buffer] +// Buffer :ref:`configuration overview `. + +message Buffer { + reserved 2; // formerly max_request_time + + // The maximum request size that the filter will buffer before the connection + // manager will stop buffering and return a 413 response. + google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; +} + +message BufferPerRoute { + oneof override { + option (validate.required) = true; + + // Disable the buffer filter for this particular vhost or route. + bool disabled = 1 [(validate.rules).bool.const = true]; + + // Override the global configuration of the filter with this new config. 
+ Buffer buffer = 2 [(validate.rules).message.required = true]; + } +} diff --git a/api/envoy/config/filter/http/csrf/v3alpha/BUILD b/api/envoy/config/filter/http/csrf/v3alpha/BUILD new file mode 100644 index 000000000000..b5da684c54b4 --- /dev/null +++ b/api/envoy/config/filter/http/csrf/v3alpha/BUILD @@ -0,0 +1,12 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "csrf", + srcs = ["csrf.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/type/matcher:string", + ], +) diff --git a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto new file mode 100644 index 000000000000..5eaa14c567d7 --- /dev/null +++ b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package envoy.config.filter.http.csrf.v3alpha; + +option java_outer_classname = "CsrfPolicyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/type/matcher/string.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: CSRF] +// Cross-Site Request Forgery :ref:`configuration overview `. + +// CSRF filter config. +message CsrfPolicy { + // Specifies if CSRF is enabled. + // + // More information on how this can be controlled via runtime can be found + // :ref:`here `. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + envoy.api.v3alpha.core.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message.required = true]; + + // Specifies that CSRF policies will be evaluated and tracked, but not enforced. + // This is intended to be used when filter_enabled is off. + // + // More information on how this can be controlled via runtime can be found + // :ref:`here `. 
+ // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + envoy.api.v3alpha.core.RuntimeFractionalPercent shadow_enabled = 2; + + // Specifies additional source origins that will be allowed in addition to + // the destination origin. + // + // More information on how this can be configured via runtime can be found + // :ref:`here `. + repeated envoy.type.matcher.StringMatcher additional_origins = 3; +} diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD new file mode 100644 index 000000000000..f09166ba8129 --- /dev/null +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD @@ -0,0 +1,11 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "dynamic_forward_proxy", + srcs = ["dynamic_forward_proxy.proto"], + deps = [ + "//envoy/config/common/dynamic_forward_proxy/v3alpha:dns_cache", + ], +) diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto new file mode 100644 index 000000000000..0fab44d63db5 --- /dev/null +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.config.filter.http.dynamic_forward_proxy.v3alpha; + +option java_outer_classname = "DynamicForwardProxyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v3alpha"; +option go_package = "v2alpha"; + +import "envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Dynamic forward proxy] + +// Configuration for the dynamic forward proxy HTTP filter. 
See the :ref:`architecture overview +// ` for more information. +message FilterConfig { + // The DNS cache configuration that the filter will attach to. Note this configuration must + // match that of associated :ref:`dynamic forward proxy cluster configuration + // `. + common.dynamic_forward_proxy.v3alpha.DnsCacheConfig dns_cache_config = 1 + [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD b/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD new file mode 100644 index 000000000000..39f9e44bb382 --- /dev/null +++ b/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD @@ -0,0 +1,15 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "ext_authz", + srcs = ["ext_authz.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:grpc_service", + "//envoy/api/v3alpha/core:http_uri", + "//envoy/type:http_status", + "//envoy/type/matcher:string", + ], +) diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto new file mode 100644 index 000000000000..af6f3c4866e5 --- /dev/null +++ b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto @@ -0,0 +1,209 @@ +syntax = "proto3"; + +package envoy.config.filter.http.ext_authz.v3alpha; + +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/core/grpc_service.proto"; +import "envoy/api/v3alpha/core/http_uri.proto"; + +import "envoy/type/http_status.proto"; +import "envoy/type/matcher/string.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: 
External Authorization] +// External Authorization :ref:`configuration overview `. + +message ExtAuthz { + // External authorization service configuration. + oneof services { + // gRPC service configuration (default timeout: 200ms). + envoy.api.v3alpha.core.GrpcService grpc_service = 1; + + // HTTP service configuration (default timeout: 200ms). + HttpService http_service = 3; + } + + // Changes filter's behaviour on errors: + // + // 1. When set to true, the filter will *accept* client request even if the communication with + // the authorization service has failed, or if the authorization service has returned a HTTP 5xx + // error. + // + // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* + // response if the communication with the authorization service has failed, or if the + // authorization service has returned a HTTP 5xx error. + // + // Note that errors can be *always* tracked in the :ref:`stats + // `. + bool failure_mode_allow = 2; + + // Sets the package version the gRPC service should use. This is particularly + // useful when transitioning from alpha to release versions assuming that both definitions are + // semantically compatible. Deprecation note: This field is deprecated and should only be used for + // version upgrade. See release notes for more details. + bool use_alpha = 4 [deprecated = true]; + + // Enables filter to buffer the client request body and send it within the authorization request. + // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization + // request message indicating if the body data is partial. + BufferSettings with_request_body = 5; + + // Clears route cache in order to allow the external authorization service to correctly affect + // routing decisions. Filter clears all cached routes when: + // + // 1. The field is set to *true*. + // + // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. + // + // 3. 
At least one *authorization response header* is added to the client request, or is used for + // altering another client request header. + // + bool clear_route_cache = 6; + + // Sets the HTTP status that is returned to the client when there is a network error between the + // filter and the authorization server. The default status is HTTP 403 Forbidden. + envoy.type.HttpStatus status_on_error = 7; + + // Specifies a list of metadata namespaces whose values, if present, will be passed to the + // ext_authz service as an opaque *protobuf::Struct*. + // + // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata + // ` is set, + // then the following will pass the jwt payload to the authorization server. + // + // .. code-block:: yaml + // + // metadata_context_namespaces: + // - envoy.filters.http.jwt_authn + // + repeated string metadata_context_namespaces = 8; +} + +// Configuration for buffering the request data. +message BufferSettings { + // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return + // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number + // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow + // `. + uint32 max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + + // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. + // The authorization request will be dispatched and no 413 HTTP error will be returned by the + // filter. + bool allow_partial_message = 2; +} + +// HttpService is used for raw HTTP communication between the filter and the authorization service. +// When configured, the filter will parse the client request and use these attributes to call the +// authorization server. Depending on the response, the filter may reject or accept the client +// request. 
Note that in any of these events, metadata can be added, removed or overridden by the +// filter: +// +// *On authorization request*, a list of allowed request headers may be supplied. See +// :ref:`allowed_headers +// ` +// for details. Additional headers metadata may be added to the authorization request. See +// :ref:`headers_to_add +// ` for +// details. +// +// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and +// additional headers metadata may be added to the original client request. See +// :ref:`allowed_upstream_headers +// ` +// for details. +// +// On other authorization response statuses, the filter will not allow traffic. Additional headers +// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers +// ` +// for details. +message HttpService { + // Sets the HTTP server URI which the authorization requests must be sent to. + envoy.api.v3alpha.core.HttpUri server_uri = 1; + + // Sets a prefix to the value of authorization request header *Path*. + string path_prefix = 2; + + reserved 3; + reserved 4; + reserved 5; + reserved 6; + + // Settings used for controlling authorization request metadata. + AuthorizationRequest authorization_request = 7; + + // Settings used for controlling authorization response metadata. + AuthorizationResponse authorization_response = 8; +} + +message AuthorizationRequest { + // Authorization request will include the client request headers that have a correspondent match + // in the :ref:`list `. Note that in addition to the + // user's supplied matchers: + // + // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // + // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have + // a message body. + // + envoy.type.matcher.ListStringMatcher allowed_headers = 1; + + // Sets a list of headers that will be included to the request to authorization service. 
Note that + // client request of the same key will be overridden. + repeated envoy.api.v3alpha.core.HeaderValue headers_to_add = 2; +} + +message AuthorizationResponse { + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the original client request. + // Note that coexistent headers will be overridden. + envoy.type.matcher.ListStringMatcher allowed_upstream_headers = 1; + + // When this :ref:`list `. is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that when this list is *not* set, all the authorization response headers, except *Authority + // (Host)* will be in the response to the client. When a header is included in this list, *Path*, + // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. + envoy.type.matcher.ListStringMatcher allowed_client_headers = 2; +} + +// Extra settings on a per virtualhost/route/weighted-cluster level. +message ExtAuthzPerRoute { + oneof override { + option (validate.required) = true; + + // Disable the ext auth filter for this particular vhost or route. + // If disabled is specified in multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool.const = true]; + + // Check request settings for this route. + CheckSettings check_settings = 2 [(validate.rules).message.required = true]; + } +} + +// Extra settings for the check request. You can use this to provide extra context for the +// external authorization server on specific virtual hosts \ routes. For example, adding a context +// extension on the virtual host level can give the ext-authz server information on what virtual +// host is used without needing to parse the host header. If CheckSettings is specified in multiple +// per-filter-configs, they will be merged in order, and the result will be used. 
+message CheckSettings { + // Context extensions to set on the CheckRequest's + // :ref:`AttributeContext.context_extensions` + // + // Merge semantics for this field are such that keys from more specific configs override. + // + // .. note:: + // + // These settings are only applied to a filter configured with a + // :ref:`grpc_service`. + map context_extensions = 1; +} diff --git a/api/envoy/config/filter/http/fault/v3alpha/BUILD b/api/envoy/config/filter/http/fault/v3alpha/BUILD new file mode 100644 index 000000000000..1fd5632c088d --- /dev/null +++ b/api/envoy/config/filter/http/fault/v3alpha/BUILD @@ -0,0 +1,13 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "fault", + srcs = ["fault.proto"], + deps = [ + "//envoy/api/v3alpha/route", + "//envoy/config/filter/fault/v3alpha:fault", + "//envoy/type:percent", + ], +) diff --git a/api/envoy/config/filter/http/fault/v3alpha/fault.proto b/api/envoy/config/filter/http/fault/v3alpha/fault.proto new file mode 100644 index 000000000000..f654ec17f617 --- /dev/null +++ b/api/envoy/config/filter/http/fault/v3alpha/fault.proto @@ -0,0 +1,115 @@ +syntax = "proto3"; + +package envoy.config.filter.http.fault.v3alpha; + +option java_outer_classname = "FaultProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/route/route.proto"; +import "envoy/config/filter/fault/v3alpha/fault.proto"; +import "envoy/type/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Fault Injection] +// Fault Injection :ref:`configuration overview `. + +message FaultAbort { + reserved 1; + + oneof error_type { + option (validate.required) = true; + + // HTTP status code to use to abort the HTTP request. 
+ uint32 http_status = 2 [(validate.rules).uint32 = {gte: 200, lt: 600}]; + } + + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + type.FractionalPercent percentage = 3; +} + +message HTTPFault { + // If specified, the filter will inject delays based on the values in the + // object. + filter.fault.v3alpha.FaultDelay delay = 1; + + // If specified, the filter will abort requests based on the values in + // the object. At least *abort* or *delay* must be specified. + FaultAbort abort = 2; + + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + string upstream_cluster = 3; + + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percentage + // ` field. + // The filter will check the request's headers against all the specified + // headers in the filter config. A match will happen if all the headers in the + // config are present in the request with the same values (or based on + // presence if the *value* field is not in the config). + repeated envoy.api.v3alpha.route.HeaderMatcher headers = 4; + + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. + // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. + repeated string downstream_nodes = 5; + + // The maximum number of faults that can be active at a single time via the configured fault + // filter. 
Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // `runtime ` and any faults that are not injected + // due to overflow will be indicated via the `faults_overflow + // ` stat. + // + // .. attention:: + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. + google.protobuf.UInt32Value max_active_faults = 6; + + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. + filter.fault.v3alpha.FaultRateLimit response_rate_limit = 7; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_delay_percent + string delay_percent_runtime = 8; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.abort_percent + string abort_percent_runtime = 9; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_duration_ms + string delay_duration_runtime = 10; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.http_status + string abort_http_status_runtime = 11; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.max_active_faults + string max_active_faults_runtime = 12; + + // The runtime key to override the :ref:`default ` + // runtime. 
The default is: fault.http.rate_limit.response_percent + string response_rate_limit_percent_runtime = 13; +} diff --git a/api/envoy/config/filter/http/gzip/v3alpha/BUILD b/api/envoy/config/filter/http/gzip/v3alpha/BUILD new file mode 100644 index 000000000000..e34d73c51c21 --- /dev/null +++ b/api/envoy/config/filter/http/gzip/v3alpha/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "gzip", + srcs = ["gzip.proto"], +) diff --git a/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto b/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto new file mode 100644 index 000000000000..5b5c6d6d1df7 --- /dev/null +++ b/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; + +package envoy.config.filter.http.gzip.v3alpha; + +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v3alpha"; +option go_package = "v2"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Gzip] +// Gzip :ref:`configuration overview `. + +message Gzip { + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. + google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {gte: 1, lte: 9}]; + + // Minimum response length, in bytes, which will trigger compression. The default value is 30. + google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32.gte = 30]; + + message CompressionLevel { + enum Enum { + DEFAULT = 0; + BEST = 1; + SPEED = 2; + } + } + + // A value used for selecting the zlib compression level. This setting will affect speed and + // amount of compression applied to the content. 
"BEST" provides higher compression at the cost of + // higher latency, "SPEED" provides lower compression with minimum impact on response time. + // "DEFAULT" provides an optimal result between speed and compression. This field will be set to + // "DEFAULT" if not specified. + CompressionLevel.Enum compression_level = 3 [(validate.rules).enum.defined_only = true]; + + enum CompressionStrategy { + DEFAULT = 0; + FILTERED = 1; + HUFFMAN = 2; + RLE = 3; + } + + // A value used for selecting the zlib compression strategy which is directly related to the + // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though + // there are situations which changing this parameter might produce better results. For example, + // run-length encoding (RLE) is typically used when the content is known for having sequences + // which same data occurs many consecutive times. For more information about each strategy, please + // refer to zlib manual. + CompressionStrategy compression_strategy = 4 [(validate.rules).enum.defined_only = true]; + + // Set of strings that allows specifying which mime-types yield compression; e.g., + // application/json, text/html, etc. When this field is not defined, compression will be applied + // to the following mime-types: "application/javascript", "application/json", + // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml". + repeated string content_type = 6 [(validate.rules).repeated = {max_items: 50}]; + + // If true, disables compression when the response contains an etag header. When it is false, the + // filter will preserve weak etags and remove the ones that require strong validation. + bool disable_on_etag_header = 7; + + // If true, removes accept-encoding from the request headers before dispatching it to the upstream + // so that responses do not get compressed before reaching the filter. 
+ bool remove_accept_encoding_header = 8; + + // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. + // Larger window results in better compression at the expense of memory usage. The default is 12 + // which will produce a 4096 bytes window. For more details about this parameter, please refer to + // zlib manual > deflateInit2. + google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {gte: 9, lte: 15}]; +} diff --git a/api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD b/api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD new file mode 100644 index 000000000000..3f8503acbe65 --- /dev/null +++ b/api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "header_to_metadata", + srcs = ["header_to_metadata.proto"], + deps = [], +) diff --git a/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto new file mode 100644 index 000000000000..927574a5a721 --- /dev/null +++ b/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; + +package envoy.config.filter.http.header_to_metadata.v3alpha; + +option java_outer_classname = "HeaderToMetadataProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v3alpha"; +option go_package = "v2"; + +import "validate/validate.proto"; + +// [#protodoc-title: Header-To-Metadata Filter] +// +// The configuration for transforming headers into metadata. This is useful +// for matching load balancer subsets, logging, etc. +// +// Header to Metadata :ref:`configuration overview `. 
+ +message Config { + enum ValueType { + STRING = 0; + NUMBER = 1; + + // The value is a serialized `protobuf.Value + // `_. + PROTOBUF_VALUE = 2; + } + + // ValueEncode defines the encoding algorithm. + enum ValueEncode { + // The value is not encoded. + NONE = 0; + + // The value is encoded in `Base64 `_. + // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the + // non-ASCII characters in the header. + BASE64 = 1; + } + + message KeyValuePair { + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string.min_bytes = 1]; + + // The value to pair with the given key. + // + // When used for a `on_header_present` case, if value is non-empty it'll be used + // instead of the header value. If both are empty, no metadata is added. + // + // When used for a `on_header_missing` case, a non-empty value must be provided + // otherwise no metadata is added. + string value = 3; + + // The value's type — defaults to string. + ValueType type = 4; + + // How is the value encoded, default is NONE (not encoded). + // The value will be decoded accordingly before storing to metadata. + ValueEncode encode = 5; + } + + // A Rule defines what metadata to apply when a header is present or missing. + message Rule { + // The header that triggers this rule — required. + string header = 1 [(validate.rules).string.min_bytes = 1]; + + // If the header is present, apply this metadata KeyValuePair. + // + // If the value in the KeyValuePair is non-empty, it'll be used instead + // of the header value. + KeyValuePair on_header_present = 2; + + // If the header is not present, apply this metadata KeyValuePair. + // + // The value in the KeyValuePair must be set, since it'll be used in lieu + // of the missing header value. + KeyValuePair on_header_missing = 3; + + // Whether or not to remove the header after a rule is applied. 
+ // + // This prevents headers from leaking. + bool remove = 4; + } + + // The list of rules to apply to requests. + repeated Rule request_rules = 1; + + // The list of rules to apply to responses. + repeated Rule response_rules = 2; +} diff --git a/api/envoy/config/filter/http/health_check/v3alpha/BUILD b/api/envoy/config/filter/http/health_check/v3alpha/BUILD new file mode 100644 index 000000000000..89e6eb3af702 --- /dev/null +++ b/api/envoy/config/filter/http/health_check/v3alpha/BUILD @@ -0,0 +1,21 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "health_check", + srcs = ["health_check.proto"], + deps = [ + "//envoy/api/v3alpha/route", + "//envoy/type:percent", + ], +) + +api_go_proto_library( + name = "health_check", + proto = ":health_check", + deps = [ + "//envoy/api/v3alpha/route:route_go_proto", + "//envoy/type:percent_go_proto", + ], +) diff --git a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto new file mode 100644 index 000000000000..31fcdfffaa80 --- /dev/null +++ b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package envoy.config.filter.http.health_check.v3alpha; + +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v3alpha"; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/api/v3alpha/route/route.proto"; +import "envoy/type/percent.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: Health check] +// Health check :ref:`configuration overview `. 
+ +message HealthCheck { + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message.required = true]; + + reserved 2; + reserved "endpoint"; + + // If operating in pass through mode, the amount of time in milliseconds + // that the filter should cache the upstream response. + google.protobuf.Duration cache_time = 3 [(gogoproto.stdduration) = true]; + + // If operating in non-pass-through mode, specifies a set of upstream cluster + // names and the minimum percentage of servers in each of those clusters that + // must be healthy or degraded in order for the filter to return a 200. + map cluster_min_healthy_percentages = 4; + + // Specifies a set of health check request headers to match on. The health check filter will + // check a request’s headers against all the specified headers. To specify the health check + // endpoint, set the ``:path`` header to match on. + repeated envoy.api.v3alpha.route.HeaderMatcher headers = 5; +} diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD b/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD new file mode 100644 index 000000000000..5b34fcd9c458 --- /dev/null +++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "ip_tagging", + srcs = ["ip_tagging.proto"], + deps = ["//envoy/api/v3alpha/core:address"], +) diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto new file mode 100644 index 000000000000..e305800a5fc8 --- /dev/null +++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package envoy.config.filter.http.ip_tagging.v3alpha; + +option java_outer_classname = "IpTaggingProto"; +option java_multiple_files 
= true; +option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/address.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: IP tagging] +// IP tagging :ref:`configuration overview `. + +message IPTagging { + + // The type of requests the filter should apply to. The supported types + // are internal, external or both. The + // :ref:`x-forwarded-for` header is + // used to determine if a request is internal and will result in + // :ref:`x-envoy-internal` + // being set. The filter defaults to both, and it will apply to all request types. + enum RequestType { + // Both external and internal requests will be tagged. This is the default value. + BOTH = 0; + + // Only internal requests will be tagged. + INTERNAL = 1; + + // Only external requests will be tagged. + EXTERNAL = 2; + } + + // The type of request the filter should apply to. + RequestType request_type = 1 [(validate.rules).enum.defined_only = true]; + + // Supplies the IP tag name and the IP address subnets. + message IPTag { + // Specifies the IP tag name to apply. + string ip_tag_name = 1; + + // A list of IP address subnets that will be tagged with + // ip_tag_name. Both IPv4 and IPv6 are supported. + repeated envoy.api.v3alpha.core.CidrRange ip_list = 2; + } + + // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. + // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] + // The set of IP tags for the filter. 
+ repeated IPTag ip_tags = 4 [(validate.rules).repeated .min_items = 1]; +} diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD b/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD new file mode 100644 index 000000000000..2970da93f467 --- /dev/null +++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD @@ -0,0 +1,23 @@ +licenses(["notice"]) # Apache 2 + +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +api_proto_library_internal( + name = "jwt_authn", + srcs = ["config.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:http_uri", + "//envoy/api/v3alpha/route", + ], +) + +api_go_proto_library( + name = "jwt_authn", + proto = ":jwt_authn", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:http_uri_go_proto", + "//envoy/api/v3alpha/route:route_go_proto", + ], +) diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/README.md b/api/envoy/config/filter/http/jwt_authn/v3alpha/README.md new file mode 100644 index 000000000000..c390a4d5ce50 --- /dev/null +++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/README.md @@ -0,0 +1,66 @@ +# JWT Authentication HTTP filter config + +## Overview + +1. The proto file in this folder defines an HTTP filter config for "jwt_authn" filter. + +2. This filter will verify the JWT in the HTTP request as: + - The signature should be valid + - JWT should not be expired + - Issuer and audiences are valid and specified in the filter config. + +3. [JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter. + +3. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message. 
+ +## The locations to extract JWT + +JWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header: +``` +Authorization: Bearer +``` +The next default location is in the query parameter as: +``` +?access_token= +``` + +If a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT. + +## HTTP header to pass successfully verified JWT + +If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64 encoded JWT payload in JSON. + + +## Further header options + +In addition to the `name` field, which specifies the HTTP header name, +the `from_headers` section can specify an optional `value_prefix` value, as in: + +```yaml + from_headers: + - name: bespoke + value_prefix: jwt_value +``` + +The above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`. + +Any non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped, +and all following, contiguous, JWT-legal chars will be taken as the JWT. + +This means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`: + +```text +bespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk + +bespoke: {"jwt_value": "eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk"} + +bespoke: beta:true,jwt_value:"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk",trace=1234 +``` + +The header `name` may be `Authorization`. + +The `value_prefix` must match exactly, i.e., case-sensitively. +If the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token. + +If there are no JWT-legal characters after the `value_prefix`, the entire string after it +is taken to be the JWT token. This is unlikely to succeed; the error will reported by the JWT parser. 
\ No newline at end of file diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto new file mode 100644 index 000000000000..9c7f10c4adbc --- /dev/null +++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto @@ -0,0 +1,467 @@ + +syntax = "proto3"; + +package envoy.config.filter.http.jwt_authn.v3alpha; + +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v3alpha"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/core/http_uri.proto"; +import "envoy/api/v3alpha/route/route.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: JWT Authentication] +// JWT Authentication :ref:`configuration overview `. + +// Please see following for JWT authentication flow: +// +// * `JSON Web Token (JWT) `_ +// * `The OAuth 2.0 Authorization Framework `_ +// * `OpenID Connect `_ +// +// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: +// +// * issuer: the principal that issues the JWT. It has to match the one from the token. +// * allowed audiences: the ones in the token have to be listed here. +// * how to fetch public key JWKS to verify the token signature. +// * how to extract JWT token in the request. +// * how to pass successfully verified token payload. +// +// Example: +// +// .. 
code-block:: yaml +// +// issuer: https://example.com +// audiences: +// - bookstore_android.apps.googleusercontent.com +// - bookstore_web.apps.googleusercontent.com +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// cache_duration: +// seconds: 300 +// +message JwtProvider { + // Specify the `principal `_ that issued + // the JWT, usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + // + string issuer = 1 [(validate.rules).string.min_bytes = 1]; + + // The list of JWT `audiences `_ are + // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, + // will not check audiences in the token. + // + // Example: + // + // .. code-block:: yaml + // + // audiences: + // - bookstore_android.apps.googleusercontent.com + // - bookstore_web.apps.googleusercontent.com + // + repeated string audiences = 2; + + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; + + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. 
code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + envoy.api.v3alpha.core.DataSource local_jwks = 4; + } + + // If false, the JWT is removed in the request after a success verification. If true, the JWT is + // not removed in the request. Default value is false. + bool forward = 5; + + // Two fields below define where to extract the JWT from an HTTP request. + // + // If no explicit location is specified, the following default locations are tried in order: + // + // 1. The Authorization header using the `Bearer schema + // `_. Example:: + // + // Authorization: Bearer . + // + // 2. `access_token `_ query parameter. + // + // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations + // its provider specified or from the default locations. + // + // Specify the HTTP headers to extract JWT token. For examples, following config: + // + // .. code-block:: yaml + // + // from_headers: + // - name: x-goog-iap-jwt-assertion + // + // can be used to extract token from header:: + // + // x-goog-iap-jwt-assertion: . + // + repeated JwtHeader from_headers = 6; + + // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. + // + // For example, if config is: + // + // .. code-block:: yaml + // + // from_params: + // - jwt_token + // + // The JWT format in query parameter is:: + // + // /path?jwt_token= + // + repeated string from_params = 7; + + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. + string forward_payload_header = 8; + + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. 
The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + string payload_in_metadata = 9; +} + +// This message specifies how to fetch JWKS from remote and how to cache it. +message RemoteJwks { + // The HTTP URI to fetch the JWKS. For example: + // + // .. code-block:: yaml + // + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // + envoy.api.v3alpha.core.HttpUri http_uri = 1; + + // Duration after which the cached JWKS should be expired. If not specified, default cache + // duration is 5 minutes. + google.protobuf.Duration cache_duration = 2; +} + +// This message specifies a header location to extract JWT token. +message JwtHeader { + // The HTTP header name. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // The value prefix. The value format is "value_prefix" + // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the + // end. + string value_prefix = 2; +} + +// Specify a required provider with audiences. +message ProviderWithAudiences { + // Specify a required provider name. + string provider_name = 1; + + // This field overrides the one specified in the JwtProvider. + repeated string audiences = 2; +} + +// This message specifies a Jwt requirement. An empty message means JWT verification is not +// required. Here are some config examples: +// +// .. 
code-block:: yaml +// +// # Example 1: not required with an empty message +// +// # Example 2: require A +// provider_name: provider-A +// +// # Example 3: require A or B +// requires_any: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 4: require A and B +// requires_all: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 5: require A and (B or C) +// requires_all: +// requirements: +// - provider_name: provider-A +// - requires_any: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 6: require A or (B and C) +// requires_any: +// requirements: +// - provider_name: provider-A +// - requires_all: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +message JwtRequirement { + oneof requires_type { + // Specify a required provider name. + string provider_name = 1; + + // Specify a required provider with audiences. + ProviderWithAudiences provider_and_audiences = 2; + + // Specify list of JwtRequirement. Their results are OR-ed. + // If any one of them passes, the result is passed. + JwtRequirementOrList requires_any = 3; + + // Specify list of JwtRequirement. Their results are AND-ed. + // All of them must pass, if one of them fails or missing, it fails. + JwtRequirementAndList requires_all = 4; + + // The requirement is always satisfied even if JWT is missing or the JWT + // verification fails. A typical usage is: this filter is used to only verify + // JWTs and pass the verified JWT payloads to another filter, the other filter + // will make decision. In this mode, all JWT tokens will be verified. + google.protobuf.Empty allow_missing_or_failed = 5; + } +} + +// This message specifies a list of RequiredProvider. +// Their results are OR-ed; if any one of them passes, the result is passed +message JwtRequirementOrList { + // Specify a list of JwtRequirement. 
+ repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; +} + +// This message specifies a list of RequiredProvider. +// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. +message JwtRequirementAndList { + // Specify a list of JwtRequirement. + repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; +} + +// This message specifies a Jwt requirement for a specific Route condition. +// Example 1: +// +// .. code-block:: yaml +// +// - match: +// prefix: /healthz +// +// In above example, "requires" field is empty for /healthz prefix match, +// it means that requests matching the path prefix don't require JWT authentication. +// +// Example 2: +// +// .. code-block:: yaml +// +// - match: +// prefix: / +// requires: { provider_name: provider-A } +// +// In above example, all requests matched the path prefix require jwt authentication +// from "provider-A". +message RequirementRule { + // The route matching parameter. Only when the match is satisfied, the "requires" field will + // apply. + // + // For example: following match will match all requests. + // + // .. code-block:: yaml + // + // match: + // prefix: / + // + envoy.api.v3alpha.route.RouteMatch match = 1 [(validate.rules).message.required = true]; + + // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. + JwtRequirement requires = 2; +} + +// This message specifies Jwt requirements based on stream_info.filterState. +// This FilterState should use `Router::StringAccessor` object to set a string value. +// Other HTTP filters can use it to specify Jwt requirements dynamically. +// +// Example: +// +// .. 
code-block:: yaml +// +// name: jwt_selector +// requires: +// issuer_1: +// provider_name: issuer1 +// issuer_2: +// provider_name: issuer2 +// +// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, +// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. +message FilterStateRule { + // The filter state name to retrieve the `Router::StringAccessor` object. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A map of string keys to requirements. The string key is the string value + // in the FilterState with the name specified in the *name* field above. + map requires = 3; +} + +// This is the Envoy HTTP filter config for JWT authentication. +// +// For example: +// +// .. code-block:: yaml +// +// providers: +// provider1: +// issuer: issuer1 +// audiences: +// - audience1 +// - audience2 +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// provider2: +// issuer: issuer2 +// local_jwks: +// inline_string: jwks_string +// +// rules: +// # Not jwt verification is required for /health path +// - match: +// prefix: /health +// +// # Jwt verification for provider1 is required for path prefixed with "prefix" +// - match: +// prefix: /prefix +// requires: +// provider_name: provider1 +// +// # Jwt verification for either provider1 or provider2 is required for all other requests. +// - match: +// prefix: / +// requires: +// requires_any: +// requirements: +// - provider_name: provider1 +// - provider_name: provider2 +// +message JwtAuthentication { + // Map of provider names to JwtProviders. + // + // .. 
code-block:: yaml + // + // providers: + // provider1: + // issuer: issuer1 + // audiences: + // - audience1 + // - audience2 + // remote_jwks: + // http_uri: + // uri: https://example.com/.well-known/jwks.json + // cluster: example_jwks_cluster + // provider2: + // issuer: provider2 + // local_jwks: + // inline_string: jwks_string + // + map providers = 1; + + // Specifies requirements based on the route matches. The first matched requirement will be + // applied. If there are overlapped match conditions, please put the most specific match first. + // + // Examples + // + // .. code-block:: yaml + // + // rules: + // - match: + // prefix: /healthz + // - match: + // prefix: /baz + // requires: + // provider_name: provider1 + // - match: + // prefix: /foo + // requires: + // requires_any: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // - match: + // prefix: /bar + // requires: + // requires_all: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // + repeated RequirementRule rules = 2; + + // This message specifies Jwt requirements based on stream_info.filterState. + // Other HTTP filters can use it to specify Jwt requirements dynamically. + // The *rules* field above is checked first, if it could not find any matches, + // check this one. 
+ FilterStateRule filter_state_rules = 3; +} diff --git a/api/envoy/config/filter/http/lua/v3alpha/BUILD b/api/envoy/config/filter/http/lua/v3alpha/BUILD new file mode 100644 index 000000000000..6daf0c82f174 --- /dev/null +++ b/api/envoy/config/filter/http/lua/v3alpha/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "lua", + srcs = ["lua.proto"], +) diff --git a/api/envoy/config/filter/http/lua/v3alpha/lua.proto b/api/envoy/config/filter/http/lua/v3alpha/lua.proto new file mode 100644 index 000000000000..ff586ca2429e --- /dev/null +++ b/api/envoy/config/filter/http/lua/v3alpha/lua.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.filter.http.lua.v3alpha; + +option java_outer_classname = "LuaProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v3alpha"; +option go_package = "v2"; + +import "validate/validate.proto"; + +// [#protodoc-title: Lua] +// Lua :ref:`configuration overview `. + +message Lua { + // The Lua code that Envoy will execute. This can be a very small script that + // further loads code from disk if desired. Note that if JSON configuration is used, the code must + // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line + // strings so complex scripts can be easily expressed inline in the configuration. 
+ string inline_code = 1 [(validate.rules).string.min_bytes = 1]; +} diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD b/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD new file mode 100644 index 000000000000..e131d3a92263 --- /dev/null +++ b/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD @@ -0,0 +1,11 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "rate_limit", + srcs = ["rate_limit.proto"], + deps = [ + "//envoy/config/ratelimit/v3alpha:rls", + ], +) diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto new file mode 100644 index 000000000000..69e8d389bc4b --- /dev/null +++ b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +package envoy.config.filter.http.rate_limit.v3alpha; + +option java_outer_classname = "RateLimitProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v3alpha"; +option go_package = "v2"; + +import "envoy/config/ratelimit/v3alpha/rls.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Rate limit] +// Rate limit :ref:`configuration overview `. + +message RateLimit { + // The rate limit domain to use when calling the rate limit service. + string domain = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the rate limit configurations to be applied with the same + // stage number. If not set, the default stage number is 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + + // The type of requests the filter should apply to. The supported + // types are *internal*, *external* or *both*. 
A request is considered internal if + // :ref:`x-envoy-internal` is set to true. If + // :ref:`x-envoy-internal` is not set or false, a + // request is considered external. The filter defaults to *both*, and it will apply to all request + // types. + string request_type = 3; + + // The timeout in milliseconds for the rate limit service RPC. If not + // set, this defaults to 20ms. + google.protobuf.Duration timeout = 4 [(gogoproto.stdduration) = true]; + + // The filter's behaviour in case the rate limiting service does + // not respond back. When it is set to true, Envoy will not allow traffic in case of + // communication failure between rate limiting service and the proxy. + // Defaults to false. + bool failure_mode_deny = 5; + + // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead + // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The + // HTTP code will be 200 for a gRPC response. + bool rate_limited_as_resource_exhausted = 6; + + // Configuration for an external rate limit service provider. If not + // specified, any calls to the rate limit service will immediately return + // success. 
+ envoy.config.ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 7 + [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/filter/http/rbac/v3alpha/BUILD b/api/envoy/config/filter/http/rbac/v3alpha/BUILD new file mode 100644 index 000000000000..a6ee42cf7893 --- /dev/null +++ b/api/envoy/config/filter/http/rbac/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "rbac", + srcs = ["rbac.proto"], + deps = ["//envoy/config/rbac/v3alpha:rbac"], +) diff --git a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto new file mode 100644 index 000000000000..8ec8989652aa --- /dev/null +++ b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.config.filter.http.rbac.v3alpha; + +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v3alpha"; +option go_package = "v2"; + +import "envoy/config/rbac/v3alpha/rbac.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: RBAC] +// Role-Based Access Control :ref:`configuration overview `. + +// RBAC filter config. +message RBAC { + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + config.rbac.v3alpha.RBAC rules = 1; + + // Shadow rules are not enforced by the filter (i.e., returning a 403) + // but will emit stats and logs and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + config.rbac.v3alpha.RBAC shadow_rules = 2; +} + +message RBACPerRoute { + reserved 1; + + reserved "disabled"; + + // Override the global configuration of the filter with this new config. + // If absent, the global RBAC policy will be disabled for this route. 
+ RBAC rbac = 2; +} diff --git a/api/envoy/config/filter/http/router/v3alpha/BUILD b/api/envoy/config/filter/http/router/v3alpha/BUILD new file mode 100644 index 000000000000..f0b6c100d445 --- /dev/null +++ b/api/envoy/config/filter/http/router/v3alpha/BUILD @@ -0,0 +1,15 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "router", + srcs = ["router.proto"], + deps = ["//envoy/config/filter/accesslog/v3alpha:accesslog"], +) + +api_go_proto_library( + name = "router", + proto = ":router", + deps = ["//envoy/config/filter/accesslog/v3alpha:accesslog_go_proto"], +) diff --git a/api/envoy/config/filter/http/router/v3alpha/router.proto b/api/envoy/config/filter/http/router/v3alpha/router.proto new file mode 100644 index 000000000000..92efe315c6ff --- /dev/null +++ b/api/envoy/config/filter/http/router/v3alpha/router.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package envoy.config.filter.http.router.v3alpha; + +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.router.v3alpha"; +option go_package = "v2"; + +import "envoy/config/filter/accesslog/v3alpha/accesslog.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Router] +// Router :ref:`configuration overview `. + +message Router { + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. + google.protobuf.BoolValue dynamic_stats = 1; + + // Whether to start a child span for egress routed calls. This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. 
+ bool start_child_span = 2; + + // Configuration for HTTP upstream logs emitted by the router. Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. + repeated envoy.config.filter.accesslog.v3alpha.AccessLog upstream_log = 3; + + // Do not add any additional *x-envoy-* headers to requests or responses. This + // only affects the :ref:`router filter generated *x-envoy-* headers + // `, other Envoy filters and the HTTP + // connection manager may continue to set *x-envoy-* headers. + bool suppress_envoy_headers = 4; + + // Specifies a list of HTTP headers to strictly validate. Envoy will reject a + // request and respond with HTTP status 400 if the request contains an invalid + // value for any of the headers listed in this field. Strict header checking + // is only supported for the following headers: + // + // Value must be a ','-delimited list (i.e. 
no spaces) of supported retry + // policy values: + // + // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` + // * :ref:`config_http_filters_router_x-envoy-retry-on` + // + // Value must be an integer: + // + // * :ref:`config_http_filters_router_x-envoy-max-retries` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` + repeated string strict_check_headers = 5 [(validate.rules).repeated .items.string = { + in: [ + "x-envoy-upstream-rq-timeout-ms", + "x-envoy-upstream-rq-per-try-timeout-ms", + "x-envoy-max-retries", + "x-envoy-retry-grpc-on", + "x-envoy-retry-on" + ] + }]; +} diff --git a/api/envoy/config/filter/http/squash/v3alpha/BUILD b/api/envoy/config/filter/http/squash/v3alpha/BUILD new file mode 100644 index 000000000000..86bd4e8cfb65 --- /dev/null +++ b/api/envoy/config/filter/http/squash/v3alpha/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "squash", + srcs = ["squash.proto"], +) diff --git a/api/envoy/config/filter/http/squash/v3alpha/squash.proto b/api/envoy/config/filter/http/squash/v3alpha/squash.proto new file mode 100644 index 000000000000..43a62af98c1c --- /dev/null +++ b/api/envoy/config/filter/http/squash/v3alpha/squash.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package envoy.config.filter.http.squash.v3alpha; + +option java_outer_classname = "SquashProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v3alpha"; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Squash] +// Squash :ref:`configuration overview `. 
+ +// [#proto-status: experimental] +message Squash { + // The name of the cluster that hosts the Squash server. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // When the filter requests the Squash server to create a DebugAttachment, it will use this + // structure as template for the body of the request. It can contain reference to environment + // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server + // with more information to find the process to attach the debugger to. For example, in a + // Istio/k8s environment, this will contain information on the pod: + // + // .. code-block:: json + // + // { + // "spec": { + // "attachment": { + // "pod": "{{ POD_NAME }}", + // "namespace": "{{ POD_NAMESPACE }}" + // }, + // "match_request": true + // } + // } + // + // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) + google.protobuf.Struct attachment_template = 2; + + // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second. + google.protobuf.Duration request_timeout = 3 [(gogoproto.stdduration) = true]; + + // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60 + // seconds. + google.protobuf.Duration attachment_timeout = 4 [(gogoproto.stdduration) = true]; + + // Amount of time to poll for the status of the attachment object in the Squash server + // (to check if has been attached). Defaults to 1 second. 
+ google.protobuf.Duration attachment_poll_period = 5 [(gogoproto.stdduration) = true]; +} diff --git a/api/envoy/config/filter/http/tap/v3alpha/BUILD b/api/envoy/config/filter/http/tap/v3alpha/BUILD new file mode 100644 index 000000000000..a2af23059be6 --- /dev/null +++ b/api/envoy/config/filter/http/tap/v3alpha/BUILD @@ -0,0 +1,11 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "tap", + srcs = ["tap.proto"], + deps = [ + "//envoy/config/common/tap/v3alpha:common", + ], +) diff --git a/api/envoy/config/filter/http/tap/v3alpha/tap.proto b/api/envoy/config/filter/http/tap/v3alpha/tap.proto new file mode 100644 index 000000000000..e92c7d229f5e --- /dev/null +++ b/api/envoy/config/filter/http/tap/v3alpha/tap.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +import "envoy/config/common/tap/v3alpha/common.proto"; + +import "validate/validate.proto"; + +package envoy.config.filter.http.tap.v3alpha; + +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v3alpha"; + +// [#protodoc-title: Tap] +// Tap :ref:`configuration overview `. + +// Top level configuration for the tap filter. +message Tap { + // Common configuration for the HTTP tap filter. 
+ common.tap.v3alpha.CommonExtensionConfig common_config = 1 + [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/filter/http/transcoder/v3alpha/BUILD b/api/envoy/config/filter/http/transcoder/v3alpha/BUILD new file mode 100644 index 000000000000..c1a845bcd96e --- /dev/null +++ b/api/envoy/config/filter/http/transcoder/v3alpha/BUILD @@ -0,0 +1,13 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "transcoder", + srcs = ["transcoder.proto"], +) + +api_go_proto_library( + name = "transcoder", + proto = ":transcoder", +) diff --git a/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto b/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto new file mode 100644 index 000000000000..078ac52473ac --- /dev/null +++ b/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto @@ -0,0 +1,123 @@ +syntax = "proto3"; + +package envoy.config.filter.http.transcoder.v3alpha; + +option java_outer_classname = "TranscoderProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v3alpha"; +option go_package = "v2"; + +import "validate/validate.proto"; + +// [#protodoc-title: gRPC-JSON transcoder] +// gRPC-JSON transcoder :ref:`configuration overview `. + +message GrpcJsonTranscoder { + oneof descriptor_set { + option (validate.required) = true; + + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + string proto_descriptor = 1; + + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. + bytes proto_descriptor_bin = 4; + } + + // A list of strings that + // supplies the fully qualified service names (i.e. "package_name.service_name") that + // the transcoder will translate. 
If the service name doesn't exist in ``proto_descriptor``, + // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than + // the service names specified here, but they won't be translated. + repeated string services = 2 [(validate.rules).repeated .min_items = 1]; + + message PrintOptions { + // Whether to add spaces, line breaks and indentation to make the JSON + // output easy to read. Defaults to false. + bool add_whitespace = 1; + + // Whether to always print primitive fields. By default primitive + // fields with default values will be omitted in JSON output. For + // example, an int32 field set to 0 will be omitted. Setting this flag to + // true will override the default behavior and print primitive fields + // regardless of their values. Defaults to false. + bool always_print_primitive_fields = 2; + + // Whether to always print enums as ints. By default they are rendered + // as strings. Defaults to false. + bool always_print_enums_as_ints = 3; + + // Whether to preserve proto field names. By default protobuf will + // generate JSON field names using the ``json_name`` option, or lower camel case, + // in that order. Setting this flag will preserve the original field names. Defaults to false. + bool preserve_proto_field_names = 4; + }; + + // Control options for response JSON. These options are passed directly to + // `JsonPrintOptions `_. + PrintOptions print_options = 3; + + // Whether to keep the incoming request route after the outgoing headers have been transformed to + // the match the upstream gRPC service. Note: This means that routes for gRPC services that are + // not transcoded cannot be used in combination with *match_incoming_request_route*. + bool match_incoming_request_route = 5; + + // A list of query parameters to be ignored for transcoding method mapping. + // By default, the transcoder filter will not transcode a request if there are any + // unknown/invalid query parameters. + // + // Example : + // + // .. 
code-block:: proto + // + // service Bookstore { + // rpc GetShelf(GetShelfRequest) returns (Shelf) { + // option (google.api.http) = { + // get: "/shelves/{shelf}" + // }; + // } + // } + // + // message GetShelfRequest { + // int64 shelf = 1; + // } + // + // message Shelf {} + // + // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable + // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow + // the same request to be mapped to ``GetShelf``. + repeated string ignored_query_parameters = 6; + + // Whether to route methods without the ``google.api.http`` option. + // + // Example : + // + // .. code-block:: proto + // + // package bookstore; + // + // service Bookstore { + // rpc GetShelf(GetShelfRequest) returns (Shelf) {} + // } + // + // message GetShelfRequest { + // int64 shelf = 1; + // } + // + // message Shelf {} + // + // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of + // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. + bool auto_mapping = 7; + + // Whether to ignore query parameters that cannot be mapped to a corresponding + // protobuf field. Use this if you cannot control the query parameters and do + // not know them beforehand. Otherwise use ``ignored_query_parameters``. + // Defaults to false. 
+ bool ignore_unknown_query_parameters = 8; +} diff --git a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD new file mode 100644 index 000000000000..bece14103bbe --- /dev/null +++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "client_ssl_auth", + srcs = ["client_ssl_auth.proto"], + deps = ["//envoy/api/v3alpha/core:address"], +) diff --git a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto new file mode 100644 index 000000000000..a0ea3bf0bfaa --- /dev/null +++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package envoy.config.filter.network.client_ssl_auth.v3alpha; + +option java_outer_classname = "ClientSslAuthProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/address.proto"; +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Client TLS authentication] +// Client TLS authentication +// :ref:`configuration overview `. + +message ClientSSLAuth { + // The :ref:`cluster manager ` cluster that runs + // the authentication service. The filter will connect to the service every 60s to fetch the list + // of principals. The service must support the expected :ref:`REST API + // `. + string auth_api_cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // The prefix to use when emitting :ref:`statistics + // `. 
+ string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + + // Time in milliseconds between principal refreshes from the + // authentication service. Default is 60000 (60s). The actual fetch time + // will be this value plus a random jittered value between + // 0-refresh_delay_ms milliseconds. + google.protobuf.Duration refresh_delay = 3 [(gogoproto.stdduration) = true]; + + // An optional list of IP address and subnet masks that should be white + // listed for access by the filter. If no list is provided, there is no + // IP white list. + repeated envoy.api.v3alpha.core.CidrRange ip_white_list = 4; +} diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD b/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD new file mode 100644 index 000000000000..839724af13b4 --- /dev/null +++ b/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "ext_authz", + srcs = ["ext_authz.proto"], + deps = ["//envoy/api/v3alpha/core:grpc_service"], +) diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto new file mode 100644 index 000000000000..99c0c7239753 --- /dev/null +++ b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.config.filter.network.ext_authz.v3alpha; + +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/grpc_service.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Network External Authorization ] +// The network layer external authorization service configuration +// :ref:`configuration overview `. 
+ +// External Authorization filter calls out to an external service over the +// gRPC Authorization API defined by +// :ref:`CheckRequest `. +// A failed check will cause this filter to close the TCP connection. +message ExtAuthz { + // The prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // The external authorization gRPC service configuration. + // The default timeout is set to 200ms by this filter. + envoy.api.v3alpha.core.GrpcService grpc_service = 2; + + // The filter's behaviour in case the external authorization service does + // not respond back. When it is set to true, Envoy will also allow traffic in case of + // communication failure between authorization service and the proxy. + // Defaults to false. + bool failure_mode_allow = 3; +} diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD b/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD new file mode 100644 index 000000000000..300b3c8e671a --- /dev/null +++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD @@ -0,0 +1,31 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "http_connection_manager", + srcs = ["http_connection_manager.proto"], + deps = [ + "//envoy/api/v3alpha:rds", + "//envoy/api/v3alpha:srds", + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:config_source", + "//envoy/api/v3alpha/core:protocol", + "//envoy/config/filter/accesslog/v3alpha:accesslog", + "//envoy/type:percent", + ], +) + +api_go_proto_library( + name = "http_connection_manager", + proto = ":http_connection_manager", + deps = [ + "//envoy/api/v3alpha:rds_go_grpc", + "//envoy/api/v3alpha:srds_go_grpc", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:config_source_go_proto", + "//envoy/api/v3alpha/core:protocol_go_proto", + 
"//envoy/config/filter/accesslog/v3alpha:accesslog_go_proto", + "//envoy/type:percent_go_proto", + ], +) diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto new file mode 100644 index 000000000000..57e529b2164a --- /dev/null +++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto @@ -0,0 +1,599 @@ +syntax = "proto3"; + +package envoy.config.filter.network.http_connection_manager.v3alpha; + +option java_outer_classname = "HttpConnectionManagerProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/config_source.proto"; +import "envoy/api/v3alpha/core/protocol.proto"; +import "envoy/api/v3alpha/rds.proto"; +import "envoy/api/v3alpha/srds.proto"; +import "envoy/config/filter/accesslog/v3alpha/accesslog.proto"; +import "envoy/type/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: HTTP connection manager] +// HTTP connection manager :ref:`configuration overview `. + +// [#comment:next free field: 35] +message HttpConnectionManager { + enum CodecType { + option (gogoproto.goproto_enum_prefix) = false; + + // For every new connection, the connection manager will determine which + // codec to use. This mode supports both ALPN for TLS listeners as well as + // protocol inference for plaintext listeners. If ALPN data is available, it + // is preferred, otherwise protocol inference is used. In almost all cases, + // this is the right option to choose for this setting. 
+ AUTO = 0; + + // The connection manager will assume that the client is speaking HTTP/1.1. + HTTP1 = 1; + + // The connection manager will assume that the client is speaking HTTP/2 + // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + // Prior knowledge is allowed). + HTTP2 = 2; + } + + // Supplies the type of codec that the connection manager should use. + CodecType codec_type = 1 [(validate.rules).enum.defined_only = true]; + + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + + oneof route_specifier { + option (validate.required) = true; + + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the connection manager is static and is specified in this property. + envoy.api.v3alpha.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; + } + + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. Order matters as the filters are + // processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; + + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + // documentation for more information. Defaults to false. + google.protobuf.BoolValue add_user_agent = 6; + + message Tracing { + // [#comment:TODO(kyessenov): Align this field with listener traffic direction field.] 
+ enum OperationName { + option (gogoproto.goproto_enum_prefix) = false; + + // The HTTP listener is used for ingress/incoming requests. + INGRESS = 0; + + // The HTTP listener is used for egress/outgoing requests. + EGRESS = 1; + } + + // The span name will be derived from this field. + OperationName operation_name = 1 [(validate.rules).enum.defined_only = true]; + + // A list of header names used to create tags for the active span. The header name is used to + // populate the tag name, and the header value is used to populate the tag value. The tag is + // created if the specified header name is present in the request's headers. + repeated string request_headers_for_tags = 2; + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + envoy.type.Percent client_sampling = 3; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + envoy.type.Percent random_sampling = 4; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. 
This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + envoy.type.Percent overall_sampling = 5; + + // Whether to annotate spans with additional data. If true, spans will include logs for stream + // events. + bool verbose = 6; + } + + // Presence of the object defines whether the connection manager + // emits :ref:`tracing ` data to the :ref:`configured tracing provider + // `. + Tracing tracing = 7; + + // Additional HTTP/1 settings that are passed to the HTTP/1 codec. + envoy.api.v3alpha.core.Http1ProtocolOptions http_protocol_options = 8; + + // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + envoy.api.v3alpha.core.Http2ProtocolOptions http2_protocol_options = 9; + + // An optional override that the connection manager will write to the server + // header in responses. If not set, the default is *envoy*. + string server_name = 10; + + enum ServerHeaderTransformation { + option (gogoproto.goproto_enum_prefix) = false; + + // Overwrite any Server header with the contents of server_name. + OVERWRITE = 0; + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + APPEND_IF_ABSENT = 1; + // Pass through the value of the server header, and do not append a header + // if none is present. + PASS_THROUGH = 2; + } + // Defines the action to be applied to the Server header on the response path. + // By default, Envoy will overwrite the header with the value specified in + // server_name. + ServerHeaderTransformation server_header_transformation = 34 + [(validate.rules).enum.defined_only = true]; + + // The maximum request headers size for incoming connections. + // If unconfigured, the default max request headers allowed is 60 KiB. + // Requests that exceed this limit will receive a 431 response. + // The max configurable limit is 96 KiB, based on current implementation + // constraints. 
+ google.protobuf.UInt32Value max_request_headers_kb = 29 + [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 96]; + + // The idle timeout for connections managed by the connection manager. The + // idle timeout is defined as the period in which there are no active + // requests. If not set, there is no idle timeout. When the idle timeout is + // reached the connection will be closed. If the connection is an HTTP/2 + // connection a drain sequence will occur prior to closing the connection. See + // :ref:`drain_timeout + // `. + google.protobuf.Duration idle_timeout = 11 [(gogoproto.stdduration) = true]; + + // The stream idle timeout for connections managed by the connection manager. + // If not specified, this defaults to 5 minutes. The default value was selected + // so as not to interfere with any smaller configured timeouts that may have + // existed in configurations prior to the introduction of this feature, while + // introducing robustness to TCP connections that terminate without a FIN. + // + // This idle timeout applies to new streams and is overridable by the + // :ref:`route-level idle_timeout + // `. Even on a stream in + // which the override applies, prior to receipt of the initial request + // headers, the :ref:`stream_idle_timeout + // ` + // applies. Each time an encode/decode event for headers or data is processed + // for the stream, the timer will be reset. If the timeout fires, the stream + // is terminated with a 408 Request Timeout error code if no upstream response + // header has been received, otherwise a stream reset occurs. + // + // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due + // to the granularity of events presented to the connection manager. 
For example, while receiving + // very large request headers, it may be the case that there is traffic regularly arriving on the + // wire while the connection manage is only able to observe the end-of-headers event, hence the + // stream may still idle timeout. + // + // A value of 0 will completely disable the connection manager stream idle + // timeout, although per-route idle timeout overrides will continue to apply. + google.protobuf.Duration stream_idle_timeout = 24 [(gogoproto.stdduration) = true]; + + // A timeout for idle requests managed by the connection manager. + // The timer is activated when the request is initiated, and is disarmed when the last byte of the + // request is sent upstream (i.e. all decoding filters have processed the request), OR when the + // response is initiated. If not specified or set to 0, this timeout is disabled. + google.protobuf.Duration request_timeout = 28 [(gogoproto.stdduration) = true]; + + // The time that Envoy will wait between sending an HTTP/2 “shutdown + // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + // This is used so that Envoy provides a grace period for new streams that + // race with the final GOAWAY frame. During this grace period, Envoy will + // continue to accept new streams. After the grace period, a final GOAWAY + // frame is sent and Envoy will start refusing new streams. Draining occurs + // both when a connection hits the idle timeout or during general server + // draining. The default grace period is 5000 milliseconds (5 seconds) if this + // option is not specified. + google.protobuf.Duration drain_timeout = 12 [(gogoproto.stdduration) = true]; + + // The delayed close timeout is for downstream connections managed by the HTTP connection manager. 
 + // It is defined as a grace period after connection close processing has been locally initiated + // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + // from the downstream connection) prior to Envoy closing the socket associated with that + // connection. + // NOTE: This timeout is enforced even when the socket associated with the downstream connection + // is pending a flush of the write buffer. However, any progress made writing data to the socket + // will restart the timer associated with this timeout. This means that the total grace period for + // a socket in this state will be + // +. + // + // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close + // sequence mitigates a race condition that exists when downstream clients do not drain/process + // data in a connection's receive buffer after a remote close has been detected via a socket + // write(). This race leads to such clients failing to process the response code sent by Envoy, + // which could result in erroneous downstream processing. + // + // If the timeout triggers, Envoy will close the connection's socket. + // + // The default timeout is 1000 ms if this option is not specified. + // + // .. NOTE:: + //   To be useful in avoiding the race condition described above, this timeout must be set + //   to *at least* +<100ms to account for + //   a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + // + // .. WARNING:: + //   A value of 0 will completely disable delayed close processing. When disabled, the downstream + //   connection's socket will be closed immediately after the write flush is completed or will + //   never close if the write flush does not complete. + google.protobuf.Duration delayed_close_timeout = 26 [(gogoproto.stdduration) = true]; + + // Configuration for :ref:`HTTP access logs ` + // emitted by the connection manager. 
+ repeated envoy.config.filter.accesslog.v3alpha.AccessLog access_log = 13; + + // If set to true, the connection manager will use the real remote address + // of the client connection when determining internal versus external origin and manipulating + // various headers. If set to false or absent, the connection manager will use the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for`, + // :ref:`config_http_conn_man_headers_x-envoy-internal`, and + // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + google.protobuf.BoolValue use_remote_address = 14; + + // The number of additional ingress proxy hops from the right side of the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + // determining the origin client's IP address. The default is zero if this option + // is not specified. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. + uint32 xff_num_trusted_hops = 19; + + message InternalAddressConfig { + // Whether unix socket addresses should be considered internal. + bool unix_sockets = 1; + } + + // Configures what network addresses are considered internal for stats and header sanitation + // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. + // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information about internal/external addresses. + InternalAddressConfig internal_address_config = 25; + + // If set, Envoy will not append the remote address to the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in + // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager + // has mutated the request headers. 
While :ref:`use_remote_address + // ` + // will also suppress XFF addition, it has consequences for logging and other + // Envoy uses of the remote address, so *skip_xff_append* should be used + // when only an elision of XFF addition is intended. + bool skip_xff_append = 21; + + // Via header value to append to request and response headers. If this is + // empty, no via header will be appended. + string via = 22; + + // Whether the connection manager will generate the :ref:`x-request-id + // ` header if it does not exist. This defaults to + // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature + // is not desired it can be disabled. + google.protobuf.BoolValue generate_request_id = 15; + + // Whether the connection manager will keep the :ref:`x-request-id + // ` header if passed for a request that is edge + // (Edge request is the request from external clients to front Envoy) and not reset it, which + // is the current Envoy behaviour. This defaults to false. + bool preserve_external_request_id = 32; + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + enum ForwardClientCertDetails { + option (gogoproto.goproto_enum_prefix) = false; + + // Do not send the XFCC header to the next hop. This is the default value. + SANITIZE = 0; + + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + FORWARD_ONLY = 1; + + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + APPEND_FORWARD = 2; + + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + SANITIZE_SET = 3; + + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. 
+ ALWAYS_FORWARD_ONLY = 4; + }; + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum.defined_only = true]; + + // [#comment:next free field: 7] + message SetCurrentClientCertDetails { + // Whether to forward the subject of the client cert. Defaults to false. + google.protobuf.BoolValue subject = 1; + + reserved 2; // san deprecated by uri + + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + bool cert = 3; + + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + bool chain = 6; + + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + bool dns = 4; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + bool uri = 5; + }; + + // This field is valid only when :ref:`forward_client_cert_details + // ` + // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + // the client certificate to be forwarded. Note that in the + // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and + // *By* is always set when the client certificate presents the URI type Subject Alternative Name + // value. + SetCurrentClientCertDetails set_current_client_cert_details = 17; + + // If proxy_100_continue is true, Envoy will proxy incoming "Expect: + // 100-continue" headers upstream, and forward "100 Continue" responses + // downstream. 
If this is false or not set, Envoy will instead strip the + // "Expect: 100-continue" header, and send a "100 Continue" response itself. + bool proxy_100_continue = 18; + + // If + // :ref:`use_remote_address + // ` + // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. + // This is useful for testing compatibility of upstream services that parse the header value. For + // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + // `_ for details. This will also affect the + // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + // ` for runtime + // control. + // [#not-implemented-hide:] + bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; + + // The configuration for HTTP upgrades. + // For each upgrade type desired, an UpgradeConfig must be added. + // + // .. warning:: + // + // The current implementation of upgrade headers does not handle + // multi-valued upgrade headers. Support for multi-valued headers may be + // added in the future if needed. + // + // .. warning:: + // The current implementation of upgrade headers does not work with HTTP/2 + // upstreams. + message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. + string upgrade_type = 1; + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + repeated HttpFilter filters = 2; + // Determines if upgrades are enabled or disabled by default. Defaults to true. 
+ // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. + google.protobuf.BoolValue enabled = 3; + }; + repeated UpgradeConfig upgrade_configs = 23; + + reserved 27; + + // Should paths be normalized according to RFC 3986 before any processing of + // requests by HTTP filters or routing? This affects the upstream *:path* header + // as well. For paths that fail this check, Envoy will respond with 400 to + // paths that are malformed. This defaults to false currently but will default + // true in the future. When not specified, this value may be overridden by the + // runtime variable + // :ref:`http_connection_manager.normalize_path`. + // See `Normalization and Comparison ` + // for details of normalization. + // Note that Envoy does not perform + // `case normalization ` + google.protobuf.BoolValue normalize_path = 30; + + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without + // setting this option, incoming requests with path `//dir///file` will not match against route + // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of + // `HTTP spec ` and is provided for convenience. + bool merge_slashes = 33; +} + +message Rds { + // Configuration source specifier for RDS. + envoy.api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2 [(validate.rules).string.min_bytes = 1]; +} + +// This message is used to work around the limitations with 'oneof' and repeated fields. 
+message ScopedRouteConfigurationsList { + repeated envoy.api.v3alpha.ScopedRouteConfiguration scoped_route_configurations = 1 + [(validate.rules).repeated .min_items = 1]; +} + +message ScopedRoutes { + // The name assigned to the scoped routing configuration. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These + // keys are matched against a set of :ref:`Key` + // objects assembled from :ref:`ScopedRouteConfiguration` + // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via + // :ref:`scoped_route_configurations_list`. + // + // Upon receiving a request's headers, the Router will build a key using the algorithm specified + // by this message. This key will be used to look up the routing table (i.e., the + // :ref:`RouteConfiguration`) to use for the request. + message ScopeKeyBuilder { + // Specifies the mechanism for constructing key fragments which are composed into scope keys. + message FragmentBuilder { + // Specifies how the value of a header should be extracted. + // The following example maps the structure of a header to the fields in this message. + // + // .. code:: + // + // <0> <1> <-- index + // X-Header: a=b;c=d + // | || | + // | || \----> + // | || + // | |\----> + // | | + // | \----> + // | + // \----> + // + // Each 'a=b' key-value pair constitutes an 'element' of the header field. + message HeaderValueExtractor { + // The name of the header field to extract the value from. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. + string element_separator = 2; + + // Specifies a header field's key value pair to match on. 
+ message KvElement { + // The separator between key and value (e.g., '=' separates 'k=v;...'). + // If an element is an empty string, the element is ignored. + // If an element contains no separator, the whole element is parsed as key and the + // fragment value is an empty string. + // If there are multiple values for a matched key, the first value is returned. + string separator = 1 [(validate.rules).string.min_bytes = 1]; + + // The key to match on. + string key = 2 [(validate.rules).string.min_bytes = 1]; + } + + oneof extract_type { + // Specifies the zero based index of the element to extract. + // Note Envoy concatenates multiple values of the same header key into a comma separated + // string, the splitting always happens after the concatenation. + uint32 index = 3; + + // Specifies the key value pair to extract the value from. + KvElement element = 4; + } + } + + oneof type { + option (validate.required) = true; + + // Specifies how a header field's value should be extracted. + HeaderValueExtractor header_value_extractor = 1; + } + } + + // The final scope key consists of the ordered union of these fragments. + repeated FragmentBuilder fragments = 1 [(validate.rules).repeated .min_items = 1]; + } + + // The algorithm to use for constructing a scope key for each request. + ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message.required = true]; + + // Configuration source specifier for RDS. + // This config source is used to subscribe to RouteConfiguration resources specified in + // ScopedRouteConfiguration messages. + envoy.api.v3alpha.core.ConfigSource rds_config_source = 3 + [(validate.rules).message.required = true]; + + oneof config_specifier { + option (validate.required) = true; + + // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by + // matching a key constructed from the request's attributes according to the algorithm specified + // by the + // :ref:`ScopeKeyBuilder` + // in this message. 
+ ScopedRouteConfigurationsList scoped_route_configurations_list = 4; + + // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS + // API. A scope is assigned to a request by matching a key constructed from the request's + // attributes according to the algorithm specified by the + // :ref:`ScopeKeyBuilder` + // in this message. + ScopedRds scoped_rds = 5; + } +} + +message ScopedRds { + // Configuration source specifier for scoped RDS. + envoy.api.v3alpha.core.ConfigSource scoped_rds_config_source = 1 + [(validate.rules).message.required = true]; +} + +message HttpFilter { + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 4; + } + + reserved 3; +} diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD new file mode 100644 index 000000000000..a2c09e709030 --- /dev/null +++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "mongo_proxy", + srcs = ["mongo_proxy.proto"], + deps = ["//envoy/config/filter/fault/v3alpha:fault"], +) diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto new file mode 100644 index 000000000000..9149b433e372 --- /dev/null +++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.config.filter.network.mongo_proxy.v3alpha; + +option 
java_outer_classname = "MongoProxyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v3alpha"; +option go_package = "v2"; + +import "envoy/config/filter/fault/v3alpha/fault.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Mongo proxy] +// MongoDB :ref:`configuration overview `. + +message MongoProxy { + // The human readable prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // The optional path to use for writing Mongo access logs. If no access log + // path is specified, no access logs will be written. Note that access log is + // also gated :ref:`runtime `. + string access_log = 2; + + // Inject a fixed delay before proxying a Mongo operation. Delays are + // applied to the following MongoDB operations: Query, Insert, GetMore, + // and KillCursors. Once an active delay is in progress, all incoming + // data up until the timer event fires will be a part of the delay. + envoy.config.filter.fault.v3alpha.FaultDelay delay = 3; + + // Flag to specify whether :ref:`dynamic metadata + // ` should be emitted. Defaults to false. 
+ bool emit_dynamic_metadata = 4; +} diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD b/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD new file mode 100644 index 000000000000..9dc17266721c --- /dev/null +++ b/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD @@ -0,0 +1,12 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "rate_limit", + srcs = ["rate_limit.proto"], + deps = [ + "//envoy/api/v3alpha/ratelimit", + "//envoy/config/ratelimit/v3alpha:rls", + ], +) diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto new file mode 100644 index 000000000000..a0edc98e561d --- /dev/null +++ b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package envoy.config.filter.network.rate_limit.v3alpha; + +option java_outer_classname = "RateLimitProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/ratelimit/ratelimit.proto"; +import "envoy/config/ratelimit/v3alpha/rls.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Rate limit] +// Rate limit :ref:`configuration overview `. + +message RateLimit { + // The prefix to use when emitting :ref:`statistics `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // The rate limit domain to use in the rate limit service request. + string domain = 2 [(validate.rules).string.min_bytes = 1]; + + // The rate limit descriptor list to use in the rate limit service request. 
+ repeated envoy.api.v3alpha.ratelimit.RateLimitDescriptor descriptors = 3 + [(validate.rules).repeated .min_items = 1]; + + // The timeout in milliseconds for the rate limit service RPC. If not + // set, this defaults to 20ms. + google.protobuf.Duration timeout = 4 [(gogoproto.stdduration) = true]; + + // The filter's behaviour in case the rate limiting service does + // not respond back. When it is set to true, Envoy will not allow traffic in case of + // communication failure between rate limiting service and the proxy. + // Defaults to false. + bool failure_mode_deny = 5; + + // Configuration for an external rate limit service provider. If not + // specified, any calls to the rate limit service will immediately return + // success. + envoy.config.ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 6 + [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/filter/network/rbac/v3alpha/BUILD b/api/envoy/config/filter/network/rbac/v3alpha/BUILD new file mode 100644 index 000000000000..a6ee42cf7893 --- /dev/null +++ b/api/envoy/config/filter/network/rbac/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "rbac", + srcs = ["rbac.proto"], + deps = ["//envoy/config/rbac/v3alpha:rbac"], +) diff --git a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto new file mode 100644 index 000000000000..5c2114cd6063 --- /dev/null +++ b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.config.filter.network.rbac.v3alpha; + +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v3alpha"; +option go_package = "v2"; + +import "envoy/config/rbac/v3alpha/rbac.proto"; + +import "validate/validate.proto"; +import 
"gogoproto/gogo.proto"; + +// [#protodoc-title: RBAC] +// Role-Based Access Control :ref:`configuration overview `. + +// RBAC network filter config. +// +// Header should not be used in rules/shadow_rules in RBAC network filter as +// this information is only available in :ref:`RBAC http filter `. +message RBAC { + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + config.rbac.v3alpha.RBAC rules = 1; + + // Shadow rules are not enforced by the filter but will emit stats and logs + // and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + config.rbac.v3alpha.RBAC shadow_rules = 2; + + // The prefix to use when emitting statistics. + string stat_prefix = 3 [(validate.rules).string.min_bytes = 1]; + + enum EnforcementType { + // Apply RBAC policies when the first byte of data arrives on the connection. + ONE_TIME_ON_FIRST_BYTE = 0; + + // Continuously apply RBAC policies as data arrives. Use this mode when + // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, + // etc. when the protocol decoders emit dynamic metadata such as the + // resources being accessed and the operations on the resources. + CONTINUOUS = 1; + }; + + // RBAC enforcement strategy. By default RBAC will be enforced only once + // when the first byte of data arrives from the downstream. When used in + // conjunction with filters that emit dynamic metadata after decoding + // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to + // CONTINUOUS to enforce RBAC policies on every message boundary. 
+ EnforcementType enforcement_type = 4; +} diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD new file mode 100644 index 000000000000..ef7cc5683f0c --- /dev/null +++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD @@ -0,0 +1,12 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "redis_proxy", + srcs = ["redis_proxy.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/type:percent", + ], +) diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto new file mode 100644 index 000000000000..1bda0ab7c466 --- /dev/null +++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto @@ -0,0 +1,236 @@ +syntax = "proto3"; + +package envoy.config.filter.network.redis_proxy.v3alpha; + +option java_outer_classname = "RedisProxyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Redis Proxy] +// Redis Proxy :ref:`configuration overview `. + +message RedisProxy { + // The prefix to use when emitting :ref:`statistics `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // Name of cluster from cluster manager. See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing cluster. + // + // .. attention:: + // + // This field is deprecated. Use a :ref:`catch_all + // route` + // instead. 
+ string cluster = 2 [deprecated = true]; + + // Redis connection pool settings. + message ConnPoolSettings { + // Per-operation timeout in milliseconds. The timer starts when the first + // command of a pipeline is written to the backend connection. Each response received from Redis + // resets the timer since it signifies that the next command is being processed by the backend. + // The only exception to this behavior is when a connection to a backend is not yet established. + // In that case, the connect timeout on the cluster will govern the timeout until the connection + // is ready. + google.protobuf.Duration op_timeout = 1 + [(validate.rules).duration.required = true, (gogoproto.stdduration) = true]; + + // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be + // forwarded to the same upstream. The hash key used for determining the upstream in a + // consistent hash ring configuration will be computed from the hash tagged key instead of the + // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster + // implementation `_. + // + // Examples: + // + // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream + // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream + bool enable_hashtagging = 2; + + // Accept `moved and ask redirection + // `_ errors from upstream + // redis servers, and retry commands to the specified target server. The target server does not + // need to be known to the cluster manager. If the command cannot be redirected, then the + // original error is passed downstream unchanged. By default, this support is not enabled. + bool enable_redirection = 3; + + // Maximum size of encoded request buffer before flush is triggered and encoded requests + // are sent upstream. If this is unset, the buffer flushes whenever it receives data + // and performs no batching. 
+ // This feature makes it possible for multiple clients to send requests to Envoy and have + // them batched- for example if one is running several worker processes, each with its own + // Redis connection. There is no benefit to using this with a single downstream process. + // Recommended size (if enabled) is 1024 bytes. + uint32 max_buffer_size_before_flush = 4; + + // The encoded request buffer is flushed N milliseconds after the first request has been + // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. + // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise, + // the timer should be set according to the number of clients, overall request rate and + // desired maximum latency for a single command. For example, if there are many requests + // being batched together at a high rate, the buffer will likely be filled before the timer + // fires. Alternatively, if the request rate is lower the buffer will not be filled as often + // before the timer fires. + // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter + // defaults to 3ms. + google.protobuf.Duration buffer_flush_timeout = 5 [(gogoproto.stdduration) = true]; + + // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts + // can be created at any given time by any given worker thread (see `enable_redirection` for + // more details). If the host is unknown and a connection cannot be created due to enforcing + // this limit, then redirection will fail and the original redirection error will be passed + // downstream unchanged. This limit defaults to 100. + google.protobuf.UInt32Value max_upstream_unknown_connections = 6; + + // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently + // supported for Redis Cluster. 
All ReadPolicy settings except MASTER may return stale data + // because replication is asynchronous and requires some delay. You need to ensure that your + // application can tolerate stale data. + enum ReadPolicy { + // Default mode. Read from the current master node. + MASTER = 0; + // Read from the master, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1; + // Read from replica nodes. If multiple replica nodes are present within a shard, a random + // node is selected. Healthy nodes have precedent over unhealthy nodes. + REPLICA = 2; + // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not + // present or unhealthy), read from the master. + PREFER_REPLICA = 3; + // Read from any node of the cluster. A random node is selected among the master and replicas, + // healthy nodes have precedent over unhealthy nodes. + ANY = 4; + } + + // Read policy. The default is to read from the master. + ReadPolicy read_policy = 7 [(validate.rules).enum.defined_only = true]; + } + + // Network settings for the connection pool to the upstream clusters. + ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; + + // Indicates that latency stat should be computed in microseconds. By default it is computed in + // milliseconds. + bool latency_in_micros = 4; + + message PrefixRoutes { + message Route { + // String prefix that must match the beginning of the keys. Envoy will always favor the + // longest match. + string prefix = 1; + + // Indicates if the prefix needs to be removed from the key when forwarded. + bool remove_prefix = 2; + + // Upstream cluster to forward the command to. + string cluster = 3 [(validate.rules).string.min_bytes = 1]; + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. 
All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // If not specified or the runtime key is not present, all requests to the target cluster + // will be mirrored. + // + // If specified, Envoy will lookup the runtime key to get the percentage of requests to the + // mirror. + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a :ref:`FractionalPercent ` proto represented + // as JSON/YAML and may also be represented as an integer with the assumption that the value + // is an integral percentage out of 100. For instance, a runtime key lookup returning the + // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is + // HUNDRED. + envoy.api.v3alpha.core.RuntimeFractionalPercent runtime_fraction = 2; + + // Set this to TRUE to only mirror write commands, this is effectively replicating the + // writes in a "fire and forget" manner. + bool exclude_read_commands = 3; + } + + // Indicates that the route has a request mirroring policy. + repeated RequestMirrorPolicy request_mirror_policy = 4; + } + + // List of prefix routes. + repeated Route routes = 1; + + // Indicates that prefix matching should be case insensitive. + bool case_insensitive = 2; + + // Optional catch-all route to forward commands that doesn't match any of the routes. The + // catch-all route becomes required when no routes are specified. + // .. attention:: + // + // This field is deprecated. Use a :ref:`catch_all + // route` + // instead. + string catch_all_cluster = 3 [deprecated = true]; + + // Optional catch-all route to forward commands that doesn't match any of the routes. 
The + // catch-all route becomes required when no routes are specified. + Route catch_all_route = 4; + } + + // List of **unique** prefixes used to separate keys from different workloads to different + // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all + // cluster can be used to forward commands when there is no match. Time complexity of the + // lookups is in O(min(longest key prefix, key length)). + // + // Example: + // + // .. code-block:: yaml + // + // prefix_routes: + // routes: + // - prefix: "ab" + // cluster: "cluster_a" + // - prefix: "abc" + // cluster: "cluster_b" + // + // When using the above routes, the following prefixes would be sent to: + // + // * 'get abc:users' would retrieve the key 'abc:users' from cluster_b. + // * 'get ab:users' would retrieve the key 'ab:users' from cluster_a. + // * 'get z:users' would return a NoUpstreamHost error. A :ref:`catch-all + // route` + // would have retrieved the key from that cluster instead. + // + // See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing clusters. + PrefixRoutes prefix_routes = 5; + + // Authenticate Redis client connections locally by forcing downstream clients to issue a 'Redis + // AUTH command `_ with this password before enabling any other + // command. If an AUTH command's password matches this password, an "OK" response will be returned + // to the client. If the AUTH command password does not match this password, then an "ERR invalid + // password" error will be returned. If any other command is received before AUTH when this + // password is set, then a "NOAUTH Authentication required." error response will be sent to the + // client. If an AUTH command is received when the password is not set, then an "ERR Client sent + // AUTH, but no password is set" error will be returned. 
+ envoy.api.v3alpha.core.DataSource downstream_auth_password = 6; +} + +// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in +// :ref:`extension_protocol_options`, keyed +// by the name `envoy.redis_proxy`. +message RedisProtocolOptions { + // Upstream server password as defined by the `requirepass directive + // `_ in the server's configuration file. + envoy.api.v3alpha.core.DataSource auth_password = 1; +} diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD new file mode 100644 index 000000000000..a9ea8de3bf1e --- /dev/null +++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD @@ -0,0 +1,23 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "tcp_proxy", + srcs = ["tcp_proxy.proto"], + deps = [ + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + "//envoy/config/filter/accesslog/v3alpha:accesslog", + ], +) + +api_go_proto_library( + name = "tcp_proxy", + proto = ":tcp_proxy", + deps = [ + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/config/filter/accesslog/v3alpha:accesslog_go_proto", + ], +) diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto new file mode 100644 index 000000000000..f2597a3ab361 --- /dev/null +++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto @@ -0,0 +1,146 @@ +syntax = "proto3"; + +package envoy.config.filter.network.tcp_proxy.v3alpha; + +option java_outer_classname = "TcpProxyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v3alpha"; +option go_package = "v2"; + +import "envoy/config/filter/accesslog/v3alpha/accesslog.proto"; +import 
"envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: TCP Proxy] +// TCP Proxy :ref:`configuration overview `. + +message TcpProxy { + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + // + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + envoy.api.v3alpha.core.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, connections will never be closed + // by the TCP proxy due to being idle. + google.protobuf.Duration idle_timeout = 8 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. 
+ google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by this tcp_proxy. + repeated envoy.config.filter.accesslog.v3alpha.AccessLog access_log = 5; + + // [#not-implemented-hide:] Deprecated. + // TCP Proxy filter configuration using V1 format. + message DeprecatedV1 { + // A TCP proxy route consists of a set of optional L4 criteria and the + // name of a cluster. If a downstream connection matches all the + // specified criteria, the cluster in the route is used for the + // corresponding upstream connection. Routes are tried in the order + // specified until a match is found. If no match is found, the connection + // is closed. A route with no criteria is valid and always produces a + // match. + message TCPRoute { + // The cluster to connect to when the downstream network connection + // matches the specified criteria. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // An optional list of IP address subnets in the form + // “ip_address/xx”. The criteria is satisfied if the destination IP + // address of the downstream connection is contained in at least one of + // the specified subnets. If the parameter is not specified or the list + // is empty, the destination IP address is ignored. The destination IP + // address of the downstream connection might be different from the + // addresses on which the proxy is listening if the connection has been + // redirected. + repeated envoy.api.v3alpha.core.CidrRange destination_ip_list = 2; + + // An optional string containing a comma-separated list of port numbers + // or ranges. The criteria is satisfied if the destination port of the + // downstream connection is contained in at least one of the specified + // ranges. If the parameter is not specified, the destination port is + // ignored. 
The destination port address of the downstream connection + // might be different from the port on which the proxy is listening if + // the connection has been redirected. + string destination_ports = 3; + + // An optional list of IP address subnets in the form + // “ip_address/xx”. The criteria is satisfied if the source IP address + // of the downstream connection is contained in at least one of the + // specified subnets. If the parameter is not specified or the list is + // empty, the source IP address is ignored. + repeated envoy.api.v3alpha.core.CidrRange source_ip_list = 4; + + // An optional string containing a comma-separated list of port numbers + // or ranges. The criteria is satisfied if the source port of the + // downstream connection is contained in at least one of the specified + // ranges. If the parameter is not specified, the source port is + // ignored. + string source_ports = 5; + } + + // The route table for the filter. All filter instances must have a route + // table, even if it is empty. + repeated TCPRoute routes = 1 [(validate.rules).repeated .min_items = 1]; + } + + // [#not-implemented-hide:] Deprecated. + DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32.gte = 1]; + + // Allows for specification of multiple upstream clusters along with weights + // that indicate the percentage of traffic to be forwarded to each cluster. + // The router selects an upstream cluster based on these weights. + message WeightedCluster { + message ClusterWeight { + // Name of the upstream cluster. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // When a request matches the route, the choice of an upstream cluster is + // determined by its weight. 
The sum of weights across all entries in the + // clusters array determines the total weight. + uint32 weight = 2 [(validate.rules).uint32.gte = 1]; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + } +} diff --git a/api/envoy/config/grpc_credential/v3alpha/BUILD b/api/envoy/config/grpc_credential/v3alpha/BUILD new file mode 100644 index 000000000000..2f6736732881 --- /dev/null +++ b/api/envoy/config/grpc_credential/v3alpha/BUILD @@ -0,0 +1,27 @@ +licenses(["notice"]) # Apache 2 + +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +api_proto_library_internal( + name = "file_based_metadata", + srcs = ["file_based_metadata.proto"], + deps = ["//envoy/api/v3alpha/core:base"], +) + +api_go_proto_library( + name = "file_based_metadata", + proto = ":file_based_metadata", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + ], +) + +api_proto_library_internal( + name = "aws_iam", + srcs = ["aws_iam.proto"], +) + +api_go_proto_library( + name = "aws_iam", + proto = ":aws_iam", +) diff --git a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto new file mode 100644 index 000000000000..33921db6d69a --- /dev/null +++ b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +// [#protodoc-title: Grpc Credentials AWS IAM] +// Configuration for AWS IAM Grpc Credentials Plugin + +package envoy.config.grpc_credential.v3alpha; + +option java_outer_classname = "AwsIamProto"; +option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha"; +option java_multiple_files = true; +option go_package = "v2alpha"; + +import "validate/validate.proto"; + +message AwsIamConfig { + // The `service namespace + // `_ + // of the Grpc endpoint. 
+ // + // Example: appmesh + string service_name = 1 [(validate.rules).string.min_bytes = 1]; + + // The `region `_ hosting the Grpc + // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment + // variable. + // + // Example: us-west-2 + string region = 2; +} diff --git a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto new file mode 100644 index 000000000000..2886921b3415 --- /dev/null +++ b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +// [#protodoc-title: Grpc Credentials File Based Metadata] +// Configuration for File Based Metadata Grpc Credentials Plugin + +package envoy.config.grpc_credential.v3alpha; + +option java_outer_classname = "FileBasedMetadataProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha"; +option go_package = "v2alpha"; + +import "envoy/api/v3alpha/core/base.proto"; + +message FileBasedMetadataConfig { + + // Location or inline data of secret to use for authentication of the Google gRPC connection + // this secret will be attached to a header of the gRPC connection + envoy.api.v3alpha.core.DataSource secret_data = 1; + + // Metadata header key to use for sending the secret data + // if no header key is set, "authorization" header will be used + string header_key = 2; + + // Prefix to prepend to the secret in the metadata header + // if no prefix is set, the default is to use no prefix + string header_prefix = 3; +} diff --git a/api/envoy/config/health_checker/redis/v3alpha/BUILD b/api/envoy/config/health_checker/redis/v3alpha/BUILD new file mode 100644 index 000000000000..239d1f224fc6 --- /dev/null +++ b/api/envoy/config/health_checker/redis/v3alpha/BUILD @@ -0,0 +1,8 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + 
+api_proto_library_internal( + name = "redis", + srcs = ["redis.proto"], +) diff --git a/api/envoy/config/health_checker/redis/v3alpha/redis.proto b/api/envoy/config/health_checker/redis/v3alpha/redis.proto new file mode 100644 index 000000000000..234da40d56ba --- /dev/null +++ b/api/envoy/config/health_checker/redis/v3alpha/redis.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.config.health_checker.redis.v3alpha; + +option java_outer_classname = "RedisProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v3alpha"; +option go_package = "v2"; + +// [#protodoc-title: Redis] +// Redis health checker :ref:`configuration overview `. + +message Redis { + // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. 
+ string key = 1; +} diff --git a/api/envoy/config/metrics/v3alpha/BUILD b/api/envoy/config/metrics/v3alpha/BUILD new file mode 100644 index 000000000000..39d0b79654d0 --- /dev/null +++ b/api/envoy/config/metrics/v3alpha/BUILD @@ -0,0 +1,43 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "metrics_service", + srcs = ["metrics_service.proto"], + visibility = [ + "//envoy/config/bootstrap/v3alpha:__pkg__", + ], + deps = [ + "//envoy/api/v3alpha/core:grpc_service", + ], +) + +api_go_proto_library( + name = "metrics_service", + proto = ":metrics_service", + deps = [ + "//envoy/api/v3alpha/core:grpc_service_go_proto", + ], +) + +api_proto_library_internal( + name = "stats", + srcs = ["stats.proto"], + visibility = [ + "//envoy/config/bootstrap/v3alpha:__pkg__", + ], + deps = [ + "//envoy/api/v3alpha/core:address", + "//envoy/type/matcher:string", + ], +) + +api_go_proto_library( + name = "stats", + proto = ":stats", + deps = [ + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/type/matcher:string_go_proto", + ], +) diff --git a/api/envoy/config/metrics/v3alpha/metrics_service.proto b/api/envoy/config/metrics/v3alpha/metrics_service.proto new file mode 100644 index 000000000000..392ceb8d6fed --- /dev/null +++ b/api/envoy/config/metrics/v3alpha/metrics_service.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +// [#protodoc-title: Metrics service] + +package envoy.config.metrics.v3alpha; + +option java_outer_classname = "MetricsServiceProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.metrics.v3alpha"; + +import "envoy/api/v3alpha/core/grpc_service.proto"; + +import "validate/validate.proto"; + +// Metrics Service is configured as a built-in *envoy.metrics_service* :ref:`StatsSink +// `. This opaque configuration will be used to +// create Metrics Service. 
+message MetricsServiceConfig { + // The upstream gRPC cluster that hosts the metrics service. + envoy.api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/metrics/v3alpha/stats.proto b/api/envoy/config/metrics/v3alpha/stats.proto new file mode 100644 index 000000000000..91324ed0ef61 --- /dev/null +++ b/api/envoy/config/metrics/v3alpha/stats.proto @@ -0,0 +1,331 @@ +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + +syntax = "proto3"; + +package envoy.config.metrics.v3alpha; + +option java_outer_classname = "StatsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.metrics.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/type/matcher/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// Configuration for pluggable stats sinks. +message StatsSink { + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. The built-in stats sinks are: + // + // * :ref:`envoy.statsd ` + // * :ref:`envoy.dog_statsd ` + // * :ref:`envoy.metrics_service ` + // * :ref:`envoy.stat_sinks.hystrix ` + // + // Sinks optionally support tagged/multiple dimensional metrics. + string name = 1; + + // Stats sink specific configuration which depends on the sink being instantiated. See + // :ref:`StatsdSink ` for an example. + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } +} + +// Statistics configuration such as tagging. +message StatsConfig { + // Each stat name is iteratively processed through these tag specifiers. + // When a tag is matched, the first capture group is removed from the name so + // later :ref:`TagSpecifiers ` cannot match + // that same portion of the match. 
+ repeated TagSpecifier stats_tags = 1; + + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. + // + // If not provided, the value is assumed to be true. + google.protobuf.BoolValue use_all_default_tags = 2; + + // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated + // as normal. Preventing the instantiation of certain families of stats can improve memory + // performance for Envoys running especially large configs. + StatsMatcher stats_matcher = 3; +} + +// Configuration for disabling stat instantiation. +message StatsMatcher { + // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to + // instantiate all stats, there is no need to construct a StatsMatcher. + // + // However, StatsMatcher can be used to limit the creation of families of stats in order to + // conserve memory. Stats can either be disabled entirely, or they can be + // limited by either an exclusion or an inclusion list of :ref:`StringMatcher + // ` protos: + // + // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to + // `false`, all stats will be instantiated. + // + // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the + // list will not instantiate. + // + // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of + // the StringMatchers in the list. + // + // + // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. 
+ // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based + // matcher rather than a regex-based matcher. + // + // Example 1. Excluding all stats. + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "rejectAll": "true" + // } + // } + // + // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "exclusionList": { + // "patterns": [ + // { + // "prefix": "cluster." + // } + // ] + // } + // } + // } + // + // Example 3. Including only manager-related stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "inclusionList": { + // "patterns": [ + // { + // "prefix": "cluster_manager." + // }, + // { + // "prefix": "listener_manager." + // } + // ] + // } + // } + // } + // + + oneof stats_matcher { + option (validate.required) = true; + + // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all + // stats are enabled. + bool reject_all = 1; + + // Exclusive match. All stats are enabled except for those matching one of the supplied + // StringMatcher protos. + envoy.type.matcher.ListStringMatcher exclusion_list = 2; + + // Inclusive match. No stats are enabled except for those matching one of the supplied + // StringMatcher protos. + envoy.type.matcher.ListStringMatcher inclusion_list = 3; + }; +} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. +message TagSpecifier { + // Attaches an identifier to the tag values to identify the tag being in the + // sink. 
Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were + // specified, Envoy will attempt to find that name in its set of defaults and use the accompanying + // regex. + // + // .. note:: + // + // It is invalid to specify the same tag name twice in a config. + string tag_name = 1; + + oneof tag_value { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and + // one tag specifier: + // + // .. code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\.((.+?)\.)" + // } + // + // Note that the regex will remove ``foo_cluster.`` making the tag extracted + // name ``cluster.upstream_rq_timeout`` and the tag value for + // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no + // ``.`` character because of the second capture group). + // + // Example 2. a stat name + // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two + // tag specifiers: + // + // .. 
code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\.((.*?)\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed in the definition order. + // + // The first regex will remove ``ios.``, leaving the tag extracted name + // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag + // ``envoy.http_user_agent`` will be added with tag value ``ios``. + // + // The second regex will remove ``connection_manager_1.`` from the tag + // extracted name produced by the first regex + // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving + // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag + // ``envoy.http_conn_manager_prefix`` will be added with the tag value + // ``connection_manager_1``. + string regex = 2 [(validate.rules).string.max_bytes = 1024]; + + // Specifies a fixed tag value for the ``tag_name``. + string fixed_value = 3; + } +} + +// Stats configuration proto schema for built-in *envoy.statsd* sink. This sink does not support +// tagged metrics. +message StatsdSink { + oneof statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + envoy.api.v3alpha.core.Address address = 1; + + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + string tcp_cluster_name = 2; + } + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. 
code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.dog_statsd* sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#comment:next free field: 3] +message DogStatsdSink { + oneof dog_statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. + envoy.api.v3alpha.core.Address address = 1; + } + + reserved 2; + + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. +// The sink emits stats in `text/event-stream +// `_ +// formatted stream for use by `Hystrix dashboard +// `_. +// +// Note that only a single HystrixSink should be configured. +// +// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. +message HystrixSink { + // The number of buckets the rolling statistical window is divided into. + // + // Each time the sink is flushed, all relevant Envoy statistics are sampled and + // added to the rolling window (removing the oldest samples in the window + // in the process). The sink then outputs the aggregate statistics across the + // current rolling window to the event stream(s). + // + // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets + // + // More detailed explanation can be found in `Hystrix wiki + // `_. 
+ int64 num_buckets = 1; +} diff --git a/api/envoy/config/overload/v3alpha/BUILD b/api/envoy/config/overload/v3alpha/BUILD new file mode 100644 index 000000000000..bfffb5639ca7 --- /dev/null +++ b/api/envoy/config/overload/v3alpha/BUILD @@ -0,0 +1,14 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "overload", + srcs = ["overload.proto"], + visibility = ["//visibility:public"], +) + +api_go_proto_library( + name = "overload", + proto = ":overload", +) diff --git a/api/envoy/config/overload/v3alpha/overload.proto b/api/envoy/config/overload/v3alpha/overload.proto new file mode 100644 index 000000000000..474c7677002b --- /dev/null +++ b/api/envoy/config/overload/v3alpha/overload.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package envoy.config.overload.v3alpha; + +option java_outer_classname = "OverloadProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.overload.v3alpha"; +option go_package = "v2alpha"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Overload Manager] + +// The Overload Manager provides an extensible framework to protect Envoy instances +// from overload of various resources (memory, cpu, file descriptors, etc). +// It monitors a configurable set of resources and notifies registered listeners +// when triggers related to those resources fire. + +message ResourceMonitor { + // The name of the resource monitor to instantiate. Must match a registered + // resource monitor type. 
The built-in resource monitors are: + // + // * :ref:`envoy.resource_monitors.fixed_heap + // ` + // * :ref:`envoy.resource_monitors.injected_resource + // ` + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Configuration for the resource monitor being instantiated. + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } +} + +message ThresholdTrigger { + // If the resource pressure is greater than or equal to this value, the trigger + // will fire. + double value = 1 [(validate.rules).double = {gte: 0, lte: 1}]; +} + +message Trigger { + // The name of the resource this is a trigger for. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + oneof trigger_oneof { + option (validate.required) = true; + ThresholdTrigger threshold = 2; + } +} + +message OverloadAction { + // The name of the overload action. This is just a well-known string that listeners can + // use for registering callbacks. Custom overload actions should be named using reverse + // DNS to ensure uniqueness. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A set of triggers for this action. If any of these triggers fire the overload action + // is activated. Listeners are notified when the overload action transitions from + // inactivated to activated, or vice versa. + repeated Trigger triggers = 2 [(validate.rules).repeated .min_items = 1]; +} + +message OverloadManager { + // The interval for refreshing resource usage. + google.protobuf.Duration refresh_interval = 1; + + // The set of resources to monitor. + repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated .min_items = 1]; + + // The set of overload actions. 
+ repeated OverloadAction actions = 3; +} diff --git a/api/envoy/config/ratelimit/v3alpha/BUILD b/api/envoy/config/ratelimit/v3alpha/BUILD new file mode 100644 index 000000000000..571a768dde4b --- /dev/null +++ b/api/envoy/config/ratelimit/v3alpha/BUILD @@ -0,0 +1,20 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "rls", + srcs = ["rls.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha/core:grpc_service", + ], +) + +api_go_grpc_library( + name = "rls", + proto = ":rls", + deps = [ + "//envoy/api/v3alpha/core:grpc_service_go_proto", + ], +) diff --git a/api/envoy/config/ratelimit/v3alpha/rls.proto b/api/envoy/config/ratelimit/v3alpha/rls.proto new file mode 100644 index 000000000000..67ac6479cd23 --- /dev/null +++ b/api/envoy/config/ratelimit/v3alpha/rls.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.config.ratelimit.v3alpha; + +option java_outer_classname = "RlsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.ratelimit.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/grpc_service.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Rate limit service] + +// Rate limit :ref:`configuration overview `. +message RateLimitServiceConfig { + reserved 1; + + // Specifies the gRPC service that hosts the rate limit service. The client + // will connect to this cluster when it needs to make rate limit service + // requests. 
+ envoy.api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + + reserved 3; +} diff --git a/api/envoy/config/rbac/v3alpha/BUILD b/api/envoy/config/rbac/v3alpha/BUILD new file mode 100644 index 000000000000..89f98c97d481 --- /dev/null +++ b/api/envoy/config/rbac/v3alpha/BUILD @@ -0,0 +1,36 @@ +licenses(["notice"]) # Apache 2 + +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +api_proto_library_internal( + name = "rbac", + srcs = ["rbac.proto"], + external_cc_proto_deps = [ + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto", + ], + external_proto_deps = [ + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", + ], + external_py_proto_deps = [ + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", + ], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/route", + "//envoy/type/matcher:metadata", + "//envoy/type/matcher:string", + ], +) + +api_go_proto_library( + name = "rbac", + proto = ":rbac", + deps = [ + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/route:route_go_proto", + "//envoy/type/matcher:metadata_go_proto", + "//envoy/type/matcher:string_go_proto", + "@com_google_googleapis//google/api/expr/v1alpha1:cel_go_proto", + ], +) diff --git a/api/envoy/config/rbac/v3alpha/rbac.proto b/api/envoy/config/rbac/v3alpha/rbac.proto new file mode 100644 index 000000000000..d299c384da90 --- /dev/null +++ b/api/envoy/config/rbac/v3alpha/rbac.proto @@ -0,0 +1,215 @@ +syntax = "proto3"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/route/route.proto"; +import "envoy/type/matcher/metadata.proto"; +import "envoy/type/matcher/string.proto"; + +import "google/api/expr/v1alpha1/syntax.proto"; + +package envoy.config.rbac.v3alpha; + +option java_outer_classname = 
"RbacProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.rbac.v3alpha"; +option go_package = "v2"; + +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: Role Based Access Control (RBAC)] + +// Role Based Access Control (RBAC) provides service-level and method-level access control for a +// service. RBAC policies are additive. The policies are examined in order. A request is allowed +// once a matching policy is found (suppose the `action` is ALLOW). +// +// Here is an example of RBAC configuration. It has two policies: +// +// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so +// does "cluster.local/ns/default/sa/superuser". +// +// * Any user can read ("GET") the service at paths with prefix "/products", so long as the +// destination port is either 80 or 443. +// +// .. code-block:: yaml +// +// action: ALLOW +// policies: +// "service-admin": +// permissions: +// - any: true +// principals: +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/admin" +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/superuser" +// "product-viewer": +// permissions: +// - and_rules: +// rules: +// - header: { name: ":method", exact_match: "GET" } +// - header: { name: ":path", regex_match: "/products(/.*)?" } +// - or_rules: +// rules: +// - destination_port: 80 +// - destination_port: 443 +// principals: +// - any: true +// +message RBAC { + // Should we do safe-list or block-list style access control? + enum Action { + // The policies grant access to principals. The rest is denied. This is safe-list style + // access control. This is the default type. + ALLOW = 0; + + // The policies deny access to principals. The rest is allowed. This is block-list style + // access control. + DENY = 1; + } + + // The action to take if a policy matches. 
The request is allowed if and only if: + // + // * `action` is "ALLOW" and at least one policy matches + // * `action` is "DENY" and none of the policies match + Action action = 1; + + // Maps from policy name to policy. A match occurs when at least one policy matches the request. + map policies = 2; +} + +// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if +// and only if at least one of its permissions match the action taking place AND at least one of its +// principals match the downstream AND the condition is true if specified. +message Policy { + // Required. The set of permissions that define a role. Each permission is matched with OR + // semantics. To match all actions for this policy, a single Permission with the `any` field set + // to true should be used. + repeated Permission permissions = 1 [(validate.rules).repeated .min_items = 1]; + + // Required. The set of principals that are assigned/denied the role based on “action”. Each + // principal is matched with OR semantics. To match all downstreams for this policy, a single + // Principal with the `any` field set to true should be used. + repeated Principal principals = 2 [(validate.rules).repeated .min_items = 1]; + + // An optional symbolic expression specifying an access control condition. + // The condition is combined with AND semantics. + google.api.expr.v1alpha1.Expr condition = 3; +} + +// Permission defines an action (or actions) that a principal can take. +message Permission { + + // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, + // each are applied with the associated behavior. + message Set { + repeated Permission rules = 1 [(validate.rules).repeated .min_items = 1]; + } + + oneof rule { + option (validate.required) = true; + + // A set of rules that all must match in order to define the action. 
+ Set and_rules = 1; + + // A set of rules where at least one must match in order to define the action. + Set or_rules = 2; + + // When any is set, it matches any action. + bool any = 3 [(validate.rules).bool.const = true]; + + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + envoy.api.v3alpha.route.HeaderMatcher header = 4; + + // A CIDR block that describes the destination IP. + envoy.api.v3alpha.core.CidrRange destination_ip = 5; + + // A port number that describes the destination port connecting to. + uint32 destination_port = 6 [(validate.rules).uint32.lte = 65535]; + + // Metadata that describes additional information about the action. + envoy.type.matcher.MetadataMatcher metadata = 7; + + // Negates matching the provided permission. For instance, if the value of `not_rule` would + // match, this permission would not match. Conversely, if the value of `not_rule` would not + // match, this permission would match. + Permission not_rule = 8; + + // The request server from the client's connection request. This is + // typically TLS SNI. + // + // .. attention:: + // + // The behavior of this field may be affected by how Envoy is configured + // as explained below. + // + // * If the :ref:`TLS Inspector ` + // filter is not added, and if a `FilterChainMatch` is not defined for + // the :ref:`server name `, + // a TLS connection's requested SNI server name will be treated as if it + // wasn't present. + // + // * A :ref:`listener filter ` may + // overwrite a connection's requested server name within Envoy. + // + // Please refer to :ref:`this FAQ entry ` to learn to + // setup SNI. + envoy.type.matcher.StringMatcher requested_server_name = 9; + } +} + +// Principal defines an identity or a group of identities for a downstream subject. +message Principal { + + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. 
Depending on the context, + // each are applied with the associated behavior. + message Set { + repeated Principal ids = 1 [(validate.rules).repeated .min_items = 1]; + } + + // Authentication attributes for a downstream. + message Authenticated { + reserved 1; + reserved "name"; + + // The name of the principal. If set, the URI SAN or DNS SAN in that order is used from the + // certificate, otherwise the subject field is used. If unset, it applies to any user that is + // authenticated. + envoy.type.matcher.StringMatcher principal_name = 2; + } + + oneof identifier { + option (validate.required) = true; + + // A set of identifiers that all must match in order to define the downstream. + Set and_ids = 1; + + // A set of identifiers where at least one must match in order to define the downstream. + Set or_ids = 2; + + // When any is set, it matches any downstream. + bool any = 3 [(validate.rules).bool.const = true]; + + // Authenticated attributes that identify the downstream. + Authenticated authenticated = 4; + + // A CIDR block that describes the downstream IP. + envoy.api.v3alpha.core.CidrRange source_ip = 5; + + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + envoy.api.v3alpha.route.HeaderMatcher header = 6; + + // Metadata that describes additional information about the principal. + envoy.type.matcher.MetadataMatcher metadata = 7; + + // Negates matching the provided principal. For instance, if the value of `not_id` would match, + // this principal would not match. Conversely, if the value of `not_id` would not match, this + // principal would match. 
+ Principal not_id = 8; + } +} diff --git a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD new file mode 100644 index 000000000000..363d90f11808 --- /dev/null +++ b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "fixed_heap", + srcs = ["fixed_heap.proto"], + visibility = ["//visibility:public"], +) diff --git a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto new file mode 100644 index 000000000000..2bc1baf85243 --- /dev/null +++ b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.config.resource_monitor.fixed_heap.v3alpha; + +option java_outer_classname = "FixedHeapProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v3alpha"; +option go_package = "v2alpha"; + +import "validate/validate.proto"; + +// [#protodoc-title: Fixed heap] + +// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a +// fraction of currently reserved heap memory divided by a statically configured maximum +// specified in the FixedHeapConfig. 
+message FixedHeapConfig { + uint64 max_heap_size_bytes = 1 [(validate.rules).uint64.gt = 0]; +} diff --git a/api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD b/api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD new file mode 100644 index 000000000000..10abf09e9ef8 --- /dev/null +++ b/api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD @@ -0,0 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "injected_resource", + srcs = ["injected_resource.proto"], + visibility = ["//visibility:public"], +) diff --git a/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto new file mode 100644 index 000000000000..f5b41ef165c8 --- /dev/null +++ b/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.config.resource_monitor.injected_resource.v3alpha; + +option java_outer_classname = "InjectedResourceProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v3alpha"; +option go_package = "v2alpha"; + +import "validate/validate.proto"; + +// [#protodoc-title: Injected resource] + +// The injected resource monitor allows injecting a synthetic resource pressure into Envoy +// via a text file, which must contain a floating-point number in the range [0..1] representing +// the resource pressure and be updated atomically by a symbolic link swap. +// This is intended primarily for integration tests to force Envoy into an overloaded state. 
+message InjectedResourceConfig { + string filename = 1 [(validate.rules).string.min_bytes = 1]; +} diff --git a/api/envoy/config/trace/v3alpha/BUILD b/api/envoy/config/trace/v3alpha/BUILD new file mode 100644 index 000000000000..72056b3ad4b6 --- /dev/null +++ b/api/envoy/config/trace/v3alpha/BUILD @@ -0,0 +1,24 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "trace", + srcs = ["trace.proto"], + visibility = [ + "//envoy/config/bootstrap/v3alpha:__pkg__", + ], + deps = [ + "//envoy/api/v3alpha/core:grpc_service", + "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", + ], +) + +api_go_proto_library( + name = "trace", + proto = ":trace", + deps = [ + "//envoy/api/v3alpha/core:grpc_service_go_proto", + "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", + ], +) diff --git a/api/envoy/config/trace/v3alpha/trace.proto b/api/envoy/config/trace/v3alpha/trace.proto new file mode 100644 index 000000000000..2771c1b40f28 --- /dev/null +++ b/api/envoy/config/trace/v3alpha/trace.proto @@ -0,0 +1,204 @@ +// [#protodoc-title: Tracing] +// Tracing :ref:`architecture overview `. + +syntax = "proto3"; + +package envoy.config.trace.v3alpha; + +option java_outer_classname = "TraceProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.trace.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/grpc_service.proto"; +import "opencensus/proto/trace/v1/trace_config.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// The tracing configuration specifies global +// settings for the HTTP tracer used by Envoy. The configuration is defined by +// the :ref:`Bootstrap ` :ref:`tracing +// ` field. 
Envoy may support other +// tracers in the future, but right now the HTTP tracer is the only one supported. +message Tracing { + message Http { + // The name of the HTTP trace driver to instantiate. The name must match a + // supported HTTP trace driver. Built-in trace drivers: + // + // - *envoy.lightstep* + // - *envoy.zipkin* + // - *envoy.dynamic.ot* + // - *envoy.tracers.datadog* + // - *envoy.tracers.opencensus* + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Trace driver specific configuration which depends on the driver being instantiated. + // See the trace drivers for examples: + // + // - :ref:`LightstepConfig ` + // - :ref:`ZipkinConfig ` + // - :ref:`DynamicOtConfig ` + // - :ref:`DatadogConfig ` + // - :ref:`OpenCensusConfig ` + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + // Provides configuration for the HTTP tracer. + Http http = 1; +} + +// Configuration for the LightStep tracer. +message LightstepConfig { + // The cluster manager cluster that hosts the LightStep collectors. + string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // File containing the access token to the `LightStep + // `_ API. + string access_token_file = 2 [(validate.rules).string.min_bytes = 1]; +} + +// Configuration for the Zipkin tracer. +message ZipkinConfig { + // The cluster manager cluster that hosts the Zipkin collectors. Note that the + // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster + // resources `. + string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation, the API endpoint is typically + // /api/v1/spans, which is the default value. + string collector_endpoint = 2 [(validate.rules).string.min_bytes = 1]; + + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. 
The default value is false, which will result in a 64 bit trace id being used. + bool trace_id_128bit = 3; + + // Determines whether client and server spans will share the same span context. + // The default value is true. + google.protobuf.BoolValue shared_span_context = 4; + + // Available Zipkin collector endpoint versions. + enum CollectorEndpointVersion { + // Zipkin API v1, JSON over HTTP. + // [#comment: The default implementation of Zipkin client before this field is added was only v1 + // and the way user configure this was by not explicitly specifying the version. Consequently, + // before this is added, the corresponding Zipkin collector expected to receive v1 payload. + // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when + // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, + // since in Zipkin realm this v1 version is considered to be not preferable anymore.] + HTTP_JSON_V1 = 0 [deprecated = true]; + + // Zipkin API v2, JSON over HTTP. + HTTP_JSON = 1; + + // Zipkin API v2, protobuf over HTTP. + HTTP_PROTO = 2; + + // [#not-implemented-hide:] + GRPC = 3; + } + + // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be + // used. + CollectorEndpointVersion collector_endpoint_version = 5; +} + +// DynamicOtConfig is used to dynamically load a tracer from a shared library +// that implements the `OpenTracing dynamic loading API +// `_. +message DynamicOtConfig { + // Dynamic library implementing the `OpenTracing API + // `_. + string library = 1 [(validate.rules).string.min_bytes = 1]; + + // The configuration to use when creating a tracer from the given dynamic + // library. + google.protobuf.Struct config = 2; +} + +// Configuration for the Datadog tracer. +message DatadogConfig { + // The cluster to use for submitting traces to the Datadog agent. 
+ string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + // The name used for the service when traces are generated by envoy. + string service_name = 2 [(validate.rules).string.min_bytes = 1]; +} + +// Configuration for the OpenCensus tracer. +// [#proto-status: experimental] +message OpenCensusConfig { + // Configures tracing, e.g. the sampler, max number of annotations, etc. + opencensus.proto.trace.v1.TraceConfig trace_config = 1; + + // Enables the stdout exporter if set to true. This is intended for debugging + // purposes. + bool stdout_exporter_enabled = 2; + + // Enables the Stackdriver exporter if set to true. The project_id must also + // be set. + bool stackdriver_exporter_enabled = 3; + + // The Cloud project_id to use for Stackdriver tracing. + string stackdriver_project_id = 4; + + // (optional) By default, the Stackdriver exporter will connect to production + // Stackdriver. If stackdriver_address is non-empty, it will instead connect + // to this address, which is in the gRPC format: + // https://github.com/grpc/grpc/blob/master/doc/naming.md + string stackdriver_address = 10; + + // Enables the Zipkin exporter if set to true. The url and service name must + // also be set. + bool zipkin_exporter_enabled = 5; + + // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v3alpha/spans" + string zipkin_url = 6; + + // Enables the OpenCensus Agent exporter if set to true. The address must also + // be set. + bool ocagent_exporter_enabled = 11; + + // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC + // format: https://github.com/grpc/grpc/blob/master/doc/naming.md + string ocagent_address = 12; + + reserved 7; // Formerly zipkin_service_name. + + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. 
+ GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + // List of incoming trace context headers we will accept. First one found + // wins. + repeated TraceContext incoming_trace_context = 8; + + // List of outgoing trace context headers we will produce. + repeated TraceContext outgoing_trace_context = 9; +} + +// Configuration structure. +message TraceServiceConfig { + // The upstream gRPC cluster that hosts the metrics service. + envoy.api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/transport_socket/alts/v3alpha/BUILD b/api/envoy/config/transport_socket/alts/v3alpha/BUILD new file mode 100644 index 000000000000..7ffc03097000 --- /dev/null +++ b/api/envoy/config/transport_socket/alts/v3alpha/BUILD @@ -0,0 +1,11 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "alts", + srcs = ["alts.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + ], +) diff --git a/api/envoy/config/transport_socket/alts/v3alpha/alts.proto b/api/envoy/config/transport_socket/alts/v3alpha/alts.proto new file mode 100644 index 000000000000..22684b862614 --- /dev/null +++ b/api/envoy/config/transport_socket/alts/v3alpha/alts.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.config.transport_socket.alts.v3alpha; + +option java_outer_classname = "AltsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v3alpha"; +option go_package = "v2"; + +// [#protodoc-title: ALTS] + +import "validate/validate.proto"; + +// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy. 
+// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ +message Alts { + // The location of a handshaker service, this is usually 169.254.169.254:8080 + // on GCE. + string handshaker_service = 1 [(validate.rules).string.min_bytes = 1]; + + // The acceptable service accounts from peer, peers not in the list will be rejected in the + // handshake validation step. If empty, no validation will be performed. + repeated string peer_service_accounts = 2; +} diff --git a/api/envoy/config/transport_socket/tap/v3alpha/BUILD b/api/envoy/config/transport_socket/tap/v3alpha/BUILD new file mode 100644 index 000000000000..8056ad6f17bb --- /dev/null +++ b/api/envoy/config/transport_socket/tap/v3alpha/BUILD @@ -0,0 +1,12 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "tap", + srcs = ["tap.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/config/common/tap/v3alpha:common", + ], +) diff --git a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto new file mode 100644 index 000000000000..1cca6814c803 --- /dev/null +++ b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.config.transport_socket.tap.v3alpha; + +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v3alpha"; +option go_package = "v2"; + +// [#protodoc-title: Tap] + +import "envoy/config/common/tap/v3alpha/common.proto"; +import "envoy/api/v3alpha/core/base.proto"; + +import "validate/validate.proto"; + +// Configuration for tap transport socket. This wraps another transport socket, providing the +// ability to interpose and record in plain text any traffic that is surfaced to Envoy. 
+message Tap { + // Common configuration for the tap transport socket. + common.tap.v3alpha.CommonExtensionConfig common_config = 1 + [(validate.rules).message.required = true]; + + // The underlying transport socket being wrapped. + api.v3alpha.core.TransportSocket transport_socket = 2 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/data/accesslog/v3alpha/BUILD b/api/envoy/data/accesslog/v3alpha/BUILD new file mode 100644 index 000000000000..30157958e7fe --- /dev/null +++ b/api/envoy/data/accesslog/v3alpha/BUILD @@ -0,0 +1,24 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "accesslog", + srcs = ["accesslog.proto"], + visibility = [ + "//envoy/service/accesslog/v3alpha:__pkg__", + ], + deps = [ + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + ], +) + +api_go_proto_library( + name = "accesslog", + proto = ":accesslog", + deps = [ + "//envoy/api/v3alpha/core:address_go_proto", + "//envoy/api/v3alpha/core:base_go_proto", + ], +) diff --git a/api/envoy/data/accesslog/v3alpha/accesslog.proto b/api/envoy/data/accesslog/v3alpha/accesslog.proto new file mode 100644 index 000000000000..b4588ecd31ff --- /dev/null +++ b/api/envoy/data/accesslog/v3alpha/accesslog.proto @@ -0,0 +1,356 @@ +syntax = "proto3"; + +package envoy.data.accesslog.v3alpha; + +option java_outer_classname = "AccesslogProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.accesslog.v3alpha"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "gogoproto/gogo.proto"; +import "validate/validate.proto"; + +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: gRPC access logs] +// Envoy access logs 
describe incoming interaction with Envoy over a fixed +// period of time, and typically cover a single request/response exchange, +// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). +// Access logs contain fields defined in protocol-specific protobuf messages. +// +// Except where explicitly declared otherwise, all fields describe +// *downstream* interaction between Envoy and a connected client. +// Fields describing *upstream* interaction will explicitly include ``upstream`` +// in their name. + +message TCPAccessLogEntry { + // Common properties shared by all Envoy access logs. + AccessLogCommon common_properties = 1; + + // Properties of the TCP connection. + ConnectionProperties connection_properties = 2; +} + +message HTTPAccessLogEntry { + // Common properties shared by all Envoy access logs. + AccessLogCommon common_properties = 1; + + // HTTP version + enum HTTPVersion { + PROTOCOL_UNSPECIFIED = 0; + HTTP10 = 1; + HTTP11 = 2; + HTTP2 = 3; + } + HTTPVersion protocol_version = 2; + + // Description of the incoming HTTP request. + HTTPRequestProperties request = 3; + + // Description of the outgoing HTTP response. + HTTPResponseProperties response = 4; +} + +// Defines fields for a connection +message ConnectionProperties { + // Number of bytes received from downstream. + uint64 received_bytes = 1; + + // Number of bytes sent to downstream. + uint64 sent_bytes = 2; +} + +// Defines fields that are shared by all Envoy access logs. +message AccessLogCommon { + // [#not-implemented-hide:] + // This field indicates the rate at which this log entry was sampled. + // Valid range is (0.0, 1.0]. + double sample_rate = 1 [(validate.rules).double.gt = 0.0, (validate.rules).double.lte = 1.0]; + + // This field is the remote/origin address on which the request from the user was received. + // Note: This may not be the physical peer. E.g, if the remote address is inferred from for + // example the x-forwarder-for header, proxy protocol, etc. 
+  envoy.api.v3alpha.core.Address downstream_remote_address = 2; + + // This field is the local/destination address on which the request from the user was received. + envoy.api.v3alpha.core.Address downstream_local_address = 3; + + // If the connection is secure, this field will contain TLS properties. + TLSProperties tls_properties = 4; + + // The time that Envoy started servicing this request. This is effectively the time that the first + // downstream byte is received. + google.protobuf.Timestamp start_time = 5 [(gogoproto.stdtime) = true]; + + // Interval between the first downstream byte received and the last + // downstream byte received (i.e. time it takes to receive a request). + google.protobuf.Duration time_to_last_rx_byte = 6 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the first upstream byte sent. There may + // be considerable delta between *time_to_last_rx_byte* and this value due to filters. + // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about + // not accounting for kernel socket buffer time, etc. + google.protobuf.Duration time_to_first_upstream_tx_byte = 7 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the last upstream byte sent. There may + // be considerable delta between *time_to_last_rx_byte* and this value due to filters. + // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about + // not accounting for kernel socket buffer time, etc. + google.protobuf.Duration time_to_last_upstream_tx_byte = 8 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the first upstream + // byte received (i.e. time it takes to start receiving a response). 
+ google.protobuf.Duration time_to_first_upstream_rx_byte = 9 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the last upstream + // byte received (i.e. time it takes to receive a complete response). + google.protobuf.Duration time_to_last_upstream_rx_byte = 10 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the first downstream byte sent. + // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field + // due to filters. Additionally, the same caveats apply as documented in + // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. + google.protobuf.Duration time_to_first_downstream_tx_byte = 11 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the last downstream byte sent. + // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta + // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate + // time. In the current implementation it does not include kernel socket buffer time. In the + // current implementation it also does not include send window buffering inside the HTTP/2 codec. + // In the future it is likely that work will be done to make this duration more accurate. + google.protobuf.Duration time_to_last_downstream_tx_byte = 12 [(gogoproto.stdduration) = true]; + + // The upstream remote/destination address that handles this exchange. This does not include + // retries. + envoy.api.v3alpha.core.Address upstream_remote_address = 13; + + // The upstream local/origin address that handles this exchange. This does not include retries. + envoy.api.v3alpha.core.Address upstream_local_address = 14; + + // The upstream cluster that *upstream_remote_address* belongs to. + string upstream_cluster = 15; + + // Flags indicating occurrences during request/response processing. 
+ ResponseFlags response_flags = 16; + + // All metadata encountered during request processing, including endpoint + // selection. + // + // This can be used to associate IDs attached to the various configurations + // used to process this request with the access log entry. For example, a + // route created from a higher level forwarding rule with some ID can place + // that ID in this field and cross reference later. It can also be used to + // determine if a canary endpoint was used or not. + envoy.api.v3alpha.core.Metadata metadata = 17; + + // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured + // upstream transport socket. Common TLS failures are in + // :ref:`TLS trouble shooting `. + string upstream_transport_failure_reason = 18; + + // The name of the route + string route_name = 19; +} + +// Flags indicating occurrences during request/response processing. +message ResponseFlags { + // Indicates local server healthcheck failed. + bool failed_local_healthcheck = 1; + + // Indicates there was no healthy upstream. + bool no_healthy_upstream = 2; + + // Indicates there was an upstream request timeout. + bool upstream_request_timeout = 3; + + // Indicates local codec level reset was sent on the stream. + bool local_reset = 4; + + // Indicates remote codec level reset was received on the stream. + bool upstream_remote_reset = 5; + + // Indicates there was a local reset by a connection pool due to an initial connection failure. + bool upstream_connection_failure = 6; + + // Indicates the stream was reset due to an upstream connection termination. + bool upstream_connection_termination = 7; + + // Indicates the stream was reset because of a resource overflow. + bool upstream_overflow = 8; + + // Indicates no route was found for the request. + bool no_route_found = 9; + + // Indicates that the request was delayed before proxying. 
+ bool delay_injected = 10; + + // Indicates that the request was aborted with an injected error code. + bool fault_injected = 11; + + // Indicates that the request was rate-limited locally. + bool rate_limited = 12; + + message Unauthorized { + // Reasons why the request was unauthorized + enum Reason { + REASON_UNSPECIFIED = 0; + // The request was denied by the external authorization service. + EXTERNAL_SERVICE = 1; + } + + Reason reason = 1; + } + + // Indicates if the request was deemed unauthorized and the reason for it. + Unauthorized unauthorized_details = 13; + + // Indicates that the request was rejected because there was an error in rate limit service. + bool rate_limit_service_error = 14; + + // Indicates the stream was reset due to a downstream connection termination. + bool downstream_connection_termination = 15; + + // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. + bool upstream_retry_limit_exceeded = 16; + + // Indicates that the stream idle timeout was hit, resulting in a downstream 408. + bool stream_idle_timeout = 17; + + // Indicates that the request was rejected because an envoy request header failed strict + // validation. + bool invalid_envoy_request_headers = 18; +} + +// Properties of a negotiated TLS connection. +message TLSProperties { + enum TLSVersion { + VERSION_UNSPECIFIED = 0; + TLSv1 = 1; + TLSv1_1 = 2; + TLSv1_2 = 3; + TLSv1_3 = 4; + } + // Version of TLS that was negotiated. + TLSVersion tls_version = 1; + + // TLS cipher suite negotiated during handshake. The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + // + // Here it is expressed as an integer. + google.protobuf.UInt32Value tls_cipher_suite = 2; + + // SNI hostname from handshake. 
+ string tls_sni_hostname = 3; + + message CertificateProperties { + message SubjectAltName { + oneof san { + string uri = 1; + // [#not-implemented-hide:] + string dns = 2; + } + } + + // SANs present in the certificate. + repeated SubjectAltName subject_alt_name = 1; + + // The subject field of the certificate. + string subject = 2; + } + + // Properties of the local certificate used to negotiate TLS. + CertificateProperties local_certificate_properties = 4; + + // Properties of the peer certificate used to negotiate TLS. + CertificateProperties peer_certificate_properties = 5; + + // The TLS session ID. + string tls_session_id = 6; +} + +message HTTPRequestProperties { + // The request method (RFC 7231/2616). + // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once + // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] + envoy.api.v3alpha.core.RequestMethod request_method = 1; + + // The scheme portion of the incoming request URI. + string scheme = 2; + + // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. + string authority = 3; + + // The port of the incoming request URI + // (unused currently, as port is composed onto authority). + google.protobuf.UInt32Value port = 4; + + // The path portion from the incoming request URI. + string path = 5; + + // Value of the ``User-Agent`` request header. + string user_agent = 6; + + // Value of the ``Referer`` request header. + string referer = 7; + + // Value of the ``X-Forwarded-For`` request header. + string forwarded_for = 8; + + // Value of the ``X-Request-Id`` request header + // + // This header is used by Envoy to uniquely identify a request. + // It will be generated for all external requests and internal requests that + // do not already have a request ID. + string request_id = 9; + + // Value of the ``X-Envoy-Original-Path`` request header. + string original_path = 10; + + // Size of the HTTP request headers in bytes. 
+ // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + uint64 request_headers_bytes = 11; + + // Size of the HTTP request body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + uint64 request_body_bytes = 12; + + // Map of additional headers that have been configured to be logged. + map request_headers = 13; +} + +message HTTPResponseProperties { + // The HTTP response code returned by Envoy. + google.protobuf.UInt32Value response_code = 1; + + // Size of the HTTP response headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + uint64 response_headers_bytes = 2; + + // Size of the HTTP response body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + uint64 response_body_bytes = 3; + + // Map of additional headers configured to be logged. + map response_headers = 4; + + // Map of trailers configured to be logged. + map response_trailers = 5; + + // The HTTP response code details. 
+ string response_code_details = 6; +} diff --git a/api/envoy/data/cluster/v3alpha/BUILD b/api/envoy/data/cluster/v3alpha/BUILD new file mode 100644 index 000000000000..00edd8294b6f --- /dev/null +++ b/api/envoy/data/cluster/v3alpha/BUILD @@ -0,0 +1,11 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "outlier_detection_event", + srcs = ["outlier_detection_event.proto"], + visibility = [ + "//visibility:public", + ], +) diff --git a/api/envoy/data/cluster/v3alpha/outlier_detection_event.proto b/api/envoy/data/cluster/v3alpha/outlier_detection_event.proto new file mode 100644 index 000000000000..48f0e27b86bf --- /dev/null +++ b/api/envoy/data/cluster/v3alpha/outlier_detection_event.proto @@ -0,0 +1,102 @@ +syntax = "proto3"; + +package envoy.data.cluster.v3alpha; + +option java_outer_classname = "OutlierDetectionEventProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.cluster.v3alpha"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Outlier detection logging events] +// :ref:`Outlier detection logging `. + +message OutlierDetectionEvent { + // In case of eject represents type of ejection that took place. + OutlierEjectionType type = 1 [(validate.rules).enum.defined_only = true]; + // Timestamp for event. + google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true]; + // The time in seconds since the last action (either an ejection or unejection) took place. + google.protobuf.UInt64Value secs_since_last_action = 3; + // The :ref:`cluster ` that owns the ejected host. + string cluster_name = 4 [(validate.rules).string.min_bytes = 1]; + // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. 
+ string upstream_url = 5 [(validate.rules).string.min_bytes = 1]; + // The action that took place. + Action action = 6 [(validate.rules).enum.defined_only = true]; + // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to + // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and + // then re-added). + uint32 num_ejections = 7; + // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was + // ejected. ``false`` means the event was logged but the host was not actually ejected. + bool enforced = 8; + + oneof event { + option (validate.required) = true; + OutlierEjectSuccessRate eject_success_rate_event = 9; + OutlierEjectConsecutive eject_consecutive_event = 10; + } +} + +// Type of ejection that took place +enum OutlierEjectionType { + // In case upstream host returns certain number of consecutive 5xx. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all type of errors are treated as HTTP 5xx errors. + // See :ref:`Cluster outlier detection ` documentation for + // details. + CONSECUTIVE_5XX = 0; + // In case upstream host returns certain number of consecutive gateway errors + CONSECUTIVE_GATEWAY_FAILURE = 1; + // Runs over aggregated success rate statistics from every host in cluster + // and selects hosts for which ratio of successful replies deviates from other hosts + // in the cluster. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all errors (externally and locally generated) are used to calculate success rate + // statistics. See :ref:`Cluster outlier detection ` + // documentation for details. + SUCCESS_RATE = 2; + // Consecutive local origin failures: Connection failures, resets, timeouts, etc + // This type of ejection happens only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is set to *true*. 
+ // See :ref:`Cluster outlier detection ` documentation for details. + CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; + // Runs over aggregated success rate statistics for local origin failures + // for all hosts in the cluster and selects hosts for which success rate deviates from other + // hosts in the cluster. This type of ejection happens only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is set to *true*. + // See :ref:`Cluster outlier detection ` documentation for details. + SUCCESS_RATE_LOCAL_ORIGIN = 4; +} + +// Represents possible action applied to upstream host +enum Action { + // In case host was excluded from service + EJECT = 0; + // In case host was brought back into service + UNEJECT = 1; +} + +message OutlierEjectSuccessRate { + // Host’s success rate at the time of the ejection event on a 0-100 range. + uint32 host_success_rate = 1 [(validate.rules).uint32.lte = 100]; + // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 + // range. + uint32 cluster_average_success_rate = 2 [(validate.rules).uint32.lte = 100]; + // Success rate ejection threshold at the time of the ejection event. 
+ uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32.lte = 100]; +} + +message OutlierEjectConsecutive { +} diff --git a/api/envoy/data/core/v3alpha/BUILD b/api/envoy/data/core/v3alpha/BUILD new file mode 100644 index 000000000000..9e82e3eb1731 --- /dev/null +++ b/api/envoy/data/core/v3alpha/BUILD @@ -0,0 +1,15 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "health_check_event", + srcs = ["health_check_event.proto"], + visibility = [ + "//visibility:public", + ], + deps = [ + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + ], +) diff --git a/api/envoy/data/core/v3alpha/health_check_event.proto b/api/envoy/data/core/v3alpha/health_check_event.proto new file mode 100644 index 000000000000..628b6870b64d --- /dev/null +++ b/api/envoy/data/core/v3alpha/health_check_event.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package envoy.data.core.v3alpha; + +option java_outer_classname = "HealthCheckEventProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.core.v3alpha"; + +import "envoy/api/v3alpha/core/address.proto"; + +import "google/protobuf/timestamp.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Health check logging events] +// :ref:`Health check logging `. + +message HealthCheckEvent { + HealthCheckerType health_checker_type = 1 [(validate.rules).enum.defined_only = true]; + envoy.api.v3alpha.core.Address host = 2; + string cluster_name = 3 [(validate.rules).string.min_bytes = 1]; + + oneof event { + option (validate.required) = true; + + // Host ejection. + HealthCheckEjectUnhealthy eject_unhealthy_event = 4; + + // Host addition. + HealthCheckAddHealthy add_healthy_event = 5; + + // Host failure. + HealthCheckFailure health_check_failure_event = 7; + + // Healthy host became degraded. 
+ DegradedHealthyHost degraded_healthy_host = 8; + + // A degraded host returned to being healthy. + NoLongerDegradedHost no_longer_degraded_host = 9; + } + + // Timestamp for event. + google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true]; +} + +enum HealthCheckFailureType { + ACTIVE = 0; + PASSIVE = 1; + NETWORK = 2; +} + +enum HealthCheckerType { + HTTP = 0; + TCP = 1; + GRPC = 2; + REDIS = 3; +} + +message HealthCheckEjectUnhealthy { + // The type of failure that caused this ejection. + HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; +} + +message HealthCheckAddHealthy { + // Whether this addition is the result of the first ever health check on a host, in which case + // the configured :ref:`healthy threshold ` + // is bypassed and the host is immediately added. + bool first_check = 1; +} + +message HealthCheckFailure { + // The type of failure that caused this event. + HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; + // Whether this event is the result of the first ever health check on a host. 
+ bool first_check = 2; +} + +message DegradedHealthyHost { +} + +message NoLongerDegradedHost { +} diff --git a/api/envoy/data/tap/v3alpha/BUILD b/api/envoy/data/tap/v3alpha/BUILD new file mode 100644 index 000000000000..ab9be74ce98a --- /dev/null +++ b/api/envoy/data/tap/v3alpha/BUILD @@ -0,0 +1,37 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "common", + srcs = ["common.proto"], +) + +api_proto_library_internal( + name = "transport", + srcs = ["transport.proto"], + deps = [ + ":common", + "//envoy/api/v3alpha/core:address", + ], +) + +api_proto_library_internal( + name = "http", + srcs = ["http.proto"], + deps = [ + ":common", + "//envoy/api/v3alpha/core:base", + ], +) + +api_proto_library_internal( + name = "wrapper", + srcs = ["wrapper.proto"], + visibility = ["//visibility:public"], + deps = [ + ":http", + ":transport", + "//envoy/api/v3alpha/core:address", + ], +) diff --git a/api/envoy/data/tap/v3alpha/common.proto b/api/envoy/data/tap/v3alpha/common.proto new file mode 100644 index 000000000000..21da336e7485 --- /dev/null +++ b/api/envoy/data/tap/v3alpha/common.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package envoy.data.tap.v3alpha; + +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.tap.v3alpha"; + +// [#protodoc-title: Tap common data] + +// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received +// and transmitted data, etc. +message Body { + oneof body_type { + // Body data as bytes. By default, tap body data will be present in this field, as the proto + // `bytes` type can contain any valid byte. + bytes as_bytes = 1; + + // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING + // ` sink + // format type is selected. See the documentation for that option for why this is useful. 
+ string as_string = 2; + } + + // Specifies whether body data has been truncated to fit within the specified + // :ref:`max_buffered_rx_bytes + // ` and + // :ref:`max_buffered_tx_bytes + // ` settings. + bool truncated = 3; +} diff --git a/api/envoy/data/tap/v3alpha/http.proto b/api/envoy/data/tap/v3alpha/http.proto new file mode 100644 index 000000000000..36d82f7a048c --- /dev/null +++ b/api/envoy/data/tap/v3alpha/http.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +package envoy.data.tap.v3alpha; + +option java_outer_classname = "HttpProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.tap.v3alpha"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/data/tap/v3alpha/common.proto"; + +// [#protodoc-title: HTTP tap data] + +// A fully buffered HTTP trace message. +message HttpBufferedTrace { + // HTTP message wrapper. + message Message { + // Message headers. + repeated api.v3alpha.core.HeaderValue headers = 1; + + // Message body. + Body body = 2; + + // Message trailers. + repeated api.v3alpha.core.HeaderValue trailers = 3; + } + + // Request message. + Message request = 1; + + // Response message. + Message response = 2; +} + +// A streamed HTTP trace segment. Multiple segments make up a full trace. +message HttpStreamedTraceSegment { + // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used + // for long term stable uniqueness. + uint64 trace_id = 1; + + oneof message_piece { + // Request headers. + api.v3alpha.core.HeaderMap request_headers = 2; + + // Request body chunk. + Body request_body_chunk = 3; + + // Request trailers. + api.v3alpha.core.HeaderMap request_trailers = 4; + + // Response headers. + api.v3alpha.core.HeaderMap response_headers = 5; + + // Response body chunk. + Body response_body_chunk = 6; + + // Response trailers. 
+ api.v3alpha.core.HeaderMap response_trailers = 7; + } +} diff --git a/api/envoy/data/tap/v3alpha/transport.proto b/api/envoy/data/tap/v3alpha/transport.proto new file mode 100644 index 000000000000..3dfb0c6478ba --- /dev/null +++ b/api/envoy/data/tap/v3alpha/transport.proto @@ -0,0 +1,97 @@ +syntax = "proto3"; + +// [#protodoc-title: Transport tap data] +// Trace format for the tap transport socket extension. This dumps plain text read/write +// sequences on a socket. + +package envoy.data.tap.v3alpha; + +option java_outer_classname = "TransportProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.tap.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/data/tap/v3alpha/common.proto"; + +import "google/protobuf/timestamp.proto"; + +// Connection properties. +message Connection { + // Local address. + envoy.api.v3alpha.core.Address local_address = 2; + + // Remote address. + envoy.api.v3alpha.core.Address remote_address = 3; +} + +// Event in a socket trace. +message SocketEvent { + // Timestamp for event. + google.protobuf.Timestamp timestamp = 1; + + // Data read by Envoy from the transport socket. + message Read { + // Binary data read. + Body data = 1; + + // TODO(htuch): Half-close for reads. + } + + // Data written by Envoy to the transport socket. + message Write { + // Binary data written. + Body data = 1; + + // Stream was half closed after this write. + bool end_stream = 2; + } + + // The connection was closed. + message Closed { + // TODO(mattklein123): Close event type. + } + + // Read or write with content as bytes string. + oneof event_selector { + Read read = 2; + Write write = 3; + Closed closed = 4; + } +} + +// Sequence of read/write events that constitute a buffered trace on a socket. +message SocketBufferedTrace { + // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used + // for long term stable uniqueness. 
Matches connection IDs used in Envoy logs. + uint64 trace_id = 1; + + // Connection properties. + Connection connection = 2; + + // Sequence of observed events. + repeated SocketEvent events = 3; + + // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes + // ` setting. + bool read_truncated = 4; + + // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes + // ` setting. + bool write_truncated = 5; +} + +// A streamed socket trace segment. Multiple segments make up a full trace. +message SocketStreamedTraceSegment { + // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used + // for long term stable uniqueness. Matches connection IDs used in Envoy logs. + uint64 trace_id = 1; + + oneof message_piece { + // Connection properties. + Connection connection = 2; + + // Socket event. + SocketEvent event = 3; + } +} diff --git a/api/envoy/data/tap/v3alpha/wrapper.proto b/api/envoy/data/tap/v3alpha/wrapper.proto new file mode 100644 index 000000000000..1aff052e90d1 --- /dev/null +++ b/api/envoy/data/tap/v3alpha/wrapper.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +import "envoy/data/tap/v3alpha/http.proto"; +import "envoy/data/tap/v3alpha/transport.proto"; + +import "validate/validate.proto"; + +package envoy.data.tap.v3alpha; + +option java_outer_classname = "WrapperProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.data.tap.v3alpha"; + +// [#protodoc-title: Tap data wrappers] + +// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for +// sending traces over gRPC APIs or more easily persisting binary messages to files. +message TraceWrapper { + oneof trace { + option (validate.required) = true; + + // An HTTP buffered tap trace. + HttpBufferedTrace http_buffered_trace = 1; + + // An HTTP streamed tap trace segment. 
+ HttpStreamedTraceSegment http_streamed_trace_segment = 2; + + // A socket buffered tap trace. + SocketBufferedTrace socket_buffered_trace = 3; + + // A socket streamed tap trace segment. + SocketStreamedTraceSegment socket_streamed_trace_segment = 4; + } +} diff --git a/api/envoy/service/accesslog/v3alpha/BUILD b/api/envoy/service/accesslog/v3alpha/BUILD new file mode 100644 index 000000000000..1a8eab975b56 --- /dev/null +++ b/api/envoy/service/accesslog/v3alpha/BUILD @@ -0,0 +1,23 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "als", + srcs = ["als.proto"], + has_services = 1, + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:grpc_service", + "//envoy/data/accesslog/v3alpha:accesslog", + ], +) + +api_go_grpc_library( + name = "als", + proto = ":als", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/data/accesslog/v3alpha:accesslog_go_proto", + ], +) diff --git a/api/envoy/service/accesslog/v3alpha/als.proto b/api/envoy/service/accesslog/v3alpha/als.proto new file mode 100644 index 000000000000..092d4d17696c --- /dev/null +++ b/api/envoy/service/accesslog/v3alpha/als.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.service.accesslog.v3alpha; + +option java_outer_classname = "AlsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.accesslog.v3alpha"; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/data/accesslog/v3alpha/accesslog.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: gRPC Access Log Service (ALS)] + +// Service for streaming access logs from Envoy to an access log server. +service AccessLogService { + // Envoy will connect and send StreamAccessLogsMessage messages forever. 
It does not expect any + // response to be sent as nothing would be done in the case of failure. The server should + // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different + // API for "critical" access logs in which Envoy will buffer access logs for some period of time + // until it gets an ACK so it could then retry. This API is designed for high throughput with the + // expectation that it might be lossy. + rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { + } +} + +// Empty response for the StreamAccessLogs API. Will never be sent. See below. +message StreamAccessLogsResponse { +} + +// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream +// access logs without ever expecting a response. +message StreamAccessLogsMessage { + message Identifier { + // The node sending the access log messages over the stream. + envoy.api.v3alpha.core.Node node = 1 [(validate.rules).message.required = true]; + + // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig + // `. + string log_name = 2 [(validate.rules).string.min_bytes = 1]; + } + + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + + // Wrapper for batches of HTTP access log entries. + message HTTPAccessLogEntries { + repeated envoy.data.accesslog.v3alpha.HTTPAccessLogEntry log_entry = 1 + [(validate.rules).repeated .min_items = 1]; + } + + // Wrapper for batches of TCP access log entries. + message TCPAccessLogEntries { + repeated envoy.data.accesslog.v3alpha.TCPAccessLogEntry log_entry = 1 + [(validate.rules).repeated .min_items = 1]; + } + + // Batches of log entries of a single type. Generally speaking, a given stream should only + // ever include one type of log entry. 
+ oneof log_entries { + option (validate.required) = true; + + HTTPAccessLogEntries http_logs = 2; + + TCPAccessLogEntries tcp_logs = 3; + } +} diff --git a/api/envoy/service/auth/v3alpha/BUILD b/api/envoy/service/auth/v3alpha/BUILD new file mode 100644 index 000000000000..6a335f88f949 --- /dev/null +++ b/api/envoy/service/auth/v3alpha/BUILD @@ -0,0 +1,28 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "attribute_context", + srcs = [ + "attribute_context.proto", + ], + deps = [ + "//envoy/api/v3alpha/core:address", + "//envoy/api/v3alpha/core:base", + ], +) + +api_proto_library_internal( + name = "external_auth", + srcs = [ + "external_auth.proto", + ], + has_services = 1, + visibility = ["//visibility:public"], + deps = [ + ":attribute_context", + "//envoy/api/v3alpha/core:base", + "//envoy/type:http_status", + ], +) diff --git a/api/envoy/service/auth/v3alpha/attribute_context.proto b/api/envoy/service/auth/v3alpha/attribute_context.proto new file mode 100644 index 000000000000..de3164583167 --- /dev/null +++ b/api/envoy/service/auth/v3alpha/attribute_context.proto @@ -0,0 +1,154 @@ +syntax = "proto3"; + +package envoy.service.auth.v3alpha; + +option java_outer_classname = "AttributeContextProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.auth.v3alpha"; + +import "envoy/api/v3alpha/core/address.proto"; +import "envoy/api/v3alpha/core/base.proto"; + +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.stable_marshaler_all) = true; + +// [#protodoc-title: Attribute Context ] + +// See :ref:`network filter configuration overview ` +// and :ref:`HTTP filter configuration overview `. + +// An attribute is a piece of metadata that describes an activity on a network. +// For example, the size of an HTTP request, or the status code of an HTTP response. 
+// +// Each attribute has a type and a name, which is logically defined as a proto message field +// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes +// supported by Envoy authorization system. +message AttributeContext { + // This message defines attributes for a node that handles a network request. + // The node can be either a service or an application that sends, forwards, + // or receives the request. Service peers should fill in the `service`, + // `principal`, and `labels` as appropriate. + message Peer { + // The address of the peer, this is typically the IP address. + // It can also be UDS path, or others. + envoy.api.v3alpha.core.Address address = 1; + + // The canonical service name of the peer. + // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster + // ` + // If a more trusted source of the service name is available through mTLS/secure naming, it + // should be used. + string service = 2; + + // The labels associated with the peer. + // These could be pod labels for Kubernetes or tags for VMs. + // The source of the labels could be an X.509 certificate or other configuration. + map labels = 3; + + // The authenticated identity of this peer. + // For example, the identity associated with the workload such as a service account. + // If an X.509 certificate is used to assert the identity this field should be sourced from + // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. + // The primary identity should be the principal. The principal format is issuer specific. + // + // Example: + // * SPIFFE format is `spiffe://trust-domain/path` + // * Google account format is `https://accounts.google.com/{userid}` + string principal = 4; + } + + // Represents a network request, such as an HTTP request. + message Request { + // The timestamp when the proxy receives the first byte of the request. 
+ google.protobuf.Timestamp time = 1; + + // Represents an HTTP request or an HTTP-like request. + HttpRequest http = 2; + + // More request types are added here as necessary. + } + + // This message defines attributes for an HTTP request. + // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. + message HttpRequest { + // The unique ID for a request, which can be propagated to downstream + // systems. The ID should have low probability of collision + // within a single day for a specific service. + // For HTTP requests, it should be X-Request-ID or equivalent. + string id = 1; + + // The HTTP request method, such as `GET`, `POST`. + string method = 2; + + // The HTTP request headers. If multiple headers share the same key, they + // must be merged according to the HTTP spec. All header keys must be + // lowercased, because HTTP header keys are case-insensitive. + map headers = 3; + + // The request target, as it appears in the first line of the HTTP request. This includes + // the URL path and query-string. No decoding is performed. + string path = 4; + + // The HTTP request `Host` or 'Authority` header value. + string host = 5; + + // The HTTP URL scheme, such as `http` and `https`. + string scheme = 6; + + // This field is always empty, and exists for compatibility reasons. The HTTP URL query is + // included in `path` field. + string query = 7; + + // This field is always empty, and exists for compatibility reasons. The URL fragment is + // not submitted as part of HTTP requests; it is unknowable. + string fragment = 8; + + // The HTTP request size in bytes. If unknown, it must be -1. + int64 size = 9; + + // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". + // + // See :repo:`headers.h:ProtocolStrings ` for a list of all + // possible values. + string protocol = 10; + + // The HTTP request body. + string body = 11; + } + + // The source of a network activity, such as starting a TCP connection. 
+ // In a multi hop network activity, the source represents the sender of the + // last hop. + Peer source = 1; + + // The destination of a network activity, such as accepting a TCP connection. + // In a multi hop network activity, the destination represents the receiver of + // the last hop. + Peer destination = 2; + + // Represents a network request, such as an HTTP request. + Request request = 4; + + // This is analogous to http_request.headers, however these contents will not be sent to the + // upstream server. Context_extensions provide an extension mechanism for sending additional + // information to the auth server without modifying the proto definition. It maps to the + // internal opaque context in the filter chain. + map context_extensions = 10; + + // Dynamic metadata associated with the request. + envoy.api.v3alpha.core.Metadata metadata_context = 11; +} + +// The following items are left out of this proto +// Request.Auth field for jwt tokens +// Request.Api for api management +// Origin peer that originated the request +// Caching Protocol +// request_context return values to inject back into the filter chain +// peer.claims -- from X.509 extensions +// Configuration +// - field mask to send +// - which return values from request_context are copied back +// - which return values are copied into request_headers diff --git a/api/envoy/service/auth/v3alpha/external_auth.proto b/api/envoy/service/auth/v3alpha/external_auth.proto new file mode 100644 index 000000000000..4b7e459a4436 --- /dev/null +++ b/api/envoy/service/auth/v3alpha/external_auth.proto @@ -0,0 +1,77 @@ +syntax = "proto3"; + +package envoy.service.auth.v3alpha; + +option java_outer_classname = "ExternalAuthProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.auth.v3alpha"; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/type/http_status.proto"; +import 
"envoy/service/auth/v3alpha/attribute_context.proto"; + +import "google/rpc/status.proto"; +import "validate/validate.proto"; + +// [#protodoc-title: Authorization Service ] + +// The authorization service request messages used by external authorization :ref:`network filter +// ` and :ref:`HTTP filter `. + +// A generic interface for performing authorization check on incoming +// requests to a networked service. +service Authorization { + // Performs authorization check based on the attributes associated with the + // incoming request, and returns status `OK` or not `OK`. + rpc Check(CheckRequest) returns (CheckResponse); +} + +message CheckRequest { + // The request attributes. + AttributeContext attributes = 1; +} + +// HTTP attributes for a denied response. +message DeniedHttpResponse { + // This field allows the authorization service to send a HTTP response status + // code to the downstream client other than 403 (Forbidden). + envoy.type.HttpStatus status = 1 [(validate.rules).message.required = true]; + + // This field allows the authorization service to send HTTP response headers + // to the downstream client. + repeated envoy.api.v3alpha.core.HeaderValueOption headers = 2; + + // This field allows the authorization service to send a response body data + // to the downstream client. + string body = 3; +} + +// HTTP attributes for an ok response. +message OkHttpResponse { + // HTTP entity headers in addition to the original request headers. This allows the authorization + // service to append, to add or to override headers from the original request before + // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`, + // the filter will append the correspondent header value to the matched request header. Note that + // by Leaving `append` as false, the filter will either add a new header, or override an existing + // one if there is a match. 
+  repeated envoy.api.v3alpha.core.HeaderValueOption headers = 2;
+}
+
+// Intended for gRPC and Network Authorization servers `only`.
+message CheckResponse {
+  // Status `OK` allows the request. Any other status indicates the request should be denied.
+  google.rpc.Status status = 1;
+
+  // A message that contains HTTP response attributes. This message is
+  // used when the authorization service needs to send custom responses to the
+  // downstream client or, to modify/add request headers being dispatched to the upstream.
+  oneof http_response {
+    // Supplies http attributes for a denied response.
+    DeniedHttpResponse denied_response = 2;
+
+    // Supplies http attributes for an ok response.
+    OkHttpResponse ok_response = 3;
+  }
+}
diff --git a/api/envoy/service/discovery/v3alpha/BUILD b/api/envoy/service/discovery/v3alpha/BUILD
new file mode 100644
index 000000000000..d34955c1cb5a
--- /dev/null
+++ b/api/envoy/service/discovery/v3alpha/BUILD
@@ -0,0 +1,75 @@
+load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
+
+licenses(["notice"])  # Apache 2
+
+api_proto_library_internal(
+    name = "ads",
+    srcs = ["ads.proto"],
+    has_services = 1,
+    deps = [
+        "//envoy/api/v3alpha:discovery",
+    ],
+)
+
+api_go_grpc_library(
+    name = "ads",
+    proto = ":ads",
+    deps = [
+        "//envoy/api/v3alpha:discovery_go_proto",
+    ],
+)
+
+api_proto_library_internal(
+    name = "hds",
+    srcs = ["hds.proto"],
+    has_services = 1,
+    deps = [
+        "//envoy/api/v3alpha/core:base",
+        "//envoy/api/v3alpha/core:health_check",
+        "//envoy/api/v3alpha/endpoint",
+    ],
+)
+
+api_go_grpc_library(
+    name = "hds",
+    proto = ":hds",
+    deps = [
+        "//envoy/api/v3alpha/core:base_go_proto",
+        "//envoy/api/v3alpha/core:health_check_go_proto",
+        "//envoy/api/v3alpha/endpoint:endpoint_go_proto",
+    ],
+)
+
+api_proto_library_internal(
+    name = "sds",
+    srcs = ["sds.proto"],
+    has_services = 1,
+    deps = [
+        "//envoy/api/v3alpha:discovery",
+    ],
+)
+
+api_go_grpc_library( + name = "sds", + proto = ":sds", + deps = [ + "//envoy/api/v3alpha:discovery_go_proto", + ], +) + +api_proto_library_internal( + name = "rtds", + srcs = ["rtds.proto"], + has_services = 1, + deps = [ + "//envoy/api/v3alpha:discovery", + ], +) + +api_go_grpc_library( + name = "rtds", + proto = ":rtds", + deps = [ + "//envoy/api/v3alpha:discovery_go_proto", + ], +) diff --git a/api/envoy/service/discovery/v3alpha/ads.proto b/api/envoy/service/discovery/v3alpha/ads.proto new file mode 100644 index 000000000000..d6b7897ba7a1 --- /dev/null +++ b/api/envoy/service/discovery/v3alpha/ads.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.service.discovery.v3alpha; + +option java_outer_classname = "AdsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha"; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v3alpha/discovery.proto"; + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message AdsDummy { +} + +// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, +// and listeners are retained in the package `envoy.api.v3alpha` for backwards +// compatibility with existing management servers. New development in discovery +// services should proceed in the package `envoy.service.discovery.v3alpha`. + +// See https://github.com/lyft/envoy-api#apis for a description of the role of +// ADS and how it is intended to be used by a management server. ADS requests +// have the same structure as their singleton xDS counterparts, but can +// multiplex many resource types on a single stream. The type_url in the +// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover +// the multiplexed singleton APIs at the Envoy instance and management server. 
+service AggregatedDiscoveryService {
+  // This is a gRPC-only API.
+  rpc StreamAggregatedResources(stream envoy.api.v3alpha.DiscoveryRequest)
+      returns (stream envoy.api.v3alpha.DiscoveryResponse) {
+  }
+
+  rpc DeltaAggregatedResources(stream envoy.api.v3alpha.DeltaDiscoveryRequest)
+      returns (stream envoy.api.v3alpha.DeltaDiscoveryResponse) {
+  }
+}
diff --git a/api/envoy/service/discovery/v3alpha/hds.proto b/api/envoy/service/discovery/v3alpha/hds.proto
new file mode 100644
index 000000000000..5ec7f491c8f2
--- /dev/null
+++ b/api/envoy/service/discovery/v3alpha/hds.proto
@@ -0,0 +1,127 @@
+syntax = "proto3";
+
+package envoy.service.discovery.v3alpha;
+
+option java_outer_classname = "HdsProto";
+option java_multiple_files = true;
+option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
+
+option java_generic_services = true;
+
+import "envoy/api/v3alpha/core/base.proto";
+import "envoy/api/v3alpha/core/health_check.proto";
+import "envoy/api/v3alpha/endpoint/endpoint.proto";
+
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+
+// [#proto-status: experimental]
+// HDS is Health Discovery Service. It complements Envoy’s health checking
+// service by designating this Envoy to be a healthchecker for a subset of hosts
+// in the cluster. The status of these health checks will be reported to the
+// management server, where it can be aggregated etc and redistributed back to
+// Envoy through EDS.
+service HealthDiscoveryService {
+  // 1. Envoy starts up and if its can_healthcheck option in the static
+  //    bootstrap config is enabled, sends HealthCheckRequest to the management
+  //    server. It supplies its capabilities (which protocol it can health check
+  //    with, what zone it resides in, etc.).
+  // 2. In response to (1), the management server designates this Envoy as a
+  //    healthchecker to health check a subset of all upstream hosts for a given
+  //    cluster (for example upstream Host 1 and Host 2).
It streams + // HealthCheckSpecifier messages with cluster related configuration for all + // clusters this Envoy is designated to health check. Subsequent + // HealthCheckSpecifier message will be sent on changes to: + // a. Endpoints to health checks + // b. Per cluster configuration change + // 3. Envoy creates a health probe based on the HealthCheck config and sends + // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck + // configuration Envoy waits upon the arrival of the probe response and + // looks at the content of the response to decide whether the endpoint is + // healthy or not. If a response hasn't been received within the timeout + // interval, the endpoint health status is considered TIMEOUT. + // 4. Envoy reports results back in an EndpointHealthResponse message. + // Envoy streams responses as often as the interval configured by the + // management server in HealthCheckSpecifier. + // 5. The management Server collects health statuses for all endpoints in the + // cluster (for all clusters) and uses this information to construct + // EndpointDiscoveryResponse messages. + // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load + // balances traffic to them without additional health checking. It may + // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection + // failed to a particular endpoint to account for health status propagation + // delay between HDS and EDS). + // By default, can_healthcheck is true. If can_healthcheck is false, Cluster + // configuration may not contain HealthCheck message. + // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above + // invariant? + // TODO(htuch): Add @amb67's diagram. + rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) + returns (stream HealthCheckSpecifier) { + } + + // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of + // request/response. 
Should we add an identifier to the HealthCheckSpecifier + // to bind with the response? + rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { + option (google.api.http) = { + post: "/v2/discovery:health_check" + body: "*" + }; + } +} + +// Defines supported protocols etc, so the management server can assign proper +// endpoints to healthcheck. +message Capability { + // Different Envoy instances may have different capabilities (e.g. Redis) + // and/or have ports enabled for different protocols. + enum Protocol { + HTTP = 0; + TCP = 1; + REDIS = 2; + } + repeated Protocol health_check_protocols = 1; +} + +message HealthCheckRequest { + envoy.api.v3alpha.core.Node node = 1; + Capability capability = 2; +} + +message EndpointHealth { + envoy.api.v3alpha.endpoint.Endpoint endpoint = 1; + envoy.api.v3alpha.core.HealthStatus health_status = 2; +} + +message EndpointHealthResponse { + repeated EndpointHealth endpoints_health = 1; +} + +message HealthCheckRequestOrEndpointHealthResponse { + oneof request_type { + HealthCheckRequest health_check_request = 1; + EndpointHealthResponse endpoint_health_response = 2; + } +} + +message LocalityEndpoints { + envoy.api.v3alpha.core.Locality locality = 1; + repeated envoy.api.v3alpha.endpoint.Endpoint endpoints = 2; +} + +// The cluster name and locality is provided to Envoy for the endpoints that it +// health checks to support statistics reporting, logging and debugging by the +// Envoy instance (outside of HDS). For maximum usefulness, it should match the +// same cluster structure as that provided by EDS. +message ClusterHealthCheck { + string cluster_name = 1; + repeated envoy.api.v3alpha.core.HealthCheck health_checks = 2; + repeated LocalityEndpoints locality_endpoints = 3; +} + +message HealthCheckSpecifier { + repeated ClusterHealthCheck cluster_health_checks = 1; + // The default is 1 second. 
+ google.protobuf.Duration interval = 2; +} diff --git a/api/envoy/service/discovery/v3alpha/rtds.proto b/api/envoy/service/discovery/v3alpha/rtds.proto new file mode 100644 index 000000000000..5a59cf13f814 --- /dev/null +++ b/api/envoy/service/discovery/v3alpha/rtds.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package envoy.service.discovery.v3alpha; + +option java_outer_classname = "RtdsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha"; +option java_generic_services = true; + +import "envoy/api/v3alpha/discovery.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Runtime Discovery Service (RTDS)] +// RTDS :ref:`configuration overview ` + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message RtdsDummy { +} + +// Discovery service for Runtime resources. +service RuntimeDiscoveryService { + rpc StreamRuntime(stream envoy.api.v3alpha.DiscoveryRequest) + returns (stream envoy.api.v3alpha.DiscoveryResponse) { + } + + rpc DeltaRuntime(stream envoy.api.v3alpha.DeltaDiscoveryRequest) + returns (stream envoy.api.v3alpha.DeltaDiscoveryResponse) { + } + + rpc FetchRuntime(envoy.api.v3alpha.DiscoveryRequest) + returns (envoy.api.v3alpha.DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:runtime" + body: "*" + }; + } +} + +// RTDS resource type. This describes a layer in the runtime virtual filesystem. +message Runtime { + // Runtime resource name. This makes the Runtime a self-describing xDS + // resource. 
+ string name = 1 [(validate.rules).string.min_bytes = 1]; + google.protobuf.Struct layer = 2; +} diff --git a/api/envoy/service/discovery/v3alpha/sds.proto b/api/envoy/service/discovery/v3alpha/sds.proto new file mode 100644 index 000000000000..9f8aa92befa2 --- /dev/null +++ b/api/envoy/service/discovery/v3alpha/sds.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package envoy.service.discovery.v3alpha; + +option java_outer_classname = "SdsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha"; + +import "envoy/api/v3alpha/discovery.proto"; + +import "google/api/annotations.proto"; + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message SdsDummy { +} + +service SecretDiscoveryService { + rpc DeltaSecrets(stream envoy.api.v3alpha.DeltaDiscoveryRequest) + returns (stream envoy.api.v3alpha.DeltaDiscoveryResponse) { + } + + rpc StreamSecrets(stream envoy.api.v3alpha.DiscoveryRequest) + returns (stream envoy.api.v3alpha.DiscoveryResponse) { + } + + rpc FetchSecrets(envoy.api.v3alpha.DiscoveryRequest) + returns (envoy.api.v3alpha.DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:secrets" + body: "*" + }; + } +} diff --git a/api/envoy/service/load_stats/v3alpha/BUILD b/api/envoy/service/load_stats/v3alpha/BUILD new file mode 100644 index 000000000000..42c7ce8438da --- /dev/null +++ b/api/envoy/service/load_stats/v3alpha/BUILD @@ -0,0 +1,22 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "lrs", + srcs = ["lrs.proto"], + has_services = 1, + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/endpoint:load_report", + ], +) + +api_go_grpc_library( + name = "lrs", + proto = ":lrs", + deps = [ + 
"//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/endpoint:load_report_go_proto", + ], +) diff --git a/api/envoy/service/load_stats/v3alpha/lrs.proto b/api/envoy/service/load_stats/v3alpha/lrs.proto new file mode 100644 index 000000000000..81058ed574a7 --- /dev/null +++ b/api/envoy/service/load_stats/v3alpha/lrs.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +package envoy.service.load_stats.v3alpha; + +option java_outer_classname = "LrsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.load_stats.v3alpha"; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/endpoint/load_report.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Load reporting service] + +service LoadReportingService { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. 
Envoy aggregates load reports over the period of time given to it in
+  //    LoadStatsResponse.load_reporting_interval. This includes aggregation
+  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as
+  //    well as load metrics from upstream hosts.
+  // 5. When the timer of load_reporting_interval expires, Envoy sends new
+  //    LoadStatsRequest filled with load reports for each cluster.
+  // 6. The management server uses the load reports from all reported Envoys
+  //    from around the world, computes global assignment and prepares traffic
+  //    assignment destined for each zone Envoys are located in. Goto 2.
+  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {
+  }
+}
+
+// A load report Envoy sends to the management server.
+// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
+message LoadStatsRequest {
+  // Node identifier for Envoy instance.
+  envoy.api.v3alpha.core.Node node = 1;
+
+  // A list of load stats to report.
+  repeated envoy.api.v3alpha.endpoint.ClusterStats cluster_stats = 2;
+}
+
+// The management server sends Envoy a LoadStatsResponse with all clusters it
+// is interested in learning load stats about.
+// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
+message LoadStatsResponse {
+  // Clusters to report stats for.
+  repeated string clusters = 1 [(validate.rules).repeated .min_items = 1];
+
+  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:
+  // 1. There may be some delay from when the timer fires until stats sampling occurs.
+  // 2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic
+  //    that is observed in between the corresponding previous *LoadStatsRequest* and this
+  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period
+  //    of inobservability that might otherwise exist between the messages.
New clusters are not + // subject to this consideration. + google.protobuf.Duration load_reporting_interval = 2; + + // Set to *true* if the management server supports endpoint granularity + // report. + bool report_endpoint_granularity = 3; +} diff --git a/api/envoy/service/metrics/v3alpha/BUILD b/api/envoy/service/metrics/v3alpha/BUILD new file mode 100644 index 000000000000..1f1bb553cd82 --- /dev/null +++ b/api/envoy/service/metrics/v3alpha/BUILD @@ -0,0 +1,24 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "metrics_service", + srcs = ["metrics_service.proto"], + has_services = 1, + require_py = 0, + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:grpc_service", + "@prometheus_metrics_model//:client_model", + ], +) + +api_go_grpc_library( + name = "metrics_service", + proto = ":metrics_service", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "@prometheus_metrics_model//:client_model_go_proto", + ], +) diff --git a/api/envoy/service/metrics/v3alpha/metrics_service.proto b/api/envoy/service/metrics/v3alpha/metrics_service.proto new file mode 100644 index 000000000000..9a5306553b18 --- /dev/null +++ b/api/envoy/service/metrics/v3alpha/metrics_service.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package envoy.service.metrics.v3alpha; + +option java_outer_classname = "MetricsServiceProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.metrics.v3alpha"; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v3alpha/core/base.proto"; + +import "metrics.proto"; + +import "validate/validate.proto"; + +// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric +// data model as a standard to represent metrics information. 
+service MetricsService { + // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any + // response to be sent as nothing would be done in the case of failure. + rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { + } +} + +message StreamMetricsResponse { +} + +message StreamMetricsMessage { + message Identifier { + // The node sending metrics over the stream. + envoy.api.v3alpha.core.Node node = 1 [(validate.rules).message.required = true]; + } + + // Identifier data effectively is a structured metadata. As a performance optimization this will + // only be sent in the first message on the stream. + Identifier identifier = 1; + + // A list of metric entries + repeated io.prometheus.client.MetricFamily envoy_metrics = 2; +} diff --git a/api/envoy/service/ratelimit/v3alpha/BUILD b/api/envoy/service/ratelimit/v3alpha/BUILD new file mode 100644 index 000000000000..19954c5bfcc9 --- /dev/null +++ b/api/envoy/service/ratelimit/v3alpha/BUILD @@ -0,0 +1,24 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "rls", + srcs = ["rls.proto"], + has_services = 1, + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:grpc_service", + "//envoy/api/v3alpha/ratelimit", + ], +) + +api_go_grpc_library( + name = "rls", + proto = ":rls", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "//envoy/api/v3alpha/core:grpc_service_go_proto", + "//envoy/api/v3alpha/ratelimit:ratelimit_go_proto", + ], +) diff --git a/api/envoy/service/ratelimit/v3alpha/rls.proto b/api/envoy/service/ratelimit/v3alpha/rls.proto new file mode 100644 index 000000000000..7bbd2e3ec183 --- /dev/null +++ b/api/envoy/service/ratelimit/v3alpha/rls.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; + +package envoy.service.ratelimit.v3alpha; + +option java_outer_classname = "RlsProto"; +option java_multiple_files = 
true; +option java_package = "io.envoyproxy.envoy.service.ratelimit.v3alpha"; +option go_package = "v2"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/ratelimit/ratelimit.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Rate Limit Service (RLS)] + +service RateLimitService { + // Determine whether rate limiting should take place. + rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { + } +} + +// Main message for a rate limit request. The rate limit service is designed to be fully generic +// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded +// configuration will parse the request and find the most specific limit to apply. In addition, +// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors +// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any +// of them are over limit. This enables more complex application level rate limiting scenarios +// if desired. +message RateLimitRequest { + // All rate limit requests must specify a domain. This enables the configuration to be per + // application without fear of overlap. E.g., "envoy". + string domain = 1; + + // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is + // processed by the service (see below). If any of the descriptors are over limit, the entire + // request is considered to be over limit. + repeated envoy.api.v3alpha.ratelimit.RateLimitDescriptor descriptors = 2; + + // Rate limit requests can optionally specify the number of hits a request adds to the matched + // limit. If the value is not set in the message, a request increases the matched limit by 1. + uint32 hits_addend = 3; +} + +// A response from a ShouldRateLimit call. +message RateLimitResponse { + enum Code { + // The response code is not known. 
+ UNKNOWN = 0; + // The response code to notify that the number of requests are under limit. + OK = 1; + // The response code to notify that the number of requests are over limit. + OVER_LIMIT = 2; + } + + // Defines an actual rate limit in terms of requests per unit of time and the unit itself. + message RateLimit { + enum Unit { + // The time unit is not known. + UNKNOWN = 0; + // The time unit representing a second. + SECOND = 1; + // The time unit representing a minute. + MINUTE = 2; + // The time unit representing an hour. + HOUR = 3; + // The time unit representing a day. + DAY = 4; + } + + // The number of requests per unit of time. + uint32 requests_per_unit = 1; + // The unit of time. + Unit unit = 2; + } + + message DescriptorStatus { + // The response code for an individual descriptor. + Code code = 1; + // The current limit as configured by the server. Useful for debugging, etc. + RateLimit current_limit = 2; + // The limit remaining in the current time unit. + uint32 limit_remaining = 3; + } + + // The overall response code which takes into account all of the descriptors that were passed + // in the RateLimitRequest message. + Code overall_code = 1; + // A list of DescriptorStatus messages which matches the length of the descriptor list passed + // in the RateLimitRequest. This can be used by the caller to determine which individual + // descriptors failed and/or what the currently configured limits are for all of them. 
+ repeated DescriptorStatus statuses = 2; + // A list of headers to add to the response + repeated envoy.api.v3alpha.core.HeaderValue headers = 3; +} diff --git a/api/envoy/service/tap/v3alpha/BUILD b/api/envoy/service/tap/v3alpha/BUILD new file mode 100644 index 000000000000..a90b2d819297 --- /dev/null +++ b/api/envoy/service/tap/v3alpha/BUILD @@ -0,0 +1,36 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "common", + srcs = ["common.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/api/v3alpha/core:grpc_service", + "//envoy/api/v3alpha/route", + ], +) + +api_proto_library_internal( + name = "tap", + srcs = ["tap.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha:discovery", + "//envoy/api/v3alpha/core:base", + "//envoy/data/tap/v3alpha:wrapper", + ], +) + +api_proto_library_internal( + name = "tapds", + srcs = ["tapds.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v3alpha:discovery", + "//envoy/api/v3alpha/core:base", + "//envoy/service/tap/v3alpha:common", + ], +) diff --git a/api/envoy/service/tap/v3alpha/common.proto b/api/envoy/service/tap/v3alpha/common.proto new file mode 100644 index 000000000000..7c375d913d7a --- /dev/null +++ b/api/envoy/service/tap/v3alpha/common.proto @@ -0,0 +1,200 @@ +syntax = "proto3"; + +import "envoy/api/v3alpha/route/route.proto"; +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/api/v3alpha/core/grpc_service.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +package envoy.service.tap.v3alpha; + +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.tap.v3alpha"; + +// [#protodoc-title: Common tap configuration] + +// Tap configuration. 
+message TapConfig { + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + MatchPredicate match_config = 1 [(validate.rules).message.required = true]; + + // The tap output configuration. If a match configuration matches a data source being tapped, + // a tap will occur and the data will be written to the configured output. + OutputConfig output_config = 2 [(validate.rules).message.required = true]; + + // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for + // which the tap matching is enabled. When not enabled, the request\connection will not be + // recorded. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + envoy.api.v3alpha.core.RuntimeFractionalPercent tap_enabled = 3; + + // [#comment:TODO(mattklein123): Rate limiting] +} + +// Tap match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +message MatchPredicate { + // A set of match configurations used for logical operations. + message MatchSet { + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated .min_items = 2]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool.const = true]; + + // HTTP request headers match configuration. 
+ HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + // HTTP headers to match. + repeated api.v3alpha.route.HeaderMatcher headers = 1; +} + +// Tap output configuration. +message OutputConfig { + // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple + // sink types are supported this constraint will be relaxed. + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1, max_items: 1}]; + + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_rx_bytes = 2; + + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_tx_bytes = 3; + + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + bool streaming = 4; +} + +// Tap output sink configuration. +message OutputSink { + // Output format. All output is in the form of one or more :ref:`TraceWrapper + // ` messages. 
This enumeration indicates + // how those messages are written. Note that not all sinks support all output formats. See + // individual sink documentation for more information. + enum Format { + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + JSON_BODY_AS_BYTES = 0; + + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + JSON_BODY_AS_STRING = 1; + + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + PROTO_BINARY = 2; + + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + PROTO_BINARY_LENGTH_DELIMITED = 3; + + // Text proto format. + PROTO_TEXT = 4; + } + + // Sink output format. + Format format = 1 [(validate.rules).enum.defined_only = true]; + + oneof output_sink_type { + option (validate.required) = true; + + // Tap output will be streamed out the :http:post:`/tap` admin endpoint. + // + // .. 
attention:: + // + // It is only allowed to specify the streaming admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the streaming admin output type will fail. + StreamingAdminSink streaming_admin = 2; + + // Tap output will be written to a file per tap sink. + FilePerTapSink file_per_tap = 3; + + // [#not-implemented-hide:] + // GrpcService to stream data to. The format argument must be PROTO_BINARY. + StreamingGrpcSink streaming_grpc = 4; + } +} + +// Streaming admin sink configuration. +message StreamingAdminSink { +} + +// The file per tap sink outputs a discrete file for every tapped stream. +message FilePerTapSink { + // Path prefix. The output file will be of the form _.pb, where is an + // identifier distinguishing the recorded trace for stream instances (the Envoy + // connection ID, HTTP stream ID, etc.). + string path_prefix = 1 [(validate.rules).string.min_bytes = 1]; +} + +// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC +// server. +message StreamingGrpcSink { + // Opaque identifier, that will be sent back to the streaming grpc server. + string tap_id = 1; + + // The gRPC server that hosts the Tap Sink Service. 
+ envoy.api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/service/tap/v3alpha/tap.proto b/api/envoy/service/tap/v3alpha/tap.proto new file mode 100644 index 000000000000..1e69d421915c --- /dev/null +++ b/api/envoy/service/tap/v3alpha/tap.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +import "envoy/api/v3alpha/core/base.proto"; +import "envoy/data/tap/v3alpha/wrapper.proto"; + +package envoy.service.tap.v3alpha; + +import "validate/validate.proto"; + +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.tap.v3alpha"; + +// [#protodoc-title: Tap Sink Service] + +// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server +// and stream taps without ever expecting a response. +message StreamTapsRequest { + message Identifier { + // The node sending taps over the stream. + envoy.api.v3alpha.core.Node node = 1 [(validate.rules).message.required = true]; + // The opaque identifier that was set in the :ref:`output config + // `. + string tap_id = 2; + } + + // Identifier data effectively is a structured metadata. As a performance optimization this will + // only be sent in the first message on the stream. + Identifier identifier = 1; + // The trace id. this can be used to merge together a streaming trace. Note that the trace_id + // is not guaranteed to be spatially or temporally unique. + uint64 trace_id = 2; + // The trace data. + envoy.data.tap.v3alpha.TraceWrapper trace = 3; +} + +// [#not-implemented-hide:] +message StreamTapsResponse { +} + +// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call +// StreamTaps to deliver captured taps to the server +service TapSinkService { + + // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any + // response to be sent as nothing would be done in the case of failure. 
The server should + // disconnect if it expects Envoy to reconnect. + rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { + } +} \ No newline at end of file diff --git a/api/envoy/service/tap/v3alpha/tapds.proto b/api/envoy/service/tap/v3alpha/tapds.proto new file mode 100644 index 000000000000..542d88ed2285 --- /dev/null +++ b/api/envoy/service/tap/v3alpha/tapds.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +import "envoy/api/v3alpha/discovery.proto"; +import "envoy/service/tap/v3alpha/common.proto"; +import "validate/validate.proto"; + +package envoy.service.tap.v3alpha; + +import "google/api/annotations.proto"; + +option java_outer_classname = "TapDsProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.tap.v3alpha"; + +// [#protodoc-title: Tap discovery service] + +// [#not-implemented-hide:] Tap discovery service. +service TapDiscoveryService { + rpc StreamTapConfigs(stream envoy.api.v3alpha.DiscoveryRequest) + returns (stream envoy.api.v3alpha.DiscoveryResponse) { + } + + rpc DeltaTapConfigs(stream envoy.api.v3alpha.DeltaDiscoveryRequest) + returns (stream envoy.api.v3alpha.DeltaDiscoveryResponse) { + } + + rpc FetchTapConfigs(envoy.api.v3alpha.DiscoveryRequest) + returns (envoy.api.v3alpha.DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:tap_configs" + body: "*" + }; + } +} + +// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name +// The filter TapDS config references this name. +message TapResource { + // The name of the tap configuration. 
+ string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Tap config to apply + TapConfig config = 2; +} \ No newline at end of file diff --git a/api/envoy/service/trace/v3alpha/BUILD b/api/envoy/service/trace/v3alpha/BUILD new file mode 100644 index 000000000000..815d0c4c93cc --- /dev/null +++ b/api/envoy/service/trace/v3alpha/BUILD @@ -0,0 +1,23 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "trace_service", + srcs = ["trace_service.proto"], + has_services = 1, + require_py = 0, + deps = [ + "//envoy/api/v3alpha/core:base", + "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", + ], +) + +api_go_grpc_library( + name = "trace_service", + proto = ":trace_service", + deps = [ + "//envoy/api/v3alpha/core:base_go_proto", + "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", + ], +) diff --git a/api/envoy/service/trace/v3alpha/trace_service.proto b/api/envoy/service/trace/v3alpha/trace_service.proto new file mode 100644 index 000000000000..521139a084e5 --- /dev/null +++ b/api/envoy/service/trace/v3alpha/trace_service.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +// [#proto-status: draft] + +package envoy.service.trace.v3alpha; + +option java_outer_classname = "TraceServiceProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.trace.v3alpha"; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v3alpha/core/base.proto"; +import "opencensus/proto/trace/v1/trace.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; + +// Service for streaming traces to server that consumes the trace data. It +// uses OpenCensus data model as a standard to represent trace information. +service TraceService { + // Envoy will connect and send StreamTracesMessage messages forever. 
It does + // not expect any response to be sent as nothing would be done in the case + // of failure. + rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { + } +} + +message StreamTracesResponse { +} + +message StreamTracesMessage { + message Identifier { + // The node sending the access log messages over the stream. + envoy.api.v3alpha.core.Node node = 1 [(validate.rules).message.required = true]; + } + + // Identifier data effectively is a structured metadata. + // As a performance optimization this will only be sent in the first message + // on the stream. + Identifier identifier = 1; + + // A list of Span entries + repeated opencensus.proto.trace.v1.Span spans = 2; +} diff --git a/api/migration/v3alpha.sh b/api/migration/v3alpha.sh new file mode 100755 index 000000000000..2b081dabaaaf --- /dev/null +++ b/api/migration/v3alpha.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +set -e + +./tools/api/clone.sh v2 v3alpha +./tools/check_format.py fix diff --git a/tools/api/clone.sh b/tools/api/clone.sh new file mode 100755 index 000000000000..a9cdb63ffd91 --- /dev/null +++ b/tools/api/clone.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +# Simple script to clone vM to vN API, performing sed-style heuristic fixup of +# build paths and package references. 
+# +# Usage: +# +# ./tools/api/clone.sh v2 v3 + +set -e + +declare -r OLD_VERSION="$1" +declare -r NEW_VERSION="$2" + +# For vM -> vN, replace //$1*/vMalpha with //$1*/vN in BUILD file $2 +# For vM -> vN, replace //$1*/vM with //$1*/vN in BUILD file $2 +function replace_build() { + sed -i -e "s#\(//$1[^\S]*\)/${OLD_VERSION}alpha#\1/${NEW_VERSION}#g" "$2" + sed -i -e "s#\(//$1[^\S]*\)/${OLD_VERSION}#\1/${NEW_VERSION}#g" "$2" +} + +# For vM -> vN, replace $1*[./]vMalpha with $1*[./]vN in .proto file $2 +# For vM -> vN, replace $1*[./]vM with $1*[./]vN in .proto file $2 +function replace_proto() { + sed -i -e "s#\($1\S*[\./]\)${OLD_VERSION}alpha#\1${NEW_VERSION}#g" "$2" + sed -i -e "s#\($1\S*[\./]\)${OLD_VERSION}#\1${NEW_VERSION}#g" "$2" +} + +# We consider both {vM, vMalpha} to deal with the multiple possible combinations +# of {vM, vMalpha} existence for a given package. +for p in $(find api/ -name "${OLD_VERSION}" -o -name "${OLD_VERSION}alpha") +do + declare PACKAGE_ROOT="$(dirname "$p")" + declare OLD_VERSION_ROOT="${PACKAGE_ROOT}/${OLD_VERSION}" + declare NEW_VERSION_ROOT="${PACKAGE_ROOT}/${NEW_VERSION}" + + # Deal with the situation where there is both vM and vMalpha, we only want vM. 
+ if [[ -a "${OLD_VERSION_ROOT}" && "$p" != "${OLD_VERSION_ROOT}" ]] + then + continue + fi + + # Copy BUILD and .protos across + rsync -a "${p}"/ "${NEW_VERSION_ROOT}/" + + # Update BUILD files with vM -> vN + for b in $(find "${NEW_VERSION_ROOT}" -name BUILD) + do + replace_build envoy "$b" + done + + # Update .proto files with vM -> vN + for f in $(find "${NEW_VERSION_ROOT}" -name "*.proto") + do + replace_proto envoy "$f" + replace_proto api "$f" + replace_proto service "$f" + replace_proto common "$f" + replace_proto config "$f" + replace_proto filter "$f" + done +done From 0ef31372695fc27b5c5dee10edc2563b94e665e0 Mon Sep 17 00:00:00 2001 From: zyfjeff Date: Wed, 4 Sep 2019 04:50:52 +0800 Subject: [PATCH 27/31] dubbo: Fix heartbeat packet parsing error (#8103) Description: The heartbeat packet may carry data, and it is treated as null data when processing the heartbeat packet, causing some data to remain in the buffer. Risk Level: low Testing: Existing unit test Docs Changes: N/A Release Notes: N/A Fixes #7970 Signed-off-by: tianqian.zyf --- source/extensions/filters/network/dubbo_proxy/decoder.cc | 8 ++++++-- .../filters/network/dubbo_proxy/conn_manager_test.cc | 4 +++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.cc b/source/extensions/filters/network/dubbo_proxy/decoder.cc index f7e9cfc84a24..3715acf865d5 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.cc +++ b/source/extensions/filters/network/dubbo_proxy/decoder.cc @@ -18,12 +18,16 @@ DecoderStateMachine::onDecodeStreamHeader(Buffer::Instance& buffer) { return {ProtocolState::WaitForData}; } - // The heartbeat message has no body. 
auto context = ret.first; if (metadata->message_type() == MessageType::HeartbeatRequest || metadata->message_type() == MessageType::HeartbeatResponse) { + if (buffer.length() < (context->header_size() + context->body_size())) { + ENVOY_LOG(debug, "dubbo decoder: need more data for {} protocol heartbeat", protocol_.name()); + return {ProtocolState::WaitForData}; + } + ENVOY_LOG(debug, "dubbo decoder: this is the {} heartbeat message", protocol_.name()); - buffer.drain(context->header_size()); + buffer.drain(context->header_size() + context->body_size()); delegate_.onHeartbeat(metadata); return {ProtocolState::Done}; } diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 536e167b0209..008fb1b9fc5a 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -306,7 +306,8 @@ class ConnectionManagerTest : public testing::Test { buffer.add(static_cast(&msg_type), 1); buffer.add(std::string{0x14}); addInt64(buffer, request_id); // Request Id - buffer.add(std::string{0x00, 0x00, 0x00, 0x00}); // Body Length + buffer.add(std::string{0x00, 0x00, 0x00, 0x01}); // Body Length + buffer.add(std::string{0x01}); // Body } NiceMock factory_context_; @@ -377,6 +378,7 @@ TEST_F(ConnectionManagerTest, OnDataHandlesHeartbeatEvent) { })); EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, buffer_.length()); filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(0U, store_.counter("test.request").value()); From cf55298b2db5155f2a12e1e71b840f8a300b205e Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Tue, 3 Sep 2019 17:08:40 -0400 Subject: [PATCH 28/31] stats: Shared cluster isolated stats (#8118) * shared the main symbol-table with the isolated stats used for cluster info. 
Signed-off-by: Joshua Marantz --- source/common/upstream/upstream_impl.cc | 2 +- source/common/upstream/upstream_impl.h | 4 +++- test/integration/stats_integration_test.cc | 6 ++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index c50aa84f85d4..18aac17131d9 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -598,7 +598,7 @@ ClusterInfoImpl::ClusterInfoImpl( per_connection_buffer_limit_bytes_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), transport_socket_factory_(std::move(socket_factory)), stats_scope_(std::move(stats_scope)), - stats_(generateStats(*stats_scope_)), + stats_(generateStats(*stats_scope_)), load_report_stats_store_(stats_scope_->symbolTable()), load_report_stats_(generateLoadReportStats(load_report_stats_store_)), features_(parseFeatures(config)), http2_settings_(Http::Utility::parseHttp2Settings(config.http2_protocol_options())), diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 07469e36d0f3..3f0f34d7545b 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -75,7 +75,9 @@ class HostDescriptionImpl : virtual public HostDescription { .bool_value()), metadata_(std::make_shared(metadata)), locality_(locality), locality_zone_stat_name_(locality.zone(), cluster->statsScope().symbolTable()), - stats_{ALL_HOST_STATS(POOL_COUNTER(stats_store_), POOL_GAUGE(stats_store_))}, + stats_store_(cluster->statsScope().symbolTable()), stats_{ALL_HOST_STATS( + POOL_COUNTER(stats_store_), + POOL_GAUGE(stats_store_))}, priority_(priority) { if (health_check_config.port_value() != 0 && dest_address->type() != Network::Address::Type::Ip) { diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 746baf523926..4cfacefe2d4a 100644 --- 
a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -225,6 +225,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2019/07/15 7555 42806 43000 static link libstdc++ in tests // 2019/07/24 7503 43030 44000 add upstream filters to clusters // 2019/08/13 7877 42838 44000 skip EdfScheduler creation if all host weights equal + // 2019/09/02 8118 42830 43000 Share symbol-tables in cluster/host stats. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -234,7 +235,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // On a local clang8/libstdc++/linux flow, the memory usage was observed in // June 2019 to be 64 bytes higher than it is in CI/release. Your mileage may // vary. - EXPECT_MEMORY_EQ(m_per_cluster, 42838); // 104 bytes higher than a debug build. + EXPECT_MEMORY_EQ(m_per_cluster, 42830); // 104 bytes higher than a debug build. EXPECT_MEMORY_LE(m_per_cluster, 44000); } @@ -260,6 +261,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // exact upper-bound // ---------- ----- ----------------- ----- // 2019/08/09 7882 35489 36000 Initial version + // 2019/09/02 8118 34585 34500 Share symbol-tables in cluster/host stats. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -269,7 +271,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // On a local clang8/libstdc++/linux flow, the memory usage was observed in // June 2019 to be 64 bytes higher than it is in CI/release. Your mileage may // vary. - EXPECT_MEMORY_EQ(m_per_cluster, 35489); // 104 bytes higher than a debug build. + EXPECT_MEMORY_EQ(m_per_cluster, 34585); // 104 bytes higher than a debug build. 
EXPECT_MEMORY_LE(m_per_cluster, 36000); } From 39a4423a274278f9b9e8399c901734fc2e87a0aa Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 3 Sep 2019 17:23:07 -0400 Subject: [PATCH 29/31] protodoc: upgrade to Python 3. (#8129) Risk level: Low Testing: Rebuilt docs, manual inspection of some example generated files. Signed-off-by: Harvey Tuch --- docs/build.sh | 6 +++--- tools/protodoc/BUILD | 2 +- tools/protodoc/protodoc.py | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/build.sh b/docs/build.sh index 1b8cac8b06fa..b959b159c40b 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -44,11 +44,11 @@ rm -rf "${GENERATED_RST_DIR}" mkdir -p "${GENERATED_RST_DIR}" source_venv "$BUILD_DIR" -pip install -r "${SCRIPT_DIR}"/requirements.txt +pip3 install -r "${SCRIPT_DIR}"/requirements.txt bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ - tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED \ - --action_env=ENVOY_BLOB_SHA --spawn_strategy=standalone --host_force_python=PY2 + tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED=1 \ + --action_env=ENVOY_BLOB_SHA --spawn_strategy=standalone --host_force_python=PY3 # These are the protos we want to put in docs, this list will grow. # TODO(htuch): Factor this out of this script. 
diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 8e428b5d24fd..b4b3c3f39acb 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -3,7 +3,7 @@ licenses(["notice"]) # Apache 2 py_binary( name = "protodoc", srcs = ["protodoc.py"], - python_version = "PY2", + python_version = "PY3", visibility = ["//visibility:public"], deps = [ "@com_envoyproxy_protoc_gen_validate//validate:validate_py", diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index bf6a869f0797..562d6de2ac5f 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -6,9 +6,9 @@ from collections import defaultdict import cProfile import functools +import io import os import pstats -import StringIO import re import sys @@ -709,7 +709,7 @@ def GenerateRst(proto_file): def Main(): # http://www.expobrain.net/2015/09/13/create-a-plugin-for-google-protocol-buffer/ request = plugin_pb2.CodeGeneratorRequest() - request.ParseFromString(sys.stdin.read()) + request.ParseFromString(sys.stdin.buffer.read()) response = plugin_pb2.CodeGeneratorResponse() cprofile_enabled = os.getenv('CPROFILE_ENABLED') @@ -724,14 +724,14 @@ def Main(): f.content = GenerateRst(proto_file) if cprofile_enabled: pr.disable() - stats_stream = StringIO.StringIO() + stats_stream = io.StringIO() ps = pstats.Stats(pr, stream=stats_stream).sort_stats(os.getenv('CPROFILE_SORTBY', 'cumulative')) stats_file = response.file.add() stats_file.name = proto_file.name + '.rst.profile' ps.print_stats() stats_file.content = stats_stream.getvalue() - sys.stdout.write(response.SerializeToString()) + sys.stdout.buffer.write(response.SerializeToString()) if __name__ == '__main__': From b06e2b5c473b4b88e23ed66abd501e29f6bddbee Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 3 Sep 2019 19:25:38 -0400 Subject: [PATCH 30/31] protodoc: single source-of-truth for doc protos. (#8132) This avoids having to list new docs protos in both docs/build.sh and api/docs/BUILD. 
This technical debt cleanup is helpful in v3 proto work to simplify collecting proto artifacts from a Bazel aspect. Risk level: Low Testing: docs/build.sh, visual inspection of docs. Signed-off-by: Harvey Tuch --- api/CONTRIBUTING.md | 2 +- api/docs/BUILD | 5 +- docs/build.sh | 125 +++---------------------------------- tools/protodoc/protodoc.py | 12 +++- 4 files changed, 22 insertions(+), 122 deletions(-) diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md index 02f8536906b5..d07e1820a0ab 100644 --- a/api/CONTRIBUTING.md +++ b/api/CONTRIBUTING.md @@ -8,7 +8,7 @@ API changes are regular PRs in https://github.com/envoyproxy/envoy for the API/c changes. They may be as part of a larger implementation PR. Please follow the standard Bazel and CI process for validating build/test sanity of `api/` before submitting a PR. -*Note: New .proto files should be also included to [build.sh](https://github.com/envoyproxy/envoy/blob/master/docs/build.sh) and +*Note: New .proto files should be added to [BUILD](https://github.com/envoyproxy/envoy/blob/master/api/docs/BUILD) in order to get the RSTs generated.* ## Documentation changes diff --git a/api/docs/BUILD b/api/docs/BUILD index 11ef3876b218..5784ecc2d2af 100644 --- a/api/docs/BUILD +++ b/api/docs/BUILD @@ -7,8 +7,7 @@ package_group( ], ) -# TODO(htuch): Grow this to cover everything we want to generate docs for, so we can just invoke -# bazel build //docs:protos --aspects tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst +# This is where you add protos that will participate in docs RST generation. 
proto_library( name = "protos", deps = [ @@ -94,8 +93,6 @@ proto_library( "//envoy/service/auth/v2:external_auth", "//envoy/service/discovery/v2:ads", "//envoy/service/discovery/v2:rtds", - "//envoy/service/load_stats/v2:lrs", - "//envoy/service/metrics/v2:metrics_service", "//envoy/service/ratelimit/v2:rls", "//envoy/service/tap/v2alpha:common", "//envoy/type:percent", diff --git a/docs/build.sh b/docs/build.sh index b959b159c40b..b75ebac03e65 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -50,126 +50,19 @@ bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED=1 \ --action_env=ENVOY_BLOB_SHA --spawn_strategy=standalone --host_force_python=PY3 -# These are the protos we want to put in docs, this list will grow. -# TODO(htuch): Factor this out of this script. -PROTO_RST=" - /envoy/admin/v2alpha/certs/envoy/admin/v2alpha/certs.proto.rst - /envoy/admin/v2alpha/clusters/envoy/admin/v2alpha/clusters.proto.rst - /envoy/admin/v2alpha/config_dump/envoy/admin/v2alpha/config_dump.proto.rst - /envoy/admin/v2alpha/listeners/envoy/admin/v2alpha/listeners.proto.rst - /envoy/admin/v2alpha/memory/envoy/admin/v2alpha/memory.proto.rst - /envoy/admin/v2alpha/clusters/envoy/admin/v2alpha/metrics.proto.rst - /envoy/admin/v2alpha/mutex_stats/envoy/admin/v2alpha/mutex_stats.proto.rst - /envoy/admin/v2alpha/server_info/envoy/admin/v2alpha/server_info.proto.rst - /envoy/admin/v2alpha/tap/envoy/admin/v2alpha/tap.proto.rst - /envoy/api/v2/core/address/envoy/api/v2/core/address.proto.rst - /envoy/api/v2/core/base/envoy/api/v2/core/base.proto.rst - /envoy/api/v2/core/http_uri/envoy/api/v2/core/http_uri.proto.rst - /envoy/api/v2/core/config_source/envoy/api/v2/core/config_source.proto.rst - /envoy/api/v2/core/grpc_service/envoy/api/v2/core/grpc_service.proto.rst - /envoy/api/v2/core/health_check/envoy/api/v2/core/health_check.proto.rst - 
/envoy/api/v2/core/protocol/envoy/api/v2/core/protocol.proto.rst - /envoy/api/v2/discovery/envoy/api/v2/discovery.proto.rst - /envoy/api/v2/auth/cert/envoy/api/v2/auth/cert.proto.rst - /envoy/api/v2/eds/envoy/api/v2/eds.proto.rst - /envoy/api/v2/endpoint/endpoint/envoy/api/v2/endpoint/endpoint.proto.rst - /envoy/api/v2/cds/envoy/api/v2/cds.proto.rst - /envoy/api/v2/cluster/outlier_detection/envoy/api/v2/cluster/outlier_detection.proto.rst - /envoy/api/v2/cluster/circuit_breaker/envoy/api/v2/cluster/circuit_breaker.proto.rst - /envoy/api/v2/cluster/filter/envoy/api/v2/cluster/filter.proto.rst - /envoy/api/v2/rds/envoy/api/v2/rds.proto.rst - /envoy/api/v2/route/route/envoy/api/v2/route/route.proto.rst - /envoy/api/v2/srds/envoy/api/v2/srds.proto.rst - /envoy/api/v2/lds/envoy/api/v2/lds.proto.rst - /envoy/api/v2/listener/listener/envoy/api/v2/listener/listener.proto.rst - /envoy/api/v2/listener/udp_listener_config/envoy/api/v2/listener/udp_listener_config.proto.rst - /envoy/api/v2/ratelimit/ratelimit/envoy/api/v2/ratelimit/ratelimit.proto.rst - /envoy/config/accesslog/v2/als/envoy/config/accesslog/v2/als.proto.rst - /envoy/config/accesslog/v2/file/envoy/config/accesslog/v2/file.proto.rst - /envoy/config/bootstrap/v2/bootstrap/envoy/config/bootstrap/v2/bootstrap.proto.rst - /envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto.rst - /envoy/config/cluster/redis/redis_cluster/envoy/config/cluster/redis/redis_cluster.proto.rst - /envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto.rst - /envoy/config/common/tap/v2alpha/common/envoy/config/common/tap/v2alpha/common.proto.rst - /envoy/config/ratelimit/v2/rls/envoy/config/ratelimit/v2/rls.proto.rst - /envoy/config/metrics/v2/metrics_service/envoy/config/metrics/v2/metrics_service.proto.rst - /envoy/config/metrics/v2/stats/envoy/config/metrics/v2/stats.proto.rst - 
/envoy/config/trace/v2/trace/envoy/config/trace/v2/trace.proto.rst - /envoy/config/filter/accesslog/v2/accesslog/envoy/config/filter/accesslog/v2/accesslog.proto.rst - /envoy/config/filter/fault/v2/fault/envoy/config/filter/fault/v2/fault.proto.rst - /envoy/config/filter/http/buffer/v2/buffer/envoy/config/filter/http/buffer/v2/buffer.proto.rst - /envoy/config/filter/http/csrf/v2/csrf/envoy/config/filter/http/csrf/v2/csrf.proto.rst - /envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto.rst - /envoy/config/filter/http/ext_authz/v2/ext_authz/envoy/config/filter/http/ext_authz/v2/ext_authz.proto.rst - /envoy/config/filter/http/fault/v2/fault/envoy/config/filter/http/fault/v2/fault.proto.rst - /envoy/config/filter/http/gzip/v2/gzip/envoy/config/filter/http/gzip/v2/gzip.proto.rst - /envoy/config/filter/http/health_check/v2/health_check/envoy/config/filter/http/health_check/v2/health_check.proto.rst - /envoy/config/filter/http/header_to_metadata/v2/header_to_metadata/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto.rst - /envoy/config/filter/http/ip_tagging/v2/ip_tagging/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto.rst - /envoy/config/filter/http/jwt_authn/v2alpha/jwt_authn/envoy/config/filter/http/jwt_authn/v2alpha/config.proto.rst - /envoy/config/filter/http/lua/v2/lua/envoy/config/filter/http/lua/v2/lua.proto.rst - /envoy/config/filter/http/original_src/v2alpha1/original_src/envoy/config/filter/http/original_src/v2alpha1/original_src.proto.rst - /envoy/config/filter/http/rate_limit/v2/rate_limit/envoy/config/filter/http/rate_limit/v2/rate_limit.proto.rst - /envoy/config/filter/http/rbac/v2/rbac/envoy/config/filter/http/rbac/v2/rbac.proto.rst - /envoy/config/filter/http/router/v2/router/envoy/config/filter/http/router/v2/router.proto.rst - 
/envoy/config/filter/http/squash/v2/squash/envoy/config/filter/http/squash/v2/squash.proto.rst - /envoy/config/filter/http/tap/v2alpha/tap/envoy/config/filter/http/tap/v2alpha/tap.proto.rst - /envoy/config/filter/http/transcoder/v2/transcoder/envoy/config/filter/http/transcoder/v2/transcoder.proto.rst - /envoy/config/filter/listener/original_src/v2alpha1/original_src/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto.rst - /envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto.rst - /envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto.rst - /envoy/config/filter/dubbo/router/v2alpha1/router/envoy/config/filter/dubbo/router/v2alpha1/router.proto.rst - /envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto.rst - /envoy/config/filter/network/ext_authz/v2/ext_authz/envoy/config/filter/network/ext_authz/v2/ext_authz.proto.rst - /envoy/config/filter/network/http_connection_manager/v2/http_connection_manager/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto.rst - /envoy/config/filter/network/mongo_proxy/v2/mongo_proxy/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto.rst - /envoy/config/filter/network/rate_limit/v2/rate_limit/envoy/config/filter/network/rate_limit/v2/rate_limit.proto.rst - /envoy/config/filter/network/rbac/v2/rbac/envoy/config/filter/network/rbac/v2/rbac.proto.rst - /envoy/config/filter/network/redis_proxy/v2/redis_proxy/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto.rst - /envoy/config/filter/network/tcp_proxy/v2/tcp_proxy/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto.rst - /envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto.rst - 
/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto.rst - /envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto.rst - /envoy/config/filter/thrift/router/v2alpha1/router/envoy/config/filter/thrift/router/v2alpha1/router.proto.rst - /envoy/config/grpc_credential/v2alpha/aws_iam/envoy/config/grpc_credential/v2alpha/aws_iam.proto.rst - /envoy/config/health_checker/redis/v2/redis/envoy/config/health_checker/redis/v2/redis.proto.rst - /envoy/config/overload/v2alpha/overload/envoy/config/overload/v2alpha/overload.proto.rst - /envoy/config/rbac/v2/rbac/envoy/config/rbac/v2/rbac.proto.rst - /envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto.rst - /envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto.rst - /envoy/config/transport_socket/tap/v2alpha/tap/envoy/config/transport_socket/tap/v2alpha/tap.proto.rst - /envoy/data/accesslog/v2/accesslog/envoy/data/accesslog/v2/accesslog.proto.rst - /envoy/data/core/v2alpha/health_check_event/envoy/data/core/v2alpha/health_check_event.proto.rst - /envoy/data/tap/v2alpha/common/envoy/data/tap/v2alpha/common.proto.rst - /envoy/data/tap/v2alpha/transport/envoy/data/tap/v2alpha/transport.proto.rst - /envoy/data/tap/v2alpha/http/envoy/data/tap/v2alpha/http.proto.rst - /envoy/data/tap/v2alpha/wrapper/envoy/data/tap/v2alpha/wrapper.proto.rst - /envoy/data/cluster/v2alpha/outlier_detection_event/envoy/data/cluster/v2alpha/outlier_detection_event.proto.rst - /envoy/service/accesslog/v2/als/envoy/service/accesslog/v2/als.proto.rst - /envoy/service/auth/v2/external_auth/envoy/service/auth/v2/attribute_context.proto.rst - /envoy/service/auth/v2/external_auth/envoy/service/auth/v2/external_auth.proto.rst - 
/envoy/service/discovery/v2/rtds/envoy/service/discovery/v2/rtds.proto.rst - /envoy/service/ratelimit/v2/rls/envoy/service/ratelimit/v2/rls.proto.rst - /envoy/service/tap/v2alpha/common/envoy/service/tap/v2alpha/common.proto.rst - /envoy/type/http_status/envoy/type/http_status.proto.rst - /envoy/type/percent/envoy/type/percent.proto.rst - /envoy/type/range/envoy/type/range.proto.rst - /envoy/type/matcher/metadata/envoy/type/matcher/metadata.proto.rst - /envoy/type/matcher/value/envoy/type/matcher/value.proto.rst - /envoy/type/matcher/number/envoy/type/matcher/number.proto.rst - /envoy/type/matcher/regex/envoy/type/matcher/regex.proto.rst - /envoy/type/matcher/string/envoy/type/matcher/string.proto.rst -" - -# Dump all the generated RST so they can be added to PROTO_RST easily. -find -L bazel-bin/external/envoy_api -name "*.proto.rst" +declare -r DOC_PROTOS=$(bazel query "deps(@envoy_api//docs:protos)" | grep "^@envoy_api.*proto$") # Only copy in the protos we care about and know how to deal with in protodoc. 
-for p in $PROTO_RST +for p in ${DOC_PROTOS} do - DEST="${GENERATED_RST_DIR}/api-v2/$(sed -e 's#/envoy\/.*/envoy/##' <<< "$p")" + declare PROTO_TARGET=$(bazel query "kind(proto_library, same_pkg_direct_rdeps($p))") + declare PROTO_TARGET_WITHOUT_PREFIX="${PROTO_TARGET#@envoy_api//}" + declare PROTO_TARGET_CANONICAL="${PROTO_TARGET_WITHOUT_PREFIX/:/\/}" + declare PROTO_FILE_WITHOUT_PREFIX="${p#@envoy_api//}" + declare PROTO_FILE_CANONICAL="${PROTO_FILE_WITHOUT_PREFIX/:/\/}" + declare DEST="${GENERATED_RST_DIR}/api-v2/${PROTO_FILE_CANONICAL#envoy/}".rst mkdir -p "$(dirname "${DEST}")" - cp -f bazel-bin/external/envoy_api/"${p}" "$(dirname "${DEST}")" + cp -f bazel-bin/external/envoy_api/"${PROTO_TARGET_CANONICAL}/${PROTO_FILE_CANONICAL}.rst" "$(dirname "${DEST}")" [ -n "${CPROFILE_ENABLED}" ] && cp -f bazel-bin/"${p}".profile "$(dirname "${DEST}")" done diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index 562d6de2ac5f..83bf0946a961 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -713,7 +713,17 @@ def Main(): response = plugin_pb2.CodeGeneratorResponse() cprofile_enabled = os.getenv('CPROFILE_ENABLED') - for proto_file in request.proto_file: + # We use file_to_generate rather than proto_file here since we are invoked + # inside a Bazel aspect, each node in the DAG will be visited once by the + # aspect and we only want to generate docs for the current node. + for file_to_generate in request.file_to_generate: + # Find the FileDescriptorProto for the file we actually are generating. 
+ proto_file = None + for pf in request.proto_file: + if pf.name == file_to_generate: + proto_file = pf + break + assert (proto_file is not None) f = response.file.add() f.name = proto_file.name + '.rst' if cprofile_enabled: From d504fde0ffd97017d1ddff8caa9a3b46bba9ae48 Mon Sep 17 00:00:00 2001 From: Kuat Date: Tue, 3 Sep 2019 17:08:45 -1000 Subject: [PATCH 31/31] api: organize go_proto_libraries (#8003) Fixes #7982 Defines a package level proto library and its associated internal go_proto_library. Deletes all existing api_go_proto_library, api_go_grpc_library, and go_package annotations in protos (they are not required and pollute the sources). I deliberately avoided touching anything under udpa since it's being moved to another repository. Risk Level: low Testing: build completes Signed-off-by: Kuat Yessenov --- api/bazel/BUILD | 12 ++ api/bazel/api_build_system.bzl | 115 ++++++++++-------- api/bazel/repositories.bzl | 7 +- api/envoy/admin/v2alpha/BUILD | 13 +- api/envoy/admin/v3alpha/BUILD | 13 +- api/envoy/api/v2/BUILD | 88 +++----------- api/envoy/api/v2/auth/BUILD | 18 ++- api/envoy/api/v2/auth/cert.proto | 1 - api/envoy/api/v2/cluster/BUILD | 27 ++-- .../api/v2/cluster/circuit_breaker.proto | 1 - api/envoy/api/v2/core/BUILD | 58 ++------- api/envoy/api/v2/core/base.proto | 1 - api/envoy/api/v2/discovery.proto | 1 - api/envoy/api/v2/endpoint/BUILD | 31 ++--- api/envoy/api/v2/endpoint/endpoint.proto | 1 - api/envoy/api/v2/listener/BUILD | 27 ++-- api/envoy/api/v2/listener/listener.proto | 1 - .../api/v2/listener/udp_listener_config.proto | 1 - api/envoy/api/v2/ratelimit/BUILD | 9 +- api/envoy/api/v2/ratelimit/ratelimit.proto | 1 - api/envoy/api/v2/route/BUILD | 22 ++-- api/envoy/api/v2/route/route.proto | 1 - api/envoy/api/v3alpha/BUILD | 88 +++----------- api/envoy/api/v3alpha/auth/BUILD | 18 ++- api/envoy/api/v3alpha/auth/cert.proto | 1 - api/envoy/api/v3alpha/cluster/BUILD | 27 ++-- .../api/v3alpha/cluster/circuit_breaker.proto | 1 - 
api/envoy/api/v3alpha/core/BUILD | 58 ++------- api/envoy/api/v3alpha/core/base.proto | 1 - api/envoy/api/v3alpha/discovery.proto | 1 - api/envoy/api/v3alpha/endpoint/BUILD | 31 ++--- api/envoy/api/v3alpha/endpoint/endpoint.proto | 1 - api/envoy/api/v3alpha/listener/BUILD | 27 ++-- api/envoy/api/v3alpha/listener/listener.proto | 1 - .../listener/udp_listener_config.proto | 1 - api/envoy/api/v3alpha/ratelimit/BUILD | 9 +- .../api/v3alpha/ratelimit/ratelimit.proto | 1 - api/envoy/api/v3alpha/route/BUILD | 22 ++-- api/envoy/api/v3alpha/route/route.proto | 1 - api/envoy/config/accesslog/v2/BUILD | 12 +- api/envoy/config/accesslog/v2/als.proto | 1 - api/envoy/config/accesslog/v2/file.proto | 1 - api/envoy/config/accesslog/v3alpha/BUILD | 12 +- api/envoy/config/accesslog/v3alpha/als.proto | 1 - api/envoy/config/accesslog/v3alpha/file.proto | 1 - api/envoy/config/bootstrap/v2/BUILD | 32 ++--- api/envoy/config/bootstrap/v2/bootstrap.proto | 1 - api/envoy/config/bootstrap/v3alpha/BUILD | 32 ++--- .../config/bootstrap/v3alpha/bootstrap.proto | 1 - .../dynamic_forward_proxy/v2alpha/BUILD | 6 +- .../v2alpha/cluster.proto | 1 - .../dynamic_forward_proxy/v3alpha/BUILD | 6 +- .../v3alpha/cluster.proto | 1 - api/envoy/config/cluster/redis/BUILD | 4 +- .../config/cluster/redis/redis_cluster.proto | 1 - .../dynamic_forward_proxy/v2alpha/BUILD | 6 +- .../dynamic_forward_proxy/v3alpha/BUILD | 6 +- api/envoy/config/common/tap/v2alpha/BUILD | 9 +- api/envoy/config/common/tap/v3alpha/BUILD | 9 +- api/envoy/config/filter/accesslog/v2/BUILD | 20 ++- .../filter/accesslog/v2/accesslog.proto | 1 - .../config/filter/accesslog/v3alpha/BUILD | 20 ++- .../filter/accesslog/v3alpha/accesslog.proto | 1 - .../config/filter/dubbo/router/v2alpha1/BUILD | 4 +- .../filter/dubbo/router/v2alpha1/router.proto | 1 - api/envoy/config/filter/fault/v2/BUILD | 6 +- api/envoy/config/filter/fault/v2/fault.proto | 1 - api/envoy/config/filter/fault/v3alpha/BUILD | 6 +- .../config/filter/fault/v3alpha/fault.proto | 1 
- .../http/adaptive_concurrency/v2alpha/BUILD | 6 +- .../v2alpha/adaptive_concurrency.proto | 1 - .../http/adaptive_concurrency/v3alpha/BUILD | 6 +- .../v3alpha/adaptive_concurrency.proto | 1 - api/envoy/config/filter/http/buffer/v2/BUILD | 4 +- .../config/filter/http/buffer/v2/buffer.proto | 1 - .../config/filter/http/buffer/v3alpha/BUILD | 4 +- .../filter/http/buffer/v3alpha/buffer.proto | 1 - api/envoy/config/filter/http/csrf/v2/BUILD | 9 +- .../config/filter/http/csrf/v2/csrf.proto | 1 - .../config/filter/http/csrf/v3alpha/BUILD | 9 +- .../filter/http/csrf/v3alpha/csrf.proto | 1 - .../http/dynamic_forward_proxy/v2alpha/BUILD | 6 +- .../v2alpha/dynamic_forward_proxy.proto | 1 - .../http/dynamic_forward_proxy/v3alpha/BUILD | 6 +- .../v3alpha/dynamic_forward_proxy.proto | 1 - .../config/filter/http/ext_authz/v2/BUILD | 10 +- .../filter/http/ext_authz/v2/ext_authz.proto | 1 - .../filter/http/ext_authz/v3alpha/BUILD | 10 +- .../http/ext_authz/v3alpha/ext_authz.proto | 1 - api/envoy/config/filter/http/fault/v2/BUILD | 10 +- .../config/filter/http/fault/v2/fault.proto | 1 - .../config/filter/http/fault/v3alpha/BUILD | 10 +- .../filter/http/fault/v3alpha/fault.proto | 1 - .../grpc_http1_reverse_bridge/v2alpha1/BUILD | 4 +- .../v2alpha1/config.proto | 1 - api/envoy/config/filter/http/gzip/v2/BUILD | 4 +- .../config/filter/http/gzip/v2/gzip.proto | 1 - .../config/filter/http/gzip/v3alpha/BUILD | 4 +- .../filter/http/gzip/v3alpha/gzip.proto | 1 - .../filter/http/header_to_metadata/v2/BUILD | 5 +- .../v2/header_to_metadata.proto | 1 - .../http/header_to_metadata/v3alpha/BUILD | 5 +- .../v3alpha/header_to_metadata.proto | 1 - .../config/filter/http/health_check/v2/BUILD | 18 ++- .../http/health_check/v2/health_check.proto | 1 - .../filter/http/health_check/v3alpha/BUILD | 18 ++- .../health_check/v3alpha/health_check.proto | 1 - .../config/filter/http/ip_tagging/v2/BUILD | 6 +- .../http/ip_tagging/v2/ip_tagging.proto | 1 - .../filter/http/ip_tagging/v3alpha/BUILD | 6 +- 
.../http/ip_tagging/v3alpha/ip_tagging.proto | 1 - .../filter/http/jwt_authn/v2alpha/BUILD | 19 ++- .../filter/http/jwt_authn/v3alpha/BUILD | 19 ++- api/envoy/config/filter/http/lua/v2/BUILD | 4 +- api/envoy/config/filter/http/lua/v2/lua.proto | 1 - .../config/filter/http/lua/v3alpha/BUILD | 4 +- .../config/filter/http/lua/v3alpha/lua.proto | 1 - .../filter/http/original_src/v2alpha1/BUILD | 4 +- .../original_src/v2alpha1/original_src.proto | 2 - .../config/filter/http/rate_limit/v2/BUILD | 6 +- .../http/rate_limit/v2/rate_limit.proto | 1 - .../filter/http/rate_limit/v3alpha/BUILD | 6 +- .../http/rate_limit/v3alpha/rate_limit.proto | 1 - api/envoy/config/filter/http/rbac/v2/BUILD | 6 +- .../config/filter/http/rbac/v2/rbac.proto | 1 - .../config/filter/http/rbac/v3alpha/BUILD | 6 +- .../filter/http/rbac/v3alpha/rbac.proto | 1 - api/envoy/config/filter/http/router/v2/BUILD | 12 +- .../config/filter/http/router/v2/router.proto | 1 - .../config/filter/http/router/v3alpha/BUILD | 12 +- .../filter/http/router/v3alpha/router.proto | 1 - api/envoy/config/filter/http/squash/v2/BUILD | 4 +- .../config/filter/http/squash/v2/squash.proto | 1 - .../config/filter/http/squash/v3alpha/BUILD | 4 +- .../filter/http/squash/v3alpha/squash.proto | 1 - .../config/filter/http/tap/v2alpha/BUILD | 6 +- .../config/filter/http/tap/v3alpha/BUILD | 6 +- .../config/filter/http/transcoder/v2/BUILD | 9 +- .../http/transcoder/v2/transcoder.proto | 1 - .../filter/http/transcoder/v3alpha/BUILD | 9 +- .../http/transcoder/v3alpha/transcoder.proto | 1 - .../listener/original_src/v2alpha1/BUILD | 4 +- .../original_src/v2alpha1/original_src.proto | 2 - .../filter/network/client_ssl_auth/v2/BUILD | 6 +- .../client_ssl_auth/v2/client_ssl_auth.proto | 1 - .../network/client_ssl_auth/v3alpha/BUILD | 6 +- .../v3alpha/client_ssl_auth.proto | 1 - .../filter/network/dubbo_proxy/v2alpha1/BUILD | 11 +- .../dubbo_proxy/v2alpha1/dubbo_proxy.proto | 3 +- .../network/dubbo_proxy/v2alpha1/route.proto | 1 - 
.../config/filter/network/ext_authz/v2/BUILD | 6 +- .../network/ext_authz/v2/ext_authz.proto | 1 - .../filter/network/ext_authz/v3alpha/BUILD | 6 +- .../network/ext_authz/v3alpha/ext_authz.proto | 1 - .../network/http_connection_manager/v2/BUILD | 25 ++-- .../v2/http_connection_manager.proto | 1 - .../http_connection_manager/v3alpha/BUILD | 25 ++-- .../v3alpha/http_connection_manager.proto | 1 - .../filter/network/mongo_proxy/v2/BUILD | 6 +- .../network/mongo_proxy/v2/mongo_proxy.proto | 1 - .../filter/network/mongo_proxy/v3alpha/BUILD | 6 +- .../mongo_proxy/v3alpha/mongo_proxy.proto | 1 - .../filter/network/mysql_proxy/v1alpha1/BUILD | 4 +- .../mysql_proxy/v1alpha1/mysql_proxy.proto | 1 - .../config/filter/network/rate_limit/v2/BUILD | 9 +- .../network/rate_limit/v2/rate_limit.proto | 1 - .../filter/network/rate_limit/v3alpha/BUILD | 9 +- .../rate_limit/v3alpha/rate_limit.proto | 1 - api/envoy/config/filter/network/rbac/v2/BUILD | 6 +- .../config/filter/network/rbac/v2/rbac.proto | 1 - .../config/filter/network/rbac/v3alpha/BUILD | 6 +- .../filter/network/rbac/v3alpha/rbac.proto | 1 - .../filter/network/redis_proxy/v2/BUILD | 9 +- .../network/redis_proxy/v2/redis_proxy.proto | 1 - .../filter/network/redis_proxy/v3alpha/BUILD | 9 +- .../redis_proxy/v3alpha/redis_proxy.proto | 1 - .../config/filter/network/tcp_proxy/v2/BUILD | 19 ++- .../network/tcp_proxy/v2/tcp_proxy.proto | 1 - .../filter/network/tcp_proxy/v3alpha/BUILD | 19 ++- .../network/tcp_proxy/v3alpha/tcp_proxy.proto | 1 - .../network/thrift_proxy/v2alpha1/BUILD | 9 +- .../network/thrift_proxy/v2alpha1/route.proto | 1 - .../thrift_proxy/v2alpha1/thrift_proxy.proto | 1 - .../network/zookeeper_proxy/v1alpha1/BUILD | 4 +- .../v1alpha1/zookeeper_proxy.proto | 1 - .../filter/thrift/rate_limit/v2alpha1/BUILD | 9 +- .../rate_limit/v2alpha1/rate_limit.proto | 1 - .../filter/thrift/router/v2alpha1/BUILD | 4 +- .../thrift/router/v2alpha1/router.proto | 1 - .../config/grpc_credential/v2alpha/BUILD | 19 +-- 
.../grpc_credential/v2alpha/aws_iam.proto | 1 - .../v2alpha/file_based_metadata.proto | 1 - .../config/grpc_credential/v3alpha/BUILD | 19 +-- .../grpc_credential/v3alpha/aws_iam.proto | 1 - .../v3alpha/file_based_metadata.proto | 1 - .../config/health_checker/redis/v2/BUILD | 4 +- .../health_checker/redis/v2/redis.proto | 1 - .../config/health_checker/redis/v3alpha/BUILD | 4 +- .../health_checker/redis/v3alpha/redis.proto | 1 - api/envoy/config/metrics/v2/BUILD | 26 ++-- api/envoy/config/metrics/v2/stats.proto | 1 - api/envoy/config/metrics/v3alpha/BUILD | 26 ++-- api/envoy/config/metrics/v3alpha/stats.proto | 1 - api/envoy/config/overload/v2alpha/BUILD | 9 +- .../config/overload/v2alpha/overload.proto | 1 - api/envoy/config/overload/v3alpha/BUILD | 9 +- .../config/overload/v3alpha/overload.proto | 1 - api/envoy/config/ratelimit/v2/BUILD | 14 +-- api/envoy/config/ratelimit/v2/rls.proto | 1 - api/envoy/config/ratelimit/v3alpha/BUILD | 14 +-- api/envoy/config/ratelimit/v3alpha/rls.proto | 1 - api/envoy/config/rbac/v2/BUILD | 23 ++-- api/envoy/config/rbac/v2/rbac.proto | 1 - api/envoy/config/rbac/v3alpha/BUILD | 23 ++-- api/envoy/config/rbac/v3alpha/rbac.proto | 1 - .../resource_monitor/fixed_heap/v2alpha/BUILD | 4 +- .../fixed_heap/v2alpha/fixed_heap.proto | 1 - .../resource_monitor/fixed_heap/v3alpha/BUILD | 4 +- .../fixed_heap/v3alpha/fixed_heap.proto | 1 - .../injected_resource/v2alpha/BUILD | 4 +- .../v2alpha/injected_resource.proto | 1 - .../injected_resource/v3alpha/BUILD | 4 +- .../v3alpha/injected_resource.proto | 1 - .../config/retry/previous_priorities/BUILD | 6 +- api/envoy/config/trace/v2/BUILD | 18 ++- api/envoy/config/trace/v2/trace.proto | 1 - api/envoy/config/trace/v3alpha/BUILD | 18 ++- api/envoy/config/trace/v3alpha/trace.proto | 1 - .../transport_socket/alts/v2alpha/BUILD | 6 +- .../transport_socket/alts/v2alpha/alts.proto | 1 - .../transport_socket/alts/v3alpha/BUILD | 6 +- .../transport_socket/alts/v3alpha/alts.proto | 1 - 
.../config/transport_socket/tap/v2alpha/BUILD | 9 +- .../transport_socket/tap/v2alpha/tap.proto | 1 - .../config/transport_socket/tap/v3alpha/BUILD | 9 +- .../transport_socket/tap/v3alpha/tap.proto | 1 - api/envoy/data/accesslog/v2/BUILD | 15 +-- api/envoy/data/accesslog/v3alpha/BUILD | 15 +-- api/envoy/data/cluster/v2alpha/BUILD | 4 +- api/envoy/data/cluster/v3alpha/BUILD | 4 +- api/envoy/data/core/v2alpha/BUILD | 6 +- api/envoy/data/core/v3alpha/BUILD | 6 +- api/envoy/data/tap/v2alpha/BUILD | 6 +- api/envoy/data/tap/v2alpha/transport.proto | 1 - api/envoy/data/tap/v3alpha/BUILD | 6 +- api/envoy/data/tap/v3alpha/transport.proto | 1 - api/envoy/service/accesslog/v2/BUILD | 19 ++- api/envoy/service/accesslog/v2/als.proto | 1 - api/envoy/service/accesslog/v3alpha/BUILD | 19 ++- api/envoy/service/accesslog/v3alpha/als.proto | 1 - api/envoy/service/auth/v2/BUILD | 10 +- api/envoy/service/auth/v2/external_auth.proto | 1 - api/envoy/service/auth/v2alpha/BUILD | 9 +- .../service/auth/v2alpha/external_auth.proto | 2 - api/envoy/service/auth/v3alpha/BUILD | 10 +- .../service/auth/v3alpha/external_auth.proto | 1 - api/envoy/service/discovery/v2/BUILD | 45 ++----- api/envoy/service/discovery/v2/ads.proto | 1 - api/envoy/service/discovery/v3alpha/BUILD | 45 ++----- api/envoy/service/discovery/v3alpha/ads.proto | 1 - api/envoy/service/load_stats/v2/BUILD | 19 ++- api/envoy/service/load_stats/v2/lrs.proto | 1 - api/envoy/service/load_stats/v3alpha/BUILD | 19 ++- .../service/load_stats/v3alpha/lrs.proto | 1 - api/envoy/service/metrics/v2/BUILD | 19 ++- .../service/metrics/v2/metrics_service.proto | 1 - api/envoy/service/metrics/v3alpha/BUILD | 19 ++- .../metrics/v3alpha/metrics_service.proto | 1 - api/envoy/service/ratelimit/v2/BUILD | 20 ++- api/envoy/service/ratelimit/v2/rls.proto | 1 - api/envoy/service/ratelimit/v3alpha/BUILD | 20 ++- api/envoy/service/ratelimit/v3alpha/rls.proto | 1 - api/envoy/service/tap/v2alpha/BUILD | 12 +- api/envoy/service/tap/v3alpha/BUILD | 12 +- 
api/envoy/service/trace/v2/BUILD | 19 ++- .../service/trace/v2/trace_service.proto | 1 - api/envoy/service/trace/v3alpha/BUILD | 19 ++- .../service/trace/v3alpha/trace_service.proto | 1 - api/envoy/type/BUILD | 21 +--- api/envoy/type/matcher/BUILD | 45 +------ api/envoy/type/matcher/metadata.proto | 1 - api/envoy/type/matcher/number.proto | 1 - api/envoy/type/matcher/regex.proto | 1 - api/envoy/type/matcher/string.proto | 1 - api/envoy/type/matcher/value.proto | 1 - api/envoy/type/range.proto | 1 - api/test/build/BUILD | 21 ++-- api/test/build/go_build_test.go | 21 ++-- tools/check_format.py | 15 +++ tools/check_format_test_helper.py | 1 + .../check_format/api/go_package.proto | 5 + 291 files changed, 1057 insertions(+), 1313 deletions(-) create mode 100644 tools/testdata/check_format/api/go_package.proto diff --git a/api/bazel/BUILD b/api/bazel/BUILD index e69de29bb2d1..4b582bb8be3f 100644 --- a/api/bazel/BUILD +++ b/api/bazel/BUILD @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") + +licenses(["notice"]) # Apache 2 + +go_proto_compiler( + name = "pgv_plugin_go", + options = ["lang=go"], + plugin = "@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate", + suffix = ".pb.validate.go", + valid_archive = False, + visibility = ["//visibility:public"], +) diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl index 14cc6f89359d..0ad60bf84a32 100644 --- a/api/bazel/api_build_system.bzl +++ b/api/bazel/api_build_system.bzl @@ -7,9 +7,23 @@ _PY_SUFFIX = "_py" _CC_SUFFIX = "_cc" _CC_EXPORT_SUFFIX = "_export_cc" _GO_PROTO_SUFFIX = "_go_proto" -_GO_GRPC_SUFFIX = "_go_grpc" _GO_IMPORTPATH_PREFIX = "github.com/envoyproxy/data-plane-api/api/" +_COMMON_PROTO_DEPS = [ + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:duration_proto", + "@com_google_protobuf//:empty_proto", + "@com_google_protobuf//:struct_proto", + 
"@com_google_protobuf//:timestamp_proto", + "@com_google_protobuf//:wrappers_proto", + "@com_google_googleapis//google/api:http_proto", + "@com_google_googleapis//google/api:annotations_proto", + "@com_google_googleapis//google/rpc:status_proto", + "@com_github_gogo_protobuf//:gogo_proto", + "@com_envoyproxy_protoc_gen_validate//validate:validate_proto", +] + def _Suffix(d, suffix): return d + suffix @@ -61,41 +75,6 @@ def py_proto_library(name, deps = []): visibility = ["//visibility:public"], ) -def api_go_proto_library(name, proto, deps = []): - go_proto_library( - name = _Suffix(name, _GO_PROTO_SUFFIX), - importpath = _Suffix(_GO_IMPORTPATH_PREFIX, name), - proto = proto, - visibility = ["//visibility:public"], - deps = deps + [ - "@com_github_gogo_protobuf//:gogo_proto_go", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", - "@com_google_googleapis//google/rpc:status_go_proto", - ], - ) - -def api_go_grpc_library(name, proto, deps = []): - go_grpc_library( - name = _Suffix(name, _GO_GRPC_SUFFIX), - importpath = _Suffix(_GO_IMPORTPATH_PREFIX, name), - proto = proto, - visibility = ["//visibility:public"], - deps = deps + [ - "@com_github_gogo_protobuf//:gogo_proto_go", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", - "@com_google_googleapis//google/api:annotations_go_proto", - ], - ) - # This is api_proto_library plus some logic internal to //envoy/api. 
def api_proto_library_internal(visibility = ["//visibility:private"], **kwargs): # //envoy/docs/build.sh needs visibility in order to generate documents. @@ -108,8 +87,6 @@ def api_proto_library_internal(visibility = ["//visibility:private"], **kwargs): # TODO(htuch): has_services is currently ignored but will in future support # gRPC stub generation. -# TODO(htuch): Automatically generate go_proto_library and go_grpc_library -# from api_proto_library. def api_proto_library( name, visibility = ["//visibility:private"], @@ -124,20 +101,7 @@ def api_proto_library( native.proto_library( name = name, srcs = srcs, - deps = deps + external_proto_deps + [ - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:descriptor_proto", - "@com_google_protobuf//:duration_proto", - "@com_google_protobuf//:empty_proto", - "@com_google_protobuf//:struct_proto", - "@com_google_protobuf//:timestamp_proto", - "@com_google_protobuf//:wrappers_proto", - "@com_google_googleapis//google/api:http_proto", - "@com_google_googleapis//google/api:annotations_proto", - "@com_google_googleapis//google/rpc:status_proto", - "@com_github_gogo_protobuf//:gogo_proto", - "@com_envoyproxy_protoc_gen_validate//validate:validate_proto", - ], + deps = deps + external_proto_deps + _COMMON_PROTO_DEPS, visibility = visibility, ) pgv_cc_proto_library( @@ -181,3 +145,50 @@ def api_go_test(name, size, importpath, srcs = [], deps = []): importpath = importpath, deps = deps, ) + +_GO_BAZEL_RULE_MAPPING = { + "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", + "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:cel_go_proto", +} + +def go_proto_mapping(dep): + mapped = _GO_BAZEL_RULE_MAPPING.get(dep) + if mapped == None: + return 
_Suffix("@" + Label(dep).workspace_name + "//" + Label(dep).package + ":" + Label(dep).name, _GO_PROTO_SUFFIX) + return mapped + +def api_proto_package(name = "pkg", srcs = [], deps = [], has_services = False, visibility = ["//visibility:public"]): + if srcs == []: + srcs = native.glob(["*.proto"]) + + native.proto_library( + name = name, + srcs = srcs, + deps = deps + _COMMON_PROTO_DEPS, + visibility = visibility, + ) + + compilers = ["@io_bazel_rules_go//proto:go_proto", "//bazel:pgv_plugin_go"] + if has_services: + compilers = ["@io_bazel_rules_go//proto:go_grpc", "//bazel:pgv_plugin_go"] + + go_proto_library( + name = _Suffix(name, _GO_PROTO_SUFFIX), + compilers = compilers, + importpath = _Suffix(_GO_IMPORTPATH_PREFIX, native.package_name()), + proto = name, + visibility = ["//visibility:public"], + deps = [go_proto_mapping(dep) for dep in deps] + [ + "@com_github_gogo_protobuf//:gogo_proto_go", + "@com_github_golang_protobuf//ptypes:go_default_library", + "@com_github_golang_protobuf//ptypes/any:go_default_library", + "@com_github_golang_protobuf//ptypes/duration:go_default_library", + "@com_github_golang_protobuf//ptypes/struct:go_default_library", + "@com_github_golang_protobuf//ptypes/timestamp:go_default_library", + "@com_github_golang_protobuf//ptypes/wrappers:go_default_library", + "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", + "@com_google_googleapis//google/api:annotations_go_proto", + "@com_google_googleapis//google/rpc:status_go_proto", + ], + ) diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index 7af054b20f4f..b992717d273a 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -161,7 +161,7 @@ filegroup( ZIPKINAPI_BUILD_CONTENT = """ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") api_proto_library( @@ -173,8 
+173,9 @@ api_proto_library( visibility = ["//visibility:public"], ) -api_go_proto_library( - name = "zipkin", +go_proto_library( + name = "zipkin_go_proto", proto = ":zipkin", + visibility = ["//visibility:public"], ) """ diff --git a/api/envoy/admin/v2alpha/BUILD b/api/envoy/admin/v2alpha/BUILD index aa35aa7c8d7d..850eb0515865 100644 --- a/api/envoy/admin/v2alpha/BUILD +++ b/api/envoy/admin/v2alpha/BUILD @@ -1,7 +1,18 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2", + "//envoy/api/v2/auth", + "//envoy/api/v2/core", + "//envoy/config/bootstrap/v2:pkg", + "//envoy/service/tap/v2alpha:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "config_dump", srcs = ["config_dump.proto"], diff --git a/api/envoy/admin/v3alpha/BUILD b/api/envoy/admin/v3alpha/BUILD index 71b0790b69a9..9849282084a6 100644 --- a/api/envoy/admin/v3alpha/BUILD +++ b/api/envoy/admin/v3alpha/BUILD @@ -1,7 +1,18 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha", + "//envoy/api/v3alpha/auth", + "//envoy/api/v3alpha/core", + "//envoy/config/bootstrap/v3alpha:pkg", + "//envoy/service/tap/v3alpha:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "config_dump", srcs = ["config_dump.proto"], diff --git a/api/envoy/api/v2/BUILD b/api/envoy/api/v2/BUILD index b86ca2a788bf..72fcb5f56299 100644 --- a/api/envoy/api/v2/BUILD +++ b/api/envoy/api/v2/BUILD @@ -1,4 +1,4 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", 
"api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 @@ -16,6 +16,21 @@ package_group( ], ) +api_proto_package( + name = "v2", + has_services = True, + deps = [ + "//envoy/api/v2/auth", + "//envoy/api/v2/cluster", + "//envoy/api/v2/core", + "//envoy/api/v2/endpoint:pkg", + "//envoy/api/v2/listener:pkg", + "//envoy/api/v2/ratelimit:pkg", + "//envoy/api/v2/route:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "discovery", srcs = ["discovery.proto"], @@ -23,12 +38,6 @@ api_proto_library_internal( deps = ["//envoy/api/v2/core:base"], ) -api_go_proto_library( - name = "discovery", - proto = ":discovery", - deps = ["//envoy/api/v2/core:base_go_proto"], -) - api_proto_library_internal( name = "eds", srcs = ["eds.proto"], @@ -44,19 +53,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "eds", - proto = ":eds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:health_check_go_proto", - "//envoy/api/v2/endpoint:endpoint_go_proto", - "//envoy/type:percent_go_proto", - ], -) - api_proto_library_internal( name = "cds", srcs = ["cds.proto"], @@ -79,26 +75,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "cds", - proto = ":cds", - deps = [ - ":discovery_go_proto", - ":eds_go_grpc", - "//envoy/api/v2/auth:cert_go_proto", - "//envoy/api/v2/cluster:circuit_breaker_go_proto", - "//envoy/api/v2/cluster:filter_go_proto", - "//envoy/api/v2/cluster:outlier_detection_go_proto", - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:config_source_go_proto", - "//envoy/api/v2/core:health_check_go_proto", - "//envoy/api/v2/core:protocol_go_proto", - "//envoy/api/v2/endpoint:endpoint_go_proto", - "//envoy/type:percent_go_proto", - ], -) - api_proto_library_internal( name = "lds", srcs = ["lds.proto"], @@ -113,18 +89,6 @@ api_proto_library_internal( ], ) 
-api_go_grpc_library( - name = "lds", - proto = ":lds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/listener:listener_go_proto", - "//envoy/api/v2/listener:udp_listener_config_go_proto", - ], -) - api_proto_library_internal( name = "rds", srcs = ["rds.proto"], @@ -138,17 +102,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "rds", - proto = ":rds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:config_source_go_proto", - "//envoy/api/v2/route:route_go_proto", - ], -) - api_proto_library_internal( name = "srds", srcs = ["srds.proto"], @@ -160,12 +113,3 @@ api_proto_library_internal( "//envoy/api/v2/route", ], ) - -api_go_grpc_library( - name = "srds", - proto = ":srds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v2/core:base_go_proto", - ], -) diff --git a/api/envoy/api/v2/auth/BUILD b/api/envoy/api/v2/auth/BUILD index acc28aacff05..bb3951fb95aa 100644 --- a/api/envoy/api/v2/auth/BUILD +++ b/api/envoy/api/v2/auth/BUILD @@ -1,4 +1,4 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 @@ -15,6 +15,13 @@ package_group( ], ) +api_proto_package( + name = "auth", + deps = [ + "//envoy/api/v2/core", + ], +) + api_proto_library_internal( name = "cert", srcs = ["cert.proto"], @@ -24,12 +31,3 @@ api_proto_library_internal( "//envoy/api/v2/core:config_source", ], ) - -api_go_proto_library( - name = "cert", - proto = ":cert", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:config_source_go_proto", - ], -) diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto index 30db22c6d7a6..2a8267805161 100644 --- a/api/envoy/api/v2/auth/cert.proto +++ b/api/envoy/api/v2/auth/cert.proto @@ 
-5,7 +5,6 @@ package envoy.api.v2.auth; option java_outer_classname = "CertProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option go_package = "auth"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; diff --git a/api/envoy/api/v2/cluster/BUILD b/api/envoy/api/v2/cluster/BUILD index 5589905d859b..baf9a4bfdeb7 100644 --- a/api/envoy/api/v2/cluster/BUILD +++ b/api/envoy/api/v2/cluster/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + name = "cluster", + deps = [ + "//envoy/api/v2/core", + ], +) + api_proto_library_internal( name = "circuit_breaker", srcs = ["circuit_breaker.proto"], @@ -13,14 +20,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "circuit_breaker", - proto = ":circuit_breaker", - deps = [ - "//envoy/api/v2/core:base_go_proto", - ], -) - api_proto_library_internal( name = "outlier_detection", srcs = ["outlier_detection.proto"], @@ -29,11 +28,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "outlier_detection", - proto = ":outlier_detection", -) - api_proto_library_internal( name = "filter", srcs = ["filter.proto"], @@ -41,8 +35,3 @@ api_proto_library_internal( "//envoy/api/v2:__pkg__", ], ) - -api_go_proto_library( - name = "filter", - proto = ":filter", -) diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto index bc2bcf2548d2..5ae8cc3d1a01 100644 --- a/api/envoy/api/v2/cluster/circuit_breaker.proto +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -5,7 +5,6 @@ package envoy.api.v2.cluster; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; 
-option go_package = "cluster"; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; diff --git a/api/envoy/api/v2/core/BUILD b/api/envoy/api/v2/core/BUILD index b3d2be2301f1..01234d07b198 100644 --- a/api/envoy/api/v2/core/BUILD +++ b/api/envoy/api/v2/core/BUILD @@ -1,4 +1,4 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 @@ -16,6 +16,13 @@ package_group( ], ) +api_proto_package( + name = "core", + deps = [ + "//envoy/type", + ], +) + api_proto_library_internal( name = "address", srcs = ["address.proto"], @@ -25,12 +32,6 @@ api_proto_library_internal( deps = [":base"], ) -api_go_proto_library( - name = "address", - proto = ":address", - deps = [":base_go_proto"], -) - api_proto_library_internal( name = "base", srcs = ["base.proto"], @@ -43,15 +44,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "base", - proto = ":base", - deps = [ - ":http_uri_go_proto", - "//envoy/type:percent_go_proto", - ], -) - api_proto_library_internal( name = "health_check", srcs = ["health_check.proto"], @@ -64,15 +56,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "health_check", - proto = ":health_check", - deps = [ - ":base_go_proto", - "//envoy/type:range_go_proto", - ], -) - api_proto_library_internal( name = "config_source", srcs = ["config_source.proto"], @@ -85,20 +68,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "config_source", - proto = ":config_source", - deps = [ - ":base_go_proto", - ":grpc_service_go_proto", - ], -) - -api_go_proto_library( - name = "http_uri", - proto = ":http_uri", -) - api_proto_library_internal( name = "http_uri", srcs = ["http_uri.proto"], @@ -116,12 +85,6 @@ api_proto_library_internal( deps = [":base"], ) -api_go_proto_library( - 
name = "grpc_service", - proto = ":grpc_service", - deps = [":base_go_proto"], -) - api_proto_library_internal( name = "protocol", srcs = ["protocol.proto"], @@ -129,8 +92,3 @@ api_proto_library_internal( ":friends", ], ) - -api_go_proto_library( - name = "protocol", - proto = ":protocol", -) diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto index 2553fe04de27..0f73b83af03f 100644 --- a/api/envoy/api/v2/core/base.proto +++ b/api/envoy/api/v2/core/base.proto @@ -5,7 +5,6 @@ package envoy.api.v2.core; option java_outer_classname = "BaseProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.core"; -option go_package = "core"; import "envoy/api/v2/core/http_uri.proto"; diff --git a/api/envoy/api/v2/discovery.proto b/api/envoy/api/v2/discovery.proto index a3072a817aa4..5328e515bcbb 100644 --- a/api/envoy/api/v2/discovery.proto +++ b/api/envoy/api/v2/discovery.proto @@ -5,7 +5,6 @@ package envoy.api.v2; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; -option go_package = "v2"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/api/v2/endpoint/BUILD b/api/envoy/api/v2/endpoint/BUILD index 0dead0f57033..a12db37309ce 100644 --- a/api/envoy/api/v2/endpoint/BUILD +++ b/api/envoy/api/v2/endpoint/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/auth", + "//envoy/api/v2/core", + ], +) + api_proto_library_internal( name = "endpoint", srcs = ["endpoint.proto"], @@ -16,19 +23,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "endpoint", - proto = ":endpoint", - deps = [ - "//envoy/api/v2/auth:cert_go_proto", - 
"//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:config_source_go_proto", - "//envoy/api/v2/core:health_check_go_proto", - "//envoy/api/v2/core:protocol_go_proto", - ], -) - api_proto_library_internal( name = "load_report", srcs = ["load_report.proto"], @@ -38,12 +32,3 @@ api_proto_library_internal( "//envoy/api/v2/core:base", ], ) - -api_go_proto_library( - name = "load_report", - proto = ":load_report", - deps = [ - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - ], -) diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto index 7abb7ea5f3c9..6327af00ac99 100644 --- a/api/envoy/api/v2/endpoint/endpoint.proto +++ b/api/envoy/api/v2/endpoint/endpoint.proto @@ -5,7 +5,6 @@ package envoy.api.v2.endpoint; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; -option go_package = "endpoint"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/api/v2/listener/BUILD b/api/envoy/api/v2/listener/BUILD index e539c4b8c090..42c79fe45483 100644 --- a/api/envoy/api/v2/listener/BUILD +++ b/api/envoy/api/v2/listener/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/auth", + "//envoy/api/v2/core", + ], +) + api_proto_library_internal( name = "listener", srcs = ["listener.proto"], @@ -13,16 +20,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "listener", - proto = ":listener", - deps = [ - "//envoy/api/v2/auth:cert_go_proto", - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - ], -) - api_proto_library_internal( name 
= "udp_listener_config", srcs = ["udp_listener_config.proto"], @@ -31,11 +28,3 @@ api_proto_library_internal( "//envoy/api/v2/core:base", ], ) - -api_go_proto_library( - name = "udp_listener_config", - proto = ":udp_listener_config", - deps = [ - "//envoy/api/v2/core:base_go_proto", - ], -) diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto index 293a4d2da221..3b6cf74ab0a2 100644 --- a/api/envoy/api/v2/listener/listener.proto +++ b/api/envoy/api/v2/listener/listener.proto @@ -5,7 +5,6 @@ package envoy.api.v2.listener; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option go_package = "listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy::Api::V2::ListenerNS"; diff --git a/api/envoy/api/v2/listener/udp_listener_config.proto b/api/envoy/api/v2/listener/udp_listener_config.proto index f75383bab232..28d8233f5ff0 100644 --- a/api/envoy/api/v2/listener/udp_listener_config.proto +++ b/api/envoy/api/v2/listener/udp_listener_config.proto @@ -5,7 +5,6 @@ package envoy.api.v2.listener; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option go_package = "listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy::Api::V2::ListenerNS"; diff --git a/api/envoy/api/v2/ratelimit/BUILD b/api/envoy/api/v2/ratelimit/BUILD index 5f2a9201463d..234a3b20f16b 100644 --- a/api/envoy/api/v2/ratelimit/BUILD +++ b/api/envoy/api/v2/ratelimit/BUILD @@ -1,14 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "ratelimit", srcs = 
["ratelimit.proto"], visibility = ["//envoy/api/v2:friends"], ) - -api_go_proto_library( - name = "ratelimit", - proto = ":ratelimit", -) diff --git a/api/envoy/api/v2/ratelimit/ratelimit.proto b/api/envoy/api/v2/ratelimit/ratelimit.proto index 8ebec7182257..6f4cd6258283 100644 --- a/api/envoy/api/v2/ratelimit/ratelimit.proto +++ b/api/envoy/api/v2/ratelimit/ratelimit.proto @@ -5,7 +5,6 @@ package envoy.api.v2.ratelimit; option java_outer_classname = "RatelimitProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.ratelimit"; -option go_package = "ratelimit"; import "validate/validate.proto"; diff --git a/api/envoy/api/v2/route/BUILD b/api/envoy/api/v2/route/BUILD index 968ab1c67be2..163281ca35df 100644 --- a/api/envoy/api/v2/route/BUILD +++ b/api/envoy/api/v2/route/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/type", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "route", srcs = ["route.proto"], @@ -14,15 +22,3 @@ api_proto_library_internal( "//envoy/type/matcher:string", ], ) - -api_go_proto_library( - name = "route", - proto = ":route", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/type:percent_go_proto", - "//envoy/type:range_go_proto", - "//envoy/type/matcher:regex_go_proto", - "//envoy/type/matcher:string_go_proto", - ], -) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index e0fbaf0fd685..d396344a1ff2 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -5,7 +5,6 @@ package envoy.api.v2.route; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.route"; -option 
go_package = "route"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/api/v3alpha/BUILD b/api/envoy/api/v3alpha/BUILD index 0e2892e87e69..e61a715ab9de 100644 --- a/api/envoy/api/v3alpha/BUILD +++ b/api/envoy/api/v3alpha/BUILD @@ -1,4 +1,4 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 @@ -16,6 +16,21 @@ package_group( ], ) +api_proto_package( + name = "v3alpha", + has_services = True, + deps = [ + "//envoy/api/v3alpha/auth", + "//envoy/api/v3alpha/cluster", + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/endpoint:pkg", + "//envoy/api/v3alpha/listener:pkg", + "//envoy/api/v3alpha/ratelimit:pkg", + "//envoy/api/v3alpha/route:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "discovery", srcs = ["discovery.proto"], @@ -23,12 +38,6 @@ api_proto_library_internal( deps = ["//envoy/api/v3alpha/core:base"], ) -api_go_proto_library( - name = "discovery", - proto = ":discovery", - deps = ["//envoy/api/v3alpha/core:base_go_proto"], -) - api_proto_library_internal( name = "eds", srcs = ["eds.proto"], @@ -44,19 +53,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "eds", - proto = ":eds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:health_check_go_proto", - "//envoy/api/v3alpha/endpoint:endpoint_go_proto", - "//envoy/type:percent_go_proto", - ], -) - api_proto_library_internal( name = "cds", srcs = ["cds.proto"], @@ -79,26 +75,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "cds", - proto = ":cds", - deps = [ - ":discovery_go_proto", - ":eds_go_grpc", - "//envoy/api/v3alpha/auth:cert_go_proto", - 
"//envoy/api/v3alpha/cluster:circuit_breaker_go_proto", - "//envoy/api/v3alpha/cluster:filter_go_proto", - "//envoy/api/v3alpha/cluster:outlier_detection_go_proto", - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:config_source_go_proto", - "//envoy/api/v3alpha/core:health_check_go_proto", - "//envoy/api/v3alpha/core:protocol_go_proto", - "//envoy/api/v3alpha/endpoint:endpoint_go_proto", - "//envoy/type:percent_go_proto", - ], -) - api_proto_library_internal( name = "lds", srcs = ["lds.proto"], @@ -113,18 +89,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "lds", - proto = ":lds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/listener:listener_go_proto", - "//envoy/api/v3alpha/listener:udp_listener_config_go_proto", - ], -) - api_proto_library_internal( name = "rds", srcs = ["rds.proto"], @@ -138,17 +102,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "rds", - proto = ":rds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:config_source_go_proto", - "//envoy/api/v3alpha/route:route_go_proto", - ], -) - api_proto_library_internal( name = "srds", srcs = ["srds.proto"], @@ -160,12 +113,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/route", ], ) - -api_go_grpc_library( - name = "srds", - proto = ":srds", - deps = [ - ":discovery_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - ], -) diff --git a/api/envoy/api/v3alpha/auth/BUILD b/api/envoy/api/v3alpha/auth/BUILD index f206a35f97f2..6c47aff6e2a3 100644 --- a/api/envoy/api/v3alpha/auth/BUILD +++ b/api/envoy/api/v3alpha/auth/BUILD @@ -1,4 +1,4 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", 
"api_proto_package") licenses(["notice"]) # Apache 2 @@ -15,6 +15,13 @@ package_group( ], ) +api_proto_package( + name = "auth", + deps = [ + "//envoy/api/v3alpha/core", + ], +) + api_proto_library_internal( name = "cert", srcs = ["cert.proto"], @@ -24,12 +31,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/core:config_source", ], ) - -api_go_proto_library( - name = "cert", - proto = ":cert", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:config_source_go_proto", - ], -) diff --git a/api/envoy/api/v3alpha/auth/cert.proto b/api/envoy/api/v3alpha/auth/cert.proto index 925453074ac1..2be82a1c0496 100644 --- a/api/envoy/api/v3alpha/auth/cert.proto +++ b/api/envoy/api/v3alpha/auth/cert.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.auth; option java_outer_classname = "CertProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.auth"; -option go_package = "auth"; import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/core/config_source.proto"; diff --git a/api/envoy/api/v3alpha/cluster/BUILD b/api/envoy/api/v3alpha/cluster/BUILD index 942701221a37..ef01624057e0 100644 --- a/api/envoy/api/v3alpha/cluster/BUILD +++ b/api/envoy/api/v3alpha/cluster/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + name = "cluster", + deps = [ + "//envoy/api/v3alpha/core", + ], +) + api_proto_library_internal( name = "circuit_breaker", srcs = ["circuit_breaker.proto"], @@ -13,14 +20,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "circuit_breaker", - proto = ":circuit_breaker", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - ], -) - api_proto_library_internal( name = "outlier_detection", srcs = ["outlier_detection.proto"], @@ -29,11 +28,6 @@ 
api_proto_library_internal( ], ) -api_go_proto_library( - name = "outlier_detection", - proto = ":outlier_detection", -) - api_proto_library_internal( name = "filter", srcs = ["filter.proto"], @@ -41,8 +35,3 @@ api_proto_library_internal( "//envoy/api/v3alpha:__pkg__", ], ) - -api_go_proto_library( - name = "filter", - proto = ":filter", -) diff --git a/api/envoy/api/v3alpha/cluster/circuit_breaker.proto b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto index 39f4f77c5ddd..8a70008e49f5 100644 --- a/api/envoy/api/v3alpha/cluster/circuit_breaker.proto +++ b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.cluster; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster"; -option go_package = "cluster"; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; diff --git a/api/envoy/api/v3alpha/core/BUILD b/api/envoy/api/v3alpha/core/BUILD index cfc6bd83ca78..871c9fe0e838 100644 --- a/api/envoy/api/v3alpha/core/BUILD +++ b/api/envoy/api/v3alpha/core/BUILD @@ -1,4 +1,4 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 @@ -16,6 +16,13 @@ package_group( ], ) +api_proto_package( + name = "core", + deps = [ + "//envoy/type", + ], +) + api_proto_library_internal( name = "address", srcs = ["address.proto"], @@ -25,12 +32,6 @@ api_proto_library_internal( deps = [":base"], ) -api_go_proto_library( - name = "address", - proto = ":address", - deps = [":base_go_proto"], -) - api_proto_library_internal( name = "base", srcs = ["base.proto"], @@ -43,15 +44,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "base", - proto = ":base", - deps = [ - ":http_uri_go_proto", - 
"//envoy/type:percent_go_proto", - ], -) - api_proto_library_internal( name = "health_check", srcs = ["health_check.proto"], @@ -64,15 +56,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "health_check", - proto = ":health_check", - deps = [ - ":base_go_proto", - "//envoy/type:range_go_proto", - ], -) - api_proto_library_internal( name = "config_source", srcs = ["config_source.proto"], @@ -85,20 +68,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "config_source", - proto = ":config_source", - deps = [ - ":base_go_proto", - ":grpc_service_go_proto", - ], -) - -api_go_proto_library( - name = "http_uri", - proto = ":http_uri", -) - api_proto_library_internal( name = "http_uri", srcs = ["http_uri.proto"], @@ -116,12 +85,6 @@ api_proto_library_internal( deps = [":base"], ) -api_go_proto_library( - name = "grpc_service", - proto = ":grpc_service", - deps = [":base_go_proto"], -) - api_proto_library_internal( name = "protocol", srcs = ["protocol.proto"], @@ -129,8 +92,3 @@ api_proto_library_internal( ":friends", ], ) - -api_go_proto_library( - name = "protocol", - proto = ":protocol", -) diff --git a/api/envoy/api/v3alpha/core/base.proto b/api/envoy/api/v3alpha/core/base.proto index 0661d99ec546..cf39df887bce 100644 --- a/api/envoy/api/v3alpha/core/base.proto +++ b/api/envoy/api/v3alpha/core/base.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.core; option java_outer_classname = "BaseProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; -option go_package = "core"; import "envoy/api/v3alpha/core/http_uri.proto"; diff --git a/api/envoy/api/v3alpha/discovery.proto b/api/envoy/api/v3alpha/discovery.proto index 87433f0dca27..105b99888141 100644 --- a/api/envoy/api/v3alpha/discovery.proto +++ b/api/envoy/api/v3alpha/discovery.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; option java_package = 
"io.envoyproxy.envoy.api.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/api/v3alpha/endpoint/BUILD b/api/envoy/api/v3alpha/endpoint/BUILD index 1630438b13f6..733560514dbd 100644 --- a/api/envoy/api/v3alpha/endpoint/BUILD +++ b/api/envoy/api/v3alpha/endpoint/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/auth", + "//envoy/api/v3alpha/core", + ], +) + api_proto_library_internal( name = "endpoint", srcs = ["endpoint.proto"], @@ -16,19 +23,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "endpoint", - proto = ":endpoint", - deps = [ - "//envoy/api/v3alpha/auth:cert_go_proto", - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:config_source_go_proto", - "//envoy/api/v3alpha/core:health_check_go_proto", - "//envoy/api/v3alpha/core:protocol_go_proto", - ], -) - api_proto_library_internal( name = "load_report", srcs = ["load_report.proto"], @@ -38,12 +32,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/core:base", ], ) - -api_go_proto_library( - name = "load_report", - proto = ":load_report", - deps = [ - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - ], -) diff --git a/api/envoy/api/v3alpha/endpoint/endpoint.proto b/api/envoy/api/v3alpha/endpoint/endpoint.proto index 4bb1b57e8710..15357cdbac8b 100644 --- a/api/envoy/api/v3alpha/endpoint/endpoint.proto +++ b/api/envoy/api/v3alpha/endpoint/endpoint.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.endpoint; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.endpoint"; -option go_package = 
"endpoint"; import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/api/v3alpha/listener/BUILD b/api/envoy/api/v3alpha/listener/BUILD index 693ead54dde0..3ee071ca5c03 100644 --- a/api/envoy/api/v3alpha/listener/BUILD +++ b/api/envoy/api/v3alpha/listener/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/auth", + "//envoy/api/v3alpha/core", + ], +) + api_proto_library_internal( name = "listener", srcs = ["listener.proto"], @@ -13,16 +20,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "listener", - proto = ":listener", - deps = [ - "//envoy/api/v3alpha/auth:cert_go_proto", - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - ], -) - api_proto_library_internal( name = "udp_listener_config", srcs = ["udp_listener_config.proto"], @@ -31,11 +28,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/core:base", ], ) - -api_go_proto_library( - name = "udp_listener_config", - proto = ":udp_listener_config", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - ], -) diff --git a/api/envoy/api/v3alpha/listener/listener.proto b/api/envoy/api/v3alpha/listener/listener.proto index 2aa7146a822c..dc44c1454c02 100644 --- a/api/envoy/api/v3alpha/listener/listener.proto +++ b/api/envoy/api/v3alpha/listener/listener.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.listener; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.listener"; -option go_package = "listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy::Api::V2::ListenerNS"; diff --git 
a/api/envoy/api/v3alpha/listener/udp_listener_config.proto b/api/envoy/api/v3alpha/listener/udp_listener_config.proto index 763a08a93ad3..532028da9f73 100644 --- a/api/envoy/api/v3alpha/listener/udp_listener_config.proto +++ b/api/envoy/api/v3alpha/listener/udp_listener_config.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.listener; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.listener"; -option go_package = "listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy::Api::V2::ListenerNS"; diff --git a/api/envoy/api/v3alpha/ratelimit/BUILD b/api/envoy/api/v3alpha/ratelimit/BUILD index b08c1fc029a0..a99624b1c421 100644 --- a/api/envoy/api/v3alpha/ratelimit/BUILD +++ b/api/envoy/api/v3alpha/ratelimit/BUILD @@ -1,14 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "ratelimit", srcs = ["ratelimit.proto"], visibility = ["//envoy/api/v3alpha:friends"], ) - -api_go_proto_library( - name = "ratelimit", - proto = ":ratelimit", -) diff --git a/api/envoy/api/v3alpha/ratelimit/ratelimit.proto b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto index c10bfef83b98..9f2b67818a02 100644 --- a/api/envoy/api/v3alpha/ratelimit/ratelimit.proto +++ b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.ratelimit; option java_outer_classname = "RatelimitProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.ratelimit"; -option go_package = "ratelimit"; import "validate/validate.proto"; diff --git a/api/envoy/api/v3alpha/route/BUILD b/api/envoy/api/v3alpha/route/BUILD index 0b660893c5d4..cbed3ec01f4b 100644 --- 
a/api/envoy/api/v3alpha/route/BUILD +++ b/api/envoy/api/v3alpha/route/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/type", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "route", srcs = ["route.proto"], @@ -14,15 +22,3 @@ api_proto_library_internal( "//envoy/type/matcher:string", ], ) - -api_go_proto_library( - name = "route", - proto = ":route", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/type:percent_go_proto", - "//envoy/type:range_go_proto", - "//envoy/type/matcher:regex_go_proto", - "//envoy/type/matcher:string_go_proto", - ], -) diff --git a/api/envoy/api/v3alpha/route/route.proto b/api/envoy/api/v3alpha/route/route.proto index 963d94f1b022..e69e116b9b63 100644 --- a/api/envoy/api/v3alpha/route/route.proto +++ b/api/envoy/api/v3alpha/route/route.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.route; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.route"; -option go_package = "route"; option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/config/accesslog/v2/BUILD b/api/envoy/config/accesslog/v2/BUILD index 85ac228ccf13..22c48f795224 100644 --- a/api/envoy/config/accesslog/v2/BUILD +++ b/api/envoy/config/accesslog/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "als", srcs = ["als.proto"], @@ -14,9 +18,3 @@ 
api_proto_library_internal( name = "file", srcs = ["file.proto"], ) - -api_go_proto_library( - name = "als", - proto = ":als", - deps = ["//envoy/api/v2/core:grpc_service_go_proto"], -) diff --git a/api/envoy/config/accesslog/v2/als.proto b/api/envoy/config/accesslog/v2/als.proto index 9d83ebfcfb91..c02835dbbc56 100644 --- a/api/envoy/config/accesslog/v2/als.proto +++ b/api/envoy/config/accesslog/v2/als.proto @@ -5,7 +5,6 @@ package envoy.config.accesslog.v2; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -option go_package = "v2"; import "envoy/api/v2/core/grpc_service.proto"; diff --git a/api/envoy/config/accesslog/v2/file.proto b/api/envoy/config/accesslog/v2/file.proto index 48a1841a9614..b88529a3251d 100644 --- a/api/envoy/config/accesslog/v2/file.proto +++ b/api/envoy/config/accesslog/v2/file.proto @@ -5,7 +5,6 @@ package envoy.config.accesslog.v2; option java_outer_classname = "FileProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -option go_package = "v2"; import "validate/validate.proto"; import "google/protobuf/struct.proto"; diff --git a/api/envoy/config/accesslog/v3alpha/BUILD b/api/envoy/config/accesslog/v3alpha/BUILD index 4f5da73ee424..8409598da650 100644 --- a/api/envoy/config/accesslog/v3alpha/BUILD +++ b/api/envoy/config/accesslog/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "als", srcs = ["als.proto"], @@ -14,9 +18,3 @@ api_proto_library_internal( name = "file", srcs = ["file.proto"], ) - -api_go_proto_library( - name = "als", - proto = ":als", - deps = 
["//envoy/api/v3alpha/core:grpc_service_go_proto"], -) diff --git a/api/envoy/config/accesslog/v3alpha/als.proto b/api/envoy/config/accesslog/v3alpha/als.proto index a194d1449e4b..07ec724d10ef 100644 --- a/api/envoy/config/accesslog/v3alpha/als.proto +++ b/api/envoy/config/accesslog/v3alpha/als.proto @@ -5,7 +5,6 @@ package envoy.config.accesslog.v3alpha; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/grpc_service.proto"; diff --git a/api/envoy/config/accesslog/v3alpha/file.proto b/api/envoy/config/accesslog/v3alpha/file.proto index b07658bc9275..2f32da7bb64f 100644 --- a/api/envoy/config/accesslog/v3alpha/file.proto +++ b/api/envoy/config/accesslog/v3alpha/file.proto @@ -5,7 +5,6 @@ package envoy.config.accesslog.v3alpha; option java_outer_classname = "FileProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha"; -option go_package = "v2"; import "validate/validate.proto"; import "google/protobuf/struct.proto"; diff --git a/api/envoy/config/bootstrap/v2/BUILD b/api/envoy/config/bootstrap/v2/BUILD index 455365ab1e77..1f3a79104b60 100644 --- a/api/envoy/config/bootstrap/v2/BUILD +++ b/api/envoy/config/bootstrap/v2/BUILD @@ -1,7 +1,19 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2", + "//envoy/api/v2/auth", + "//envoy/api/v2/core", + "//envoy/config/metrics/v2:pkg", + "//envoy/config/overload/v2alpha:pkg", + "//envoy/config/ratelimit/v2:pkg", + "//envoy/config/trace/v2:pkg", + ], +) + api_proto_library_internal( name = "bootstrap", srcs = ["bootstrap.proto"], @@ -20,21 +32,3 @@ api_proto_library_internal( 
"//envoy/config/trace/v2:trace", ], ) - -api_go_proto_library( - name = "bootstrap", - proto = ":bootstrap", - deps = [ - "//envoy/api/v2:cds_go_grpc", - "//envoy/api/v2:lds_go_grpc", - "//envoy/api/v2/auth:cert_go_proto", - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:config_source_go_proto", - "//envoy/config/metrics/v2:metrics_service_go_proto", - "//envoy/config/metrics/v2:stats_go_proto", - "//envoy/config/overload/v2alpha:overload_go_proto", - "//envoy/config/ratelimit/v2:rls_go_grpc", - "//envoy/config/trace/v2:trace_go_proto", - ], -) diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index 9e3d9fe1cdb1..66f05aa78486 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -10,7 +10,6 @@ package envoy.config.bootstrap.v2; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; -option go_package = "v2"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/config/bootstrap/v3alpha/BUILD b/api/envoy/config/bootstrap/v3alpha/BUILD index d148021c741a..c88b982492ce 100644 --- a/api/envoy/config/bootstrap/v3alpha/BUILD +++ b/api/envoy/config/bootstrap/v3alpha/BUILD @@ -1,7 +1,19 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha", + "//envoy/api/v3alpha/auth", + "//envoy/api/v3alpha/core", + "//envoy/config/metrics/v3alpha:pkg", + "//envoy/config/overload/v3alpha:pkg", + "//envoy/config/ratelimit/v3alpha:pkg", + "//envoy/config/trace/v3alpha:pkg", + ], +) + api_proto_library_internal( name = "bootstrap", srcs = 
["bootstrap.proto"], @@ -20,21 +32,3 @@ api_proto_library_internal( "//envoy/config/trace/v3alpha:trace", ], ) - -api_go_proto_library( - name = "bootstrap", - proto = ":bootstrap", - deps = [ - "//envoy/api/v3alpha:cds_go_grpc", - "//envoy/api/v3alpha:lds_go_grpc", - "//envoy/api/v3alpha/auth:cert_go_proto", - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:config_source_go_proto", - "//envoy/config/metrics/v3alpha:metrics_service_go_proto", - "//envoy/config/metrics/v3alpha:stats_go_proto", - "//envoy/config/overload/v3alpha:overload_go_proto", - "//envoy/config/ratelimit/v3alpha:rls_go_grpc", - "//envoy/config/trace/v3alpha:trace_go_proto", - ], -) diff --git a/api/envoy/config/bootstrap/v3alpha/bootstrap.proto b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto index 57157a4ae3f3..0bf18ffa360d 100644 --- a/api/envoy/config/bootstrap/v3alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto @@ -10,7 +10,6 @@ package envoy.config.bootstrap.v3alpha; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.bootstrap.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD index b09f5c858ba9..669b6745ab74 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg"], +) + api_proto_library_internal( name = 
"cluster", srcs = ["cluster.proto"], diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto index d9ae85903264..c6d47807ce50 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto @@ -5,7 +5,6 @@ package envoy.config.cluster.dynamic_forward_proxy.v2alpha; option java_outer_classname = "DynamicForwardProxyClusterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; -option go_package = "v2alpha"; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD index 50d0aa2354eb..3c1d737802cb 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/common/dynamic_forward_proxy/v3alpha:pkg"], +) + api_proto_library_internal( name = "cluster", srcs = ["cluster.proto"], diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto index baed68d3b1ac..6bc7bdd4c551 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto @@ -5,7 +5,6 @@ package envoy.config.cluster.dynamic_forward_proxy.v3alpha; option java_outer_classname = "DynamicForwardProxyClusterProto"; option java_multiple_files = true; option java_package = 
"io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v3alpha"; -option go_package = "v2alpha"; import "envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto"; diff --git a/api/envoy/config/cluster/redis/BUILD b/api/envoy/config/cluster/redis/BUILD index 42e2d408e358..760ae606c05d 100644 --- a/api/envoy/config/cluster/redis/BUILD +++ b/api/envoy/config/cluster/redis/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "redis_cluster", srcs = ["redis_cluster.proto"], diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto index 2644288c40d2..abe2c857532d 100644 --- a/api/envoy/config/cluster/redis/redis_cluster.proto +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -5,7 +5,6 @@ package envoy.config.cluster.redis; option java_outer_classname = "RedisClusterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.cluster.redis"; -option go_package = "v2"; import "google/protobuf/duration.proto"; diff --git a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD index 53095826454d..312ae36b3762 100644 --- a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD +++ b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2"], +) + api_proto_library_internal( name = "dns_cache", srcs = ["dns_cache.proto"], diff --git 
a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD index bdd23e86de9f..e1853725da14 100644 --- a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD +++ b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha"], +) + api_proto_library_internal( name = "dns_cache", srcs = ["dns_cache.proto"], diff --git a/api/envoy/config/common/tap/v2alpha/BUILD b/api/envoy/config/common/tap/v2alpha/BUILD index 863ba519d128..898773297b51 100644 --- a/api/envoy/config/common/tap/v2alpha/BUILD +++ b/api/envoy/config/common/tap/v2alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/service/tap/v2alpha:pkg", + ], +) + api_proto_library_internal( name = "common", srcs = ["common.proto"], diff --git a/api/envoy/config/common/tap/v3alpha/BUILD b/api/envoy/config/common/tap/v3alpha/BUILD index 673a602800af..55147b12ba3d 100644 --- a/api/envoy/config/common/tap/v3alpha/BUILD +++ b/api/envoy/config/common/tap/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/service/tap/v3alpha:pkg", + ], +) + api_proto_library_internal( name = "common", srcs = ["common.proto"], diff --git a/api/envoy/config/filter/accesslog/v2/BUILD 
b/api/envoy/config/filter/accesslog/v2/BUILD index fdbf376af177..d9b740921357 100644 --- a/api/envoy/config/filter/accesslog/v2/BUILD +++ b/api/envoy/config/filter/accesslog/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/api/v2/route:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "accesslog", srcs = ["accesslog.proto"], @@ -16,13 +24,3 @@ api_proto_library_internal( "//envoy/type:percent", ], ) - -api_go_proto_library( - name = "accesslog", - proto = ":accesslog", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/route:route_go_proto", - "//envoy/type:percent_go_proto", - ], -) diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto index 76fc4baf80c7..d777708175b5 100644 --- a/api/envoy/config/filter/accesslog/v2/accesslog.proto +++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto @@ -5,7 +5,6 @@ package envoy.config.filter.accesslog.v2; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2"; -option go_package = "v2"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/route/route.proto"; diff --git a/api/envoy/config/filter/accesslog/v3alpha/BUILD b/api/envoy/config/filter/accesslog/v3alpha/BUILD index 3f241bc5e10b..454a1ab4a135 100644 --- a/api/envoy/config/filter/accesslog/v3alpha/BUILD +++ b/api/envoy/config/filter/accesslog/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") 
licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/route:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "accesslog", srcs = ["accesslog.proto"], @@ -16,13 +24,3 @@ api_proto_library_internal( "//envoy/type:percent", ], ) - -api_go_proto_library( - name = "accesslog", - proto = ":accesslog", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/route:route_go_proto", - "//envoy/type:percent_go_proto", - ], -) diff --git a/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto index 381e7bdf9a87..b7beef0bd974 100644 --- a/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto +++ b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto @@ -5,7 +5,6 @@ package envoy.config.filter.accesslog.v3alpha; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/route/route.proto"; diff --git a/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD b/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD index 51c69c0d5b20..68bd8c126b80 100644 --- a/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD +++ b/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "router", srcs = ["router.proto"], diff --git a/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto index 37a5542a17bb..4e65f14e0ea9 100644 --- a/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto +++ 
b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto @@ -5,7 +5,6 @@ package envoy.config.filter.dubbo.router.v2alpha1; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1"; -option go_package = "v2alpha1"; // [#protodoc-title: Router] // Dubbo router :ref:`configuration overview `. diff --git a/api/envoy/config/filter/fault/v2/BUILD b/api/envoy/config/filter/fault/v2/BUILD index 35419a9902b3..78687f4e4da4 100644 --- a/api/envoy/config/filter/fault/v2/BUILD +++ b/api/envoy/config/filter/fault/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/type"], +) + api_proto_library_internal( name = "fault", srcs = ["fault.proto"], diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto index f27f9d446267..2298ecf4a1c0 100644 --- a/api/envoy/config/filter/fault/v2/fault.proto +++ b/api/envoy/config/filter/fault/v2/fault.proto @@ -5,7 +5,6 @@ package envoy.config.filter.fault.v2; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.fault.v2"; -option go_package = "v2"; import "envoy/type/percent.proto"; diff --git a/api/envoy/config/filter/fault/v3alpha/BUILD b/api/envoy/config/filter/fault/v3alpha/BUILD index 22e3bec56ca3..61bc8dc6bc5e 100644 --- a/api/envoy/config/filter/fault/v3alpha/BUILD +++ b/api/envoy/config/filter/fault/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/type"], +) + 
api_proto_library_internal( name = "fault", srcs = ["fault.proto"], diff --git a/api/envoy/config/filter/fault/v3alpha/fault.proto b/api/envoy/config/filter/fault/v3alpha/fault.proto index b54a063e7665..054eb8470f3a 100644 --- a/api/envoy/config/filter/fault/v3alpha/fault.proto +++ b/api/envoy/config/filter/fault/v3alpha/fault.proto @@ -5,7 +5,6 @@ package envoy.config.filter.fault.v3alpha; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.fault.v3alpha"; -option go_package = "v2"; import "envoy/type/percent.proto"; diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD index 948ceec2223d..b58f88c787ba 100644 --- a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD +++ b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "adaptive_concurrency", srcs = ["adaptive_concurrency.proto"], diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto index ff19657260e8..303b681471f4 100644 --- a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto +++ b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.adaptive_concurrency.v2alpha; option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha"; option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; -option 
go_package = "v2alpha"; message AdaptiveConcurrency { } diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD index aa2b0634739c..f9813a6a0829 100644 --- a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD +++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "adaptive_concurrency", srcs = ["adaptive_concurrency.proto"], diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto index 17bac55800ce..3d57196f9db7 100644 --- a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto +++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.adaptive_concurrency.v3alpha; option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v3alpha"; option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; -option go_package = "v2alpha"; message AdaptiveConcurrency { } diff --git a/api/envoy/config/filter/http/buffer/v2/BUILD b/api/envoy/config/filter/http/buffer/v2/BUILD index e59429af9ace..039ebb63e6d2 100644 --- a/api/envoy/config/filter/http/buffer/v2/BUILD +++ b/api/envoy/config/filter/http/buffer/v2/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + 
api_proto_library_internal( name = "buffer", srcs = ["buffer.proto"], diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto index a203d9d98cc2..92780adad69b 100644 --- a/api/envoy/config/filter/http/buffer/v2/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.buffer.v2; option java_outer_classname = "BufferProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2"; -option go_package = "v2"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/config/filter/http/buffer/v3alpha/BUILD b/api/envoy/config/filter/http/buffer/v3alpha/BUILD index e59429af9ace..039ebb63e6d2 100644 --- a/api/envoy/config/filter/http/buffer/v3alpha/BUILD +++ b/api/envoy/config/filter/http/buffer/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "buffer", srcs = ["buffer.proto"], diff --git a/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto b/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto index a948493b2450..25530c5a58b4 100644 --- a/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.buffer.v3alpha; option java_outer_classname = "BufferProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v3alpha"; -option go_package = "v2"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/config/filter/http/csrf/v2/BUILD b/api/envoy/config/filter/http/csrf/v2/BUILD index 0d58b1ef6d43..af3a87b07c05 100644 --- a/api/envoy/config/filter/http/csrf/v2/BUILD +++ 
b/api/envoy/config/filter/http/csrf/v2/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "csrf", srcs = ["csrf.proto"], diff --git a/api/envoy/config/filter/http/csrf/v2/csrf.proto b/api/envoy/config/filter/http/csrf/v2/csrf.proto index 525ed118a71b..d4c35291e63d 100644 --- a/api/envoy/config/filter/http/csrf/v2/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v2/csrf.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.csrf.v2; option java_outer_classname = "CsrfPolicyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; -option go_package = "v2"; import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/string.proto"; diff --git a/api/envoy/config/filter/http/csrf/v3alpha/BUILD b/api/envoy/config/filter/http/csrf/v3alpha/BUILD index b5da684c54b4..676559830c1f 100644 --- a/api/envoy/config/filter/http/csrf/v3alpha/BUILD +++ b/api/envoy/config/filter/http/csrf/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "csrf", srcs = ["csrf.proto"], diff --git a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto index 5eaa14c567d7..8fe68d5ea2ef 100644 --- a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.csrf.v3alpha; 
option java_outer_classname = "CsrfPolicyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/base.proto"; import "envoy/type/matcher/string.proto"; diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD index 4fd1d84399fa..15d184377ef7 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg"], +) + api_proto_library_internal( name = "dynamic_forward_proxy", srcs = ["dynamic_forward_proxy.proto"], diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index 631363a6d95e..c315ddb46515 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.dynamic_forward_proxy.v2alpha; option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha"; -option go_package = "v2alpha"; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD index f09166ba8129..c06227674a08 100644 
--- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/common/dynamic_forward_proxy/v3alpha:pkg"], +) + api_proto_library_internal( name = "dynamic_forward_proxy", srcs = ["dynamic_forward_proxy.proto"], diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto index 0fab44d63db5..f60aaae89e2e 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.dynamic_forward_proxy.v3alpha; option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v3alpha"; -option go_package = "v2alpha"; import "envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto"; diff --git a/api/envoy/config/filter/http/ext_authz/v2/BUILD b/api/envoy/config/filter/http/ext_authz/v2/BUILD index b1d02437df04..10187f48bd2c 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/BUILD +++ b/api/envoy/config/filter/http/ext_authz/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/type", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "ext_authz", 
srcs = ["ext_authz.proto"], diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index de105eff3c80..e2922348b6a7 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.ext_authz.v2; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; -option go_package = "v2"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/grpc_service.proto"; diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD b/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD index 39f9e44bb382..cb0d25a3eebf 100644 --- a/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD +++ b/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/type", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "ext_authz", srcs = ["ext_authz.proto"], diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto index af6f3c4866e5..8cc48e36ffb0 100644 --- a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.ext_authz.v3alpha; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/base.proto"; import 
"envoy/api/v3alpha/core/grpc_service.proto"; diff --git a/api/envoy/config/filter/http/fault/v2/BUILD b/api/envoy/config/filter/http/fault/v2/BUILD index e561e88196b9..b169a0904860 100644 --- a/api/envoy/config/filter/http/fault/v2/BUILD +++ b/api/envoy/config/filter/http/fault/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/route:pkg", + "//envoy/config/filter/fault/v2:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "fault", srcs = ["fault.proto"], diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto index 51ee24ac91a8..8256690837fc 100644 --- a/api/envoy/config/filter/http/fault/v2/fault.proto +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.fault.v2; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2"; -option go_package = "v2"; import "envoy/api/v2/route/route.proto"; import "envoy/config/filter/fault/v2/fault.proto"; diff --git a/api/envoy/config/filter/http/fault/v3alpha/BUILD b/api/envoy/config/filter/http/fault/v3alpha/BUILD index 1fd5632c088d..508e2d3c92d2 100644 --- a/api/envoy/config/filter/http/fault/v3alpha/BUILD +++ b/api/envoy/config/filter/http/fault/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/route:pkg", + "//envoy/config/filter/fault/v3alpha:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "fault", srcs = 
["fault.proto"], diff --git a/api/envoy/config/filter/http/fault/v3alpha/fault.proto b/api/envoy/config/filter/http/fault/v3alpha/fault.proto index f654ec17f617..2189e4a4c131 100644 --- a/api/envoy/config/filter/http/fault/v3alpha/fault.proto +++ b/api/envoy/config/filter/http/fault/v3alpha/fault.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.fault.v3alpha; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/route/route.proto"; import "envoy/config/filter/fault/v3alpha/fault.proto"; diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD index 7c1deb713c34..a88ba2443cad 100644 --- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD +++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library( name = "config", srcs = ["config.proto"], diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto index 0c33b6d077a1..b3b1fde5e1a2 100644 --- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; -option go_package = "v2"; import "validate/validate.proto"; diff 
--git a/api/envoy/config/filter/http/gzip/v2/BUILD b/api/envoy/config/filter/http/gzip/v2/BUILD index e34d73c51c21..a3f4b0af2a44 100644 --- a/api/envoy/config/filter/http/gzip/v2/BUILD +++ b/api/envoy/config/filter/http/gzip/v2/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "gzip", srcs = ["gzip.proto"], diff --git a/api/envoy/config/filter/http/gzip/v2/gzip.proto b/api/envoy/config/filter/http/gzip/v2/gzip.proto index fb6b8878e652..ec512c495dc8 100644 --- a/api/envoy/config/filter/http/gzip/v2/gzip.proto +++ b/api/envoy/config/filter/http/gzip/v2/gzip.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.gzip.v2; option java_outer_classname = "GzipProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2"; -option go_package = "v2"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/config/filter/http/gzip/v3alpha/BUILD b/api/envoy/config/filter/http/gzip/v3alpha/BUILD index e34d73c51c21..a3f4b0af2a44 100644 --- a/api/envoy/config/filter/http/gzip/v3alpha/BUILD +++ b/api/envoy/config/filter/http/gzip/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "gzip", srcs = ["gzip.proto"], diff --git a/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto b/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto index 5b5c6d6d1df7..d7afb89116c2 100644 --- a/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto +++ b/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.gzip.v3alpha; 
option java_outer_classname = "GzipProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v3alpha"; -option go_package = "v2"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/BUILD b/api/envoy/config/filter/http/header_to_metadata/v2/BUILD index 3f8503acbe65..cfd34fcf2b08 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v2/BUILD +++ b/api/envoy/config/filter/http/header_to_metadata/v2/BUILD @@ -1,9 +1,10 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "header_to_metadata", srcs = ["header_to_metadata.proto"], - deps = [], ) diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto index 5e70bbfce46f..345c5225edf1 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.header_to_metadata.v2; option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2"; -option go_package = "v2"; import "validate/validate.proto"; diff --git a/api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD b/api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD index 3f8503acbe65..cfd34fcf2b08 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD +++ b/api/envoy/config/filter/http/header_to_metadata/v3alpha/BUILD @@ -1,9 +1,10 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") 
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "header_to_metadata", srcs = ["header_to_metadata.proto"], - deps = [], ) diff --git a/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto index 927574a5a721..c3811a00577a 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto +++ b/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.header_to_metadata.v3alpha; option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v3alpha"; -option go_package = "v2"; import "validate/validate.proto"; diff --git a/api/envoy/config/filter/http/health_check/v2/BUILD b/api/envoy/config/filter/http/health_check/v2/BUILD index 9dc0af2df16f..8a995f1694af 100644 --- a/api/envoy/config/filter/http/health_check/v2/BUILD +++ b/api/envoy/config/filter/http/health_check/v2/BUILD @@ -1,21 +1,19 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_library_internal( - name = "health_check", - srcs = ["health_check.proto"], +api_proto_package( deps = [ - "//envoy/api/v2/route", - "//envoy/type:percent", + "//envoy/api/v2/route:pkg", + "//envoy/type", ], ) -api_go_proto_library( +api_proto_library_internal( name = "health_check", - proto = ":health_check", + srcs = ["health_check.proto"], deps = [ - "//envoy/api/v2/route:route_go_proto", - "//envoy/type:percent_go_proto", + "//envoy/api/v2/route", + "//envoy/type:percent", ], ) diff 
--git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index bc8433732d72..2aa6d4191596 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.health_check.v2; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; -option go_package = "v2"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/config/filter/http/health_check/v3alpha/BUILD b/api/envoy/config/filter/http/health_check/v3alpha/BUILD index 89e6eb3af702..b583685750da 100644 --- a/api/envoy/config/filter/http/health_check/v3alpha/BUILD +++ b/api/envoy/config/filter/http/health_check/v3alpha/BUILD @@ -1,21 +1,19 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_library_internal( - name = "health_check", - srcs = ["health_check.proto"], +api_proto_package( deps = [ - "//envoy/api/v3alpha/route", - "//envoy/type:percent", + "//envoy/api/v3alpha/route:pkg", + "//envoy/type", ], ) -api_go_proto_library( +api_proto_library_internal( name = "health_check", - proto = ":health_check", + srcs = ["health_check.proto"], deps = [ - "//envoy/api/v3alpha/route:route_go_proto", - "//envoy/type:percent_go_proto", + "//envoy/api/v3alpha/route", + "//envoy/type:percent", ], ) diff --git a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto index 31fcdfffaa80..ecbb8e507851 100644 --- a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto +++ 
b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.health_check.v3alpha; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v3alpha"; -option go_package = "v2"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/config/filter/http/ip_tagging/v2/BUILD b/api/envoy/config/filter/http/ip_tagging/v2/BUILD index 4c7001972e25..b318ae58f381 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/BUILD +++ b/api/envoy/config/filter/http/ip_tagging/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "ip_tagging", srcs = ["ip_tagging.proto"], diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto index 4f5da60150f3..92ec469c62ad 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.ip_tagging.v2; option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2"; -option go_package = "v2"; import "envoy/api/v2/core/address.proto"; diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD b/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD index 5b34fcd9c458..a05f0fd96bb0 100644 --- a/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD +++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", 
"api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "ip_tagging", srcs = ["ip_tagging.proto"], diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto index e305800a5fc8..de7871d9e701 100644 --- a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.ip_tagging.v3alpha; option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/address.proto"; diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD index e48aa582676c..80b4345f6151 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD @@ -1,6 +1,13 @@ licenses(["notice"]) # Apache 2 -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/api/v2/route:pkg", + ], +) api_proto_library_internal( name = "jwt_authn", @@ -11,13 +18,3 @@ api_proto_library_internal( "//envoy/api/v2/route", ], ) - -api_go_proto_library( - name = "jwt_authn", - proto = ":jwt_authn", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:http_uri_go_proto", - "//envoy/api/v2/route:route_go_proto", - ], -) diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD 
b/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD index 2970da93f467..ea5d0d17b16a 100644 --- a/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD +++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD @@ -1,6 +1,13 @@ licenses(["notice"]) # Apache 2 -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/route:pkg", + ], +) api_proto_library_internal( name = "jwt_authn", @@ -11,13 +18,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/route", ], ) - -api_go_proto_library( - name = "jwt_authn", - proto = ":jwt_authn", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:http_uri_go_proto", - "//envoy/api/v3alpha/route:route_go_proto", - ], -) diff --git a/api/envoy/config/filter/http/lua/v2/BUILD b/api/envoy/config/filter/http/lua/v2/BUILD index 6daf0c82f174..7aaf74617c96 100644 --- a/api/envoy/config/filter/http/lua/v2/BUILD +++ b/api/envoy/config/filter/http/lua/v2/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "lua", srcs = ["lua.proto"], diff --git a/api/envoy/config/filter/http/lua/v2/lua.proto b/api/envoy/config/filter/http/lua/v2/lua.proto index f29bcdbe89ef..6fc7fabc6be3 100644 --- a/api/envoy/config/filter/http/lua/v2/lua.proto +++ b/api/envoy/config/filter/http/lua/v2/lua.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.lua.v2; option java_outer_classname = "LuaProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2"; -option go_package = "v2"; import "validate/validate.proto"; diff 
--git a/api/envoy/config/filter/http/lua/v3alpha/BUILD b/api/envoy/config/filter/http/lua/v3alpha/BUILD index 6daf0c82f174..7aaf74617c96 100644 --- a/api/envoy/config/filter/http/lua/v3alpha/BUILD +++ b/api/envoy/config/filter/http/lua/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "lua", srcs = ["lua.proto"], diff --git a/api/envoy/config/filter/http/lua/v3alpha/lua.proto b/api/envoy/config/filter/http/lua/v3alpha/lua.proto index ff586ca2429e..934a592678a4 100644 --- a/api/envoy/config/filter/http/lua/v3alpha/lua.proto +++ b/api/envoy/config/filter/http/lua/v3alpha/lua.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.lua.v3alpha; option java_outer_classname = "LuaProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v3alpha"; -option go_package = "v2"; import "validate/validate.proto"; diff --git a/api/envoy/config/filter/http/original_src/v2alpha1/BUILD b/api/envoy/config/filter/http/original_src/v2alpha1/BUILD index e064545b21cd..a7435bb55cfc 100644 --- a/api/envoy/config/filter/http/original_src/v2alpha1/BUILD +++ b/api/envoy/config/filter/http/original_src/v2alpha1/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "original_src", srcs = ["original_src.proto"], diff --git a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto index 32f37a8c48f0..5c09b860fc5c 100644 --- 
a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto @@ -6,8 +6,6 @@ option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1"; -option go_package = "v2alpha1"; - import "validate/validate.proto"; // [#protodoc-title: Original Src Filter] diff --git a/api/envoy/config/filter/http/rate_limit/v2/BUILD b/api/envoy/config/filter/http/rate_limit/v2/BUILD index d8fb8e72ffec..4a6d451da981 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/BUILD +++ b/api/envoy/config/filter/http/rate_limit/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/ratelimit/v2:pkg"], +) + api_proto_library_internal( name = "rate_limit", srcs = ["rate_limit.proto"], diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto index 9d93e4a255bd..1ad3c4c36d01 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.rate_limit.v2; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2"; -option go_package = "v2"; import "envoy/config/ratelimit/v2/rls.proto"; diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD b/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD index e131d3a92263..7060f7e9ce38 100644 --- a/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD +++ b/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD @@ -1,7 +1,11 @@ 
-load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/ratelimit/v3alpha:pkg"], +) + api_proto_library_internal( name = "rate_limit", srcs = ["rate_limit.proto"], diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto index 69e8d389bc4b..427d22a6c1c6 100644 --- a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.rate_limit.v3alpha; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v3alpha"; -option go_package = "v2"; import "envoy/config/ratelimit/v3alpha/rls.proto"; diff --git a/api/envoy/config/filter/http/rbac/v2/BUILD b/api/envoy/config/filter/http/rbac/v2/BUILD index 6182fe26748a..ca9aa2ca410c 100644 --- a/api/envoy/config/filter/http/rbac/v2/BUILD +++ b/api/envoy/config/filter/http/rbac/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/rbac/v2:pkg"], +) + api_proto_library_internal( name = "rbac", srcs = ["rbac.proto"], diff --git a/api/envoy/config/filter/http/rbac/v2/rbac.proto b/api/envoy/config/filter/http/rbac/v2/rbac.proto index 0a75d9590fa5..611cdc6ccbed 100644 --- a/api/envoy/config/filter/http/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v2/rbac.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.rbac.v2; option java_outer_classname = "RbacProto"; option java_multiple_files = true; 
option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2"; -option go_package = "v2"; import "envoy/config/rbac/v2/rbac.proto"; diff --git a/api/envoy/config/filter/http/rbac/v3alpha/BUILD b/api/envoy/config/filter/http/rbac/v3alpha/BUILD index a6ee42cf7893..1e4d51b50453 100644 --- a/api/envoy/config/filter/http/rbac/v3alpha/BUILD +++ b/api/envoy/config/filter/http/rbac/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/rbac/v3alpha:pkg"], +) + api_proto_library_internal( name = "rbac", srcs = ["rbac.proto"], diff --git a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto index 8ec8989652aa..47160ffa9e3c 100644 --- a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.rbac.v3alpha; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v3alpha"; -option go_package = "v2"; import "envoy/config/rbac/v3alpha/rbac.proto"; diff --git a/api/envoy/config/filter/http/router/v2/BUILD b/api/envoy/config/filter/http/router/v2/BUILD index 7a80299a2cf7..9ddaf54b2845 100644 --- a/api/envoy/config/filter/http/router/v2/BUILD +++ b/api/envoy/config/filter/http/router/v2/BUILD @@ -1,15 +1,13 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/filter/accesslog/v2:pkg"], +) + api_proto_library_internal( name = "router", srcs = ["router.proto"], deps = 
["//envoy/config/filter/accesslog/v2:accesslog"], ) - -api_go_proto_library( - name = "router", - proto = ":router", - deps = ["//envoy/config/filter/accesslog/v2:accesslog_go_proto"], -) diff --git a/api/envoy/config/filter/http/router/v2/router.proto b/api/envoy/config/filter/http/router/v2/router.proto index e77675673357..fd0cadec9631 100644 --- a/api/envoy/config/filter/http/router/v2/router.proto +++ b/api/envoy/config/filter/http/router/v2/router.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.router.v2; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2"; -option go_package = "v2"; import "envoy/config/filter/accesslog/v2/accesslog.proto"; diff --git a/api/envoy/config/filter/http/router/v3alpha/BUILD b/api/envoy/config/filter/http/router/v3alpha/BUILD index f0b6c100d445..d68a0ac2c2ee 100644 --- a/api/envoy/config/filter/http/router/v3alpha/BUILD +++ b/api/envoy/config/filter/http/router/v3alpha/BUILD @@ -1,15 +1,13 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/filter/accesslog/v3alpha:pkg"], +) + api_proto_library_internal( name = "router", srcs = ["router.proto"], deps = ["//envoy/config/filter/accesslog/v3alpha:accesslog"], ) - -api_go_proto_library( - name = "router", - proto = ":router", - deps = ["//envoy/config/filter/accesslog/v3alpha:accesslog_go_proto"], -) diff --git a/api/envoy/config/filter/http/router/v3alpha/router.proto b/api/envoy/config/filter/http/router/v3alpha/router.proto index 92efe315c6ff..a4ceae7dc1f7 100644 --- a/api/envoy/config/filter/http/router/v3alpha/router.proto +++ b/api/envoy/config/filter/http/router/v3alpha/router.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.router.v3alpha; 
option java_outer_classname = "RouterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.router.v3alpha"; -option go_package = "v2"; import "envoy/config/filter/accesslog/v3alpha/accesslog.proto"; diff --git a/api/envoy/config/filter/http/squash/v2/BUILD b/api/envoy/config/filter/http/squash/v2/BUILD index 86bd4e8cfb65..2a0c1c8e30fa 100644 --- a/api/envoy/config/filter/http/squash/v2/BUILD +++ b/api/envoy/config/filter/http/squash/v2/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "squash", srcs = ["squash.proto"], diff --git a/api/envoy/config/filter/http/squash/v2/squash.proto b/api/envoy/config/filter/http/squash/v2/squash.proto index 006af4380d41..2f3a2e21cdd2 100644 --- a/api/envoy/config/filter/http/squash/v2/squash.proto +++ b/api/envoy/config/filter/http/squash/v2/squash.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.squash.v2; option java_outer_classname = "SquashProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2"; -option go_package = "v2"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; diff --git a/api/envoy/config/filter/http/squash/v3alpha/BUILD b/api/envoy/config/filter/http/squash/v3alpha/BUILD index 86bd4e8cfb65..2a0c1c8e30fa 100644 --- a/api/envoy/config/filter/http/squash/v3alpha/BUILD +++ b/api/envoy/config/filter/http/squash/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "squash", srcs = ["squash.proto"], 
diff --git a/api/envoy/config/filter/http/squash/v3alpha/squash.proto b/api/envoy/config/filter/http/squash/v3alpha/squash.proto index 43a62af98c1c..24236def872c 100644 --- a/api/envoy/config/filter/http/squash/v3alpha/squash.proto +++ b/api/envoy/config/filter/http/squash/v3alpha/squash.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.squash.v3alpha; option java_outer_classname = "SquashProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v3alpha"; -option go_package = "v2"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; diff --git a/api/envoy/config/filter/http/tap/v2alpha/BUILD b/api/envoy/config/filter/http/tap/v2alpha/BUILD index f84625a7da73..0949dad0c6ac 100644 --- a/api/envoy/config/filter/http/tap/v2alpha/BUILD +++ b/api/envoy/config/filter/http/tap/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/common/tap/v2alpha:pkg"], +) + api_proto_library_internal( name = "tap", srcs = ["tap.proto"], diff --git a/api/envoy/config/filter/http/tap/v3alpha/BUILD b/api/envoy/config/filter/http/tap/v3alpha/BUILD index a2af23059be6..0535cfbc21ae 100644 --- a/api/envoy/config/filter/http/tap/v3alpha/BUILD +++ b/api/envoy/config/filter/http/tap/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/common/tap/v3alpha:pkg"], +) + api_proto_library_internal( name = "tap", srcs = ["tap.proto"], diff --git a/api/envoy/config/filter/http/transcoder/v2/BUILD b/api/envoy/config/filter/http/transcoder/v2/BUILD index 
c1a845bcd96e..33a99a23a061 100644 --- a/api/envoy/config/filter/http/transcoder/v2/BUILD +++ b/api/envoy/config/filter/http/transcoder/v2/BUILD @@ -1,13 +1,10 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "transcoder", srcs = ["transcoder.proto"], ) - -api_go_proto_library( - name = "transcoder", - proto = ":transcoder", -) diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto index 14f54124508d..85f837fa794f 100644 --- a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.transcoder.v2; option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2"; -option go_package = "v2"; import "validate/validate.proto"; diff --git a/api/envoy/config/filter/http/transcoder/v3alpha/BUILD b/api/envoy/config/filter/http/transcoder/v3alpha/BUILD index c1a845bcd96e..33a99a23a061 100644 --- a/api/envoy/config/filter/http/transcoder/v3alpha/BUILD +++ b/api/envoy/config/filter/http/transcoder/v3alpha/BUILD @@ -1,13 +1,10 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "transcoder", srcs = ["transcoder.proto"], ) - -api_go_proto_library( - name = "transcoder", - proto = ":transcoder", -) diff --git a/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto 
b/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto index 078ac52473ac..630ad245a8a6 100644 --- a/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto @@ -5,7 +5,6 @@ package envoy.config.filter.http.transcoder.v3alpha; option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v3alpha"; -option go_package = "v2"; import "validate/validate.proto"; diff --git a/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD b/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD index e064545b21cd..a7435bb55cfc 100644 --- a/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD +++ b/api/envoy/config/filter/listener/original_src/v2alpha1/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "original_src", srcs = ["original_src.proto"], diff --git a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto index aa38e1d3df0a..11f55a787fdf 100644 --- a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto @@ -6,8 +6,6 @@ option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1"; -option go_package = "v2alpha1"; - import "validate/validate.proto"; // [#protodoc-title: Original Src Filter] diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD index 
dad2d7fea262..96b5e9d0d47c 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "client_ssl_auth", srcs = ["client_ssl_auth.proto"], diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index fe0a6a3800b8..6add30a59552 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.client_ssl_auth.v2; option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2"; -option go_package = "v2"; import "envoy/api/v2/core/address.proto"; import "google/protobuf/duration.proto"; diff --git a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD index bece14103bbe..540d8b4aa1a4 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD +++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "client_ssl_auth", srcs = ["client_ssl_auth.proto"], diff --git 
a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto index a0ea3bf0bfaa..821d63494742 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.client_ssl_auth.v3alpha; option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/address.proto"; import "google/protobuf/duration.proto"; diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD index e3e83a704684..c6cee209c654 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD @@ -1,7 +1,16 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/api/v2/route:pkg", + "//envoy/type", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "dubbo_proxy", srcs = [ diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index 5b0995ba0022..e9834b704ed3 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.dubbo_proxy.v2alpha1; option java_outer_classname = "DubboProxyProto"; option java_multiple_files = true; option java_package = 
"io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; -option go_package = "v2"; import "envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto"; @@ -58,4 +57,4 @@ message DubboFilter { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. google.protobuf.Any config = 2; -} \ No newline at end of file +} diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto index 39f16e1a9342..c852a7cf5e53 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.dubbo_proxy.v2alpha1; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; -option go_package = "v2"; import "envoy/api/v2/route/route.proto"; import "envoy/type/matcher/string.proto"; diff --git a/api/envoy/config/filter/network/ext_authz/v2/BUILD b/api/envoy/config/filter/network/ext_authz/v2/BUILD index 96184437fa54..3bdae60659a1 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/BUILD +++ b/api/envoy/config/filter/network/ext_authz/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "ext_authz", srcs = ["ext_authz.proto"], diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto index f9a2f351f79e..8d0a6c6ca246 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ 
b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.ext_authz.v2; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2"; -option go_package = "v2"; import "envoy/api/v2/core/grpc_service.proto"; diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD b/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD index 839724af13b4..58aa28906331 100644 --- a/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD +++ b/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "ext_authz", srcs = ["ext_authz.proto"], diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto index 99c0c7239753..c53b509fee79 100644 --- a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.ext_authz.v3alpha; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/grpc_service.proto"; diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD index 95d3811f426a..6a090f3a115d 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD +++ b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD @@ 
-1,7 +1,16 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2", + "//envoy/api/v2/core", + "//envoy/config/filter/accesslog/v2:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "http_connection_manager", srcs = ["http_connection_manager.proto"], @@ -15,17 +24,3 @@ api_proto_library_internal( "//envoy/type:percent", ], ) - -api_go_proto_library( - name = "http_connection_manager", - proto = ":http_connection_manager", - deps = [ - "//envoy/api/v2:rds_go_grpc", - "//envoy/api/v2:srds_go_grpc", - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:config_source_go_proto", - "//envoy/api/v2/core:protocol_go_proto", - "//envoy/config/filter/accesslog/v2:accesslog_go_proto", - "//envoy/type:percent_go_proto", - ], -) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index bf1195731b86..e20a91542b83 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.http_connection_manager.v2; option java_outer_classname = "HttpConnectionManagerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2"; -option go_package = "v2"; import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/core/protocol.proto"; diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD b/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD index 300b3c8e671a..57e0528c2ea6 100644 --- 
a/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD +++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD @@ -1,7 +1,16 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha", + "//envoy/api/v3alpha/core", + "//envoy/config/filter/accesslog/v3alpha:pkg", + "//envoy/type", + ], +) + api_proto_library_internal( name = "http_connection_manager", srcs = ["http_connection_manager.proto"], @@ -15,17 +24,3 @@ api_proto_library_internal( "//envoy/type:percent", ], ) - -api_go_proto_library( - name = "http_connection_manager", - proto = ":http_connection_manager", - deps = [ - "//envoy/api/v3alpha:rds_go_grpc", - "//envoy/api/v3alpha:srds_go_grpc", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:config_source_go_proto", - "//envoy/api/v3alpha/core:protocol_go_proto", - "//envoy/config/filter/accesslog/v3alpha:accesslog_go_proto", - "//envoy/type:percent_go_proto", - ], -) diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto index 57e529b2164a..4102c7016f2d 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.http_connection_manager.v3alpha; option java_outer_classname = "HttpConnectionManagerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/config_source.proto"; import 
"envoy/api/v3alpha/core/protocol.proto"; diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/BUILD b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD index 5535f010179d..59bad30ed94d 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v2/BUILD +++ b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/filter/fault/v2:pkg"], +) + api_proto_library_internal( name = "mongo_proxy", srcs = ["mongo_proxy.proto"], diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto index 0d3d67bf6654..46ef44c96b94 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.mongo_proxy.v2; option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2"; -option go_package = "v2"; import "envoy/config/filter/fault/v2/fault.proto"; diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD index a2c09e709030..67dca3bb139a 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD +++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/filter/fault/v3alpha:pkg"], +) + api_proto_library_internal( name = "mongo_proxy", srcs = 
["mongo_proxy.proto"], diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto index 9149b433e372..780483ccb4c8 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.mongo_proxy.v3alpha; option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v3alpha"; -option go_package = "v2"; import "envoy/config/filter/fault/v3alpha/fault.proto"; diff --git a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD index fde664838c93..7f7da3af9276 100644 --- a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD +++ b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "mysql_proxy", srcs = ["mysql_proxy.proto"], diff --git a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto index e4246c9314aa..dee014556360 100644 --- a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto +++ b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.mysql_proxy.v1alpha1; option java_outer_classname = "MysqlProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1"; -option go_package = "v1alpha1"; import "validate/validate.proto"; diff 
--git a/api/envoy/config/filter/network/rate_limit/v2/BUILD b/api/envoy/config/filter/network/rate_limit/v2/BUILD index 08d5db95b117..fcdcd0dfa5ef 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/BUILD +++ b/api/envoy/config/filter/network/rate_limit/v2/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/ratelimit:pkg", + "//envoy/config/ratelimit/v2:pkg", + ], +) + api_proto_library_internal( name = "rate_limit", srcs = ["rate_limit.proto"], diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto index 6a1b795580c8..9a8f2f02146d 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.rate_limit.v2; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2"; -option go_package = "v2"; import "envoy/api/v2/ratelimit/ratelimit.proto"; import "envoy/config/ratelimit/v2/rls.proto"; diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD b/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD index 9dc17266721c..a13183b9eb75 100644 --- a/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD +++ b/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/ratelimit:pkg", + "//envoy/config/ratelimit/v3alpha:pkg", + ], +) 
+ api_proto_library_internal( name = "rate_limit", srcs = ["rate_limit.proto"], diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto index a0edc98e561d..60e2d27aff6b 100644 --- a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.rate_limit.v3alpha; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/ratelimit/ratelimit.proto"; import "envoy/config/ratelimit/v3alpha/rls.proto"; diff --git a/api/envoy/config/filter/network/rbac/v2/BUILD b/api/envoy/config/filter/network/rbac/v2/BUILD index 6182fe26748a..ca9aa2ca410c 100644 --- a/api/envoy/config/filter/network/rbac/v2/BUILD +++ b/api/envoy/config/filter/network/rbac/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/rbac/v2:pkg"], +) + api_proto_library_internal( name = "rbac", srcs = ["rbac.proto"], diff --git a/api/envoy/config/filter/network/rbac/v2/rbac.proto b/api/envoy/config/filter/network/rbac/v2/rbac.proto index aea17f725ff4..c6c6fac41c57 100644 --- a/api/envoy/config/filter/network/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v2/rbac.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.rbac.v2; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2"; -option go_package = "v2"; import "envoy/config/rbac/v2/rbac.proto"; diff --git 
a/api/envoy/config/filter/network/rbac/v3alpha/BUILD b/api/envoy/config/filter/network/rbac/v3alpha/BUILD index a6ee42cf7893..1e4d51b50453 100644 --- a/api/envoy/config/filter/network/rbac/v3alpha/BUILD +++ b/api/envoy/config/filter/network/rbac/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/config/rbac/v3alpha:pkg"], +) + api_proto_library_internal( name = "rbac", srcs = ["rbac.proto"], diff --git a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto index 5c2114cd6063..5faa5f5c087c 100644 --- a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.rbac.v3alpha; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v3alpha"; -option go_package = "v2"; import "envoy/config/rbac/v3alpha/rbac.proto"; diff --git a/api/envoy/config/filter/network/redis_proxy/v2/BUILD b/api/envoy/config/filter/network/redis_proxy/v2/BUILD index 16cff613b38d..d23450a55d1e 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/BUILD +++ b/api/envoy/config/filter/network/redis_proxy/v2/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/type", + ], +) + api_proto_library_internal( name = "redis_proxy", srcs = ["redis_proxy.proto"], diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto 
b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 175e564dec3f..656cedf75025 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.redis_proxy.v2; option java_outer_classname = "RedisProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2"; -option go_package = "v2"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD index ef7cc5683f0c..4db47e3bb664 100644 --- a/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD +++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/type", + ], +) + api_proto_library_internal( name = "redis_proxy", srcs = ["redis_proxy.proto"], diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto index 1bda0ab7c466..a690451f7947 100644 --- a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.redis_proxy.v3alpha; option java_outer_classname = "RedisProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/BUILD 
b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD index e75ab7036b75..a0cc067086cc 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/BUILD +++ b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/config/filter/accesslog/v2:pkg", + ], +) + api_proto_library_internal( name = "tcp_proxy", srcs = ["tcp_proxy.proto"], @@ -11,13 +18,3 @@ api_proto_library_internal( "//envoy/config/filter/accesslog/v2:accesslog", ], ) - -api_go_proto_library( - name = "tcp_proxy", - proto = ":tcp_proxy", - deps = [ - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - "//envoy/config/filter/accesslog/v2:accesslog_go_proto", - ], -) diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 62874fe1d45d..376c980fb244 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.tcp_proxy.v2; option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; -option go_package = "v2"; import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/api/v2/core/address.proto"; diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD index a9ea8de3bf1e..305e06bc8bfb 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD +++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", 
"api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/config/filter/accesslog/v3alpha:pkg", + ], +) + api_proto_library_internal( name = "tcp_proxy", srcs = ["tcp_proxy.proto"], @@ -11,13 +18,3 @@ api_proto_library_internal( "//envoy/config/filter/accesslog/v3alpha:accesslog", ], ) - -api_go_proto_library( - name = "tcp_proxy", - proto = ":tcp_proxy", - deps = [ - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/config/filter/accesslog/v3alpha:accesslog_go_proto", - ], -) diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto index f2597a3ab361..4e04d7b352d1 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.tcp_proxy.v3alpha; option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v3alpha"; -option go_package = "v2"; import "envoy/config/filter/accesslog/v3alpha/accesslog.proto"; import "envoy/api/v3alpha/core/address.proto"; diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD index f758f7f580f5..28a64a0a329e 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = 
[ + "//envoy/api/v2/core", + "//envoy/api/v2/route:pkg", + ], +) + api_proto_library_internal( name = "thrift_proxy", srcs = [ diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto index dcd83f2f1a2a..5d230d4474cc 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.thrift_proxy.v2alpha1; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; -option go_package = "v2"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/route/route.proto"; diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto index 0be6c337037f..4cfe538798a2 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.thrift_proxy.v2alpha1; option java_outer_classname = "ThriftProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; -option go_package = "v2"; import "envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto"; diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD index 8719f5083f12..02594c24b8ae 100644 --- a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", 
"api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "zookeeper_proxy", srcs = ["zookeeper_proxy.proto"], diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto index 6a8afdd12ec0..72d09810ff0f 100644 --- a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto @@ -5,7 +5,6 @@ package envoy.config.filter.network.zookeeper_proxy.v1alpha1; option java_outer_classname = "ZookeeperProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; -option go_package = "v1alpha1"; import "validate/validate.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD index 08d5db95b117..fcdcd0dfa5ef 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/ratelimit:pkg", + "//envoy/config/ratelimit/v2:pkg", + ], +) + api_proto_library_internal( name = "rate_limit", srcs = ["rate_limit.proto"], diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto index 15a50d553f9b..743bdc7256b7 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto @@ 
-5,7 +5,6 @@ package envoy.config.filter.thrift.rate_limit.v2alpha1; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1"; -option go_package = "v2alpha1"; import "envoy/config/ratelimit/v2/rls.proto"; diff --git a/api/envoy/config/filter/thrift/router/v2alpha1/BUILD b/api/envoy/config/filter/thrift/router/v2alpha1/BUILD index 51c69c0d5b20..68bd8c126b80 100644 --- a/api/envoy/config/filter/thrift/router/v2alpha1/BUILD +++ b/api/envoy/config/filter/thrift/router/v2alpha1/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "router", srcs = ["router.proto"], diff --git a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto index c515752c2a00..9c9383caf33f 100644 --- a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto +++ b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto @@ -5,7 +5,6 @@ package envoy.config.filter.thrift.router.v2alpha1; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1"; -option go_package = "v2alpha1"; // [#protodoc-title: Router] // Thrift router :ref:`configuration overview `. 
diff --git a/api/envoy/config/grpc_credential/v2alpha/BUILD b/api/envoy/config/grpc_credential/v2alpha/BUILD index f299179ecb00..484aa5680d12 100644 --- a/api/envoy/config/grpc_credential/v2alpha/BUILD +++ b/api/envoy/config/grpc_credential/v2alpha/BUILD @@ -1,6 +1,10 @@ licenses(["notice"]) # Apache 2 -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +api_proto_package( + deps = ["//envoy/api/v2/core"], +) api_proto_library_internal( name = "file_based_metadata", @@ -8,20 +12,7 @@ api_proto_library_internal( deps = ["//envoy/api/v2/core:base"], ) -api_go_proto_library( - name = "file_based_metadata", - proto = ":file_based_metadata", - deps = [ - "//envoy/api/v2/core:base_go_proto", - ], -) - api_proto_library_internal( name = "aws_iam", srcs = ["aws_iam.proto"], ) - -api_go_proto_library( - name = "aws_iam", - proto = ":aws_iam", -) diff --git a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto index 3689b80611f5..e7a7bf94cce6 100644 --- a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto @@ -8,7 +8,6 @@ package envoy.config.grpc_credential.v2alpha; option java_outer_classname = "AwsIamProto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_multiple_files = true; -option go_package = "v2alpha"; import "validate/validate.proto"; diff --git a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto index c91c50e39a55..1746492fe261 100644 --- a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto @@ -8,7 +8,6 @@ package envoy.config.grpc_credential.v2alpha; option java_outer_classname = 
"FileBasedMetadataProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; -option go_package = "v2alpha"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/config/grpc_credential/v3alpha/BUILD b/api/envoy/config/grpc_credential/v3alpha/BUILD index 2f6736732881..7c327f91f031 100644 --- a/api/envoy/config/grpc_credential/v3alpha/BUILD +++ b/api/envoy/config/grpc_credential/v3alpha/BUILD @@ -1,6 +1,10 @@ licenses(["notice"]) # Apache 2 -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) api_proto_library_internal( name = "file_based_metadata", @@ -8,20 +12,7 @@ api_proto_library_internal( deps = ["//envoy/api/v3alpha/core:base"], ) -api_go_proto_library( - name = "file_based_metadata", - proto = ":file_based_metadata", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - ], -) - api_proto_library_internal( name = "aws_iam", srcs = ["aws_iam.proto"], ) - -api_go_proto_library( - name = "aws_iam", - proto = ":aws_iam", -) diff --git a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto index 33921db6d69a..29c9cf140a00 100644 --- a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto @@ -8,7 +8,6 @@ package envoy.config.grpc_credential.v3alpha; option java_outer_classname = "AwsIamProto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha"; option java_multiple_files = true; -option go_package = "v2alpha"; import "validate/validate.proto"; diff --git a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto index 2886921b3415..9bab390cc833 100644 --- 
a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto @@ -8,7 +8,6 @@ package envoy.config.grpc_credential.v3alpha; option java_outer_classname = "FileBasedMetadataProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha"; -option go_package = "v2alpha"; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/config/health_checker/redis/v2/BUILD b/api/envoy/config/health_checker/redis/v2/BUILD index 239d1f224fc6..f7b289b08f69 100644 --- a/api/envoy/config/health_checker/redis/v2/BUILD +++ b/api/envoy/config/health_checker/redis/v2/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "redis", srcs = ["redis.proto"], diff --git a/api/envoy/config/health_checker/redis/v2/redis.proto b/api/envoy/config/health_checker/redis/v2/redis.proto index 130454b5d406..8ab2de269a5f 100644 --- a/api/envoy/config/health_checker/redis/v2/redis.proto +++ b/api/envoy/config/health_checker/redis/v2/redis.proto @@ -5,7 +5,6 @@ package envoy.config.health_checker.redis.v2; option java_outer_classname = "RedisProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2"; -option go_package = "v2"; // [#protodoc-title: Redis] // Redis health checker :ref:`configuration overview `. 
diff --git a/api/envoy/config/health_checker/redis/v3alpha/BUILD b/api/envoy/config/health_checker/redis/v3alpha/BUILD index 239d1f224fc6..f7b289b08f69 100644 --- a/api/envoy/config/health_checker/redis/v3alpha/BUILD +++ b/api/envoy/config/health_checker/redis/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "redis", srcs = ["redis.proto"], diff --git a/api/envoy/config/health_checker/redis/v3alpha/redis.proto b/api/envoy/config/health_checker/redis/v3alpha/redis.proto index 234da40d56ba..1409e9545f41 100644 --- a/api/envoy/config/health_checker/redis/v3alpha/redis.proto +++ b/api/envoy/config/health_checker/redis/v3alpha/redis.proto @@ -5,7 +5,6 @@ package envoy.config.health_checker.redis.v3alpha; option java_outer_classname = "RedisProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v3alpha"; -option go_package = "v2"; // [#protodoc-title: Redis] // Redis health checker :ref:`configuration overview `. 
diff --git a/api/envoy/config/metrics/v2/BUILD b/api/envoy/config/metrics/v2/BUILD index 157b09c4d814..13ac8bdd9992 100644 --- a/api/envoy/config/metrics/v2/BUILD +++ b/api/envoy/config/metrics/v2/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "metrics_service", srcs = ["metrics_service.proto"], @@ -13,14 +20,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "metrics_service", - proto = ":metrics_service", - deps = [ - "//envoy/api/v2/core:grpc_service_go_proto", - ], -) - api_proto_library_internal( name = "stats", srcs = ["stats.proto"], @@ -32,12 +31,3 @@ api_proto_library_internal( "//envoy/type/matcher:string", ], ) - -api_go_proto_library( - name = "stats", - proto = ":stats", - deps = [ - "//envoy/api/v2/core:address_go_proto", - "//envoy/type/matcher:string_go_proto", - ], -) diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index 08172180b545..fea8b9b0f878 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -8,7 +8,6 @@ package envoy.config.metrics.v2; option java_outer_classname = "StatsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.metrics.v2"; -option go_package = "v2"; import "envoy/api/v2/core/address.proto"; import "envoy/type/matcher/string.proto"; diff --git a/api/envoy/config/metrics/v3alpha/BUILD b/api/envoy/config/metrics/v3alpha/BUILD index 39d0b79654d0..399ec444208d 100644 --- a/api/envoy/config/metrics/v3alpha/BUILD +++ b/api/envoy/config/metrics/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", 
"api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/type/matcher", + ], +) + api_proto_library_internal( name = "metrics_service", srcs = ["metrics_service.proto"], @@ -13,14 +20,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "metrics_service", - proto = ":metrics_service", - deps = [ - "//envoy/api/v3alpha/core:grpc_service_go_proto", - ], -) - api_proto_library_internal( name = "stats", srcs = ["stats.proto"], @@ -32,12 +31,3 @@ api_proto_library_internal( "//envoy/type/matcher:string", ], ) - -api_go_proto_library( - name = "stats", - proto = ":stats", - deps = [ - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/type/matcher:string_go_proto", - ], -) diff --git a/api/envoy/config/metrics/v3alpha/stats.proto b/api/envoy/config/metrics/v3alpha/stats.proto index 91324ed0ef61..afa4468b3444 100644 --- a/api/envoy/config/metrics/v3alpha/stats.proto +++ b/api/envoy/config/metrics/v3alpha/stats.proto @@ -8,7 +8,6 @@ package envoy.config.metrics.v3alpha; option java_outer_classname = "StatsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.metrics.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/address.proto"; import "envoy/type/matcher/string.proto"; diff --git a/api/envoy/config/overload/v2alpha/BUILD b/api/envoy/config/overload/v2alpha/BUILD index bfffb5639ca7..e247848d07a9 100644 --- a/api/envoy/config/overload/v2alpha/BUILD +++ b/api/envoy/config/overload/v2alpha/BUILD @@ -1,14 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + 
api_proto_library_internal( name = "overload", srcs = ["overload.proto"], visibility = ["//visibility:public"], ) - -api_go_proto_library( - name = "overload", - proto = ":overload", -) diff --git a/api/envoy/config/overload/v2alpha/overload.proto b/api/envoy/config/overload/v2alpha/overload.proto index efdba5a09a72..e32764675cb5 100644 --- a/api/envoy/config/overload/v2alpha/overload.proto +++ b/api/envoy/config/overload/v2alpha/overload.proto @@ -5,7 +5,6 @@ package envoy.config.overload.v2alpha; option java_outer_classname = "OverloadProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.overload.v2alpha"; -option go_package = "v2alpha"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; diff --git a/api/envoy/config/overload/v3alpha/BUILD b/api/envoy/config/overload/v3alpha/BUILD index bfffb5639ca7..e247848d07a9 100644 --- a/api/envoy/config/overload/v3alpha/BUILD +++ b/api/envoy/config/overload/v3alpha/BUILD @@ -1,14 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "overload", srcs = ["overload.proto"], visibility = ["//visibility:public"], ) - -api_go_proto_library( - name = "overload", - proto = ":overload", -) diff --git a/api/envoy/config/overload/v3alpha/overload.proto b/api/envoy/config/overload/v3alpha/overload.proto index 474c7677002b..857b510e665a 100644 --- a/api/envoy/config/overload/v3alpha/overload.proto +++ b/api/envoy/config/overload/v3alpha/overload.proto @@ -5,7 +5,6 @@ package envoy.config.overload.v3alpha; option java_outer_classname = "OverloadProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.overload.v3alpha"; -option go_package = "v2alpha"; import "google/protobuf/any.proto"; import 
"google/protobuf/duration.proto"; diff --git a/api/envoy/config/ratelimit/v2/BUILD b/api/envoy/config/ratelimit/v2/BUILD index be3fc1c212bb..432f4b9592d3 100644 --- a/api/envoy/config/ratelimit/v2/BUILD +++ b/api/envoy/config/ratelimit/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "rls", srcs = ["rls.proto"], @@ -10,11 +14,3 @@ api_proto_library_internal( "//envoy/api/v2/core:grpc_service", ], ) - -api_go_grpc_library( - name = "rls", - proto = ":rls", - deps = [ - "//envoy/api/v2/core:grpc_service_go_proto", - ], -) diff --git a/api/envoy/config/ratelimit/v2/rls.proto b/api/envoy/config/ratelimit/v2/rls.proto index 8f039b44efeb..55577d4ab013 100644 --- a/api/envoy/config/ratelimit/v2/rls.proto +++ b/api/envoy/config/ratelimit/v2/rls.proto @@ -5,7 +5,6 @@ package envoy.config.ratelimit.v2; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.ratelimit.v2"; -option go_package = "v2"; import "envoy/api/v2/core/grpc_service.proto"; diff --git a/api/envoy/config/ratelimit/v3alpha/BUILD b/api/envoy/config/ratelimit/v3alpha/BUILD index 571a768dde4b..1d009164ba64 100644 --- a/api/envoy/config/ratelimit/v3alpha/BUILD +++ b/api/envoy/config/ratelimit/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "rls", srcs = ["rls.proto"], @@ -10,11 +14,3 @@ api_proto_library_internal( 
"//envoy/api/v3alpha/core:grpc_service", ], ) - -api_go_grpc_library( - name = "rls", - proto = ":rls", - deps = [ - "//envoy/api/v3alpha/core:grpc_service_go_proto", - ], -) diff --git a/api/envoy/config/ratelimit/v3alpha/rls.proto b/api/envoy/config/ratelimit/v3alpha/rls.proto index 67ac6479cd23..16d5a4ad7712 100644 --- a/api/envoy/config/ratelimit/v3alpha/rls.proto +++ b/api/envoy/config/ratelimit/v3alpha/rls.proto @@ -5,7 +5,6 @@ package envoy.config.ratelimit.v3alpha; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.ratelimit.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/grpc_service.proto"; diff --git a/api/envoy/config/rbac/v2/BUILD b/api/envoy/config/rbac/v2/BUILD index fac50eb66f9b..18b1bb24f29d 100644 --- a/api/envoy/config/rbac/v2/BUILD +++ b/api/envoy/config/rbac/v2/BUILD @@ -1,6 +1,15 @@ licenses(["notice"]) # Apache 2 -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "//envoy/api/v2/route:pkg", + "//envoy/type/matcher", + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", + ], +) api_proto_library_internal( name = "rbac", @@ -22,15 +31,3 @@ api_proto_library_internal( "//envoy/type/matcher:string", ], ) - -api_go_proto_library( - name = "rbac", - proto = ":rbac", - deps = [ - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/route:route_go_proto", - "//envoy/type/matcher:metadata_go_proto", - "//envoy/type/matcher:string_go_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:cel_go_proto", - ], -) diff --git a/api/envoy/config/rbac/v2/rbac.proto b/api/envoy/config/rbac/v2/rbac.proto index 15554e561df4..34a062be535b 100644 --- a/api/envoy/config/rbac/v2/rbac.proto +++ b/api/envoy/config/rbac/v2/rbac.proto @@ -14,7 
+14,6 @@ package envoy.config.rbac.v2; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.rbac.v2"; -option go_package = "v2"; option (gogoproto.stable_marshaler_all) = true; diff --git a/api/envoy/config/rbac/v3alpha/BUILD b/api/envoy/config/rbac/v3alpha/BUILD index 89f98c97d481..60200f034ea1 100644 --- a/api/envoy/config/rbac/v3alpha/BUILD +++ b/api/envoy/config/rbac/v3alpha/BUILD @@ -1,6 +1,15 @@ licenses(["notice"]) # Apache 2 -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/route:pkg", + "//envoy/type/matcher", + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", + ], +) api_proto_library_internal( name = "rbac", @@ -22,15 +31,3 @@ api_proto_library_internal( "//envoy/type/matcher:string", ], ) - -api_go_proto_library( - name = "rbac", - proto = ":rbac", - deps = [ - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/route:route_go_proto", - "//envoy/type/matcher:metadata_go_proto", - "//envoy/type/matcher:string_go_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:cel_go_proto", - ], -) diff --git a/api/envoy/config/rbac/v3alpha/rbac.proto b/api/envoy/config/rbac/v3alpha/rbac.proto index d299c384da90..9087e745690d 100644 --- a/api/envoy/config/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/rbac/v3alpha/rbac.proto @@ -14,7 +14,6 @@ package envoy.config.rbac.v3alpha; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.rbac.v3alpha"; -option go_package = "v2"; option (gogoproto.stable_marshaler_all) = true; diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD 
index 363d90f11808..a5003e219c8a 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "fixed_heap", srcs = ["fixed_heap.proto"], diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto index f7efe0b5643a..110123e3c332 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto @@ -5,7 +5,6 @@ package envoy.config.resource_monitor.fixed_heap.v2alpha; option java_outer_classname = "FixedHeapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha"; -option go_package = "v2alpha"; import "validate/validate.proto"; diff --git a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD index 363d90f11808..a5003e219c8a 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD +++ b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "fixed_heap", srcs = ["fixed_heap.proto"], diff --git a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto index 2bc1baf85243..bc84ee992452 100644 --- 
a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto +++ b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto @@ -5,7 +5,6 @@ package envoy.config.resource_monitor.fixed_heap.v3alpha; option java_outer_classname = "FixedHeapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v3alpha"; -option go_package = "v2alpha"; import "validate/validate.proto"; diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD b/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD index 10abf09e9ef8..3a1764216b00 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "injected_resource", srcs = ["injected_resource.proto"], diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto index cab704a4b64a..64c984fa0cb3 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto @@ -5,7 +5,6 @@ package envoy.config.resource_monitor.injected_resource.v2alpha; option java_outer_classname = "InjectedResourceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha"; -option go_package = "v2alpha"; import "validate/validate.proto"; diff --git a/api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD 
b/api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD index 10abf09e9ef8..3a1764216b00 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD +++ b/api/envoy/config/resource_monitor/injected_resource/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library_internal( name = "injected_resource", srcs = ["injected_resource.proto"], diff --git a/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto index f5b41ef165c8..555e15323f46 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto +++ b/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto @@ -5,7 +5,6 @@ package envoy.config.resource_monitor.injected_resource.v3alpha; option java_outer_classname = "InjectedResourceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v3alpha"; -option go_package = "v2alpha"; import "validate/validate.proto"; diff --git a/api/envoy/config/retry/previous_priorities/BUILD b/api/envoy/config/retry/previous_priorities/BUILD index 13a694af37d2..8140346d4747 100644 --- a/api/envoy/config/retry/previous_priorities/BUILD +++ b/api/envoy/config/retry/previous_priorities/BUILD @@ -1,6 +1,10 @@ licenses(["notice"]) # Apache 2 -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +api_proto_package( + deps = ["//envoy/api/v2/core"], +) api_proto_library_internal( name = "previous_priorities", diff --git a/api/envoy/config/trace/v2/BUILD 
b/api/envoy/config/trace/v2/BUILD index b00f63dafb45..f894a5289fd5 100644 --- a/api/envoy/config/trace/v2/BUILD +++ b/api/envoy/config/trace/v2/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v2/core", + "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", + ], +) + api_proto_library_internal( name = "trace", srcs = ["trace.proto"], @@ -13,12 +20,3 @@ api_proto_library_internal( "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", ], ) - -api_go_proto_library( - name = "trace", - proto = ":trace", - deps = [ - "//envoy/api/v2/core:grpc_service_go_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", - ], -) diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index 65c027cd73fe..43f5013b27f1 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -8,7 +8,6 @@ package envoy.config.trace.v2; option java_outer_classname = "TraceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option go_package = "v2"; import "envoy/api/v2/core/grpc_service.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; diff --git a/api/envoy/config/trace/v3alpha/BUILD b/api/envoy/config/trace/v3alpha/BUILD index 72056b3ad4b6..97014ca68f6f 100644 --- a/api/envoy/config/trace/v3alpha/BUILD +++ b/api/envoy/config/trace/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps 
= [ + "//envoy/api/v3alpha/core", + "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", + ], +) + api_proto_library_internal( name = "trace", srcs = ["trace.proto"], @@ -13,12 +20,3 @@ api_proto_library_internal( "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", ], ) - -api_go_proto_library( - name = "trace", - proto = ":trace", - deps = [ - "//envoy/api/v3alpha/core:grpc_service_go_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", - ], -) diff --git a/api/envoy/config/trace/v3alpha/trace.proto b/api/envoy/config/trace/v3alpha/trace.proto index 2771c1b40f28..f98f1f708962 100644 --- a/api/envoy/config/trace/v3alpha/trace.proto +++ b/api/envoy/config/trace/v3alpha/trace.proto @@ -8,7 +8,6 @@ package envoy.config.trace.v3alpha; option java_outer_classname = "TraceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.trace.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/grpc_service.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; diff --git a/api/envoy/config/transport_socket/alts/v2alpha/BUILD b/api/envoy/config/transport_socket/alts/v2alpha/BUILD index 6cb181f202d2..eb247ae14b04 100644 --- a/api/envoy/config/transport_socket/alts/v2alpha/BUILD +++ b/api/envoy/config/transport_socket/alts/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library( name = "alts", srcs = ["alts.proto"], diff --git a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto index f5a9db64c0e4..ec294af17426 100644 --- a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto +++ b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto @@ -5,7 +5,6 @@ 
package envoy.config.transport_socket.alts.v2alpha; option java_outer_classname = "AltsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha"; -option go_package = "v2"; // [#protodoc-title: ALTS] diff --git a/api/envoy/config/transport_socket/alts/v3alpha/BUILD b/api/envoy/config/transport_socket/alts/v3alpha/BUILD index 7ffc03097000..4e6642283e3a 100644 --- a/api/envoy/config/transport_socket/alts/v3alpha/BUILD +++ b/api/envoy/config/transport_socket/alts/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library( name = "alts", srcs = ["alts.proto"], diff --git a/api/envoy/config/transport_socket/alts/v3alpha/alts.proto b/api/envoy/config/transport_socket/alts/v3alpha/alts.proto index 22684b862614..adec43c25cb8 100644 --- a/api/envoy/config/transport_socket/alts/v3alpha/alts.proto +++ b/api/envoy/config/transport_socket/alts/v3alpha/alts.proto @@ -5,7 +5,6 @@ package envoy.config.transport_socket.alts.v3alpha; option java_outer_classname = "AltsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v3alpha"; -option go_package = "v2"; // [#protodoc-title: ALTS] diff --git a/api/envoy/config/transport_socket/tap/v2alpha/BUILD b/api/envoy/config/transport_socket/tap/v2alpha/BUILD index 75810cd0c269..e18d4fc1c128 100644 --- a/api/envoy/config/transport_socket/tap/v2alpha/BUILD +++ b/api/envoy/config/transport_socket/tap/v2alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + 
"//envoy/api/v2/core", + "//envoy/config/common/tap/v2alpha:pkg", + ], +) + api_proto_library_internal( name = "tap", srcs = ["tap.proto"], diff --git a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto index 84918699ef97..e68b40dae530 100644 --- a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto @@ -5,7 +5,6 @@ package envoy.config.transport_socket.tap.v2alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; -option go_package = "v2"; // [#protodoc-title: Tap] diff --git a/api/envoy/config/transport_socket/tap/v3alpha/BUILD b/api/envoy/config/transport_socket/tap/v3alpha/BUILD index 8056ad6f17bb..0f24cca4c1a1 100644 --- a/api/envoy/config/transport_socket/tap/v3alpha/BUILD +++ b/api/envoy/config/transport_socket/tap/v3alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/config/common/tap/v3alpha:pkg", + ], +) + api_proto_library_internal( name = "tap", srcs = ["tap.proto"], diff --git a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto index 1cca6814c803..21625e17ef9c 100644 --- a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto @@ -5,7 +5,6 @@ package envoy.config.transport_socket.tap.v3alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v3alpha"; -option go_package = "v2"; // [#protodoc-title: Tap] diff --git 
a/api/envoy/data/accesslog/v2/BUILD b/api/envoy/data/accesslog/v2/BUILD index d3ade88e922f..22c4c45ee847 100644 --- a/api/envoy/data/accesslog/v2/BUILD +++ b/api/envoy/data/accesslog/v2/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "accesslog", srcs = ["accesslog.proto"], @@ -13,12 +17,3 @@ api_proto_library_internal( "//envoy/api/v2/core:base", ], ) - -api_go_proto_library( - name = "accesslog", - proto = ":accesslog", - deps = [ - "//envoy/api/v2/core:address_go_proto", - "//envoy/api/v2/core:base_go_proto", - ], -) diff --git a/api/envoy/data/accesslog/v3alpha/BUILD b/api/envoy/data/accesslog/v3alpha/BUILD index 30157958e7fe..e1fafc00343b 100644 --- a/api/envoy/data/accesslog/v3alpha/BUILD +++ b/api/envoy/data/accesslog/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "accesslog", srcs = ["accesslog.proto"], @@ -13,12 +17,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/core:base", ], ) - -api_go_proto_library( - name = "accesslog", - proto = ":accesslog", - deps = [ - "//envoy/api/v3alpha/core:address_go_proto", - "//envoy/api/v3alpha/core:base_go_proto", - ], -) diff --git a/api/envoy/data/cluster/v2alpha/BUILD b/api/envoy/data/cluster/v2alpha/BUILD index 00edd8294b6f..4d921f4d97ac 100644 --- a/api/envoy/data/cluster/v2alpha/BUILD +++ b/api/envoy/data/cluster/v2alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", 
"api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library( name = "outlier_detection_event", srcs = ["outlier_detection_event.proto"], diff --git a/api/envoy/data/cluster/v3alpha/BUILD b/api/envoy/data/cluster/v3alpha/BUILD index 00edd8294b6f..4d921f4d97ac 100644 --- a/api/envoy/data/cluster/v3alpha/BUILD +++ b/api/envoy/data/cluster/v3alpha/BUILD @@ -1,7 +1,9 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package() + api_proto_library( name = "outlier_detection_event", srcs = ["outlier_detection_event.proto"], diff --git a/api/envoy/data/core/v2alpha/BUILD b/api/envoy/data/core/v2alpha/BUILD index 8320031d8466..331032348388 100644 --- a/api/envoy/data/core/v2alpha/BUILD +++ b/api/envoy/data/core/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library( name = "health_check_event", srcs = ["health_check_event.proto"], diff --git a/api/envoy/data/core/v3alpha/BUILD b/api/envoy/data/core/v3alpha/BUILD index 9e82e3eb1731..6c44f3e4d79e 100644 --- a/api/envoy/data/core/v3alpha/BUILD +++ b/api/envoy/data/core/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library( name = "health_check_event", srcs = ["health_check_event.proto"], diff --git a/api/envoy/data/tap/v2alpha/BUILD b/api/envoy/data/tap/v2alpha/BUILD 
index 1b373eee86df..bf108c4792a1 100644 --- a/api/envoy/data/tap/v2alpha/BUILD +++ b/api/envoy/data/tap/v2alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v2/core"], +) + api_proto_library_internal( name = "common", srcs = ["common.proto"], diff --git a/api/envoy/data/tap/v2alpha/transport.proto b/api/envoy/data/tap/v2alpha/transport.proto index 3b8c244b9bae..c3a3d8b8eb86 100644 --- a/api/envoy/data/tap/v2alpha/transport.proto +++ b/api/envoy/data/tap/v2alpha/transport.proto @@ -9,7 +9,6 @@ package envoy.data.tap.v2alpha; option java_outer_classname = "TransportProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option go_package = "v2"; import "envoy/api/v2/core/address.proto"; import "envoy/data/tap/v2alpha/common.proto"; diff --git a/api/envoy/data/tap/v3alpha/BUILD b/api/envoy/data/tap/v3alpha/BUILD index ab9be74ce98a..33d151e333a9 100644 --- a/api/envoy/data/tap/v3alpha/BUILD +++ b/api/envoy/data/tap/v3alpha/BUILD @@ -1,7 +1,11 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + deps = ["//envoy/api/v3alpha/core"], +) + api_proto_library_internal( name = "common", srcs = ["common.proto"], diff --git a/api/envoy/data/tap/v3alpha/transport.proto b/api/envoy/data/tap/v3alpha/transport.proto index 3dfb0c6478ba..e35f036f5d82 100644 --- a/api/envoy/data/tap/v3alpha/transport.proto +++ b/api/envoy/data/tap/v3alpha/transport.proto @@ -9,7 +9,6 @@ package envoy.data.tap.v3alpha; option java_outer_classname = "TransportProto"; option java_multiple_files = true; option java_package = 
"io.envoyproxy.envoy.data.tap.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/address.proto"; import "envoy/data/tap/v3alpha/common.proto"; diff --git a/api/envoy/service/accesslog/v2/BUILD b/api/envoy/service/accesslog/v2/BUILD index 1dad9447048d..d4f7c300361e 100644 --- a/api/envoy/service/accesslog/v2/BUILD +++ b/api/envoy/service/accesslog/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v2/core", + "//envoy/data/accesslog/v2:pkg", + ], +) + api_proto_library_internal( name = "als", srcs = ["als.proto"], @@ -12,12 +20,3 @@ api_proto_library_internal( "//envoy/data/accesslog/v2:accesslog", ], ) - -api_go_grpc_library( - name = "als", - proto = ":als", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/data/accesslog/v2:accesslog_go_proto", - ], -) diff --git a/api/envoy/service/accesslog/v2/als.proto b/api/envoy/service/accesslog/v2/als.proto index 52788e0659c6..c06199a2b208 100644 --- a/api/envoy/service/accesslog/v2/als.proto +++ b/api/envoy/service/accesslog/v2/als.proto @@ -5,7 +5,6 @@ package envoy.service.accesslog.v2; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.accesslog.v2"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/service/accesslog/v3alpha/BUILD b/api/envoy/service/accesslog/v3alpha/BUILD index 1a8eab975b56..0bb8716b82ab 100644 --- a/api/envoy/service/accesslog/v3alpha/BUILD +++ b/api/envoy/service/accesslog/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", 
"api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/data/accesslog/v3alpha:pkg", + ], +) + api_proto_library_internal( name = "als", srcs = ["als.proto"], @@ -12,12 +20,3 @@ api_proto_library_internal( "//envoy/data/accesslog/v3alpha:accesslog", ], ) - -api_go_grpc_library( - name = "als", - proto = ":als", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/data/accesslog/v3alpha:accesslog_go_proto", - ], -) diff --git a/api/envoy/service/accesslog/v3alpha/als.proto b/api/envoy/service/accesslog/v3alpha/als.proto index 092d4d17696c..ad05b823d1e5 100644 --- a/api/envoy/service/accesslog/v3alpha/als.proto +++ b/api/envoy/service/accesslog/v3alpha/als.proto @@ -5,7 +5,6 @@ package envoy.service.accesslog.v3alpha; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.accesslog.v3alpha"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/service/auth/v2/BUILD b/api/envoy/service/auth/v2/BUILD index 57041668ddc8..91a4eeebbf13 100644 --- a/api/envoy/service/auth/v2/BUILD +++ b/api/envoy/service/auth/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v2/core", + "//envoy/type", + ], +) + api_proto_library_internal( name = "attribute_context", srcs = [ diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto index 0f723c98e46c..8a3d4f1a629e 100644 --- a/api/envoy/service/auth/v2/external_auth.proto +++ b/api/envoy/service/auth/v2/external_auth.proto @@ -5,7 +5,6 
@@ package envoy.service.auth.v2; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.auth.v2"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/service/auth/v2alpha/BUILD b/api/envoy/service/auth/v2alpha/BUILD index 1d9873a5ffa4..1940f4f2f885 100644 --- a/api/envoy/service/auth/v2alpha/BUILD +++ b/api/envoy/service/auth/v2alpha/BUILD @@ -1,7 +1,14 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/service/auth/v2:pkg", + ], +) + api_proto_library_internal( name = "external_auth", srcs = [ diff --git a/api/envoy/service/auth/v2alpha/external_auth.proto b/api/envoy/service/auth/v2alpha/external_auth.proto index bdf0d2e4853d..85e9c12c6afb 100644 --- a/api/envoy/service/auth/v2alpha/external_auth.proto +++ b/api/envoy/service/auth/v2alpha/external_auth.proto @@ -2,8 +2,6 @@ syntax = "proto3"; package envoy.service.auth.v2alpha; -option go_package = "v2alpha"; - option java_multiple_files = true; option java_generic_services = true; option java_outer_classname = "CertsProto"; diff --git a/api/envoy/service/auth/v3alpha/BUILD b/api/envoy/service/auth/v3alpha/BUILD index 6a335f88f949..f6a70cb5b9bd 100644 --- a/api/envoy/service/auth/v3alpha/BUILD +++ b/api/envoy/service/auth/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/type", + ], +) + 
api_proto_library_internal( name = "attribute_context", srcs = [ diff --git a/api/envoy/service/auth/v3alpha/external_auth.proto b/api/envoy/service/auth/v3alpha/external_auth.proto index 4b7e459a4436..0130040c1409 100644 --- a/api/envoy/service/auth/v3alpha/external_auth.proto +++ b/api/envoy/service/auth/v3alpha/external_auth.proto @@ -5,7 +5,6 @@ package envoy.service.auth.v3alpha; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.auth.v3alpha"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/service/discovery/v2/BUILD b/api/envoy/service/discovery/v2/BUILD index a9c2efd02fb9..13db2701c2a5 100644 --- a/api/envoy/service/discovery/v2/BUILD +++ b/api/envoy/service/discovery/v2/BUILD @@ -1,21 +1,22 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_library_internal( - name = "ads", - srcs = ["ads.proto"], - has_services = 1, +api_proto_package( + has_services = True, deps = [ - "//envoy/api/v2:discovery", + "//envoy/api/v2", + "//envoy/api/v2/core", + "//envoy/api/v2/endpoint:pkg", ], ) -api_go_grpc_library( +api_proto_library_internal( name = "ads", - proto = ":ads", + srcs = ["ads.proto"], + has_services = 1, deps = [ - "//envoy/api/v2:discovery_go_proto", + "//envoy/api/v2:discovery", ], ) @@ -30,16 +31,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "hds", - proto = ":hds", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:health_check_go_proto", - "//envoy/api/v2/endpoint:endpoint_go_proto", - ], -) - api_proto_library_internal( name = "sds", srcs = ["sds.proto"], @@ -49,14 +40,6 @@ api_proto_library_internal( ], ) 
-api_go_grpc_library( - name = "sds", - proto = ":sds", - deps = [ - "//envoy/api/v2:discovery_go_proto", - ], -) - api_proto_library_internal( name = "rtds", srcs = ["rtds.proto"], @@ -65,11 +48,3 @@ api_proto_library_internal( "//envoy/api/v2:discovery", ], ) - -api_go_grpc_library( - name = "rtds", - proto = ":rtds", - deps = [ - "//envoy/api/v2:discovery_go_proto", - ], -) diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto index 6a9d044ab4bd..45a7407f0c44 100644 --- a/api/envoy/service/discovery/v2/ads.proto +++ b/api/envoy/service/discovery/v2/ads.proto @@ -5,7 +5,6 @@ package envoy.service.discovery.v2; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v2/discovery.proto"; diff --git a/api/envoy/service/discovery/v3alpha/BUILD b/api/envoy/service/discovery/v3alpha/BUILD index d34955c1cb5a..138186e6ea05 100644 --- a/api/envoy/service/discovery/v3alpha/BUILD +++ b/api/envoy/service/discovery/v3alpha/BUILD @@ -1,21 +1,22 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_library_internal( - name = "ads", - srcs = ["ads.proto"], - has_services = 1, +api_proto_package( + has_services = True, deps = [ - "//envoy/api/v3alpha:discovery", + "//envoy/api/v3alpha", + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/endpoint:pkg", ], ) -api_go_grpc_library( +api_proto_library_internal( name = "ads", - proto = ":ads", + srcs = ["ads.proto"], + has_services = 1, deps = [ - "//envoy/api/v3alpha:discovery_go_proto", + "//envoy/api/v3alpha:discovery", ], ) @@ -30,16 +31,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = 
"hds", - proto = ":hds", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:health_check_go_proto", - "//envoy/api/v3alpha/endpoint:endpoint_go_proto", - ], -) - api_proto_library_internal( name = "sds", srcs = ["sds.proto"], @@ -49,14 +40,6 @@ api_proto_library_internal( ], ) -api_go_grpc_library( - name = "sds", - proto = ":sds", - deps = [ - "//envoy/api/v3alpha:discovery_go_proto", - ], -) - api_proto_library_internal( name = "rtds", srcs = ["rtds.proto"], @@ -65,11 +48,3 @@ api_proto_library_internal( "//envoy/api/v3alpha:discovery", ], ) - -api_go_grpc_library( - name = "rtds", - proto = ":rtds", - deps = [ - "//envoy/api/v3alpha:discovery_go_proto", - ], -) diff --git a/api/envoy/service/discovery/v3alpha/ads.proto b/api/envoy/service/discovery/v3alpha/ads.proto index d6b7897ba7a1..251c51301a16 100644 --- a/api/envoy/service/discovery/v3alpha/ads.proto +++ b/api/envoy/service/discovery/v3alpha/ads.proto @@ -5,7 +5,6 @@ package envoy.service.discovery.v3alpha; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v3alpha/discovery.proto"; diff --git a/api/envoy/service/load_stats/v2/BUILD b/api/envoy/service/load_stats/v2/BUILD index f126ebcb1d44..af07d8aa101c 100644 --- a/api/envoy/service/load_stats/v2/BUILD +++ b/api/envoy/service/load_stats/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v2/core", + "//envoy/api/v2/endpoint:pkg", + ], +) + api_proto_library_internal( name = "lrs", srcs = ["lrs.proto"], @@ -11,12 +19,3 @@ api_proto_library_internal( 
"//envoy/api/v2/endpoint:load_report", ], ) - -api_go_grpc_library( - name = "lrs", - proto = ":lrs", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/endpoint:load_report_go_proto", - ], -) diff --git a/api/envoy/service/load_stats/v2/lrs.proto b/api/envoy/service/load_stats/v2/lrs.proto index 2fe95f3b6a90..d7029db0b5ea 100644 --- a/api/envoy/service/load_stats/v2/lrs.proto +++ b/api/envoy/service/load_stats/v2/lrs.proto @@ -5,7 +5,6 @@ package envoy.service.load_stats.v2; option java_outer_classname = "LrsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.load_stats.v2"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/service/load_stats/v3alpha/BUILD b/api/envoy/service/load_stats/v3alpha/BUILD index 42c7ce8438da..bc4ff2642c6d 100644 --- a/api/envoy/service/load_stats/v3alpha/BUILD +++ b/api/envoy/service/load_stats/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/endpoint:pkg", + ], +) + api_proto_library_internal( name = "lrs", srcs = ["lrs.proto"], @@ -11,12 +19,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/endpoint:load_report", ], ) - -api_go_grpc_library( - name = "lrs", - proto = ":lrs", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/endpoint:load_report_go_proto", - ], -) diff --git a/api/envoy/service/load_stats/v3alpha/lrs.proto b/api/envoy/service/load_stats/v3alpha/lrs.proto index 81058ed574a7..ec8adedbf2a8 100644 --- a/api/envoy/service/load_stats/v3alpha/lrs.proto +++ b/api/envoy/service/load_stats/v3alpha/lrs.proto @@ -5,7 +5,6 @@ 
package envoy.service.load_stats.v3alpha; option java_outer_classname = "LrsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.load_stats.v3alpha"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/service/metrics/v2/BUILD b/api/envoy/service/metrics/v2/BUILD index 7f3921ced629..091d40e7f8e5 100644 --- a/api/envoy/service/metrics/v2/BUILD +++ b/api/envoy/service/metrics/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v2/core", + "@prometheus_metrics_model//:client_model", + ], +) + api_proto_library_internal( name = "metrics_service", srcs = ["metrics_service.proto"], @@ -13,12 +21,3 @@ api_proto_library_internal( "@prometheus_metrics_model//:client_model", ], ) - -api_go_grpc_library( - name = "metrics_service", - proto = ":metrics_service", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "@prometheus_metrics_model//:client_model_go_proto", - ], -) diff --git a/api/envoy/service/metrics/v2/metrics_service.proto b/api/envoy/service/metrics/v2/metrics_service.proto index b70be3bdd9a1..10745ba665c0 100644 --- a/api/envoy/service/metrics/v2/metrics_service.proto +++ b/api/envoy/service/metrics/v2/metrics_service.proto @@ -5,7 +5,6 @@ package envoy.service.metrics.v2; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.metrics.v2"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/service/metrics/v3alpha/BUILD b/api/envoy/service/metrics/v3alpha/BUILD index 1f1bb553cd82..6053aac4f1be 
100644 --- a/api/envoy/service/metrics/v3alpha/BUILD +++ b/api/envoy/service/metrics/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v3alpha/core", + "@prometheus_metrics_model//:client_model", + ], +) + api_proto_library_internal( name = "metrics_service", srcs = ["metrics_service.proto"], @@ -13,12 +21,3 @@ api_proto_library_internal( "@prometheus_metrics_model//:client_model", ], ) - -api_go_grpc_library( - name = "metrics_service", - proto = ":metrics_service", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "@prometheus_metrics_model//:client_model_go_proto", - ], -) diff --git a/api/envoy/service/metrics/v3alpha/metrics_service.proto b/api/envoy/service/metrics/v3alpha/metrics_service.proto index 9a5306553b18..bcf1caa28a2d 100644 --- a/api/envoy/service/metrics/v3alpha/metrics_service.proto +++ b/api/envoy/service/metrics/v3alpha/metrics_service.proto @@ -5,7 +5,6 @@ package envoy.service.metrics.v3alpha; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.metrics.v3alpha"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/service/ratelimit/v2/BUILD b/api/envoy/service/ratelimit/v2/BUILD index 24278fbebc1f..7bc5db7113e7 100644 --- a/api/envoy/service/ratelimit/v2/BUILD +++ b/api/envoy/service/ratelimit/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + 
has_services = True, + deps = [ + "//envoy/api/v2/core", + "//envoy/api/v2/ratelimit:pkg", + ], +) + api_proto_library_internal( name = "rls", srcs = ["rls.proto"], @@ -12,13 +20,3 @@ api_proto_library_internal( "//envoy/api/v2/ratelimit", ], ) - -api_go_grpc_library( - name = "rls", - proto = ":rls", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "//envoy/api/v2/core:grpc_service_go_proto", - "//envoy/api/v2/ratelimit:ratelimit_go_proto", - ], -) diff --git a/api/envoy/service/ratelimit/v2/rls.proto b/api/envoy/service/ratelimit/v2/rls.proto index 18b6b678e908..328bb547d630 100644 --- a/api/envoy/service/ratelimit/v2/rls.proto +++ b/api/envoy/service/ratelimit/v2/rls.proto @@ -5,7 +5,6 @@ package envoy.service.ratelimit.v2; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; -option go_package = "v2"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/ratelimit/ratelimit.proto"; diff --git a/api/envoy/service/ratelimit/v3alpha/BUILD b/api/envoy/service/ratelimit/v3alpha/BUILD index 19954c5bfcc9..965458beaec6 100644 --- a/api/envoy/service/ratelimit/v3alpha/BUILD +++ b/api/envoy/service/ratelimit/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/ratelimit:pkg", + ], +) + api_proto_library_internal( name = "rls", srcs = ["rls.proto"], @@ -12,13 +20,3 @@ api_proto_library_internal( "//envoy/api/v3alpha/ratelimit", ], ) - -api_go_grpc_library( - name = "rls", - proto = ":rls", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "//envoy/api/v3alpha/core:grpc_service_go_proto", - "//envoy/api/v3alpha/ratelimit:ratelimit_go_proto", - ], -) diff 
--git a/api/envoy/service/ratelimit/v3alpha/rls.proto b/api/envoy/service/ratelimit/v3alpha/rls.proto index 7bbd2e3ec183..57a3ee98de94 100644 --- a/api/envoy/service/ratelimit/v3alpha/rls.proto +++ b/api/envoy/service/ratelimit/v3alpha/rls.proto @@ -5,7 +5,6 @@ package envoy.service.ratelimit.v3alpha; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.ratelimit.v3alpha"; -option go_package = "v2"; import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/ratelimit/ratelimit.proto"; diff --git a/api/envoy/service/tap/v2alpha/BUILD b/api/envoy/service/tap/v2alpha/BUILD index 63d11e80a755..621bf208d495 100644 --- a/api/envoy/service/tap/v2alpha/BUILD +++ b/api/envoy/service/tap/v2alpha/BUILD @@ -1,7 +1,17 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v2", + "//envoy/api/v2/core", + "//envoy/api/v2/route:pkg", + "//envoy/data/tap/v2alpha:pkg", + ], +) + api_proto_library_internal( name = "common", srcs = ["common.proto"], diff --git a/api/envoy/service/tap/v3alpha/BUILD b/api/envoy/service/tap/v3alpha/BUILD index a90b2d819297..005ef96b61a0 100644 --- a/api/envoy/service/tap/v3alpha/BUILD +++ b/api/envoy/service/tap/v3alpha/BUILD @@ -1,7 +1,17 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v3alpha", + "//envoy/api/v3alpha/core", + "//envoy/api/v3alpha/route:pkg", + "//envoy/data/tap/v3alpha:pkg", + ], +) + api_proto_library_internal( name = "common", srcs = ["common.proto"], diff --git a/api/envoy/service/trace/v2/BUILD 
b/api/envoy/service/trace/v2/BUILD index 2b3367f0af45..cee54d8b34a0 100644 --- a/api/envoy/service/trace/v2/BUILD +++ b/api/envoy/service/trace/v2/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + has_services = True, + deps = [ + "//envoy/api/v2/core", + "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", + ], +) + api_proto_library_internal( name = "trace_service", srcs = ["trace_service.proto"], @@ -12,12 +20,3 @@ api_proto_library_internal( "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", ], ) - -api_go_grpc_library( - name = "trace_service", - proto = ":trace_service", - deps = [ - "//envoy/api/v2/core:base_go_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", - ], -) diff --git a/api/envoy/service/trace/v2/trace_service.proto b/api/envoy/service/trace/v2/trace_service.proto index ec87b3560651..92b8489f2108 100644 --- a/api/envoy/service/trace/v2/trace_service.proto +++ b/api/envoy/service/trace/v2/trace_service.proto @@ -7,7 +7,6 @@ package envoy.service.trace.v2; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.trace.v2"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/service/trace/v3alpha/BUILD b/api/envoy/service/trace/v3alpha/BUILD index 815d0c4c93cc..fbfafec678de 100644 --- a/api/envoy/service/trace/v3alpha/BUILD +++ b/api/envoy/service/trace/v3alpha/BUILD @@ -1,7 +1,15 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + 
has_services = True, + deps = [ + "//envoy/api/v3alpha/core", + "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", + ], +) + api_proto_library_internal( name = "trace_service", srcs = ["trace_service.proto"], @@ -12,12 +20,3 @@ api_proto_library_internal( "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", ], ) - -api_go_grpc_library( - name = "trace_service", - proto = ":trace_service", - deps = [ - "//envoy/api/v3alpha/core:base_go_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", - ], -) diff --git a/api/envoy/service/trace/v3alpha/trace_service.proto b/api/envoy/service/trace/v3alpha/trace_service.proto index 521139a084e5..b6559800cd39 100644 --- a/api/envoy/service/trace/v3alpha/trace_service.proto +++ b/api/envoy/service/trace/v3alpha/trace_service.proto @@ -7,7 +7,6 @@ package envoy.service.trace.v3alpha; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.trace.v3alpha"; -option go_package = "v2"; option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; diff --git a/api/envoy/type/BUILD b/api/envoy/type/BUILD index 97f0fd424f36..26dd9730d9ea 100644 --- a/api/envoy/type/BUILD +++ b/api/envoy/type/BUILD @@ -1,36 +1,25 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + name = "type", +) + api_proto_library_internal( name = "http_status", srcs = ["http_status.proto"], visibility = ["//visibility:public"], ) -api_go_proto_library( - name = "http_status", - proto = ":http_status", -) - api_proto_library_internal( name = "percent", srcs = ["percent.proto"], visibility = ["//visibility:public"], ) -api_go_proto_library( - name = "percent", - proto = ":percent", -) - api_proto_library_internal( name = "range", srcs = 
["range.proto"], visibility = ["//visibility:public"], ) - -api_go_proto_library( - name = "range", - proto = ":range", -) diff --git a/api/envoy/type/matcher/BUILD b/api/envoy/type/matcher/BUILD index 5fe594db4ca2..c7db01b6cdfe 100644 --- a/api/envoy/type/matcher/BUILD +++ b/api/envoy/type/matcher/BUILD @@ -1,7 +1,12 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") licenses(["notice"]) # Apache 2 +api_proto_package( + name = "matcher", + deps = ["//envoy/type"], +) + api_proto_library_internal( name = "metadata", srcs = ["metadata.proto"], @@ -11,14 +16,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "metadata", - proto = ":metadata", - deps = [ - ":value_go_proto", - ], -) - api_proto_library_internal( name = "number", srcs = ["number.proto"], @@ -28,14 +25,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "number", - proto = ":number", - deps = [ - "//envoy/type:range_go_proto", - ], -) - api_proto_library_internal( name = "string", srcs = ["string.proto"], @@ -45,14 +34,6 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "string", - proto = ":string", - deps = [ - ":regex_go_proto", - ], -) - api_proto_library_internal( name = "value", srcs = ["value.proto"], @@ -63,22 +44,8 @@ api_proto_library_internal( ], ) -api_go_proto_library( - name = "value", - proto = ":value", - deps = [ - ":number_go_proto", - ":string_go_proto", - ], -) - api_proto_library_internal( name = "regex", srcs = ["regex.proto"], visibility = ["//visibility:public"], ) - -api_go_proto_library( - name = "regex", - proto = ":regex", -) diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto index 08190a9f5d38..56b69eae5968 100644 --- a/api/envoy/type/matcher/metadata.proto +++ b/api/envoy/type/matcher/metadata.proto @@ -5,7 +5,6 @@ package 
envoy.type.matcher; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type.matcher"; -option go_package = "matcher"; import "envoy/type/matcher/value.proto"; diff --git a/api/envoy/type/matcher/number.proto b/api/envoy/type/matcher/number.proto index f6c49b3fcdf6..5c8cec7bcbdc 100644 --- a/api/envoy/type/matcher/number.proto +++ b/api/envoy/type/matcher/number.proto @@ -5,7 +5,6 @@ package envoy.type.matcher; option java_outer_classname = "NumberProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type.matcher"; -option go_package = "matcher"; import "envoy/type/range.proto"; diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index 048a576cc8a6..cf6343c9ac51 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -5,7 +5,6 @@ package envoy.type.matcher; option java_outer_classname = "RegexProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type.matcher"; -option go_package = "matcher"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto index 35628b7ccb10..986e393be154 100644 --- a/api/envoy/type/matcher/string.proto +++ b/api/envoy/type/matcher/string.proto @@ -5,7 +5,6 @@ package envoy.type.matcher; option java_outer_classname = "StringProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type.matcher"; -option go_package = "matcher"; import "envoy/type/matcher/regex.proto"; diff --git a/api/envoy/type/matcher/value.proto b/api/envoy/type/matcher/value.proto index 52f5e5b100b1..7164504366d9 100644 --- a/api/envoy/type/matcher/value.proto +++ b/api/envoy/type/matcher/value.proto @@ -5,7 +5,6 @@ package envoy.type.matcher; option java_outer_classname = "ValueProto"; option java_multiple_files = true; option 
java_package = "io.envoyproxy.envoy.type.matcher"; -option go_package = "matcher"; import "envoy/type/matcher/number.proto"; import "envoy/type/matcher/string.proto"; diff --git a/api/envoy/type/range.proto b/api/envoy/type/range.proto index e64b71e440f3..cc38e8f25f5e 100644 --- a/api/envoy/type/range.proto +++ b/api/envoy/type/range.proto @@ -5,7 +5,6 @@ package envoy.type; option java_outer_classname = "RangeProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type"; -option go_package = "envoy_type"; import "gogoproto/gogo.proto"; diff --git a/api/test/build/BUILD b/api/test/build/BUILD index a271aa5daf82..c8390c1fede4 100644 --- a/api/test/build/BUILD +++ b/api/test/build/BUILD @@ -26,18 +26,13 @@ api_go_test( srcs = ["go_build_test.go"], importpath = "go_build_test", deps = [ - "//envoy/api/v2:cds_go_grpc", - "//envoy/api/v2:eds_go_grpc", - "//envoy/api/v2:lds_go_grpc", - "//envoy/api/v2:rds_go_grpc", - "//envoy/api/v2/auth:cert_go_proto", - "//envoy/config/bootstrap/v2:bootstrap_go_proto", - "//envoy/service/accesslog/v2:als_go_grpc", - "//envoy/service/discovery/v2:ads_go_grpc", - "//envoy/service/discovery/v2:hds_go_grpc", - "//envoy/service/discovery/v2:sds_go_grpc", - "//envoy/service/metrics/v2:metrics_service_go_grpc", - "//envoy/service/ratelimit/v2:rls_go_grpc", - "//envoy/service/trace/v2:trace_service_go_grpc", + "//envoy/api/v2:v2_go_proto", + "//envoy/api/v2/auth:auth_go_proto", + "//envoy/config/bootstrap/v2:pkg_go_proto", + "//envoy/service/accesslog/v2:pkg_go_proto", + "//envoy/service/discovery/v2:pkg_go_proto", + "//envoy/service/metrics/v2:pkg_go_proto", + "//envoy/service/ratelimit/v2:pkg_go_proto", + "//envoy/service/trace/v2:pkg_go_proto", ], ) diff --git a/api/test/build/go_build_test.go b/api/test/build/go_build_test.go index 911d3ef39655..c5c15becff35 100644 --- a/api/test/build/go_build_test.go +++ b/api/test/build/go_build_test.go @@ -3,19 +3,14 @@ package go_build_test import ( "testing" - _ 
"github.com/envoyproxy/data-plane-api/api/ads" - _ "github.com/envoyproxy/data-plane-api/api/als" - _ "github.com/envoyproxy/data-plane-api/api/bootstrap" - _ "github.com/envoyproxy/data-plane-api/api/cds" - _ "github.com/envoyproxy/data-plane-api/api/cert" - _ "github.com/envoyproxy/data-plane-api/api/eds" - _ "github.com/envoyproxy/data-plane-api/api/hds" - _ "github.com/envoyproxy/data-plane-api/api/lds" - _ "github.com/envoyproxy/data-plane-api/api/metrics_service" - _ "github.com/envoyproxy/data-plane-api/api/rds" - _ "github.com/envoyproxy/data-plane-api/api/rls" - _ "github.com/envoyproxy/data-plane-api/api/sds" - _ "github.com/envoyproxy/data-plane-api/api/trace_service" + _ "github.com/envoyproxy/data-plane-api/api/envoy/api/v2" + _ "github.com/envoyproxy/data-plane-api/api/envoy/api/v2/auth" + _ "github.com/envoyproxy/data-plane-api/api/envoy/config/bootstrap/v2" + _ "github.com/envoyproxy/data-plane-api/api/envoy/service/accesslog/v2" + _ "github.com/envoyproxy/data-plane-api/api/envoy/service/discovery/v2" + _ "github.com/envoyproxy/data-plane-api/api/envoy/service/metrics/v2" + _ "github.com/envoyproxy/data-plane-api/api/envoy/service/ratelimit/v2" + _ "github.com/envoyproxy/data-plane-api/api/envoy/service/trace/v2" ) func TestNoop(t *testing.T) { diff --git a/tools/check_format.py b/tools/check_format.py index 26b3e621848f..ed3b36974963 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -72,6 +72,7 @@ PROTO_OPTION_JAVA_PACKAGE = "option java_package = \"" PROTO_OPTION_JAVA_OUTER_CLASSNAME = "option java_outer_classname = \"" PROTO_OPTION_JAVA_MULTIPLE_FILES = "option java_multiple_files = " +PROTO_OPTION_GO_PACKAGE = "option go_package = \"" # yapf: disable PROTOBUF_TYPE_ERRORS = { @@ -623,6 +624,17 @@ def checkBuildPath(file_path): command = "%s %s | diff %s -" % 
(ENVOY_BUILD_FIXER_PATH, file_path, file_path) error_messages += executeCommand(command, "envoy_build_fixer check failed", file_path) + if isBuildFile(file_path) and file_path.startswith(args.api_prefix + "envoy"): + found = False + finput = fileinput.input(file_path) + for line in finput: + if "api_proto_package(" in line: + found = True + break + finput.close() + if not found: + error_messages += ["API build file does not provide api_proto_package()"] + command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path) error_messages += executeCommand(command, "buildifier check failed", file_path) error_messages += checkFileContents(file_path, checkBuildLine) @@ -672,6 +684,9 @@ def checkSourcePath(file_path): "Java proto option 'java_outer_classname' not set") error_messages += errorIfNoSubstringFound("\n" + PROTO_OPTION_JAVA_MULTIPLE_FILES, file_path, "Java proto option 'java_multiple_files' not set") + with open(file_path) as f: + if PROTO_OPTION_GO_PACKAGE in f.read(): + error_messages += ["go_package option should not be set in %s" % file_path] return error_messages diff --git a/tools/check_format_test_helper.py b/tools/check_format_test_helper.py index c41abbc60890..844488a8aa35 100755 --- a/tools/check_format_test_helper.py +++ b/tools/check_format_test_helper.py @@ -238,6 +238,7 @@ def checkFileExpectingOK(filename): errors += checkAndFixError("bad_envoy_build_sys_ref.BUILD", "Superfluous '@envoy//' prefix") errors += checkAndFixError("proto_format.proto", "clang-format check failed") errors += checkAndFixError("api/java_options.proto", "Java proto option") + errors += checkFileExpectingError("api/go_package.proto", "go_package option should not be set") errors += checkAndFixError( "cpp_std.cc", "term absl::make_unique< should be replaced with standard library term std::make_unique<") diff --git a/tools/testdata/check_format/api/go_package.proto b/tools/testdata/check_format/api/go_package.proto new file mode 100644 index 000000000000..b32347b6e46f --- /dev/null 
+++ b/tools/testdata/check_format/api/go_package.proto @@ -0,0 +1,5 @@ +option go_package = "foo"; +option java_package = "io.envoyproxy.envoy.foo"; +option java_outer_classname = "JavaOptionsProto"; +option java_multiple_files = true; +package envoy.foo;