diff --git a/.bazelignore b/.bazelignore
index 04680184abec..8bbbe337c66a 100644
--- a/.bazelignore
+++ b/.bazelignore
@@ -1,2 +1,3 @@
 api
 examples/grpc-bridge/script
+tools/clang_tools
diff --git a/.bazelrc b/.bazelrc
index 9661c979514d..820195d5b3d5 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -27,9 +27,10 @@ build:linux --copt=-fPIC
 # We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace.
 build --define absl=1
 
-# Pass PATH, CC and CXX variables from the environment.
+# Pass PATH, CC, CXX and LLVM_CONFIG variables from the environment.
 build --action_env=CC
 build --action_env=CXX
+build --action_env=LLVM_CONFIG
 build --action_env=PATH
 
 # Common flags for sanitizers
@@ -84,14 +85,17 @@ build:clang-tsan --linkopt -fuse-ld=lld
 # Needed due to https://github.com/libevent/libevent/issues/777
 build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE
 
-# Clang MSAN - broken today since we need to rebuild lib[std]c++ and external deps with MSAN
-# support (see https://github.com/envoyproxy/envoy/issues/443).
+# Clang MSAN - this is the base config for remote-msan and docker-msan. To run this config without
+# our build image, follow the libc++ instructions at https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo
+# and pass the corresponding `--copt` and `--linkopt` flags as well.
 build:clang-msan --action_env=ENVOY_MSAN=1
 build:clang-msan --config=sanitizer
 build:clang-msan --define ENVOY_CONFIG_MSAN=1
 build:clang-msan --copt -fsanitize=memory
 build:clang-msan --linkopt -fsanitize=memory
 build:clang-msan --copt -fsanitize-memory-track-origins=2
+# MSAN needs -O1 to get reasonable performance.
+build:clang-msan --copt -O1
 
 # Clang with libc++
 build:libc++ --config=clang
@@ -125,6 +129,10 @@ build:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++
 build:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++
 build:rbe-toolchain-clang-libc++ --define force_libcpp=enabled
 
+build:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib
+build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib
+build:rbe-toolchain-msan --config=clang-msan
+
 build:rbe-toolchain-gcc --config=rbe-toolchain
 build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain
 build:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain
@@ -146,6 +154,10 @@ build:remote-clang-libc++ --config=rbe-toolchain-clang-libc++
 build:remote-gcc --config=remote
 build:remote-gcc --config=rbe-toolchain-gcc
 
+build:remote-msan --config=remote
+build:remote-msan --config=rbe-toolchain-clang-libc++
+build:remote-msan --config=rbe-toolchain-msan
+
 # Docker sandbox
 # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7
 build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:f0b2453c3587e3297f5caf5e97fbf57c97592c96136209ec13fe2795aae2c896
@@ -166,6 +178,10 @@ build:docker-clang-libc++ --config=rbe-toolchain-clang-libc++
 build:docker-gcc --config=docker-sandbox
 build:docker-gcc --config=rbe-toolchain-gcc
 
+build:docker-msan --config=docker-sandbox
+build:docker-msan --config=rbe-toolchain-clang-libc++
+build:docker-msan --config=rbe-toolchain-msan
+
 # CI configurations
 build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com
 build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com
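
To make the MSAN layering above concrete: the comment on `clang-msan` says the config can also be used outside the Envoy build image by supplying an MSAN-instrumented libc++ yourself. A minimal, hypothetical sketch of such a local `.bazelrc` entry is shown below; the config name `local-msan`, the `/path/to/libcxx-msan` install prefix, and the exact flag set are assumptions, mirroring what `rbe-toolchain-msan` does with `/opt/libcxx_msan`:

    # Hypothetical local MSAN config; the libc++ install prefix is an assumption.
    build:local-msan --config=clang-msan
    build:local-msan --copt=-stdlib=libc++
    build:local-msan --linkopt=-stdlib=libc++
    build:local-msan --linkopt=-L/path/to/libcxx-msan/lib
    build:local-msan --linkopt=-Wl,-rpath,/path/to/libcxx-msan/lib

Inside the build image, usage would then look like `bazel test --config=docker-msan <target>` (or `--config=remote-msan` on RBE), and `--config=local-msan` with a setup along the lines of the sketch above.
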
diff --git a/.clang-tidy b/.clang-tidy
index 500c18bd7c9b..c3c4b9349d68 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -4,6 +4,7 @@ Checks:          'abseil-*,
                   clang-diagnostic-*,
                   misc-unused-using-decls,
                   modernize-*,
+                  -modernize-pass-by-value,
                   -modernize-use-trailing-return-type,
                   performance-*,
                   readability-braces-around-statements,
diff --git a/CODEOWNERS b/CODEOWNERS
index 678b6158890f..3607f0a02bae 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -55,7 +55,7 @@ extensions/filters/common/original_src @snowp @klarose
 # adaptive concurrency limit extension.
 /*/extensions/filters/http/adaptive_concurrency @tonya11en @mattklein123
 # http inspector
-/*/extensions/filters/listener/http_inspector @crazyxy @PiotrSikora @lizan
+/*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan
 # attribute context
 /*/extensions/filters/common/expr @kyessenov @yangminzhu
 # webassembly common extension
@@ -91,3 +91,4 @@ extensions/filters/common/original_src @snowp @klarose
 /*/extensions/filters/network/tcp_proxy @alyssawilk @zuercher
 /*/extensions/filters/network/echo @htuch @alyssawilk
 /*/extensions/filters/udp/udp_proxy @mattklein123 @danzh2010
+/*/extensions/clusters/aggregate @yxue @snowp
diff --git a/OWNERS.md b/OWNERS.md
index 2d1de3355dfe..9bd65d853a55 100644
--- a/OWNERS.md
+++ b/OWNERS.md
@@ -10,7 +10,7 @@ routing PRs, questions, etc. to the right place.
   * Catch-all, "all the things", and generally trying to make himself obsolete as fast as
     possible.
 * Harvey Tuch ([htuch](https://github.com/htuch)) (htuch@google.com)
-  * APIs, xDS, gRPC, configuration, Bazel/build, base server (startup, etc.), Python, and Bash.
+  * APIs, xDS, UDPA, gRPC, configuration, security, Python, and Bash.
 * Alyssa Wilk ([alyssawilk](https://github.com/alyssawilk)) (alyssar@google.com)
   * HTTP, flow control, cluster manager, load balancing, and core networking (listeners,
     connections, etc.).
@@ -25,6 +25,8 @@ routing PRs, questions, etc. to the right place.
 
 # Maintainers
 
+* Asra Ali ([asraa](https://github.com/asraa)) (asraa@google.com)
+  * Fuzzing, security, headers, HTTP/gRPC, router, access log, tests.
 * Jose Nino ([junr03](https://github.com/junr03)) (jnino@lyft.com)
   * Outlier detection, HTTP routing, xDS, configuration/operational questions.
 * Dan NoƩ ([dnoe](https://github.com/dnoe)) (dpn@google.com)
@@ -39,7 +41,6 @@ routing PRs, questions, etc. to the right place.
 * All maintainers
 * Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com)
 * Yan Avlasov ([yanavlasov](https://github.com/yanavlasov)) (yavlasov@google.com)
-* Asra Ali ([asraa](https://github.com/asraa)) (asraa@google.com)
 
 # Emeritus maintainers
 
diff --git a/SECURITY.md b/SECURITY.md
index 42ec10e584b2..4d34d4c395af 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -185,8 +185,8 @@ These steps should be completed 1-3 days after the Release Date. The retrospecti
 ## Private Distributors List
 
 This list is intended to be used primarily to provide actionable information to
-multiple distribution vendors at once. This list is not intended for
-individuals to find out about security issues.
+multiple distribution vendors, as well as a *limited* set of high-impact end users, at once. *This
+list is not generally intended for end users to find out about security issues*.
 
 ### Embargo Policy
 
@@ -194,7 +194,7 @@ The information members receive on cncf-envoy-distributors-announce must not be
 even hinted at anywhere beyond the need-to-know within your specific team except with the list's
 explicit approval. This holds true until the public disclosure date/time that was agreed upon by the
 list. Members of the list and others may not use the information for anything other than getting the
-issue fixed for your respective distribution's users.
+issue fixed for your respective users.
 
 Before any information from the list is shared with respective members of your team required to fix
 said issue, they must agree to the same terms and only find out information on a need-to-know basis.
@@ -246,7 +246,7 @@ could be in the form of the following:
 ### Membership Criteria
 
 To be eligible for the cncf-envoy-distributors-announce mailing list, your
-distribution should:
+use of Envoy should:
 
 1. Be either:
    1. An actively maintained distribution of Envoy components. An example is
@@ -261,11 +261,36 @@ distribution should:
       marketing copy, etc.) that it is built on top of Envoy. E.g.,
       "SuperAwesomeCloudProvider's Envoy as a Service (EaaS)". An infrastructure
       service that uses Envoy for a product but does not publicly say they are
-      using Envoy does not qualify. This is essentially IaaS or PaaS, if you use
-      Envoy to support a SaaS, e.g. "SuperAwesomeCatVideoService", this does not
-      qualify.
-2. Have a user or customer base not limited to your own organization. We will use the size
-   of the user or customer base as part of the criteria to determine
+      using Envoy does not *generally* qualify (see option 3 that follows). This is essentially IaaS
+      or PaaS. If you use Envoy to support a SaaS, e.g. "SuperAwesomeCatVideoService", this does not
+      *generally* qualify.
+
+   OR
+
+   3. An end user of Envoy that satisfies the following requirements:
+       1. Is "well known" to the Envoy community. Being "well known" is fully subjective and
+          determined by the Envoy maintainers and security team. Becoming "well known" would
+          generally be achieved by activities such as: PR contributions, either code or
+          documentation; helping other end users on Slack, GitHub, and the mailing lists; speaking
+          about use of Envoy at conferences; writing about use of Envoy in blog posts; sponsoring
+          Envoy conferences, meetups, and other activities; etc. This is a more strict variant of
+          item 5 below.
+       2. Is of sufficient size, scale, and impact to make your inclusion on the list
+          worthwhile. The definition of size, scale, and impact is fully subjective and
+          determined by the Envoy maintainers and security team. The definition will not be
+          discussed further in this document.
+       3. You *must* smoke test and then widely deploy security patches promptly and report back
+          success or failure ASAP. Furthermore, the Envoy maintainers may occasionally ask you to
+          smoke test especially risky public PRs before they are merged. Not performing these tasks
+          in a reasonably prompt timeframe will result in removal from the list. This is a more
+          strict variant of item 7 below.
+       4. In order to balance inclusion in the list versus a greater chance of accidental
+          disclosure, end users added to the list via this option will be limited to a total of
+          **10** slots. Periodic review (see below) may allow new slots to open, so please continue
+          to apply if it seems your organization would otherwise qualify. The security team also
+          reserves the right to change this limit in the future.
+2. Have a user or customer base not limited to your own organization (except for option 3 above).
+   We will use the size of the user or customer base as part of the criteria to determine
    eligibility.
 3. Have a publicly verifiable track record up to present day of fixing security
    issues.
@@ -286,7 +311,7 @@ distribution should:
     e-mail updates. This e-mail address will be [shared with the Envoy community](#Members).
 
 Note that Envoy maintainers are members of the Envoy security team. [Members of the Envoy security
-team](OWNERS.md#envoy-security-team) and the organizations that they represents are implicitly
+team](OWNERS.md#envoy-security-team) and the organizations that they represent are implicitly
 included in the private distributor list. These organizations do not need to meet the above list of
 criteria with the exception of the acceptance of the embargo policy.
 
@@ -306,11 +331,20 @@ Subject: Seven-Corp Membership to cncf-envoy-distributors-announce
 Below are each criterion and why I think we, Seven-Corp, qualify.
 
 > 1. Be an actively maintained distribution of Envoy components OR offer Envoy as a publicly
-     available service in which the product clearly states that it is built on top of Envoy.
+     available service in which the product clearly states that it is built on top of Envoy OR
+     be a well known end user of sufficient size, scale, and impact to make your
+     inclusion worthwhile.
 
 We distribute the "Seven" distribution of Envoy [link]. We have been doing
 this since 1999 before proxies were even cool.
 
+OR
+
+We use Envoy for our #1 rated cat video service and have 40 billion MAU, proxying 40 trillion^2 RPS
+through Envoy at the edge. Secure cat videos are our top priority. We also contribute a lot to the Envoy
+community by implementing features, not making Matt ask for documentation or tests, and writing blog
+posts about efficient Envoy cat video serving.
+
 > 2. Have a user or customer base not limited to your own organization. Please specify an
 >    approximate size of your user or customer base, including the number of
 >    production deployments.
@@ -361,21 +395,29 @@ CrashOverride will vouch for the "Seven" distribution joining the distribution l
       individuals come and go. A good example is envoy-security@seven.com, a bad example is
       acidburn@seven.com. You must accept the invite sent to this address or you will not receive any
       e-mail updates. This e-mail address will be shared with the Envoy community.
+
+envoy-security@seven.com
 ```
 
+### Review of Membership Criteria
+
+In all cases, members of the distribution list will be reviewed on a yearly basis by the maintainers
+and security team to ensure they still qualify for inclusion on the list.
+
 ### Members
 
-| E-mail                                                | Organization  |
-|-------------------------------------------------------|:-------------:|
-| envoy-security-team@aspenmesh.io                      | Aspen Mesh    |
-| aws-app-mesh-security@amazon.com                      | AWS           |
-| security@cilium.io                                    | Cilium        |
-| vulnerabilityreports@cloudfoundry.org                 | Cloud Foundry |
-| secalert@datawire.io                                  | Datawire      |
-| google-internal-envoy-security@google.com             | Google        |
-| argoprod@us.ibm.com                                   | IBM           |
-| istio-security-vulnerability-reports@googlegroups.com | Istio         |
-| secalert@redhat.com                                   | Red Hat       |
-| envoy-security@solo.io                                | solo.io       |
-| envoy-security@tetrate.io                             | Tetrate       |
-| security@vmware.com                                   | VMware        |
+| E-mail                                                | Organization  | End User | Last Review |
+|-------------------------------------------------------|:-------------:|:--------:|:-----------:|
+| envoy-security-team@aspenmesh.io                      | Aspen Mesh    | No       | 12/19       |
+| aws-app-mesh-security@amazon.com                      | AWS           | No       | 12/19       |
+| security@cilium.io                                    | Cilium        | No       | 12/19       |
+| vulnerabilityreports@cloudfoundry.org                 | Cloud Foundry | No       | 12/19       |
+| secalert@datawire.io                                  | Datawire      | No       | 12/19       |
+| google-internal-envoy-security@google.com             | Google        | No       | 12/19       |
+| argoprod@us.ibm.com                                   | IBM           | No       | 12/19       |
+| istio-security-vulnerability-reports@googlegroups.com | Istio         | No       | 12/19       |
+| secalert@redhat.com                                   | Red Hat       | No       | 12/19       |
+| envoy-security@solo.io                                | solo.io       | No       | 12/19       |
+| envoy-security@tetrate.io                             | Tetrate       | No       | 12/19       |
+| security@vmware.com                                   | VMware        | No       | 12/19       |
+| envoy-security@pinterest.com                          | Pinterest     | Yes      | 12/19       |
diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl
index 8000f18e0d4e..40c84e51df26 100644
--- a/api/bazel/api_build_system.bzl
+++ b/api/bazel/api_build_system.bzl
@@ -68,7 +68,7 @@ def _api_py_proto_library(name, srcs = [], deps = []):
 
 # This defines googleapis py_proto_library. The repository does not provide its definition and requires
 # overriding it in the consuming project (see https://github.com/grpc/grpc/issues/19255 for more details).
-def py_proto_library(name, deps = []):
+def py_proto_library(name, deps = [], plugin = None):
     srcs = [dep[:-6] + ".proto" if dep.endswith("_proto") else dep for dep in deps]
     proto_deps = []
 
@@ -77,6 +77,10 @@ def py_proto_library(name, deps = []):
     # As a workaround, manually specify the proto dependencies for the imported python rules.
     if name == "annotations_py_proto":
         proto_deps = proto_deps + [":http_py_proto"]
+
+    # py_proto_library does not yet accept a plugin argument as of gRPC v1.25.0:
+    # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72.
+    # Once the gRPC version is newer than v1.25.x, plugin should be forwarded here as well.
     _py_proto_library(
         name = name,
         srcs = srcs,
diff --git a/api/bazel/external_proto_deps.bzl b/api/bazel/external_proto_deps.bzl
index 2c4752efbf0a..514093abef90 100644
--- a/api/bazel/external_proto_deps.bzl
+++ b/api/bazel/external_proto_deps.bzl
@@ -17,7 +17,7 @@ EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = {
 
 # This maps from the Bazel proto_library target to the Go language binding target for external dependencies.
 EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = {
-    "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:cel_go_proto",
+    "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto",
     "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go",
     "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go",
 }
diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl
index 2f29473d10bb..928b1f2ff589 100644
--- a/api/bazel/repository_locations.bzl
+++ b/api/bazel/repository_locations.bzl
@@ -7,16 +7,16 @@ OPENCENSUS_PROTO_SHA256 = "faeb93f293ff715b0cb530d273901c0e2e99277b9ed1c0a0326bc
 PGV_GIT_SHA = "a18376249eb51cdd517f67fe8703897322812e6d"  # Nov 5, 2019
 PGV_SHA256 = "8e45a3582e7fa9d0005ad6ff1ed9208e793b847f1c455d2bbe5b1c580338ffaf"
 
-GOOGLEAPIS_GIT_SHA = "be480e391cc88a75cf2a81960ef79c80d5012068"  # Jul 24, 2019
-GOOGLEAPIS_SHA = "c1969e5b72eab6d9b6cfcff748e45ba57294aeea1d96fd04cd081995de0605c2"
+GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841"  # Dec 2, 2019
+GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405"
 
 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"  # Nov 17, 2017
 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b"
 
 KAFKA_SOURCE_SHA = "ae7a1696c0a0302b43c5b21e515c37e6ecd365941f68a510a7e442eebddf39a1"  # 2.2.0-rc2
 
-UDPA_GIT_SHA = "015fc86d90f4045a56f831bcdfa560bc455450e2"  # Oct 4, 2019
-UDPA_SHA256 = "2f2b4bdb718250531f3ed9c2010272f04bbca92af70348714fd3687e86acc1f7"
+UDPA_GIT_SHA = "d1f2ba7f5ba62c55b7466409e7f972c93e957d2b"  # Dec 6, 2019
+UDPA_SHA256 = "0271fb8ad2ec9ade21e4c7737dd128d2a8d8edebe911b777e2fc2585414aa045"
 
 ZIPKINAPI_RELEASE = "0.2.2"  # Aug 23, 2019
 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b"
diff --git a/api/docs/BUILD b/api/docs/BUILD
index 3292a1212c38..908e3a6fc1ff 100644
--- a/api/docs/BUILD
+++ b/api/docs/BUILD
@@ -22,6 +22,7 @@ proto_library(
         "//envoy/api/v2/route:pkg",
         "//envoy/config/accesslog/v2:pkg",
         "//envoy/config/bootstrap/v2:pkg",
+        "//envoy/config/cluster/aggregate/v2alpha:pkg",
         "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg",
         "//envoy/config/cluster/redis:pkg",
         "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg",
@@ -65,6 +66,7 @@ proto_library(
         "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg",
         "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg",
         "//envoy/config/filter/thrift/router/v2alpha1:pkg",
+        "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg",
         "//envoy/config/grpc_credential/v2alpha:pkg",
         "//envoy/config/health_checker/redis/v2:pkg",
         "//envoy/config/listener/v2:pkg",
@@ -92,5 +94,7 @@ proto_library(
         "//envoy/service/trace/v2:pkg",
         "//envoy/type:pkg",
         "//envoy/type/matcher:pkg",
+        "//envoy/type/metadata/v2:pkg",
+        "//envoy/type/tracing/v2:pkg",
     ],
 )
diff --git a/api/envoy/admin/v2alpha/certs.proto b/api/envoy/admin/v2alpha/certs.proto
index e7fcc1262264..1994d422bec7 100644
--- a/api/envoy/admin/v2alpha/certs.proto
+++ b/api/envoy/admin/v2alpha/certs.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "CertsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 import "google/protobuf/timestamp.proto";
 
diff --git a/api/envoy/admin/v2alpha/clusters.proto b/api/envoy/admin/v2alpha/clusters.proto
index 05218f81565d..c3580f161e97 100644
--- a/api/envoy/admin/v2alpha/clusters.proto
+++ b/api/envoy/admin/v2alpha/clusters.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "ClustersProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 import "envoy/admin/v2alpha/metrics.proto";
 import "envoy/api/v2/core/address.proto";
diff --git a/api/envoy/admin/v2alpha/config_dump.proto b/api/envoy/admin/v2alpha/config_dump.proto
index 6ee87dd9a832..a133318bad05 100644
--- a/api/envoy/admin/v2alpha/config_dump.proto
+++ b/api/envoy/admin/v2alpha/config_dump.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "ConfigDumpProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 import "envoy/api/v2/auth/cert.proto";
 import "envoy/api/v2/cds.proto";
diff --git a/api/envoy/admin/v2alpha/listeners.proto b/api/envoy/admin/v2alpha/listeners.proto
index e84f64540857..087103dd9fc4 100644
--- a/api/envoy/admin/v2alpha/listeners.proto
+++ b/api/envoy/admin/v2alpha/listeners.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "ListenersProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 import "envoy/api/v2/core/address.proto";
 
diff --git a/api/envoy/admin/v2alpha/memory.proto b/api/envoy/admin/v2alpha/memory.proto
index 65f2061d45e2..8aea1481a0f5 100644
--- a/api/envoy/admin/v2alpha/memory.proto
+++ b/api/envoy/admin/v2alpha/memory.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "MemoryProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 // [#protodoc-title: Memory]
 
diff --git a/api/envoy/admin/v2alpha/metrics.proto b/api/envoy/admin/v2alpha/metrics.proto
index 9a91c7477be5..79c15f72b2ec 100644
--- a/api/envoy/admin/v2alpha/metrics.proto
+++ b/api/envoy/admin/v2alpha/metrics.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "MetricsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 // [#protodoc-title: Metrics]
 
diff --git a/api/envoy/admin/v2alpha/mutex_stats.proto b/api/envoy/admin/v2alpha/mutex_stats.proto
index 682ff5b49354..1b725a11143a 100644
--- a/api/envoy/admin/v2alpha/mutex_stats.proto
+++ b/api/envoy/admin/v2alpha/mutex_stats.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "MutexStatsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 // [#protodoc-title: MutexStats]
 
diff --git a/api/envoy/admin/v2alpha/server_info.proto b/api/envoy/admin/v2alpha/server_info.proto
index 048fb490405f..555d23437fdb 100644
--- a/api/envoy/admin/v2alpha/server_info.proto
+++ b/api/envoy/admin/v2alpha/server_info.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "ServerInfoProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 import "google/protobuf/duration.proto";
 
diff --git a/api/envoy/admin/v2alpha/tap.proto b/api/envoy/admin/v2alpha/tap.proto
index d7caf609af52..3f35c124713d 100644
--- a/api/envoy/admin/v2alpha/tap.proto
+++ b/api/envoy/admin/v2alpha/tap.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v2alpha";
 
 import "envoy/service/tap/v2alpha/common.proto";
 
diff --git a/api/envoy/admin/v3alpha/BUILD b/api/envoy/admin/v3alpha/BUILD
index 400de41668d2..6d1ad80bded3 100644
--- a/api/envoy/admin/v3alpha/BUILD
+++ b/api/envoy/admin/v3alpha/BUILD
@@ -6,11 +6,13 @@ licenses(["notice"])  # Apache 2
 
 api_proto_package(
     deps = [
+        "//envoy/admin/v2alpha:pkg",
         "//envoy/api/v3alpha:pkg",
         "//envoy/api/v3alpha/auth:pkg",
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/config/bootstrap/v3alpha:pkg",
         "//envoy/service/tap/v3alpha:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/admin/v3alpha/certs.proto b/api/envoy/admin/v3alpha/certs.proto
index 4d6a493069ff..679456f46b78 100644
--- a/api/envoy/admin/v3alpha/certs.proto
+++ b/api/envoy/admin/v3alpha/certs.proto
@@ -2,23 +2,29 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "CertsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 
 import "google/protobuf/timestamp.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Certificates]
 
 // Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to
 // display certificate information. See :ref:`/certs <operations_admin_interface_certs>` for more
 // information.
 message Certificates {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificates";
+
   // List of certificates known to an Envoy.
   repeated Certificate certificates = 1;
 }
 
 message Certificate {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificate";
+
   // Details of CA certificate.
   repeated CertificateDetails ca_cert = 1;
 
@@ -28,6 +34,9 @@ message Certificate {
 
 // [#next-free-field: 7]
 message CertificateDetails {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.CertificateDetails";
+
   // Path of the certificate.
   string path = 1;
 
@@ -48,6 +57,9 @@ message CertificateDetails {
 }
 
 message SubjectAlternateName {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.SubjectAlternateName";
+
   // Subject Alternate Name.
   oneof name {
     string dns = 1;
diff --git a/api/envoy/admin/v3alpha/clusters.proto b/api/envoy/admin/v3alpha/clusters.proto
index 607e17744c17..47d809feea14 100644
--- a/api/envoy/admin/v3alpha/clusters.proto
+++ b/api/envoy/admin/v3alpha/clusters.proto
@@ -2,20 +2,24 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "ClustersProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 
 import "envoy/admin/v3alpha/metrics.proto";
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/core/health_check.proto";
 import "envoy/type/v3alpha/percent.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Clusters]
 
 // Admin endpoint uses this wrapper for `/clusters` to display cluster status information.
 // See :ref:`/clusters <operations_admin_interface_clusters>` for more information.
 message Clusters {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Clusters";
+
   // Mapping from cluster name to each cluster's status.
   repeated ClusterStatus cluster_statuses = 1;
 }
@@ -23,6 +27,8 @@ message Clusters {
 // Details an individual cluster's current status.
 // [#next-free-field: 6]
 message ClusterStatus {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus";
+
   // Name of the cluster.
   string name = 1;
 
@@ -72,6 +78,8 @@ message ClusterStatus {
 // Current state of a particular host.
 // [#next-free-field: 9]
 message HostStatus {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.HostStatus";
+
   // Address of this host.
   api.v3alpha.core.Address address = 1;
 
@@ -123,6 +131,9 @@ message HostStatus {
 // Health status for a host.
 // [#next-free-field: 7]
 message HostHealthStatus {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.HostHealthStatus";
+
   // The host is currently failing active health checks.
   bool failed_active_health_check = 1;
 
diff --git a/api/envoy/admin/v3alpha/config_dump.proto b/api/envoy/admin/v3alpha/config_dump.proto
index b48dd1d090df..c7c7ac85326b 100644
--- a/api/envoy/admin/v3alpha/config_dump.proto
+++ b/api/envoy/admin/v3alpha/config_dump.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "ConfigDumpProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 
 import "envoy/api/v3alpha/auth/cert.proto";
 import "envoy/api/v3alpha/cds.proto";
@@ -16,11 +16,15 @@ import "envoy/config/bootstrap/v3alpha/bootstrap.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/timestamp.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: ConfigDump]
 
 // The :ref:`/config_dump <operations_admin_interface_config_dump>` admin endpoint uses this wrapper
 // message to maintain and serve arbitrary configuration information from any component in Envoy.
 message ConfigDump {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ConfigDump";
+
   // This list is serialized and dumped in its entirety at the
   // :ref:`/config_dump <operations_admin_interface_config_dump>` endpoint.
   //
@@ -35,6 +39,9 @@ message ConfigDump {
 }
 
 message UpdateFailureState {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.UpdateFailureState";
+
   // What the component configuration would have been if the update had succeeded.
   google.protobuf.Any failed_configuration = 1;
 
@@ -50,6 +57,9 @@ message UpdateFailureState {
 // the static portions of an Envoy configuration by reusing the output as the bootstrap
 // configuration for another Envoy.
 message BootstrapConfigDump {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.BootstrapConfigDump";
+
   config.bootstrap.v3alpha.Bootstrap bootstrap = 1;
 
   // The timestamp when the BootstrapConfig was last updated.
@@ -60,8 +70,14 @@ message BootstrapConfigDump {
 // configuration information can be used to recreate an Envoy configuration by populating all
 // listeners as static listeners or by returning them in a LDS response.
 message ListenersConfigDump {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.ListenersConfigDump";
+
   // Describes a statically loaded listener.
   message StaticListener {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.ListenersConfigDump.StaticListener";
+
     // The listener config.
     api.v3alpha.Listener listener = 1;
 
@@ -70,6 +86,9 @@ message ListenersConfigDump {
   }
 
   message DynamicListenerState {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.ListenersConfigDump.DynamicListenerState";
+
     // This is the per-resource version information. This version is currently taken from the
     // :ref:`version_info <envoy_api_field_api.v3alpha.DiscoveryResponse.version_info>` field at the
     // time that the listener was loaded. In the future, discrete per-listener versions may be
@@ -86,6 +105,9 @@ message ListenersConfigDump {
   // Describes a dynamically loaded listener via the LDS API.
   // [#next-free-field: 6]
   message DynamicListener {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.ListenersConfigDump.DynamicListener";
+
     // The name or unique id of this listener, pulled from the DynamicListenerState config.
     string name = 1;
 
@@ -125,8 +147,14 @@ message ListenersConfigDump {
 // configuration information can be used to recreate an Envoy configuration by populating all
 // clusters as static clusters or by returning them in a CDS response.
 message ClustersConfigDump {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.ClustersConfigDump";
+
   // Describes a statically loaded cluster.
   message StaticCluster {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.ClustersConfigDump.StaticCluster";
+
     // The cluster config.
     api.v3alpha.Cluster cluster = 1;
 
@@ -136,6 +164,9 @@ message ClustersConfigDump {
 
   // Describes a dynamically loaded cluster via the CDS API.
   message DynamicCluster {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.ClustersConfigDump.DynamicCluster";
+
     // This is the per-resource version information. This version is currently taken from the
     // :ref:`version_info <envoy_api_field_api.v3alpha.DiscoveryResponse.version_info>` field at the
     // time that the cluster was loaded. In the future, discrete per-cluster versions may be
@@ -174,7 +205,13 @@ message ClustersConfigDump {
 // to recreate an Envoy configuration by populating all routes as static routes or by returning them
 // in RDS responses.
 message RoutesConfigDump {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.RoutesConfigDump";
+
   message StaticRouteConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.RoutesConfigDump.StaticRouteConfig";
+
     // The route config.
     api.v3alpha.RouteConfiguration route_config = 1;
 
@@ -183,6 +220,9 @@ message RoutesConfigDump {
   }
 
   message DynamicRouteConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.RoutesConfigDump.DynamicRouteConfig";
+
     // This is the per-resource version information. This version is currently taken from the
     // :ref:`version_info <envoy_api_field_api.v3alpha.DiscoveryResponse.version_info>` field at the
     // time that the route configuration was loaded.
@@ -207,7 +247,13 @@ message RoutesConfigDump {
 // the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the
 // dynamically obtained scopes via the SRDS API.
 message ScopedRoutesConfigDump {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.ScopedRoutesConfigDump";
+
   message InlineScopedRouteConfigs {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.ScopedRoutesConfigDump.InlineScopedRouteConfigs";
+
     // The name assigned to the scoped route configurations.
     string name = 1;
 
@@ -219,6 +265,9 @@ message ScopedRoutesConfigDump {
   }
 
   message DynamicScopedRouteConfigs {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.ScopedRoutesConfigDump.DynamicScopedRouteConfigs";
+
     // The name assigned to the scoped route configurations.
     string name = 1;
 
@@ -243,8 +292,14 @@ message ScopedRoutesConfigDump {
 
 // Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS.
 message SecretsConfigDump {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.SecretsConfigDump";
+
   // DynamicSecret contains secret information fetched via SDS.
   message DynamicSecret {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.SecretsConfigDump.DynamicSecret";
+
     // The name assigned to the secret.
     string name = 1;
 
@@ -262,6 +317,9 @@ message SecretsConfigDump {
 
   // StaticSecret specifies statically loaded secret in bootstrap.
   message StaticSecret {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.admin.v2alpha.SecretsConfigDump.StaticSecret";
+
     // The name assigned to the secret.
     string name = 1;
 
diff --git a/api/envoy/admin/v3alpha/listeners.proto b/api/envoy/admin/v3alpha/listeners.proto
index 7fc998f47fbd..054cdc180d24 100644
--- a/api/envoy/admin/v3alpha/listeners.proto
+++ b/api/envoy/admin/v3alpha/listeners.proto
@@ -2,23 +2,29 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "ListenersProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Listeners]
 
 // Admin endpoint uses this wrapper for `/listeners` to display listener status information.
 // See :ref:`/listeners <operations_admin_interface_listeners>` for more information.
 message Listeners {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Listeners";
+
   // List of listener statuses.
   repeated ListenerStatus listener_statuses = 1;
 }
 
 // Details an individual listener's current status.
 message ListenerStatus {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ListenerStatus";
+
   // Name of the listener
   string name = 1;
 
diff --git a/api/envoy/admin/v3alpha/memory.proto b/api/envoy/admin/v3alpha/memory.proto
index e6a8824c01ab..e8b234979c48 100644
--- a/api/envoy/admin/v3alpha/memory.proto
+++ b/api/envoy/admin/v3alpha/memory.proto
@@ -2,9 +2,11 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "MemoryProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 // [#protodoc-title: Memory]
 
@@ -13,6 +15,8 @@ option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 // docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html).
 // [#next-free-field: 6]
 message Memory {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Memory";
+
   // The number of bytes allocated by the heap for Envoy. This is an alias for
   // `generic.current_allocated_bytes`.
   uint64 allocated = 1;
diff --git a/api/envoy/admin/v3alpha/metrics.proto b/api/envoy/admin/v3alpha/metrics.proto
index 5a52ff2648b4..b36932dec362 100644
--- a/api/envoy/admin/v3alpha/metrics.proto
+++ b/api/envoy/admin/v3alpha/metrics.proto
@@ -2,14 +2,18 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "MetricsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 // [#protodoc-title: Metrics]
 
 // Proto representation of an Envoy Counter or Gauge value.
 message SimpleMetric {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.SimpleMetric";
+
   enum Type {
     COUNTER = 0;
     GAUGE = 1;
diff --git a/api/envoy/admin/v3alpha/mutex_stats.proto b/api/envoy/admin/v3alpha/mutex_stats.proto
index 8d53ec97919a..d2729b021dba 100644
--- a/api/envoy/admin/v3alpha/mutex_stats.proto
+++ b/api/envoy/admin/v3alpha/mutex_stats.proto
@@ -2,9 +2,11 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "MutexStatsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 // [#protodoc-title: MutexStats]
 
@@ -16,6 +18,8 @@ option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 // correspond to core clock frequency. For more information, see the `CycleClock`
 // [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).
 message MutexStats {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.MutexStats";
+
   // The number of individual mutex contentions which have occurred since startup.
   uint64 num_contentions = 1;
 
diff --git a/api/envoy/admin/v3alpha/server_info.proto b/api/envoy/admin/v3alpha/server_info.proto
index 21c676d05d64..cefb9d7419d3 100644
--- a/api/envoy/admin/v3alpha/server_info.proto
+++ b/api/envoy/admin/v3alpha/server_info.proto
@@ -2,18 +2,22 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "ServerInfoProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Server State]
 
 // Proto representation of the value returned by /server_info, containing
 // server version/server status information.
 // [#next-free-field: 7]
 message ServerInfo {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ServerInfo";
+
   enum State {
     // Server is live and serving traffic.
     LIVE = 0;
@@ -49,6 +53,9 @@ message ServerInfo {
 
 // [#next-free-field: 28]
 message CommandLineOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.admin.v2alpha.CommandLineOptions";
+
   enum IpVersion {
     v4 = 0;
     v6 = 1;
diff --git a/api/envoy/admin/v3alpha/tap.proto b/api/envoy/admin/v3alpha/tap.proto
index 7fbf3f905a60..c0942866287f 100644
--- a/api/envoy/admin/v3alpha/tap.proto
+++ b/api/envoy/admin/v3alpha/tap.proto
@@ -2,16 +2,20 @@ syntax = "proto3";
 
 package envoy.admin.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.admin.v3alpha";
 
 import "envoy/service/tap/v3alpha/common.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // The /tap admin request body that is used to configure an active tap session.
 message TapRequest {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.TapRequest";
+
   // The opaque configuration ID used to match the configuration to a loaded extension.
   // A tap extension configures a similar opaque ID that is used to match.
   string config_id = 1 [(validate.rules).string = {min_bytes: 1}];
diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto
index 4ff763945f4c..b01999d7ded6 100644
--- a/api/envoy/api/v2/auth/cert.proto
+++ b/api/envoy/api/v2/auth/cert.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.auth;
 
+option java_package = "io.envoyproxy.envoy.api.v2.auth";
 option java_outer_classname = "CertProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.auth";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/api/v2/core/config_source.proto";
diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto
index c462ba640567..b9c9c859da1a 100644
--- a/api/envoy/api/v2/cds.proto
+++ b/api/envoy/api/v2/cds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2;
 
+option java_package = "io.envoyproxy.envoy.api.v2";
 option java_outer_classname = "CdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/auth/cert.proto";
@@ -39,15 +39,13 @@ service ClusterDiscoveryService {
   }
 
   rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:clusters"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:clusters";
+    option (google.api.http).body = "*";
   }
 }
 
 // Configuration for a single upstream cluster.
-// [#next-free-field: 45]
+// [#next-free-field: 46]
 message Cluster {
   // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
   // for an explanation on each type.
@@ -683,6 +681,10 @@ message Cluster {
   // this setting is ignored.
   repeated core.Address dns_resolvers = 18;
 
+  // [#next-major-version: Reconcile DNS options in a single message.]
+  // Always use TCP queries instead of UDP queries for DNS lookups.
+  bool use_tcp_for_dns_lookups = 45;
+
   // If specified, outlier detection will be enabled for this upstream cluster.
   // Each of the configuration values can be overridden via
   // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.
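
For orientation, the new `use_tcp_for_dns_lookups` flag sits alongside the existing DNS-related cluster fields such as `dns_resolvers`. A text-format sketch of a `Cluster` exercising it might look as follows; the cluster name, resolver address, and timeout are illustrative, and other required cluster fields are omitted:

    # Illustrative envoy.api.v2.Cluster fragment (text format); values are assumptions.
    name: "dns_upstream"
    type: STRICT_DNS
    connect_timeout { seconds: 1 }
    use_tcp_for_dns_lookups: true
    dns_resolvers {
      socket_address { address: "8.8.8.8" port_value: 53 }
    }
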
diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto
index 2a1944177c5c..922f3178fbda 100644
--- a/api/envoy/api/v2/cluster/circuit_breaker.proto
+++ b/api/envoy/api/v2/cluster/circuit_breaker.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.cluster;
 
+option java_package = "io.envoyproxy.envoy.api.v2.cluster";
 option java_outer_classname = "CircuitBreakerProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.cluster";
 option csharp_namespace = "Envoy.Api.V2.ClusterNS";
 option ruby_package = "Envoy.Api.V2.ClusterNS";
 
diff --git a/api/envoy/api/v2/cluster/filter.proto b/api/envoy/api/v2/cluster/filter.proto
index b89b2a6b778b..c8e7d0d6c0c5 100644
--- a/api/envoy/api/v2/cluster/filter.proto
+++ b/api/envoy/api/v2/cluster/filter.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.cluster;
 
+option java_package = "io.envoyproxy.envoy.api.v2.cluster";
 option java_outer_classname = "FilterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.cluster";
 option csharp_namespace = "Envoy.Api.V2.ClusterNS";
 option ruby_package = "Envoy.Api.V2.ClusterNS";
 
diff --git a/api/envoy/api/v2/cluster/outlier_detection.proto b/api/envoy/api/v2/cluster/outlier_detection.proto
index b4194fd07979..a9708d7c984c 100644
--- a/api/envoy/api/v2/cluster/outlier_detection.proto
+++ b/api/envoy/api/v2/cluster/outlier_detection.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.cluster;
 
+option java_package = "io.envoyproxy.envoy.api.v2.cluster";
 option java_outer_classname = "OutlierDetectionProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.cluster";
 option csharp_namespace = "Envoy.Api.V2.ClusterNS";
 option ruby_package = "Envoy.Api.V2.ClusterNS";
 
diff --git a/api/envoy/api/v2/core/address.proto b/api/envoy/api/v2/core/address.proto
index 09c5aaec7571..3b98e0973742 100644
--- a/api/envoy/api/v2/core/address.proto
+++ b/api/envoy/api/v2/core/address.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.core;
 
+option java_package = "io.envoyproxy.envoy.api.v2.core";
 option java_outer_classname = "AddressProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.core";
 
 import "envoy/api/v2/core/base.proto";
 
diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto
index e5299cb4ca69..5bff9947f14b 100644
--- a/api/envoy/api/v2/core/base.proto
+++ b/api/envoy/api/v2/core/base.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.core;
 
+option java_package = "io.envoyproxy.envoy.api.v2.core";
 option java_outer_classname = "BaseProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.core";
 
 import "envoy/api/v2/core/http_uri.proto";
 import "envoy/type/percent.proto";
@@ -136,6 +136,7 @@ message Node {
 // * ``{"envoy.lb": {"canary": <bool> }}`` This indicates the canary status of an
 //   endpoint and is also used during header processing
 //   (x-envoy-upstream-canary) and for stats purposes.
+// [#next-major-version: move to type/metadata/v2]
 message Metadata {
   // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*
   // namespace is reserved for Envoy's built-in filters.
diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto
index b1e0fa582424..6e532cd5e4f6 100644
--- a/api/envoy/api/v2/core/config_source.proto
+++ b/api/envoy/api/v2/core/config_source.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.core;
 
+option java_package = "io.envoyproxy.envoy.api.v2.core";
 option java_outer_classname = "ConfigSourceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.core";
 
 import "envoy/api/v2/core/grpc_service.proto";
 
diff --git a/api/envoy/api/v2/core/grpc_service.proto b/api/envoy/api/v2/core/grpc_service.proto
index 8614c86ee30e..f9e469a08e44 100644
--- a/api/envoy/api/v2/core/grpc_service.proto
+++ b/api/envoy/api/v2/core/grpc_service.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.core;
 
+option java_package = "io.envoyproxy.envoy.api.v2.core";
 option java_outer_classname = "GrpcServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.core";
 
 import "envoy/api/v2/core/base.proto";
 
@@ -62,7 +62,7 @@ message GrpcService {
       }
     }
 
-    // [#next-free-field: 7]
+    // [#next-free-field: 8]
     message CallCredentials {
       message ServiceAccountJWTAccessCredentials {
         string json_key = 1;
@@ -86,6 +86,46 @@ message GrpcService {
         }
       }
 
+      // Security token service configuration that allows Google gRPC to
+      // fetch a security token from an OAuth 2.0 authorization server.
+      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and
+      // https://github.com/grpc/grpc/pull/19587.
+      // [#next-free-field: 10]
+      message StsService {
+        // URI of the token exchange service that handles token exchange requests.
+        string token_exchange_service_uri = 1 [(validate.rules).string = {uri: true}];
+
+        // Location of the target service or resource where the client
+        // intends to use the requested security token.
+        string resource = 2;
+
+        // Logical name of the target service where the client intends to
+        // use the requested security token.
+        string audience = 3;
+
+        // The desired scope of the requested security token in the
+        // context of the service or resource where the token will be used.
+        string scope = 4;
+
+        // Type of the requested security token.
+        string requested_token_type = 5;
+
+        // The path of the subject token, a security token that represents the
+        // identity of the party on behalf of whom the request is being made.
+        string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}];
+
+        // Type of the subject token.
+        string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}];
+
+        // The path of the actor token, a security token that represents the identity
+        // of the acting party. The acting party is authorized to use the
+        // requested security token and act on behalf of the subject.
+        string actor_token_path = 8;
+
+        // Type of the actor token.
+        string actor_token_type = 9;
+      }
+
       oneof credential_specifier {
         option (validate.required) = true;
 
@@ -113,6 +153,11 @@ message GrpcService {
         // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.
         // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.
         MetadataCredentialsFromPlugin from_plugin = 6;
+
+        // Custom security token service which implements OAuth 2.0 token exchange.
+        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16
+        // See https://github.com/grpc/grpc/pull/19587.
+        StsService sts_service = 7;
       }
     }
 
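
To show how the new credential type plugs into the existing structure, here is a text-format sketch of a `GrpcService` selecting `sts_service` in the `credential_specifier` oneof. The URIs, token path, token type, and scope are illustrative, and the surrounding `google_grpc` fields are assumed from the enclosing `GoogleGrpc` message, which is not shown in this hunk:

    # Illustrative envoy.api.v2.core.GrpcService fragment (text format); values are assumptions.
    google_grpc {
      target_uri: "backend.example.com:443"
      stat_prefix: "sts_backend"
      call_credentials {
        sts_service {
          token_exchange_service_uri: "https://sts.example.com/v1/token"
          subject_token_path: "/var/run/secrets/tokens/subject_token"
          subject_token_type: "urn:ietf:params:oauth:token-type:jwt"
          scope: "https://www.example.com/auth"
        }
      }
    }
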
diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto
index cd6fd79a324e..674303aa88ea 100644
--- a/api/envoy/api/v2/core/health_check.proto
+++ b/api/envoy/api/v2/core/health_check.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.core;
 
+option java_package = "io.envoyproxy.envoy.api.v2.core";
 option java_outer_classname = "HealthCheckProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.core";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/type/http.proto";
diff --git a/api/envoy/api/v2/core/http_uri.proto b/api/envoy/api/v2/core/http_uri.proto
index 7e4b4dba43ce..11d6d80cb3fe 100644
--- a/api/envoy/api/v2/core/http_uri.proto
+++ b/api/envoy/api/v2/core/http_uri.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.core;
 
+option java_package = "io.envoyproxy.envoy.api.v2.core";
 option java_outer_classname = "HttpUriProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.core";
 
 import "google/protobuf/duration.proto";
 
diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto
index af5e20cd7367..f3b9a41286e0 100644
--- a/api/envoy/api/v2/core/protocol.proto
+++ b/api/envoy/api/v2/core/protocol.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.core;
 
+option java_package = "io.envoyproxy.envoy.api.v2.core";
 option java_outer_classname = "ProtocolProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.core";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
diff --git a/api/envoy/api/v2/discovery.proto b/api/envoy/api/v2/discovery.proto
index 5832ddb96c7b..386c37d88ced 100644
--- a/api/envoy/api/v2/discovery.proto
+++ b/api/envoy/api/v2/discovery.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2;
 
+option java_package = "io.envoyproxy.envoy.api.v2";
 option java_outer_classname = "DiscoveryProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2";
 
 import "envoy/api/v2/core/base.proto";
 
diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto
index 9d2e288d7ee6..974d981547c0 100644
--- a/api/envoy/api/v2/eds.proto
+++ b/api/envoy/api/v2/eds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2;
 
+option java_package = "io.envoyproxy.envoy.api.v2";
 option java_outer_classname = "EdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/discovery.proto";
@@ -30,10 +30,8 @@ service EndpointDiscoveryService {
   }
 
   rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:endpoints"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:endpoints";
+    option (google.api.http).body = "*";
   }
 }
 
diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto
index 58d6ce7ce31f..d878960082f8 100644
--- a/api/envoy/api/v2/endpoint/endpoint.proto
+++ b/api/envoy/api/v2/endpoint/endpoint.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.endpoint;
 
+option java_package = "io.envoyproxy.envoy.api.v2.endpoint";
 option java_outer_classname = "EndpointProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.endpoint";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/api/v2/endpoint/load_report.proto b/api/envoy/api/v2/endpoint/load_report.proto
index 974f7a0129c0..ff7465d4d1a3 100644
--- a/api/envoy/api/v2/endpoint/load_report.proto
+++ b/api/envoy/api/v2/endpoint/load_report.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.endpoint;
 
+option java_package = "io.envoyproxy.envoy.api.v2.endpoint";
 option java_outer_classname = "LoadReportProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.endpoint";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/api/v2/lds.proto b/api/envoy/api/v2/lds.proto
index 4c67b04b03a3..2d56bff417d7 100644
--- a/api/envoy/api/v2/lds.proto
+++ b/api/envoy/api/v2/lds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2;
 
+option java_package = "io.envoyproxy.envoy.api.v2";
 option java_outer_classname = "LdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/address.proto";
@@ -35,14 +35,12 @@ service ListenerDiscoveryService {
   }
 
   rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:listeners"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:listeners";
+    option (google.api.http).body = "*";
   }
 }
 
-// [#next-free-field: 21]
+// [#next-free-field: 22]
 message Listener {
   enum DrainType {
     // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check
@@ -238,4 +236,17 @@ message Listener {
   // If no configuration is specified, Envoy will not attempt to balance active connections between
   // worker threads.
   ConnectionBalanceConfig connection_balance_config = 20;
+
+  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and
+  // create one socket for each worker thread. When there are many inbound connections,
+  // this distributes them roughly evenly across worker threads. When this flag is set to
+  // false, all worker threads share one socket. For UDP listeners this flag is forcibly
+  // set to true.
+  //
+  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart
+  // (see `3rd paragraph in 'soreuseport' commit message
+  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89>`_).
+  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket
+  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.
+  bool reuse_port = 21;
 }
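
As a usage sketch (not part of this patch), the new `reuse_port` field would appear in a YAML listener configuration roughly as below; the listener name, address and port are hypothetical, and filter chains are elided.

```yaml
# Hypothetical v2 listener enabling per-worker SO_REUSEPORT sockets.
static_resources:
  listeners:
  - name: ingress_tcp                     # hypothetical name
    address:
      socket_address: { address: 0.0.0.0, port_value: 10000 }
    reuse_port: true                      # new field 21: one socket per worker thread
    filter_chains: []                     # filter chains elided in this sketch
```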
diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto
index dbe3d351ece4..851da80e7325 100644
--- a/api/envoy/api/v2/listener/listener.proto
+++ b/api/envoy/api/v2/listener/listener.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.listener;
 
+option java_package = "io.envoyproxy.envoy.api.v2.listener";
 option java_outer_classname = "ListenerProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.listener";
 option csharp_namespace = "Envoy.Api.V2.ListenerNS";
 option ruby_package = "Envoy.Api.V2.ListenerNS";
 
diff --git a/api/envoy/api/v2/listener/quic_config.proto b/api/envoy/api/v2/listener/quic_config.proto
index 4b5b261060ae..8679f3b3086c 100644
--- a/api/envoy/api/v2/listener/quic_config.proto
+++ b/api/envoy/api/v2/listener/quic_config.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.listener;
 
+option java_package = "io.envoyproxy.envoy.api.v2.listener";
 option java_outer_classname = "QuicConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.listener";
 option csharp_namespace = "Envoy.Api.V2.ListenerNS";
 option ruby_package = "Envoy.Api.V2.ListenerNS";
 
diff --git a/api/envoy/api/v2/listener/udp_listener_config.proto b/api/envoy/api/v2/listener/udp_listener_config.proto
index b4f5ba11d260..33318826ca68 100644
--- a/api/envoy/api/v2/listener/udp_listener_config.proto
+++ b/api/envoy/api/v2/listener/udp_listener_config.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.listener;
 
+option java_package = "io.envoyproxy.envoy.api.v2.listener";
 option java_outer_classname = "UdpListenerConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.listener";
 option csharp_namespace = "Envoy.Api.V2.ListenerNS";
 option ruby_package = "Envoy.Api.V2.ListenerNS";
 
diff --git a/api/envoy/api/v2/ratelimit/ratelimit.proto b/api/envoy/api/v2/ratelimit/ratelimit.proto
index af910e3938ba..481697166b78 100644
--- a/api/envoy/api/v2/ratelimit/ratelimit.proto
+++ b/api/envoy/api/v2/ratelimit/ratelimit.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2.ratelimit;
 
+option java_package = "io.envoyproxy.envoy.api.v2.ratelimit";
 option java_outer_classname = "RatelimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.ratelimit";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto
index f9e5c12e15d6..ff7cb32393fc 100644
--- a/api/envoy/api/v2/rds.proto
+++ b/api/envoy/api/v2/rds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2;
 
+option java_package = "io.envoyproxy.envoy.api.v2";
 option java_outer_classname = "RdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
@@ -34,10 +34,8 @@ service RouteDiscoveryService {
   }
 
   rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:routes"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:routes";
+    option (google.api.http).body = "*";
   }
 }
 
diff --git a/api/envoy/api/v2/route/BUILD b/api/envoy/api/v2/route/BUILD
index d3b85c4c63dd..776207ac2f28 100644
--- a/api/envoy/api/v2/route/BUILD
+++ b/api/envoy/api/v2/route/BUILD
@@ -9,5 +9,6 @@ api_proto_package(
         "//envoy/api/v2/core:pkg",
         "//envoy/type:pkg",
         "//envoy/type/matcher:pkg",
+        "//envoy/type/tracing/v2:pkg",
     ],
 )
diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto
index 2810664e9040..0e601d6558ca 100644
--- a/api/envoy/api/v2/route/route.proto
+++ b/api/envoy/api/v2/route/route.proto
@@ -2,15 +2,16 @@ syntax = "proto3";
 
 package envoy.api.v2.route;
 
+option java_package = "io.envoyproxy.envoy.api.v2.route";
 option java_outer_classname = "RouteProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2.route";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/type/matcher/regex.proto";
 import "envoy/type/matcher/string.proto";
 import "envoy/type/percent.proto";
 import "envoy/type/range.proto";
+import "envoy/type/tracing/v2/custom_tag.proto";
 
 import "google/protobuf/any.proto";
 import "google/protobuf/duration.proto";
@@ -380,7 +381,7 @@ message RouteMatch {
     // .. attention::
     //   This field has been deprecated in favor of `safe_regex` as it is not safe for use with
     //   untrusted input in all cases.
-    string regex = 3 [(validate.rules).string = {max_bytes: 1024}, deprecated = true];
+    string regex = 3 [deprecated = true, (validate.rules).string = {max_bytes: 1024}];
 
     // If specified, the route is a regular expression rule meaning that the
     // regex must match the *:path* header once the query string is removed. The entire path
@@ -463,7 +464,7 @@ message CorsPolicy {
   //   This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for
   //   use with untrusted input in all cases.
   repeated string allow_origin_regex = 8
-      [(validate.rules).repeated = {items {string {max_bytes: 1024}}}, deprecated = true];
+      [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}];
 
   // Specifies string patterns that match allowed origins. An origin is allowed if any of the
   // string matchers match.
@@ -572,6 +573,7 @@ message RouteAction {
 
   // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer
   // <arch_overview_load_balancing_types>`.
+  // [#next-free-field: 6]
   message HashPolicy {
     message Header {
       // The name of the request header that will be used to obtain the hash
@@ -614,6 +616,13 @@ message RouteAction {
       bool source_ip = 1;
     }
 
+    message QueryParameter {
+      // The name of the URL query parameter that will be used to obtain the hash
+      // key. If the parameter is not present, no hash will be produced. Query
+      // parameter names are case-sensitive.
+      string name = 1 [(validate.rules).string = {min_bytes: 1}];
+    }
+
     oneof policy_specifier {
       option (validate.required) = true;
 
@@ -625,6 +634,9 @@ message RouteAction {
 
       // Connection properties hash policy.
       ConnectionProperties connection_properties = 3;
+
+      // Query parameter hash policy.
+      QueryParameter query_parameter = 5;
     }
 
     // The flag that short-circuits the hash computing. This field provides a
@@ -1100,6 +1112,14 @@ message Tracing {
   // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.
   // Default: 100%
   type.FractionalPercent overall_sampling = 3;
+
+  // A list of custom tags, each with a unique tag name, to add to the active span.
+  // This list is merged with the :ref:`corresponding configuration
+  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>`
+  // in the HTTP connection manager. If a tag with the same name is configured in both
+  // the HTTP connection manager and at the route level, the one configured here takes
+  // priority.
+  repeated type.tracing.v2.CustomTag custom_tags = 4;
 }
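
Taken together, the two route.proto additions above, the `QueryParameter` hash policy and the route-level `custom_tags`, could be exercised from a YAML route configuration roughly as in this sketch; the virtual host, cluster and tag values are hypothetical, and the `literal` tag kind is assumed from `envoy.type.tracing.v2.CustomTag`.

```yaml
# Hypothetical route configuration exercising both new fields.
virtual_hosts:
- name: backend
  domains: ["*"]
  routes:
  - match: { prefix: "/" }
    route:
      cluster: backend_service            # hypothetical cluster
      hash_policy:
      - query_parameter:                  # new HashPolicy.QueryParameter (field 5)
          name: session_id                # hash key taken from ?session_id=...
    tracing:
      custom_tags:                        # new route-level custom_tags (field 4)
      - tag: deployment                   # hypothetical tag name
        literal:
          value: canary                   # assumed CustomTag.Literal shape
```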
 
 // A virtual cluster is a way of specifying a regex matching rule against
@@ -1133,7 +1153,7 @@ message VirtualCluster {
   // .. attention::
   //   This field has been deprecated in favor of `headers` as it is not safe for use with
   //   untrusted input in all cases.
-  string pattern = 1 [(validate.rules).string = {max_bytes: 1024}, deprecated = true];
+  string pattern = 1 [deprecated = true, (validate.rules).string = {max_bytes: 1024}];
 
   // Specifies a list of header matchers to use for matching requests. Each specified header must
   // match. The pseudo-headers `:path` and `:method` can be used to match the request path and
@@ -1338,7 +1358,7 @@ message HeaderMatcher {
     // .. attention::
     //   This field has been deprecated in favor of `safe_regex_match` as it is not safe for use
     //   with untrusted input in all cases.
-    string regex_match = 5 [(validate.rules).string = {max_bytes: 1024}, deprecated = true];
+    string regex_match = 5 [deprecated = true, (validate.rules).string = {max_bytes: 1024}];
 
     // If specified, this regex string is a regular expression rule which implies the entire request
     // header value must match the regex. The rule will not match if only a subsequence of the
diff --git a/api/envoy/api/v2/srds.proto b/api/envoy/api/v2/srds.proto
index b9b1e89b23ef..89fc25c31a59 100644
--- a/api/envoy/api/v2/srds.proto
+++ b/api/envoy/api/v2/srds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v2;
 
+option java_package = "io.envoyproxy.envoy.api.v2";
 option java_outer_classname = "SrdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/discovery.proto";
@@ -31,10 +31,8 @@ service ScopedRoutesDiscoveryService {
   }
 
   rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:scoped-routes"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:scoped-routes";
+    option (google.api.http).body = "*";
   }
 }
 
diff --git a/api/envoy/api/v3alpha/BUILD b/api/envoy/api/v3alpha/BUILD
index 8f10c2f42753..bec9411d6e54 100644
--- a/api/envoy/api/v3alpha/BUILD
+++ b/api/envoy/api/v3alpha/BUILD
@@ -7,6 +7,7 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     has_services = True,
     deps = [
+        "//envoy/api/v2:pkg",
         "//envoy/api/v3alpha/cluster:pkg",
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/endpoint:pkg",
@@ -14,5 +15,6 @@ api_proto_package(
         "//envoy/api/v3alpha/route:pkg",
         "//envoy/config/listener/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/api/v3alpha/auth/BUILD b/api/envoy/api/v3alpha/auth/BUILD
index 4e89d949ab9d..1ffbb704b645 100644
--- a/api/envoy/api/v3alpha/auth/BUILD
+++ b/api/envoy/api/v3alpha/auth/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v2/auth:pkg",
+        "//envoy/api/v3alpha/core:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/api/v3alpha/auth/cert.proto b/api/envoy/api/v3alpha/auth/cert.proto
index 9dd6a068a37b..82bbc3ed2bad 100644
--- a/api/envoy/api/v3alpha/auth/cert.proto
+++ b/api/envoy/api/v3alpha/auth/cert.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.auth;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.auth";
 option java_outer_classname = "CertProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.auth";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/core/config_source.proto";
@@ -13,11 +13,15 @@ import "google/protobuf/any.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Common TLS configuration]
 
 message TlsParameters {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters";
+
   enum TlsProtocol {
     // Envoy will choose the optimal TLS version.
     TLS_AUTO = 0;
@@ -105,6 +109,9 @@ message TlsParameters {
 // (potentially asynchronous) signing and decryption operations. Some use cases for private key
 // methods would be TPM support and TLS acceleration.
 message PrivateKeyProvider {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.auth.PrivateKeyProvider";
+
   reserved 2;
 
   reserved "config";
@@ -121,6 +128,8 @@ message PrivateKeyProvider {
 
 // [#next-free-field: 7]
 message TlsCertificate {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate";
+
   // The TLS certificate chain.
   core.DataSource certificate_chain = 1;
 
@@ -148,6 +157,9 @@ message TlsCertificate {
 }
 
 message TlsSessionTicketKeys {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.auth.TlsSessionTicketKeys";
+
   // Keys for encrypting and decrypting TLS session tickets. The
   // first key in the array contains the key to encrypt all new sessions created by this context.
   // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys
@@ -177,6 +189,9 @@ message TlsSessionTicketKeys {
 
 // [#next-free-field: 9]
 message CertificateValidationContext {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.auth.CertificateValidationContext";
+
   // TLS certificate data containing certificate authority certificates to use in verifying
   // a presented peer certificate (e.g. server certificate for clusters or client certificate
   // for listeners). If not specified and a peer certificate is presented it will not be
@@ -292,7 +307,12 @@ message CertificateValidationContext {
 // TLS context shared by both client and server TLS contexts.
 // [#next-free-field: 9]
 message CommonTlsContext {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext";
+
   message CombinedCertificateValidationContext {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext";
+
     // How to validate peer certificates.
     CertificateValidationContext default_validation_context = 1
         [(validate.rules).message = {required: true}];
@@ -350,6 +370,9 @@ message CommonTlsContext {
 }
 
 message UpstreamTlsContext {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.auth.UpstreamTlsContext";
+
   // Common TLS context settings.
   //
   // .. attention::
@@ -378,6 +401,9 @@ message UpstreamTlsContext {
 
 // [#next-free-field: 6]
 message DownstreamTlsContext {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.auth.DownstreamTlsContext";
+
   // Common TLS context settings.
   CommonTlsContext common_tls_context = 1;
 
@@ -399,6 +425,8 @@ message DownstreamTlsContext {
 }
 
 message SdsSecretConfig {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig";
+
   // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
   // When both name and config are specified, then secret can be fetched and/or reloaded via SDS.
   // When only name is specified, then secret will be loaded from static
@@ -409,6 +437,8 @@ message SdsSecretConfig {
 }
 
 message Secret {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret";
+
   // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
   string name = 1;
 
diff --git a/api/envoy/api/v3alpha/cds.proto b/api/envoy/api/v3alpha/cds.proto
index 61078de959a3..fe20046daa9c 100644
--- a/api/envoy/api/v3alpha/cds.proto
+++ b/api/envoy/api/v3alpha/cds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_outer_classname = "CdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/cluster/circuit_breaker.proto";
@@ -25,6 +25,8 @@ import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Clusters]
@@ -38,16 +40,16 @@ service ClusterDiscoveryService {
   }
 
   rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:clusters"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:clusters";
+    option (google.api.http).body = "*";
   }
 }
 
 // Configuration for a single upstream cluster.
-// [#next-free-field: 45]
+// [#next-free-field: 46]
 message Cluster {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster";
+
   // Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
   // for an explanation on each type.
   enum DiscoveryType {
@@ -149,6 +151,9 @@ message Cluster {
   // TransportSocketMatch specifies what transport socket config will be used
   // when the match conditions are satisfied.
   message TransportSocketMatch {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.TransportSocketMatch";
+
     // The name of the match, used in stats generation.
     string name = 1 [(validate.rules).string = {min_len: 1}];
 
@@ -165,6 +170,9 @@ message Cluster {
 
   // Extended cluster type.
   message CustomClusterType {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.CustomClusterType";
+
     // The type of the cluster to instantiate. The name must match a supported cluster type.
     string name = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -175,6 +183,9 @@ message Cluster {
 
   // Only valid when discovery type is EDS.
   message EdsClusterConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.EdsClusterConfig";
+
     // Configuration for the source of EDS updates for this Cluster.
     core.ConfigSource eds_config = 1;
 
@@ -188,6 +199,9 @@ message Cluster {
   // endpoint metadata and selected by route and weighted cluster metadata.
   // [#next-free-field: 8]
   message LbSubsetConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.LbSubsetConfig";
+
     // If NO_FALLBACK is selected, a result
     // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,
     // any cluster endpoint may be returned (subject to policy, health checks,
@@ -201,6 +215,9 @@ message Cluster {
 
     // Specifications for subsets.
     message LbSubsetSelector {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector";
+
       // Allows to override top level fallback policy per selector.
       enum LbSubsetSelectorFallbackPolicy {
         // If NOT_DEFINED top level config fallback policy is used instead.
@@ -311,6 +328,9 @@ message Cluster {
 
   // Specific configuration for the LeastRequest load balancing policy.
   message LeastRequestLbConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.LeastRequestLbConfig";
+
     // The number of random healthy hosts from which the host with the fewest active requests will
     // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.
     google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];
@@ -319,6 +339,9 @@ message Cluster {
   // Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`
   // load balancing policy.
   message RingHashLbConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.RingHashLbConfig";
+
     // The hash function used to hash hosts onto the ketama ring.
     enum HashFunction {
       // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.
@@ -352,6 +375,9 @@ message Cluster {
   // :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`
   // load balancing policy.
   message OriginalDstLbConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.OriginalDstLbConfig";
+
     // When true, :ref:`x-envoy-original-dst-host
     // <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination
     // address.
@@ -367,9 +393,15 @@ message Cluster {
   // Common configuration for all load balancer implementations.
   // [#next-free-field: 7]
   message CommonLbConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Cluster.CommonLbConfig";
+
     // Configuration for :ref:`zone aware routing
     // <arch_overview_load_balancing_zone_aware_routing>`.
     message ZoneAwareLbConfig {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig";
+
       // Configures percentage of requests that will be considered for zone aware routing
       // if zone aware routing is configured. If not specified, the default is 100%.
       // * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.
@@ -393,6 +425,8 @@ message Cluster {
     // Configuration for :ref:`locality weighted load balancing
     // <arch_overview_load_balancing_locality_weighted_lb>`
     message LocalityWeightedLbConfig {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig";
     }
 
     // Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.
@@ -452,6 +486,8 @@ message Cluster {
   }
 
   message RefreshRate {
+    option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.RefreshRate";
+
     // Specifies the base interval between refreshes. This parameter is required and must be greater
     // than zero and less than
     // :ref:`max_interval <envoy_api_field_api.v3alpha.Cluster.RefreshRate.max_interval>`.
@@ -667,6 +703,10 @@ message Cluster {
   // this setting is ignored.
   repeated core.Address dns_resolvers = 18;
 
+  // [#next-major-version: Reconcile DNS options in a single message.]
+  // Always use TCP queries instead of UDP queries for DNS lookups.
+  bool use_tcp_for_dns_lookups = 45;
+
   // If specified, outlier detection will be enabled for this upstream cluster.
   // Each of the configuration values can be overridden via
   // :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.
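
A minimal sketch of the new `use_tcp_for_dns_lookups` flag on a DNS-resolved cluster; the cluster name and endpoint below are hypothetical.

```yaml
# Hypothetical STRICT_DNS cluster forcing DNS resolution over TCP.
clusters:
- name: dns_backend
  type: STRICT_DNS
  connect_timeout: 1s
  use_tcp_for_dns_lookups: true           # new field 45
  load_assignment:
    cluster_name: dns_backend
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address: { address: backend.example.com, port_value: 443 }
```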
@@ -806,7 +846,12 @@ message Cluster {
 // To facilitate this, the config message for the top-level LB policy may include a field of
 // type LoadBalancingPolicy that specifies the child policy.
 message LoadBalancingPolicy {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy";
+
   message Policy {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.LoadBalancingPolicy.Policy";
+
     reserved 2;
 
     reserved "config";
@@ -826,11 +871,16 @@ message LoadBalancingPolicy {
 // An extensible structure containing the address Envoy should bind to when
 // establishing upstream connections.
 message UpstreamBindConfig {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.UpstreamBindConfig";
+
   // The address Envoy should bind to when establishing upstream connections.
   core.Address source_address = 1;
 }
 
 message UpstreamConnectionOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.UpstreamConnectionOptions";
+
   // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.
   core.TcpKeepalive tcp_keepalive = 1;
 }
diff --git a/api/envoy/api/v3alpha/cluster/BUILD b/api/envoy/api/v3alpha/cluster/BUILD
index 4e89d949ab9d..4915398fc93f 100644
--- a/api/envoy/api/v3alpha/cluster/BUILD
+++ b/api/envoy/api/v3alpha/cluster/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v2/cluster:pkg",
+        "//envoy/api/v3alpha/core:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/api/v3alpha/cluster/circuit_breaker.proto b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto
index 0e229e53aaca..89a8e3c406c9 100644
--- a/api/envoy/api/v3alpha/cluster/circuit_breaker.proto
+++ b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.cluster;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster";
 option java_outer_classname = "CircuitBreakerProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster";
 
 import "envoy/api/v3alpha/core/base.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Circuit breakers]
@@ -17,10 +19,16 @@ import "validate/validate.proto";
 // :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be
 // specified individually for each defined priority.
 message CircuitBreakers {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.cluster.CircuitBreakers";
+
   // A Thresholds defines CircuitBreaker settings for a
   // :ref:`RoutingPriority<envoy_api_enum_api.v3alpha.core.RoutingPriority>`.
   // [#next-free-field: 8]
   message Thresholds {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.cluster.CircuitBreakers.Thresholds";
+
     // The :ref:`RoutingPriority<envoy_api_enum_api.v3alpha.core.RoutingPriority>`
     // the specified CircuitBreaker settings apply to.
     core.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];
diff --git a/api/envoy/api/v3alpha/cluster/filter.proto b/api/envoy/api/v3alpha/cluster/filter.proto
index c9d37869c4a3..aa2a24bf0231 100644
--- a/api/envoy/api/v3alpha/cluster/filter.proto
+++ b/api/envoy/api/v3alpha/cluster/filter.proto
@@ -2,18 +2,22 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.cluster;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster";
 option java_outer_classname = "FilterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster";
 
 import "google/protobuf/any.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Upstream filters]
 //
 // Upstream filters apply to the connections to the upstream cluster hosts.
 message Filter {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.Filter";
+
   // The name of the filter to instantiate. The name must match a
   // :ref:`supported filter <config_network_filters>`.
   string name = 1 [(validate.rules).string = {min_bytes: 1}];
diff --git a/api/envoy/api/v3alpha/cluster/outlier_detection.proto b/api/envoy/api/v3alpha/cluster/outlier_detection.proto
index 882b1184705e..f49ceba09256 100644
--- a/api/envoy/api/v3alpha/cluster/outlier_detection.proto
+++ b/api/envoy/api/v3alpha/cluster/outlier_detection.proto
@@ -2,13 +2,15 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.cluster;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster";
 option java_outer_classname = "OutlierDetectionProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.cluster";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Outlier detection]
@@ -17,6 +19,9 @@ import "validate/validate.proto";
 // more information on outlier detection.
 // [#next-free-field: 21]
 message OutlierDetection {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.cluster.OutlierDetection";
+
   // The number of consecutive 5xx responses or local origin errors that are mapped
   // to 5xx error codes before a consecutive 5xx ejection
   // occurs. Defaults to 5.
diff --git a/api/envoy/api/v3alpha/core/BUILD b/api/envoy/api/v3alpha/core/BUILD
index 30e23239cc1b..832558353acc 100644
--- a/api/envoy/api/v3alpha/core/BUILD
+++ b/api/envoy/api/v3alpha/core/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/type/v3alpha:pkg"],
+    deps = [
+        "//envoy/api/v2/core:pkg",
+        "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/api/v3alpha/core/address.proto b/api/envoy/api/v3alpha/core/address.proto
index 206aa5978f82..a2fe375ac76e 100644
--- a/api/envoy/api/v3alpha/core/address.proto
+++ b/api/envoy/api/v3alpha/core/address.proto
@@ -2,19 +2,23 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.core;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 option java_outer_classname = "AddressProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 
 import "envoy/api/v3alpha/core/base.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Network addresses]
 
 message Pipe {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Pipe";
+
   // Unix Domain Socket path. On Linux, paths starting with '@' will use the
   // abstract namespace. The starting '@' is replaced by a null byte by Envoy.
   // Paths starting with '@' will result in an error in environments other than
@@ -27,6 +31,8 @@ message Pipe {
 
 // [#next-free-field: 7]
 message SocketAddress {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress";
+
   enum Protocol {
     TCP = 0;
     UDP = 1;
@@ -73,6 +79,8 @@ message SocketAddress {
 }
 
 message TcpKeepalive {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TcpKeepalive";
+
   // Maximum number of keepalive probes to send without response before deciding
   // the connection is dead. Default is to use the OS level configuration (unless
   // overridden, Linux defaults to 9.)
@@ -89,6 +97,8 @@ message TcpKeepalive {
 }
 
 message BindConfig {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BindConfig";
+
   // The address to bind to when creating a socket.
   SocketAddress source_address = 1 [(validate.rules).message = {required: true}];
 
@@ -110,6 +120,8 @@ message BindConfig {
 // used to tell Envoy where to bind/listen, connect to upstream and find
 // management servers.
 message Address {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Address";
+
   oneof address {
     option (validate.required) = true;
 
@@ -122,6 +134,8 @@ message Address {
 // CidrRange specifies an IP Address and a prefix length to construct
 // the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.
 message CidrRange {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange";
+
   // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.
   string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/api/v3alpha/core/base.proto b/api/envoy/api/v3alpha/core/base.proto
index d2806be9419c..493019348162 100644
--- a/api/envoy/api/v3alpha/core/base.proto
+++ b/api/envoy/api/v3alpha/core/base.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.core;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 option java_outer_classname = "BaseProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 
 import "envoy/api/v3alpha/core/http_uri.proto";
 import "envoy/type/v3alpha/percent.proto";
@@ -13,6 +13,8 @@ import "google/protobuf/any.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Common types]
@@ -57,6 +59,8 @@ enum TrafficDirection {
 
 // Identifies location of where either Envoy runs or where upstream hosts run.
 message Locality {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Locality";
+
   // Region this :ref:`zone <envoy_api_field_api.v3alpha.core.Locality.zone>` belongs to.
   string region = 1;
 
@@ -82,6 +86,8 @@ message Locality {
 // configuration for serving.
 // [#next-free-field: 6]
 message Node {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Node";
+
   // An opaque node identifier for the Envoy node. This also provides the local
   // service node name. It should be set if any of the following features are
   // used: :ref:`statsd <arch_overview_statistics>`, :ref:`CDS
@@ -137,7 +143,10 @@ message Node {
 // * ``{"envoy.lb": {"canary": <bool> }}`` This indicates the canary status of an
 //   endpoint and is also used during header processing
 //   (x-envoy-upstream-canary) and for stats purposes.
+// [#next-major-version: move to type/metadata/v2]
 message Metadata {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Metadata";
+
   // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*
   // namespace is reserved for Envoy's built-in filters.
   map<string, google.protobuf.Struct> filter_metadata = 1;
@@ -145,6 +154,8 @@ message Metadata {
 
 // Runtime derived uint32 with a default when not specified.
 message RuntimeUInt32 {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeUInt32";
+
   // Default value if runtime value is not available.
   uint32 default_value = 2;
 
@@ -154,6 +165,9 @@ message RuntimeUInt32 {
 
 // Runtime derived bool with a default when not specified.
 message RuntimeFeatureFlag {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.RuntimeFeatureFlag";
+
   // Default value if runtime value is not available.
   google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];
 
@@ -165,6 +179,8 @@ message RuntimeFeatureFlag {
 
 // Header name/value pair.
 message HeaderValue {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue";
+
   // Header name.
   string key = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 16384}];
 
@@ -178,6 +194,9 @@ message HeaderValue {
 
 // Header name/value pair plus option to control append behavior.
 message HeaderValueOption {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.HeaderValueOption";
+
   // Header name/value pair that this option applies to.
   HeaderValue header = 1 [(validate.rules).message = {required: true}];
 
@@ -188,11 +207,15 @@ message HeaderValueOption {
 
 // Wrapper for a set of headers.
 message HeaderMap {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderMap";
+
   repeated HeaderValue headers = 1;
 }
 
 // Data source consisting of either a file or an inline value.
 message DataSource {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource";
+
   oneof specifier {
     option (validate.required) = true;
 
@@ -209,6 +232,8 @@ message DataSource {
 
 // The message specifies how to fetch data from remote and how to verify it.
 message RemoteDataSource {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RemoteDataSource";
+
   // The HTTP URI to fetch the remote data.
   HttpUri http_uri = 1 [(validate.rules).message = {required: true}];
 
@@ -218,6 +243,8 @@ message RemoteDataSource {
 
 // Async data source which support async data fetch.
 message AsyncDataSource {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.AsyncDataSource";
+
   oneof specifier {
     option (validate.required) = true;
 
@@ -234,6 +261,8 @@ message AsyncDataSource {
 // empty, a default transport socket implementation and configuration will be
 // chosen based on the platform and existence of tls_context.
 message TransportSocket {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TransportSocket";
+
   reserved 2;
 
   reserved "config";
@@ -253,6 +282,8 @@ message TransportSocket {
 // might not exist in upstream kernels or precompiled Envoy binaries.
 // [#next-free-field: 7]
 message SocketOption {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketOption";
+
   enum SocketState {
     // Socket options are applied after socket creation but before binding the socket to a port
     STATE_PREBIND = 0;
@@ -300,6 +331,9 @@ message SocketOption {
 //   integral percentage out of 100. For instance, a runtime key lookup returning the value "42"
 //   would parse as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.
 message RuntimeFractionalPercent {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.RuntimeFractionalPercent";
+
   // Default value if the runtime value's for the numerator/denominator keys are not available.
   type.v3alpha.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];
 
@@ -309,6 +343,8 @@ message RuntimeFractionalPercent {
 
 // Identifies a specific ControlPlane instance that Envoy is connected to.
 message ControlPlane {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ControlPlane";
+
   // An opaque control plane identifier that uniquely identifies an instance
   // of control plane. This can be used to identify which control plane instance,
   // the Envoy is connected to.
diff --git a/api/envoy/api/v3alpha/core/config_source.proto b/api/envoy/api/v3alpha/core/config_source.proto
index 5da66d99fa83..ef88f7d44c29 100644
--- a/api/envoy/api/v3alpha/core/config_source.proto
+++ b/api/envoy/api/v3alpha/core/config_source.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.core;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 option java_outer_classname = "ConfigSourceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 
 import "envoy/api/v3alpha/core/grpc_service.proto";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Configuration sources]
@@ -19,6 +21,8 @@ import "validate/validate.proto";
 // will use to fetch an xDS API.
 // [#next-free-field: 8]
 message ApiConfigSource {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ApiConfigSource";
+
   // APIs may be fetched via either REST or gRPC.
   enum ApiType {
     // Ideally this would be 'reserved 0' but one can't reserve the default
@@ -76,6 +80,8 @@ message ApiConfigSource {
 // set in :ref:`ConfigSource <envoy_api_msg_api.v3alpha.core.ConfigSource>` can be used to
 // specify that ADS is to be used.
 message AggregatedConfigSource {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.AggregatedConfigSource";
 }
 
 // [#not-implemented-hide:]
@@ -83,10 +89,14 @@ message AggregatedConfigSource {
 // set in :ref:`ConfigSource <envoy_api_msg_api.v3alpha.core.ConfigSource>` can be used to
 // specify that other data can be obtained from the same server.
 message SelfConfigSource {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource";
 }
 
 // Rate Limit settings to be applied for discovery requests made by Envoy.
 message RateLimitSettings {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.RateLimitSettings";
+
   // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a
   // default value of 100 will be used.
   google.protobuf.UInt32Value max_tokens = 1;
@@ -104,6 +114,8 @@ message RateLimitSettings {
 // inotify for updates.
 // [#next-free-field: 6]
 message ConfigSource {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource";
+
   oneof config_source_specifier {
     option (validate.required) = true;
 
diff --git a/api/envoy/api/v3alpha/core/grpc_service.proto b/api/envoy/api/v3alpha/core/grpc_service.proto
index c02e438faf96..02f3b646a6f9 100644
--- a/api/envoy/api/v3alpha/core/grpc_service.proto
+++ b/api/envoy/api/v3alpha/core/grpc_service.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.core;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 option java_outer_classname = "GrpcServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 
 import "envoy/api/v3alpha/core/base.proto";
 
@@ -13,6 +13,8 @@ import "google/protobuf/duration.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: gRPC services]
@@ -21,7 +23,12 @@ import "validate/validate.proto";
 // <envoy_api_msg_api.v3alpha.core.ApiConfigSource>` and filter configurations.
 // [#next-free-field: 6]
 message GrpcService {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService";
+
   message EnvoyGrpc {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.GrpcService.EnvoyGrpc";
+
     // The name of the upstream gRPC cluster. SSL credentials will be supplied
     // in the :ref:`Cluster <envoy_api_msg_api.v3alpha.Cluster>` :ref:`transport_socket
     // <envoy_api_field_api.v3alpha.Cluster.transport_socket>`.
@@ -30,8 +37,14 @@ message GrpcService {
 
   // [#next-free-field: 7]
   message GoogleGrpc {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.GrpcService.GoogleGrpc";
+
     // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.
     message SslCredentials {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials";
+
       // PEM encoded server root certificates.
       DataSource root_certs = 1;
 
@@ -45,11 +58,16 @@ message GrpcService {
     // Local channel credentials. Only UDS is supported for now.
     // See https://github.com/grpc/grpc/pull/15909.
     message GoogleLocalCredentials {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials";
     }
 
     // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call
     // credential types.
     message ChannelCredentials {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials";
+
       oneof credential_specifier {
         option (validate.required) = true;
 
@@ -62,21 +80,35 @@ message GrpcService {
       }
     }
 
-    // [#next-free-field: 7]
+    // [#next-free-field: 8]
     message CallCredentials {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials";
+
       message ServiceAccountJWTAccessCredentials {
+        option (udpa.annotations.versioning).previous_message_type =
+            "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials."
+            "ServiceAccountJWTAccessCredentials";
+
         string json_key = 1;
 
         uint64 token_lifetime_seconds = 2;
       }
 
       message GoogleIAMCredentials {
+        option (udpa.annotations.versioning).previous_message_type =
+            "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials";
+
         string authorization_token = 1;
 
         string authority_selector = 2;
       }
 
       message MetadataCredentialsFromPlugin {
+        option (udpa.annotations.versioning).previous_message_type =
+            "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials."
+            "MetadataCredentialsFromPlugin";
+
         reserved 2;
 
         reserved "config";
@@ -88,6 +120,49 @@ message GrpcService {
         }
       }
 
+      // Security token service configuration that allows Google gRPC to
+      // fetch a security token from an OAuth 2.0 authorization server.
+      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and
+      // https://github.com/grpc/grpc/pull/19587.
+      // [#next-free-field: 10]
+      message StsService {
+        option (udpa.annotations.versioning).previous_message_type =
+            "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService";
+
+        // URI of the token exchange service that handles token exchange requests.
+        string token_exchange_service_uri = 1 [(validate.rules).string = {uri: true}];
+
+        // Location of the target service or resource where the client
+        // intends to use the requested security token.
+        string resource = 2;
+
+        // Logical name of the target service where the client intends to
+        // use the requested security token.
+        string audience = 3;
+
+        // The desired scope of the requested security token in the
+        // context of the service or resource where the token will be used.
+        string scope = 4;
+
+        // Type of the requested security token.
+        string requested_token_type = 5;
+
+        // The path of the subject token, a security token that represents the
+        // identity of the party on behalf of whom the request is being made.
+        string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}];
+
+        // Type of the subject token.
+        string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}];
+
+        // The path of the actor token, a security token that represents the identity
+        // of the acting party. The acting party is authorized to use the
+        // requested security token and act on behalf of the subject.
+        string actor_token_path = 8;
+
+        // Type of the actor token.
+        string actor_token_type = 9;
+      }
+
       oneof credential_specifier {
         option (validate.required) = true;
 
@@ -115,6 +190,11 @@ message GrpcService {
         // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.
         // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.
         MetadataCredentialsFromPlugin from_plugin = 6;
+
+        // Custom security token service which implements OAuth 2.0 token exchange.
+        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16
+        // See https://github.com/grpc/grpc/pull/19587.
+        StsService sts_service = 7;
       }
     }
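
As a hedged usage sketch, the new `StsService` call credential could be plugged into a `GrpcService` YAML config roughly as follows; every concrete value (target, URIs, token path, scope) is hypothetical. Per the validation rules above, only `subject_token_path` and `subject_token_type` must be non-empty, and `token_exchange_service_uri` must be a valid URI.

```yaml
# Hypothetical Google gRPC service using the new STS call credential.
grpc_service:
  google_grpc:
    target_uri: xds.example.com:443                          # hypothetical server
    stat_prefix: sts_example
    channel_credentials:
      ssl_credentials: {}                                    # sketch: default roots
    call_credentials:
    - sts_service:                                           # new field 7
        token_exchange_service_uri: https://sts.example.com/v1/token
        subject_token_path: /var/run/secrets/tokens/subject.jwt
        subject_token_type: urn:ietf:params:oauth:token-type:jwt
        scope: https://www.googleapis.com/auth/cloud-platform
```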
 
diff --git a/api/envoy/api/v3alpha/core/health_check.proto b/api/envoy/api/v3alpha/core/health_check.proto
index e95f7943f537..a8974740c649 100644
--- a/api/envoy/api/v3alpha/core/health_check.proto
+++ b/api/envoy/api/v3alpha/core/health_check.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.core;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 option java_outer_classname = "HealthCheckProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/type/v3alpha/http.proto";
@@ -15,6 +15,8 @@ import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Health check]
@@ -50,8 +52,13 @@ enum HealthStatus {
 
 // [#next-free-field: 21]
 message HealthCheck {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck";
+
   // Describes the encoding of the payload bytes in the payload.
   message Payload {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.HealthCheck.Payload";
+
     oneof payload {
       option (validate.required) = true;
 
@@ -65,6 +72,9 @@ message HealthCheck {
 
   // [#next-free-field: 11]
   message HttpHealthCheck {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.HealthCheck.HttpHealthCheck";
+
     reserved 7;
 
     reserved "use_http2";
@@ -114,6 +124,9 @@ message HealthCheck {
   }
 
   message TcpHealthCheck {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.HealthCheck.TcpHealthCheck";
+
     // Empty payloads imply a connect-only health check.
     Payload send = 1;
 
@@ -124,6 +137,9 @@ message HealthCheck {
   }
 
   message RedisHealthCheck {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.HealthCheck.RedisHealthCheck";
+
     // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value
     // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other
     // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance
@@ -136,6 +152,9 @@ message HealthCheck {
   // healthcheck. See `gRPC doc <https://github.com/grpc/grpc/blob/master/doc/health-checking.md>`_
   // for details.
   message GrpcHealthCheck {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.HealthCheck.GrpcHealthCheck";
+
     // An optional service name parameter which will be sent to gRPC service in
     // `grpc.health.v1.HealthCheckRequest
     // <https://github.com/grpc/grpc/blob/master/src/proto/grpc/health/v1/health.proto#L20>`_.
@@ -151,6 +170,9 @@ message HealthCheck {
 
   // Custom health check.
   message CustomHealthCheck {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.HealthCheck.CustomHealthCheck";
+
     reserved 2;
 
     reserved "config";
diff --git a/api/envoy/api/v3alpha/core/http_uri.proto b/api/envoy/api/v3alpha/core/http_uri.proto
index e07e99dbb07f..30b8d8647216 100644
--- a/api/envoy/api/v3alpha/core/http_uri.proto
+++ b/api/envoy/api/v3alpha/core/http_uri.proto
@@ -2,18 +2,22 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.core;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 option java_outer_classname = "HttpUriProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: HTTP Service URI ]
 
 // Envoy external URI descriptor
 message HttpUri {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpUri";
+
   // The HTTP server URI. It should be a full FQDN with protocol, host and path.
   //
   // Example:
diff --git a/api/envoy/api/v3alpha/core/protocol.proto b/api/envoy/api/v3alpha/core/protocol.proto
index 41246f17675a..77ad860b237a 100644
--- a/api/envoy/api/v3alpha/core/protocol.proto
+++ b/api/envoy/api/v3alpha/core/protocol.proto
@@ -2,22 +2,29 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.core;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 option java_outer_classname = "ProtocolProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.core";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Protocol options]
 
 // [#not-implemented-hide:]
 message TcpProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.TcpProtocolOptions";
 }
 
 message HttpProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.HttpProtocolOptions";
+
   // The idle timeout for connections. The idle timeout is defined as the
   // period in which there are no active requests. If not set, there is no idle timeout. When the
   // idle timeout is reached the connection will be closed. If the connection is an HTTP/2
@@ -47,8 +54,16 @@ message HttpProtocolOptions {
 }
 
 message Http1ProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.Http1ProtocolOptions";
+
   message HeaderKeyFormat {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat";
+
     message ProperCaseWords {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords";
     }
 
     oneof header_format {
@@ -87,6 +102,9 @@ message Http1ProtocolOptions {
 
 // [#next-free-field: 13]
 message Http2ProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.Http2ProtocolOptions";
+
   // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_
   // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values
   // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header
@@ -189,5 +207,8 @@ message Http2ProtocolOptions {
 
 // [#not-implemented-hide:]
 message GrpcProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.core.GrpcProtocolOptions";
+
   Http2ProtocolOptions http2_protocol_options = 1;
 }
diff --git a/api/envoy/api/v3alpha/discovery.proto b/api/envoy/api/v3alpha/discovery.proto
index b123e9808c4a..38a357c40044 100644
--- a/api/envoy/api/v3alpha/discovery.proto
+++ b/api/envoy/api/v3alpha/discovery.proto
@@ -2,21 +2,25 @@ syntax = "proto3";
 
 package envoy.api.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_outer_classname = "DiscoveryProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 
 import "google/protobuf/any.proto";
 import "google/rpc/status.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Common discovery API components]
 
 // A DiscoveryRequest requests a set of versioned resources of the same type for
 // a given Envoy node on some API.
 // [#next-free-field: 7]
 message DiscoveryRequest {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryRequest";
+
   // The version_info provided in the request messages will be the version_info
   // received with the most recent successfully processed response or empty on
   // the first request. It is expected that no new request is sent after a
@@ -60,6 +64,8 @@ message DiscoveryRequest {
 
 // [#next-free-field: 7]
 message DiscoveryResponse {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryResponse";
+
   // The version of the response data.
   string version_info = 1;
 
@@ -135,6 +141,8 @@ message DiscoveryResponse {
 // initial_resource_versions.
 // [#next-free-field: 8]
 message DeltaDiscoveryRequest {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest";
+
   // The node making the request.
   core.Node node = 1;
 
@@ -192,6 +200,9 @@ message DeltaDiscoveryRequest {
 
 // [#next-free-field: 7]
 message DeltaDiscoveryResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.DeltaDiscoveryResponse";
+
   // The version of the response data (used for debugging).
   string system_version_info = 1;
 
@@ -215,6 +226,8 @@ message DeltaDiscoveryResponse {
 }
 
 message Resource {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource";
+
   // The resource's name, to distinguish it from others of the same type of resource.
   string name = 3;
 
diff --git a/api/envoy/api/v3alpha/eds.proto b/api/envoy/api/v3alpha/eds.proto
index a09f54edbb17..dc2f90f4b41e 100644
--- a/api/envoy/api/v3alpha/eds.proto
+++ b/api/envoy/api/v3alpha/eds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_outer_classname = "EdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/discovery.proto";
@@ -15,6 +15,8 @@ import "google/api/annotations.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: EDS]
@@ -30,10 +32,8 @@ service EndpointDiscoveryService {
   }
 
   rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:endpoints"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:endpoints";
+    option (google.api.http).body = "*";
   }
 }
 
@@ -48,10 +48,18 @@ service EndpointDiscoveryService {
 // then an endpoint within that locality will be chosen based on its weight.
 // [#next-free-field: 6]
 message ClusterLoadAssignment {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment";
+
   // Load balancing policy settings.
   // [#next-free-field: 6]
   message Policy {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.ClusterLoadAssignment.Policy";
+
     message DropOverload {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload";
+
       // Identifier for the policy specifying the drop.
       string category = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/api/v3alpha/endpoint/BUILD b/api/envoy/api/v3alpha/endpoint/BUILD
index 4e89d949ab9d..cb1344273683 100644
--- a/api/envoy/api/v3alpha/endpoint/BUILD
+++ b/api/envoy/api/v3alpha/endpoint/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v2/endpoint:pkg",
+        "//envoy/api/v3alpha/core:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/api/v3alpha/endpoint/endpoint.proto b/api/envoy/api/v3alpha/endpoint/endpoint.proto
index 68fcac7b58f6..cdd0ec032b80 100644
--- a/api/envoy/api/v3alpha/endpoint/endpoint.proto
+++ b/api/envoy/api/v3alpha/endpoint/endpoint.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.endpoint;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.endpoint";
 option java_outer_classname = "EndpointProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.endpoint";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/core/base.proto";
@@ -12,14 +12,21 @@ import "envoy/api/v3alpha/core/health_check.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Endpoints]
 
 // Upstream host identifier.
 message Endpoint {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.Endpoint";
+
   // The optional health check configuration.
   message HealthCheckConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.endpoint.Endpoint.HealthCheckConfig";
+
     // Optional alternative health check port value.
     //
     // By default the health check address port of an upstream host is the same
@@ -53,6 +60,8 @@ message Endpoint {
 // An Endpoint that Envoy can route traffic to.
 // [#next-free-field: 6]
 message LbEndpoint {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LbEndpoint";
+
   // Upstream host identifier or a named reference.
   oneof host_identifier {
     Endpoint endpoint = 1;
@@ -90,6 +99,9 @@ message LbEndpoint {
 // balancing weights or different priorities.
 // [#next-free-field: 7]
 message LocalityLbEndpoints {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.endpoint.LocalityLbEndpoints";
+
   // Identifies location of where the upstream hosts run.
   core.Locality locality = 1;
 
diff --git a/api/envoy/api/v3alpha/endpoint/load_report.proto b/api/envoy/api/v3alpha/endpoint/load_report.proto
index 30f72b582bd1..844f776cf007 100644
--- a/api/envoy/api/v3alpha/endpoint/load_report.proto
+++ b/api/envoy/api/v3alpha/endpoint/load_report.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.endpoint;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.endpoint";
 option java_outer_classname = "LoadReportProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.endpoint";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/core/base.proto";
@@ -12,6 +12,8 @@ import "envoy/api/v3alpha/core/base.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // These are stats Envoy reports to GLB every so often. Report frequency is
@@ -21,6 +23,9 @@ import "validate/validate.proto";
 // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
 // [#next-free-field: 9]
 message UpstreamLocalityStats {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.endpoint.UpstreamLocalityStats";
+
   // Name of zone, region and optionally endpoint group these metrics were
   // collected from. Zone and region names could be empty if unknown.
   core.Locality locality = 1;
@@ -57,6 +62,9 @@ message UpstreamLocalityStats {
 // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
 // [#next-free-field: 8]
 message UpstreamEndpointStats {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.endpoint.UpstreamEndpointStats";
+
   // Upstream host address.
   core.Address address = 1;
 
@@ -96,6 +104,9 @@ message UpstreamEndpointStats {
 
 // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
 message EndpointLoadMetricStats {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.endpoint.EndpointLoadMetricStats";
+
   // Name of the metric; may be empty.
   string metric_name = 1;
 
@@ -113,7 +124,12 @@ message EndpointLoadMetricStats {
 // Next ID: 7
 // [#next-free-field: 7]
 message ClusterStats {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.ClusterStats";
+
   message DroppedRequests {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.endpoint.ClusterStats.DroppedRequests";
+
     // Identifier for the policy specifying the drop.
     string category = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/api/v3alpha/lds.proto b/api/envoy/api/v3alpha/lds.proto
index a5e9f4fcb6c4..2424071ce612 100644
--- a/api/envoy/api/v3alpha/lds.proto
+++ b/api/envoy/api/v3alpha/lds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_outer_classname = "LdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/address.proto";
@@ -18,6 +18,8 @@ import "google/api/annotations.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Listener]
@@ -35,15 +37,15 @@ service ListenerDiscoveryService {
   }
 
   rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:listeners"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:listeners";
+    option (google.api.http).body = "*";
   }
 }
 
-// [#next-free-field: 21]
+// [#next-free-field: 22]
 message Listener {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener";
+
   enum DrainType {
     // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check
     // filter), listener removal/modification, and hot restart.
@@ -57,6 +59,9 @@ message Listener {
 
   // [#not-implemented-hide:]
   message DeprecatedV1 {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Listener.DeprecatedV1";
+
     // Whether the listener should bind to the port. A listener that doesn't
     // bind can only receive connections redirected from other listeners that
     // set use_original_dst parameter to true. Default is true.
@@ -72,6 +77,9 @@ message Listener {
 
   // Configuration for listener connection balancing.
   message ConnectionBalanceConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.Listener.ConnectionBalanceConfig";
+
     // A connection balancer implementation that does exact balancing. This means that a lock is
     // held during balancing so that connection counts are nearly exactly balanced between worker
     // threads. This is "nearly" exact in the sense that a connection might close in parallel thus
@@ -79,6 +87,8 @@ message Listener {
     // sacrifices accept throughput for accuracy and should be used when there are a small number of
     // connections that rarely cycle (e.g., service mesh gRPC egress).
     message ExactBalance {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.Listener.ConnectionBalanceConfig.ExactBalance";
     }
 
     oneof balance_type {
@@ -223,4 +233,17 @@ message Listener {
   // If no configuration is specified, Envoy will not attempt to balance active connections between
   // worker threads.
   ConnectionBalanceConfig connection_balance_config = 20;
+
+  // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and
+  // create one socket for each worker thread. This distributes inbound connections roughly
+  // evenly across worker threads when there is a high number of connections. When this flag
+  // is set to false, all worker threads share one socket. For UDP this flag is forcibly set
+  // to true.
+  //
+  // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart
+  // (see `3rd paragraph in 'soreuseport' commit message
+  // <https://github.com/torvalds/linux/commit/c617f398edd4db2b8567a28e89>`_).
+  // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket
+  // <https://github.com/torvalds/linux/commit/40a1227ea845a37ab197dd1caffb60b047fa36b1>`_.
+  bool reuse_port = 21;
 }
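A minimal sketch, in proto text form, of a Listener using the new reuse_port field; the name and port are hypothetical. With reuse_port set, each worker thread gets its own SO_REUSEPORT socket instead of sharing one:

  name: "ingress_tcp"
  address {
    socket_address {
      address: "0.0.0.0"
      port_value: 10000
    }
  }
  # One accepting socket per worker thread (field 21 added above).
  reuse_port: true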
diff --git a/api/envoy/api/v3alpha/listener/BUILD b/api/envoy/api/v3alpha/listener/BUILD
index 4e89d949ab9d..5c16ea19c5fc 100644
--- a/api/envoy/api/v3alpha/listener/BUILD
+++ b/api/envoy/api/v3alpha/listener/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v2/listener:pkg",
+        "//envoy/api/v3alpha/core:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/api/v3alpha/listener/listener.proto b/api/envoy/api/v3alpha/listener/listener.proto
index b964f1d3f968..2cfcb73f2849 100644
--- a/api/envoy/api/v3alpha/listener/listener.proto
+++ b/api/envoy/api/v3alpha/listener/listener.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.listener;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.listener";
 option java_outer_classname = "ListenerProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.listener";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/core/base.proto";
@@ -13,12 +13,16 @@ import "google/protobuf/any.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Listener components]
 // Listener :ref:`configuration overview <config_listeners>`
 
 message Filter {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.Filter";
+
   reserved 3, 2;
 
   reserved "config";
@@ -64,6 +68,9 @@ message Filter {
 // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]
 // [#next-free-field: 13]
 message FilterChainMatch {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.listener.FilterChainMatch";
+
   enum ConnectionSourceType {
     // Any connection source matches.
     ANY = 0;
@@ -160,6 +167,8 @@ message FilterChainMatch {
 // various other parameters.
 // [#next-free-field: 8]
 message FilterChain {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChain";
+
   reserved 2;
 
   reserved "tls_context";
@@ -198,6 +207,9 @@ message FilterChain {
 }
 
 message ListenerFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.listener.ListenerFilter";
+
   reserved 2;
 
   reserved "config";
diff --git a/api/envoy/api/v3alpha/listener/quic_config.proto b/api/envoy/api/v3alpha/listener/quic_config.proto
index 9d30ab92798c..4f021de000ac 100644
--- a/api/envoy/api/v3alpha/listener/quic_config.proto
+++ b/api/envoy/api/v3alpha/listener/quic_config.proto
@@ -2,18 +2,23 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.listener;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.listener";
 option java_outer_classname = "QuicConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.listener";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: QUIC listener Config]
 
 // Configuration specific to the QUIC protocol.
 // Next id: 4
 message QuicProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.listener.QuicProtocolOptions";
+
   // Maximum number of streams that the client can negotiate per connection. 100
   // if not specified.
   google.protobuf.UInt32Value max_concurrent_streams = 1;
diff --git a/api/envoy/api/v3alpha/listener/udp_listener_config.proto b/api/envoy/api/v3alpha/listener/udp_listener_config.proto
index a65a985d71b3..f53f7a8ae523 100644
--- a/api/envoy/api/v3alpha/listener/udp_listener_config.proto
+++ b/api/envoy/api/v3alpha/listener/udp_listener_config.proto
@@ -2,17 +2,22 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.listener;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.listener";
 option java_outer_classname = "UdpListenerConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.listener";
 
 import "google/protobuf/any.proto";
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: UDP Listener Config]
 // Listener :ref:`configuration overview <config_listeners>`
 
 message UdpListenerConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.listener.UdpListenerConfig";
+
   reserved 2;
 
   reserved "config";
diff --git a/api/envoy/api/v3alpha/ratelimit/BUILD b/api/envoy/api/v3alpha/ratelimit/BUILD
index 5dc095ade27a..ee90746aa30a 100644
--- a/api/envoy/api/v3alpha/ratelimit/BUILD
+++ b/api/envoy/api/v3alpha/ratelimit/BUILD
@@ -4,4 +4,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 
 licenses(["notice"])  # Apache 2
 
-api_proto_package()
+api_proto_package(
+    deps = [
+        "//envoy/api/v2/ratelimit:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
+)
diff --git a/api/envoy/api/v3alpha/ratelimit/ratelimit.proto b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto
index 18f559778f19..28a6d7b98887 100644
--- a/api/envoy/api/v3alpha/ratelimit/ratelimit.proto
+++ b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto
@@ -2,9 +2,11 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.ratelimit;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.ratelimit";
 option java_outer_classname = "RatelimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.ratelimit";
+
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -52,7 +54,13 @@ import "validate/validate.proto";
 // The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.
 // This enables building complex application scenarios with a generic backend.
 message RateLimitDescriptor {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.ratelimit.RateLimitDescriptor";
+
   message Entry {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.ratelimit.RateLimitDescriptor.Entry";
+
     // Descriptor key.
     string key = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/api/v3alpha/rds.proto b/api/envoy/api/v3alpha/rds.proto
index c3df5c67dabe..378f03166a94 100644
--- a/api/envoy/api/v3alpha/rds.proto
+++ b/api/envoy/api/v3alpha/rds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.api.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_outer_classname = "RdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
@@ -15,6 +15,8 @@ import "envoy/api/v3alpha/route/route.proto";
 import "google/api/annotations.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: HTTP route configuration]
@@ -34,10 +36,8 @@ service RouteDiscoveryService {
   }
 
   rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:routes"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:routes";
+    option (google.api.http).body = "*";
   }
 }
 
@@ -59,6 +59,8 @@ service VirtualHostDiscoveryService {
 
 // [#next-free-field: 11]
 message RouteConfiguration {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration";
+
   // The name of the route configuration. For example, it might match
   // :ref:`route_config_name
   // <envoy_api_field_config.filter.network.http_connection_manager.v3alpha.Rds.route_config_name>`
@@ -139,6 +141,8 @@ message RouteConfiguration {
 
 // [#not-implemented-hide:]
 message Vhds {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Vhds";
+
   // Configuration source specifier for VHDS.
   core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];
 }
diff --git a/api/envoy/api/v3alpha/route/BUILD b/api/envoy/api/v3alpha/route/BUILD
index 8ab5e755a3f7..fdc24386cd78 100644
--- a/api/envoy/api/v3alpha/route/BUILD
+++ b/api/envoy/api/v3alpha/route/BUILD
@@ -6,8 +6,11 @@ licenses(["notice"])  # Apache 2
 
 api_proto_package(
     deps = [
+        "//envoy/api/v2/route:pkg",
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/type/matcher/v3alpha:pkg",
+        "//envoy/type/tracing/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/api/v3alpha/route/route.proto b/api/envoy/api/v3alpha/route/route.proto
index b0b409c71744..6b00b7b4526e 100644
--- a/api/envoy/api/v3alpha/route/route.proto
+++ b/api/envoy/api/v3alpha/route/route.proto
@@ -2,13 +2,14 @@ syntax = "proto3";
 
 package envoy.api.v3alpha.route;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha.route";
 option java_outer_classname = "RouteProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha.route";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/type/matcher/v3alpha/regex.proto";
 import "envoy/type/matcher/v3alpha/string.proto";
+import "envoy/type/tracing/v2/custom_tag.proto";
 import "envoy/type/v3alpha/percent.proto";
 import "envoy/type/v3alpha/range.proto";
 
@@ -17,6 +18,8 @@ import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: HTTP route]
@@ -30,6 +33,8 @@ import "validate/validate.proto";
 // upstream cluster to route to or whether to perform a redirect.
 // [#next-free-field: 19]
 message VirtualHost {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost";
+
   enum TlsRequirementType {
     // No TLS requirement for the virtual host.
     NONE = 0;
@@ -149,6 +154,8 @@ message VirtualHost {
 
 // A filter-defined action type.
 message FilterAction {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.FilterAction";
+
   google.protobuf.Any action = 1;
 }
 
@@ -161,6 +168,8 @@ message FilterAction {
 //   <envoy_api_msg_api.v3alpha.route.HeaderMatcher>`.
 // [#next-free-field: 18]
 message Route {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Route";
+
   reserved 6, 8;
 
   reserved "per_filter_config";
@@ -249,8 +258,13 @@ message Route {
 // traffic to be forwarded to each cluster. The router selects an upstream cluster based on the
 // weights.
 message WeightedCluster {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster";
+
   // [#next-free-field: 11]
   message ClusterWeight {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.WeightedCluster.ClusterWeight";
+
     reserved 7, 8;
 
     reserved "per_filter_config";
@@ -330,10 +344,17 @@ message WeightedCluster {
 
 // [#next-free-field: 12]
 message RouteMatch {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch";
+
   message GrpcRouteMatchOptions {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RouteMatch.GrpcRouteMatchOptions";
   }
 
   message TlsContextMatchOptions {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RouteMatch.TlsContextMatchOptions";
+
     // If specified, the route will match against whether or not a certificate is presented.
     google.protobuf.BoolValue presented = 1;
   }
@@ -418,6 +439,8 @@ message RouteMatch {
 
 // [#next-free-field: 12]
 message CorsPolicy {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy";
+
   reserved 1, 8, 7;
 
   reserved "allow_origin", "allow_origin_regex", "enabled";
@@ -466,6 +489,8 @@ message CorsPolicy {
 
 // [#next-free-field: 30]
 message RouteAction {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction";
+
   enum ClusterNotFoundResponseCode {
     // HTTP status code - 503 Service Unavailable.
     SERVICE_UNAVAILABLE = 0;
@@ -488,6 +513,9 @@ message RouteAction {
   // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is
   // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.
   message RequestMirrorPolicy {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RouteAction.RequestMirrorPolicy";
+
     reserved 2;
 
     reserved "runtime_key";
@@ -511,8 +539,15 @@ message RouteAction {
 
   // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer
   // <arch_overview_load_balancing_types>`.
+  // [#next-free-field: 6]
   message HashPolicy {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RouteAction.HashPolicy";
+
     message Header {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RouteAction.HashPolicy.Header";
+
       // The name of the request header that will be used to obtain the hash
       // key. If the request header is not present, no hash will be produced.
       string header_name = 1 [(validate.rules).string = {min_bytes: 1}];
@@ -533,6 +568,9 @@ message RouteAction {
     //    streams on the same connection will independently receive the same
     //    cookie, even if they arrive at the Envoy simultaneously.
     message Cookie {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RouteAction.HashPolicy.Cookie";
+
       // The name of the cookie that will be used to obtain the hash key. If the
       // cookie is not present and ttl below is not set, no hash will be
       // produced.
@@ -549,10 +587,23 @@ message RouteAction {
     }
 
     message ConnectionProperties {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RouteAction.HashPolicy.ConnectionProperties";
+
       // Hash on source IP address.
       bool source_ip = 1;
     }
 
+    message QueryParameter {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RouteAction.HashPolicy.QueryParameter";
+
+      // The name of the URL query parameter that will be used to obtain the hash
+      // key. If the parameter is not present, no hash will be produced. Query
+      // parameter names are case-sensitive.
+      string name = 1 [(validate.rules).string = {min_bytes: 1}];
+    }
+
     oneof policy_specifier {
       option (validate.required) = true;
 
@@ -564,6 +615,9 @@ message RouteAction {
 
       // Connection properties hash policy.
       ConnectionProperties connection_properties = 3;
+
+      // Query parameter hash policy.
+      QueryParameter query_parameter = 5;
     }
 
     // The flag that short-circuits the hash computing. This field provides a
@@ -595,6 +649,9 @@ message RouteAction {
   // <envoy_api_field_config.filter.network.http_connection_manager.v3alpha.HttpConnectionManager.upgrade_configs>`
   // but does not affect any custom filter chain specified there.
   message UpgradeConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RouteAction.UpgradeConfig";
+
     // The case-insensitive name of this upgrade, e.g. "websocket".
     // For each upgrade type present in upgrade_configs, requests with
     // Upgrade: [upgrade_type] will be proxied upstream.
@@ -806,7 +863,12 @@ message RouteAction {
 // HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.
 // [#next-free-field: 11]
 message RetryPolicy {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy";
+
   message RetryPriority {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RetryPolicy.RetryPriority";
+
     reserved 2;
 
     reserved "config";
@@ -819,6 +881,9 @@ message RetryPolicy {
   }
 
   message RetryHostPredicate {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RetryPolicy.RetryHostPredicate";
+
     reserved 2;
 
     reserved "config";
@@ -831,6 +896,9 @@ message RetryPolicy {
   }
 
   message RetryBackOff {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RetryPolicy.RetryBackOff";
+
     // Specifies the base interval between retries. This parameter is required and must be greater
     // than zero. Values less than 1 ms are rounded up to 1 ms.
     // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's
@@ -907,6 +975,8 @@ message RetryPolicy {
 
 // HTTP request hedging :ref:`architecture overview <arch_overview_http_routing_hedging>`.
 message HedgePolicy {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HedgePolicy";
+
   // Specifies the number of initial requests that should be sent upstream.
   // Must be at least 1.
   // Defaults to 1.
@@ -932,6 +1002,8 @@ message HedgePolicy {
 
 // [#next-free-field: 9]
 message RedirectAction {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RedirectAction";
+
   enum RedirectResponseCode {
     // Moved Permanently HTTP Status Code - 301.
     MOVED_PERMANENTLY = 0;
@@ -994,6 +1066,9 @@ message RedirectAction {
 }
 
 message DirectResponseAction {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.route.DirectResponseAction";
+
   // Specifies the HTTP response status to be returned.
   uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];
 
@@ -1009,6 +1084,8 @@ message DirectResponseAction {
 }
 
 message Decorator {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Decorator";
+
   // The operation name associated with the request matched to this route. If tracing is
   // enabled, this information will be used as the span name reported for this request.
   //
@@ -1021,6 +1098,8 @@ message Decorator {
 }
 
 message Tracing {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Tracing";
+
   // Target percentage of requests managed by this HTTP connection manager that will be force
   // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
   // header is set. This field is a direct analog for the runtime variable
@@ -1045,6 +1124,14 @@ message Tracing {
   // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.
   // Default: 100%
   type.v3alpha.FractionalPercent overall_sampling = 3;
+
+  // A list of custom tags with unique tag names to create tags for the active span.
+  // It will take effect after merging with the :ref:`corresponding configuration
+  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>`
+  // configured in the HTTP connection manager. If a tag with the same name is configured
+  // both in the HTTP connection manager and at the route level, the one configured here
+  // takes priority.
+  repeated type.tracing.v2.CustomTag custom_tags = 4;
 }
 
 // A virtual cluster is a way of specifying a regex matching rule against
@@ -1065,6 +1152,8 @@ message Tracing {
 //    every application endpoint. This is both not easily maintainable and as well the matching and
 //    statistics output are not free.
 message VirtualCluster {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualCluster";
+
   reserved 1, 3;
 
   reserved "pattern", "method";
@@ -1082,8 +1171,13 @@ message VirtualCluster {
 
 // Global rate limiting :ref:`architecture overview <arch_overview_rate_limit>`.
 message RateLimit {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit";
+
   // [#next-free-field: 7]
   message Action {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.route.RateLimit.Action";
+
     // The following descriptor entry is appended to the descriptor:
     //
     // .. code-block:: cpp
@@ -1092,6 +1186,8 @@ message RateLimit {
     //
     // <local service cluster> is derived from the :option:`--service-cluster` option.
     message SourceCluster {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RateLimit.Action.SourceCluster";
     }
 
     // The following descriptor entry is appended to the descriptor:
@@ -1113,6 +1209,8 @@ message RateLimit {
     // indicates which
     //   header in the request contains the target cluster.
     message DestinationCluster {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RateLimit.Action.DestinationCluster";
     }
 
     // The following descriptor entry is appended when a header contains a key that matches the
@@ -1122,6 +1220,9 @@ message RateLimit {
     //
     //   ("<descriptor_key>", "<header_value_queried_from_header>")
     message RequestHeaders {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RateLimit.Action.RequestHeaders";
+
       // The header name to be queried from the request headers. The header's
       // value is used to populate the value of the descriptor entry for the
       // descriptor_key.
@@ -1138,6 +1239,8 @@ message RateLimit {
     //
     //   ("remote_address", "<trusted address from x-forwarded-for>")
     message RemoteAddress {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RateLimit.Action.RemoteAddress";
     }
 
     // The following descriptor entry is appended to the descriptor:
@@ -1146,6 +1249,9 @@ message RateLimit {
     //
     //   ("generic_key", "<descriptor_value>")
     message GenericKey {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RateLimit.Action.GenericKey";
+
       // The value to use in the descriptor entry.
       string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];
     }
@@ -1156,6 +1262,9 @@ message RateLimit {
     //
     //   ("header_match", "<descriptor_value>")
     message HeaderValueMatch {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.route.RateLimit.Action.HeaderValueMatch";
+
       // The value to use in the descriptor entry.
       string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -1243,6 +1352,8 @@ message RateLimit {
 //  [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]
 // [#next-free-field: 12]
 message HeaderMatcher {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HeaderMatcher";
+
   reserved 2, 3, 5;
 
   reserved "regex_match";
@@ -1307,6 +1418,9 @@ message HeaderMatcher {
 // as an ampersand-separated list of keys and/or key=value elements.
 // [#next-free-field: 7]
 message QueryParameterMatcher {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.route.QueryParameterMatcher";
+
   reserved 3, 4;
 
   reserved "value", "regex";
diff --git a/api/envoy/api/v3alpha/srds.proto b/api/envoy/api/v3alpha/srds.proto
index 25f8ff591806..e642fa80e075 100644
--- a/api/envoy/api/v3alpha/srds.proto
+++ b/api/envoy/api/v3alpha/srds.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.api.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_outer_classname = "SrdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.api.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/discovery.proto";
 
 import "google/api/annotations.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: HTTP scoped routing configuration]
@@ -31,10 +33,8 @@ service ScopedRoutesDiscoveryService {
   }
 
   rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:scoped-routes"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:scoped-routes";
+    option (google.api.http).body = "*";
   }
 }
 
@@ -98,13 +98,22 @@ service ScopedRoutesDiscoveryService {
 // RouteConfiguration being assigned to the HTTP request/stream.
 //
 message ScopedRouteConfiguration {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.api.v2.ScopedRouteConfiguration";
+
   // Specifies a key which is matched against the output of the
   // :ref:`scope_key_builder<envoy_api_field_config.filter.network.http_connection_manager.v3alpha.ScopedRoutes.scope_key_builder>`
   // specified in the HttpConnectionManager. The matching is done per HTTP
   // request and is dependent on the order of the fragments contained in the
   // Key.
   message Key {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.api.v2.ScopedRouteConfiguration.Key";
+
     message Fragment {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.api.v2.ScopedRouteConfiguration.Key.Fragment";
+
       oneof type {
         option (validate.required) = true;
 
diff --git a/api/envoy/config/accesslog/v2/als.proto b/api/envoy/config/accesslog/v2/als.proto
index a45d0ca52052..84af198acbf0 100644
--- a/api/envoy/config/accesslog/v2/als.proto
+++ b/api/envoy/config/accesslog/v2/als.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.accesslog.v2;
 
+option java_package = "io.envoyproxy.envoy.config.accesslog.v2";
 option java_outer_classname = "AlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.accesslog.v2";
 
 import "envoy/api/v2/core/grpc_service.proto";
 
diff --git a/api/envoy/config/accesslog/v2/file.proto b/api/envoy/config/accesslog/v2/file.proto
index 16a49563ffc9..5e58671a0836 100644
--- a/api/envoy/config/accesslog/v2/file.proto
+++ b/api/envoy/config/accesslog/v2/file.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.accesslog.v2;
 
+option java_package = "io.envoyproxy.envoy.config.accesslog.v2";
 option java_outer_classname = "FileProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.accesslog.v2";
 
 import "google/protobuf/struct.proto";
 
@@ -20,14 +20,20 @@ message FileAccessLog {
   // A path to a local file to which to write the access log entries.
   string path = 1 [(validate.rules).string = {min_bytes: 1}];
 
-  // Access log format. Envoy supports :ref:`custom access log formats
-  // <config_access_log_format>` as well as a :ref:`default format
-  // <config_access_log_default_format>`.
   oneof access_log_format {
-    // Access log :ref:`format string<config_access_log_format_strings>`
+    // Access log :ref:`format string<config_access_log_format_strings>`.
+    // Envoy supports :ref:`custom access log formats <config_access_log_format>` as well as a
+    // :ref:`default format <config_access_log_default_format>`.
     string format = 2;
 
-    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`
+    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. All values
+    // are rendered as strings.
     google.protobuf.Struct json_format = 3;
+
+    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. Values are
+    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may
+    // be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). See the
+    // documentation for a specific command operator for details.
+    google.protobuf.Struct typed_json_format = 4;
   }
 }
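A sketch of a FileAccessLog in proto text form using the new typed_json_format field; the path and dictionary keys are hypothetical. Per the field comment above, numeric command operators such as %RESPONSE_CODE% can then be rendered as JSON numbers rather than strings:

  path: "/var/log/envoy/access.json"
  typed_json_format {
    fields {
      key: "code"
      value { string_value: "%RESPONSE_CODE%" }
    }
    fields {
      key: "path"
      value { string_value: "%REQ(:PATH)%" }
    }
  }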
diff --git a/api/envoy/config/accesslog/v3alpha/BUILD b/api/envoy/config/accesslog/v3alpha/BUILD
index 4e89d949ab9d..0f91837cfa50 100644
--- a/api/envoy/config/accesslog/v3alpha/BUILD
+++ b/api/envoy/config/accesslog/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/accesslog/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/accesslog/v3alpha/als.proto b/api/envoy/config/accesslog/v3alpha/als.proto
index 77589fffdae1..a723ff813802 100644
--- a/api/envoy/config/accesslog/v3alpha/als.proto
+++ b/api/envoy/config/accesslog/v3alpha/als.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.config.accesslog.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha";
 option java_outer_classname = "AlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha";
 
 import "envoy/api/v3alpha/core/grpc_service.proto";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: gRPC Access Log Service (ALS)]
@@ -21,6 +23,9 @@ import "validate/validate.proto";
 // <envoy_api_field_service.accesslog.v3alpha.StreamAccessLogsMessage.http_logs>`.
 // [#extension: envoy.access_loggers.http_grpc]
 message HttpGrpcAccessLogConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.accesslog.v2.HttpGrpcAccessLogConfig";
+
   CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];
 
   // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers
@@ -40,12 +45,18 @@ message HttpGrpcAccessLogConfig {
 // populate *StreamAccessLogsMessage.tcp_logs*.
 // [#extension: envoy.access_loggers.tcp_grpc]
 message TcpGrpcAccessLogConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.accesslog.v2.TcpGrpcAccessLogConfig";
+
   CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];
 }
 
 // Common configuration for gRPC access logs.
 // [#next-free-field: 6]
 message CommonGrpcAccessLogConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig";
+
   // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier
   // <envoy_api_msg_service.accesslog.v3alpha.StreamAccessLogsMessage.Identifier>`. This allows the
   // access log server to differentiate between different access logs coming from the same Envoy.
diff --git a/api/envoy/config/accesslog/v3alpha/file.proto b/api/envoy/config/accesslog/v3alpha/file.proto
index e826cdcb697e..f366dc5768c5 100644
--- a/api/envoy/config/accesslog/v3alpha/file.proto
+++ b/api/envoy/config/accesslog/v3alpha/file.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.accesslog.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha";
 option java_outer_classname = "FileProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha";
 
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: File access log]
@@ -17,17 +19,26 @@ import "validate/validate.proto";
 // <envoy_api_msg_config.filter.accesslog.v3alpha.AccessLog>` that writes log entries directly to a
 // file. Configures the built-in *envoy.file_access_log* AccessLog.
 message FileAccessLog {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.accesslog.v2.FileAccessLog";
+
   // A path to a local file to which to write the access log entries.
   string path = 1 [(validate.rules).string = {min_bytes: 1}];
 
-  // Access log format. Envoy supports :ref:`custom access log formats
-  // <config_access_log_format>` as well as a :ref:`default format
-  // <config_access_log_default_format>`.
   oneof access_log_format {
-    // Access log :ref:`format string<config_access_log_format_strings>`
+    // Access log :ref:`format string<config_access_log_format_strings>`.
+    // Envoy supports :ref:`custom access log formats <config_access_log_format>` as well as a
+    // :ref:`default format <config_access_log_default_format>`.
     string format = 2;
 
-    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`
+    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. All values
+    // are rendered as strings.
     google.protobuf.Struct json_format = 3;
+
+    // Access log :ref:`format dictionary<config_access_log_format_dictionaries>`. Values are
+    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may
+    // be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). See the
+    // documentation for a specific command operator for details.
+    google.protobuf.Struct typed_json_format = 4;
   }
 }
diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto
index 0f8d60133517..6fd3923088c8 100644
--- a/api/envoy/config/bootstrap/v2/bootstrap.proto
+++ b/api/envoy/config/bootstrap/v2/bootstrap.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.bootstrap.v2;
 
+option java_package = "io.envoyproxy.envoy.config.bootstrap.v2";
 option java_outer_classname = "BootstrapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.bootstrap.v2";
 
 import "envoy/api/v2/auth/cert.proto";
 import "envoy/api/v2/cds.proto";
@@ -28,7 +28,7 @@ import "validate/validate.proto";
 // <config_overview_v2_bootstrap>` for more detail.
 
 // Bootstrap :ref:`configuration overview <config_overview_v2_bootstrap>`.
-// [#next-free-field: 20]
+// [#next-free-field: 21]
 message Bootstrap {
   message StaticResources {
     // Static :ref:`Listeners <envoy_api_msg_Listener>`. These listeners are
@@ -155,6 +155,13 @@ message Bootstrap {
   // <server_statistics>` if specified. Envoy will not process this value, it will be sent as is to
   // :ref:`stats sinks <envoy_api_msg_config.metrics.v2.StatsSink>`.
   google.protobuf.UInt64Value stats_server_version_override = 19;
+
+  // Always use TCP queries instead of UDP queries for DNS lookups.
+  // This may be overridden on a per-cluster basis in cds_config,
+  // when :ref:`dns_resolvers <envoy_api_field_Cluster.dns_resolvers>` and
+  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_Cluster.use_tcp_for_dns_lookups>` are
+  // specified.
+  bool use_tcp_for_dns_lookups = 20;
 }
 
 // Administration interface :ref:`operations documentation
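A Bootstrap fragment in proto text form showing the new use_tcp_for_dns_lookups flag alongside a DNS cluster; the cluster is hypothetical. Per the field comment above, an individual cluster can still override this when dns_resolvers and its own use_tcp_for_dns_lookups are specified:

  use_tcp_for_dns_lookups: true
  static_resources {
    clusters {
      name: "backend"
      type: STRICT_DNS
      connect_timeout { seconds: 1 }
    }
  }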
diff --git a/api/envoy/config/bootstrap/v3alpha/BUILD b/api/envoy/config/bootstrap/v3alpha/BUILD
index 8ce92ade1e2f..8fc3f433ef1a 100644
--- a/api/envoy/config/bootstrap/v3alpha/BUILD
+++ b/api/envoy/config/bootstrap/v3alpha/BUILD
@@ -9,8 +9,10 @@ api_proto_package(
         "//envoy/api/v3alpha:pkg",
         "//envoy/api/v3alpha/auth:pkg",
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/bootstrap/v2:pkg",
         "//envoy/config/metrics/v3alpha:pkg",
         "//envoy/config/overload/v3alpha:pkg",
         "//envoy/config/trace/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/bootstrap/v3alpha/bootstrap.proto b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto
index 2553e9334856..9b9355296eb4 100644
--- a/api/envoy/config/bootstrap/v3alpha/bootstrap.proto
+++ b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.bootstrap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.bootstrap.v3alpha";
 option java_outer_classname = "BootstrapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.bootstrap.v3alpha";
 
 import "envoy/api/v3alpha/auth/cert.proto";
 import "envoy/api/v3alpha/cds.proto";
@@ -20,6 +20,8 @@ import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Bootstrap]
@@ -28,9 +30,15 @@ import "validate/validate.proto";
 // <config_overview_v2_bootstrap>` for more detail.
 
 // Bootstrap :ref:`configuration overview <config_overview_v2_bootstrap>`.
-// [#next-free-field: 20]
+// [#next-free-field: 21]
 message Bootstrap {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.bootstrap.v2.Bootstrap";
+
   message StaticResources {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.bootstrap.v2.Bootstrap.StaticResources";
+
     // Static :ref:`Listeners <envoy_api_msg_api.v3alpha.Listener>`. These listeners are
     // available regardless of LDS configuration.
     repeated api.v3alpha.Listener listeners = 1;
@@ -48,6 +56,9 @@ message Bootstrap {
   }
 
   message DynamicResources {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.bootstrap.v2.Bootstrap.DynamicResources";
+
     reserved 4;
 
     // All :ref:`Listeners <envoy_api_msg_api.v3alpha.Listener>` are provided by a single
@@ -151,11 +162,20 @@ message Bootstrap {
   // <server_statistics>` if specified. Envoy will not process this value, it will be sent as is to
   // :ref:`stats sinks <envoy_api_msg_config.metrics.v3alpha.StatsSink>`.
   google.protobuf.UInt64Value stats_server_version_override = 19;
+
+  // Always use TCP queries instead of UDP queries for DNS lookups.
+  // This may be overridden on a per-cluster basis in cds_config,
+  // when :ref:`dns_resolvers <envoy_api_field_api.v3alpha.Cluster.dns_resolvers>` and
+  // :ref:`use_tcp_for_dns_lookups <envoy_api_field_api.v3alpha.Cluster.use_tcp_for_dns_lookups>`
+  // are specified.
+  bool use_tcp_for_dns_lookups = 20;
 }
 
 // Administration interface :ref:`operations documentation
 // <operations_admin_interface>`.
 message Admin {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Admin";
+
   // The path to write the access log for the administration server. If no
   // access log is desired specify '/dev/null'. This is only required if
   // :ref:`address <envoy_api_field_config.bootstrap.v3alpha.Admin.address>` is set.
@@ -176,7 +196,13 @@ message Admin {
 
 // Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.
 message ClusterManager {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.bootstrap.v2.ClusterManager";
+
   message OutlierDetection {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.bootstrap.v2.ClusterManager.OutlierDetection";
+
     // Specifies the path to the outlier event log.
     string event_log_path = 1;
   }
@@ -210,6 +236,8 @@ message ClusterManager {
 // nonresponsive threads and kills the process after the configured thresholds.
 // See the :ref:`watchdog documentation <operations_performance_watchdog>` for more information.
 message Watchdog {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog";
+
   // The duration after which Envoy counts a nonresponsive thread in the
   // *watchdog_miss* statistic. If not specified the default is 200ms.
   google.protobuf.Duration miss_timeout = 1;
@@ -232,6 +260,8 @@ message Watchdog {
 
 // Runtime :ref:`configuration overview <config_runtime>` (deprecated).
 message Runtime {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Runtime";
+
   // The implementation assumes that the file system tree is accessed via a
   // symbolic link. An atomic link swap is used when a new tree should be
   // switched to. This parameter specifies the path to the symbolic link. Envoy
@@ -262,8 +292,14 @@ message Runtime {
 
 // [#next-free-field: 6]
 message RuntimeLayer {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.bootstrap.v2.RuntimeLayer";
+
   // :ref:`Disk runtime <config_runtime_local_disk>` layer.
   message DiskLayer {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.bootstrap.v2.RuntimeLayer.DiskLayer";
+
     // The implementation assumes that the file system tree is accessed via a
     // symbolic link. An atomic link swap is used when a new tree should be
     // switched to. This parameter specifies the path to the symbolic link.
@@ -285,10 +321,15 @@ message RuntimeLayer {
 
   // :ref:`Admin console runtime <config_runtime_admin>` layer.
   message AdminLayer {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.bootstrap.v2.RuntimeLayer.AdminLayer";
   }
 
   // :ref:`Runtime Discovery Service (RTDS) <config_runtime_rtds>` layer.
   message RtdsLayer {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer";
+
     // Resource to subscribe to at *rtds_config* for the RTDS layer.
     string name = 1;
 
@@ -319,6 +360,9 @@ message RuntimeLayer {
 
 // Runtime :ref:`configuration overview <config_runtime>`.
 message LayeredRuntime {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.bootstrap.v2.LayeredRuntime";
+
   // The :ref:`layers <config_runtime_layering>` of the runtime. This is ordered
   // such that later layers in the list overlay earlier entries.
   repeated RuntimeLayer layers = 1;
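
To show how the new *use_tcp_for_dns_lookups* field composes with the per-cluster override its comment references, here is a minimal, hedged bootstrap sketch; the cluster name, resolver address, endpoint, and the v2 field spellings are illustrative assumptions, not part of this patch.

.. code-block:: yaml

  # Sketch only (not a complete bootstrap): force TCP DNS lookups globally, then
  # opt one STRICT_DNS cluster back into UDP via its own resolver settings.
  use_tcp_for_dns_lookups: true
  static_resources:
    clusters:
    - name: dns_cluster                    # hypothetical cluster name
      type: STRICT_DNS
      connect_timeout: 1s
      dns_resolvers:
      - socket_address: {address: 8.8.8.8, port_value: 53}
      use_tcp_for_dns_lookups: false       # per-cluster override referenced in the comment above
      load_assignment:
        cluster_name: dns_cluster
        endpoints:
        - lb_endpoints:
          - endpoint:
              address:
                socket_address: {address: service.example.com, port_value: 443}
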
diff --git a/api/envoy/config/cluster/aggregate/v2alpha/BUILD b/api/envoy/config/cluster/aggregate/v2alpha/BUILD
new file mode 100644
index 000000000000..5dc095ade27a
--- /dev/null
+++ b/api/envoy/config/cluster/aggregate/v2alpha/BUILD
@@ -0,0 +1,7 @@
+# DO NOT EDIT. This file is generated by tools/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"])  # Apache 2
+
+api_proto_package()
diff --git a/api/envoy/config/cluster/aggregate/v2alpha/cluster.proto b/api/envoy/config/cluster/aggregate/v2alpha/cluster.proto
new file mode 100644
index 000000000000..73e79150d514
--- /dev/null
+++ b/api/envoy/config/cluster/aggregate/v2alpha/cluster.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package envoy.config.cluster.aggregate.v2alpha;
+
+option java_package = "io.envoyproxy.envoy.config.cluster.aggregate.v2alpha";
+option java_outer_classname = "ClusterProto";
+option java_multiple_files = true;
+
+import "validate/validate.proto";
+
+// [#protodoc-title: Aggregate cluster configuration]
+
+// Configuration for the aggregate cluster. See the :ref:`architecture overview
+// <arch_overview_aggregate_cluster>` for more information.
+// [#extension: envoy.clusters.aggregate]
+message ClusterConfig {
+  // Load balancing clusters in the aggregate cluster. Clusters are prioritized based on the order
+  // in which they appear in this list.
+  repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}];
+}
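
As a rough illustration of the new *ClusterConfig* message, a hedged CDS-style cluster entry; the cluster names, timeout, and the *CLUSTER_PROVIDED* load-balancing policy are assumptions about how a custom cluster is typically wired, not taken from this patch.

.. code-block:: yaml

  # Sketch: prefer "primary" and spill over to "secondary"; both clusters must be
  # defined elsewhere in the configuration.
  - name: aggregate_cluster
    connect_timeout: 0.25s
    lb_policy: CLUSTER_PROVIDED
    cluster_type:
      name: envoy.clusters.aggregate
      typed_config:
        "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig
        clusters:
        - primary
        - secondary
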
diff --git a/api/envoy/config/cluster/aggregate/v3alpha/BUILD b/api/envoy/config/cluster/aggregate/v3alpha/BUILD
new file mode 100644
index 000000000000..5dc095ade27a
--- /dev/null
+++ b/api/envoy/config/cluster/aggregate/v3alpha/BUILD
@@ -0,0 +1,7 @@
+# DO NOT EDIT. This file is generated by tools/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"])  # Apache 2
+
+api_proto_package()
diff --git a/api/envoy/config/cluster/aggregate/v3alpha/cluster.proto b/api/envoy/config/cluster/aggregate/v3alpha/cluster.proto
new file mode 100644
index 000000000000..da1107274769
--- /dev/null
+++ b/api/envoy/config/cluster/aggregate/v3alpha/cluster.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package envoy.config.cluster.aggregate.v3alpha;
+
+option java_outer_classname = "ClusterProto";
+option java_multiple_files = true;
+option java_package = "io.envoyproxy.envoy.config.cluster.aggregate.v3alpha";
+
+import "validate/validate.proto";
+
+// [#protodoc-title: Aggregate cluster configuration]
+
+// Configuration for the aggregate cluster. See the :ref:`architecture overview
+// <arch_overview_aggregate_cluster>` for more information.
+// [#extension: envoy.clusters.aggregate]
+message ClusterConfig {
+  // Load balancing clusters in the aggregate cluster. Clusters are prioritized based on the order
+  // in which they appear in this list.
+  repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}];
+}
diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto
index ef87aeab5c65..71a95434a2bd 100644
--- a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto
+++ b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.cluster.dynamic_forward_proxy.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha";
 option java_outer_classname = "ClusterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha";
 
 import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto";
 
diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD
index d0df543383dc..09b370136402 100644
--- a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD
+++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/common/dynamic_forward_proxy/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg",
+        "//envoy/config/common/dynamic_forward_proxy/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto
index 609c3f81c098..167e1c2a2507 100644
--- a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto
+++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.cluster.dynamic_forward_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v3alpha";
 option java_outer_classname = "ClusterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v3alpha";
 
 import "envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Dynamic forward proxy cluster configuration]
@@ -16,6 +18,9 @@ import "validate/validate.proto";
 // <arch_overview_http_dynamic_forward_proxy>` for more information.
 // [#extension: envoy.clusters.dynamic_forward_proxy]
 message ClusterConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig";
+
   // The DNS cache configuration that the cluster will attach to. Note this configuration must
   // match that of associated :ref:`dynamic forward proxy HTTP filter configuration
   // <envoy_api_field_config.filter.http.dynamic_forward_proxy.v3alpha.FilterConfig.dns_cache_config>`.
diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto
index beef21970085..e9927c63f2c8 100644
--- a/api/envoy/config/cluster/redis/redis_cluster.proto
+++ b/api/envoy/config/cluster/redis/redis_cluster.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.cluster.redis;
 
+option java_package = "io.envoyproxy.envoy.config.cluster.redis";
 option java_outer_classname = "RedisClusterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.cluster.redis";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
diff --git a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto
index 56d11293cc27..e9720cda8d44 100644
--- a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto
+++ b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.common.dynamic_forward_proxy.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha";
 option java_outer_classname = "DnsCacheProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha";
 
 import "envoy/api/v2/cds.proto";
 
diff --git a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD
index 31d6642f9a16..876fe62a5e0c 100644
--- a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD
+++ b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha:pkg"],
+    deps = [
+        "//envoy/api/v3alpha:pkg",
+        "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto
index 780b23871f13..d59ad622d072 100644
--- a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto
+++ b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.config.common.dynamic_forward_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v3alpha";
 option java_outer_classname = "DnsCacheProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v3alpha";
 
 import "envoy/api/v3alpha/cds.proto";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Dynamic forward proxy common configuration]
@@ -19,6 +21,9 @@ import "validate/validate.proto";
 // <arch_overview_http_dynamic_forward_proxy>` for more information.
 // [#next-free-field: 6]
 message DnsCacheConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig";
+
   // The name of the cache. Multiple named caches allow independent dynamic forward proxy
   // configurations to operate within a single Envoy process using different configurations. All
   // configurations with the same name *must* otherwise have the same settings when referenced
diff --git a/api/envoy/config/common/tap/v2alpha/common.proto b/api/envoy/config/common/tap/v2alpha/common.proto
index 391bed13c69c..30de0067ee05 100644
--- a/api/envoy/config/common/tap/v2alpha/common.proto
+++ b/api/envoy/config/common/tap/v2alpha/common.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.common.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha";
 option java_outer_classname = "CommonProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha";
 
 import "envoy/api/v2/core/config_source.proto";
 import "envoy/service/tap/v2alpha/common.proto";
diff --git a/api/envoy/config/common/tap/v3alpha/BUILD b/api/envoy/config/common/tap/v3alpha/BUILD
index 2c75bdcc328c..bf646feaa80b 100644
--- a/api/envoy/config/common/tap/v3alpha/BUILD
+++ b/api/envoy/config/common/tap/v3alpha/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/common/tap/v2alpha:pkg",
         "//envoy/service/tap/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/common/tap/v3alpha/common.proto b/api/envoy/config/common/tap/v3alpha/common.proto
index 74c23b9c0123..127bb3e44220 100644
--- a/api/envoy/config/common/tap/v3alpha/common.proto
+++ b/api/envoy/config/common/tap/v3alpha/common.proto
@@ -2,21 +2,29 @@ syntax = "proto3";
 
 package envoy.config.common.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.common.tap.v3alpha";
 option java_outer_classname = "CommonProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.common.tap.v3alpha";
 
 import "envoy/api/v3alpha/core/config_source.proto";
 import "envoy/service/tap/v3alpha/common.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Common tap extension configuration]
 
 // Common configuration for all tap extensions.
 message CommonExtensionConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.common.tap.v2alpha.CommonExtensionConfig";
+
   // [#not-implemented-hide:]
   message TapDSConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.common.tap.v2alpha.CommonExtensionConfig.TapDSConfig";
+
     // Configuration for the source of TapDS updates for this Cluster.
     api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];
 
@@ -42,6 +50,9 @@ message CommonExtensionConfig {
 // Configuration for the admin handler. See :ref:`here <config_http_filters_tap_admin_handler>` for
 // more information.
 message AdminConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.common.tap.v2alpha.AdminConfig";
+
   // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is
   // matched to the configured filter opaque ID to determine which filter to configure.
   string config_id = 1 [(validate.rules).string = {min_bytes: 1}];
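
A hedged sketch of how *AdminConfig.config_id* is typically consumed, assuming a v2alpha HTTP tap filter wrapper; the filter name *envoy.filters.http.tap*, the *Tap*/*common_config* wrapper, and the config ID value are assumptions not shown in this patch.

.. code-block:: yaml

  # Sketch: an HTTP tap filter whose tap configuration is pushed at runtime through
  # the admin /tap endpoint, keyed by the opaque config_id.
  - name: envoy.filters.http.tap
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap
      common_config:
        admin_config:
          config_id: test_config_id
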
diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto
index b336fb0b1673..489bc82dc469 100644
--- a/api/envoy/config/filter/accesslog/v2/accesslog.proto
+++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.accesslog.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2";
 option java_outer_classname = "AccesslogProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/api/v2/route/route.proto";
diff --git a/api/envoy/config/filter/accesslog/v3alpha/BUILD b/api/envoy/config/filter/accesslog/v3alpha/BUILD
index 6ee5eedab8d4..8f85caf88007 100644
--- a/api/envoy/config/filter/accesslog/v3alpha/BUILD
+++ b/api/envoy/config/filter/accesslog/v3alpha/BUILD
@@ -8,6 +8,8 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/route:pkg",
+        "//envoy/config/filter/accesslog/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto
index 6c21668b593a..5a32bc1a022d 100644
--- a/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto
+++ b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.accesslog.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v3alpha";
 option java_outer_classname = "AccesslogProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/route/route.proto";
@@ -13,11 +13,16 @@ import "envoy/type/v3alpha/percent.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Common access log types]
 
 message AccessLog {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.AccessLog";
+
   reserved 3;
 
   reserved "config";
@@ -49,6 +54,9 @@ message AccessLog {
 
 // [#next-free-field: 12]
 message AccessLogFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.AccessLogFilter";
+
   oneof filter_specifier {
     option (validate.required) = true;
 
@@ -89,6 +97,9 @@ message AccessLogFilter {
 
 // Filter on an integer comparison.
 message ComparisonFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.ComparisonFilter";
+
   enum Op {
     // =
     EQ = 0;
@@ -109,12 +120,18 @@ message ComparisonFilter {
 
 // Filters on HTTP response/status code.
 message StatusCodeFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.StatusCodeFilter";
+
   // Comparison.
   ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];
 }
 
 // Filters on total request duration in milliseconds.
 message DurationFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.DurationFilter";
+
   // Comparison.
   ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];
 }
@@ -122,15 +139,22 @@ message DurationFilter {
 // Filters for requests that are not health check requests. A health check
 // request is marked by the health check filter.
 message NotHealthCheckFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.NotHealthCheckFilter";
 }
 
 // Filters for requests that are traceable. See the tracing overview for more
 // information on how a request becomes traceable.
 message TraceableFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.TraceableFilter";
 }
 
 // Filters for random sampling of requests.
 message RuntimeFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.RuntimeFilter";
+
   // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field.
   // If found in runtime, this value will replace the default numerator.
   string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}];
@@ -157,6 +181,9 @@ message RuntimeFilter {
 // Filters are evaluated sequentially and if one of them returns false, the
 // filter returns false immediately.
 message AndFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.AndFilter";
+
   repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];
 }
 
@@ -164,11 +191,17 @@ message AndFilter {
 // Filters are evaluated sequentially and if one of them returns true, the
 // filter returns true immediately.
 message OrFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.OrFilter";
+
   repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];
 }
 
 // Filters requests based on the presence or value of a request header.
 message HeaderFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.HeaderFilter";
+
   // Only requests with a header which matches the specified HeaderMatcher will pass the filter
   // check.
   api.v3alpha.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];
@@ -178,6 +211,9 @@ message HeaderFilter {
 // A list of the response flags can be found
 // in the access log formatter :ref:`documentation<config_access_log_format_response_flags>`.
 message ResponseFlagFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.ResponseFlagFilter";
+
   // Only responses with any of the flags listed in this field will be logged.
   // This field is optional. If it is not specified, then any response flag will pass
   // the filter check.
@@ -211,6 +247,9 @@ message ResponseFlagFilter {
 // Filters gRPC requests based on their response status. If a gRPC status is not provided, the
 // filter will infer the status from the HTTP status code.
 message GrpcStatusFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.GrpcStatusFilter";
+
   enum Status {
     OK = 0;
     CANCELED = 1;
@@ -241,6 +280,9 @@ message GrpcStatusFilter {
 
 // Extension filter is statically registered at runtime.
 message ExtensionFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.accesslog.v2.ExtensionFilter";
+
   reserved 2;
 
   reserved "config";
diff --git a/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto
index 4e65f14e0ea9..28bbdec4aff1 100644
--- a/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto
+++ b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.dubbo.router.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1";
 option java_outer_classname = "RouterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1";
 
 // [#protodoc-title: Router]
 // Dubbo router :ref:`configuration overview <config_dubbo_filters_router>`.
diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto
index 8414f18baf67..696253f874eb 100644
--- a/api/envoy/config/filter/fault/v2/fault.proto
+++ b/api/envoy/config/filter/fault/v2/fault.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.fault.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.fault.v2";
 option java_outer_classname = "FaultProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.fault.v2";
 
 import "envoy/type/percent.proto";
 
diff --git a/api/envoy/config/filter/fault/v3alpha/BUILD b/api/envoy/config/filter/fault/v3alpha/BUILD
index 30e23239cc1b..57f3af3bcbc0 100644
--- a/api/envoy/config/filter/fault/v3alpha/BUILD
+++ b/api/envoy/config/filter/fault/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/type/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/filter/fault/v2:pkg",
+        "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/fault/v3alpha/fault.proto b/api/envoy/config/filter/fault/v3alpha/fault.proto
index e26d32cfada9..8a225c47326c 100644
--- a/api/envoy/config/filter/fault/v3alpha/fault.proto
+++ b/api/envoy/config/filter/fault/v3alpha/fault.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.fault.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.fault.v3alpha";
 option java_outer_classname = "FaultProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.fault.v3alpha";
 
 import "envoy/type/v3alpha/percent.proto";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Common fault injection types]
@@ -18,6 +20,9 @@ import "validate/validate.proto";
 // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections.
 // [#next-free-field: 6]
 message FaultDelay {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.fault.v2.FaultDelay";
+
   enum FaultDelayType {
     // Unused and deprecated.
     FIXED = 0;
@@ -27,6 +32,8 @@ message FaultDelay {
   // :ref:`http fault filter <config_http_filters_fault_injection_http_header>` documentation for
   // more information.
   message HeaderDelay {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.fault.v2.FaultDelay.HeaderDelay";
   }
 
   reserved 2, 1;
@@ -54,8 +61,14 @@ message FaultDelay {
 
 // Describes a rate limit to be applied.
 message FaultRateLimit {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.fault.v2.FaultRateLimit";
+
   // Describes a fixed/constant rate limit.
   message FixedLimit {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.fault.v2.FaultRateLimit.FixedLimit";
+
     // The limit supplied in KiB/s.
     uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}];
   }
@@ -64,6 +77,8 @@ message FaultRateLimit {
   // :ref:`http fault filter <config_http_filters_fault_injection_http_header>` documentation for
   // more information.
   message HeaderLimit {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit";
   }
 
   oneof limit_type {
diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto
index a4ddb8819eff..52a095195995 100644
--- a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto
+++ b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.adaptive_concurrency.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha";
 option java_outer_classname = "AdaptiveConcurrencyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/type/percent.proto";
diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD
index d5728c7ddb9d..f99d6310420e 100644
--- a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto
index eb9d8791ad54..a0745ce77479 100644
--- a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto
+++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.adaptive_concurrency.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v3alpha";
 option java_outer_classname = "AdaptiveConcurrencyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/type/v3alpha/percent.proto";
@@ -13,6 +13,8 @@ import "google/api/annotations.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Adaptive Concurrency]
@@ -22,9 +24,16 @@ import "validate/validate.proto";
 
 // Configuration parameters for the gradient controller.
 message GradientControllerConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig";
+
   // Parameters controlling the periodic recalculation of the concurrency limit from sampled request
   // latencies.
   message ConcurrencyLimitCalculationParams {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig."
+        "ConcurrencyLimitCalculationParams";
+
     // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000.
     google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}];
 
@@ -38,6 +47,10 @@ message GradientControllerConfig {
   // Parameters controlling the periodic minRTT recalculation.
   // [#next-free-field: 6]
   message MinimumRTTCalculationParams {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig."
+        "MinimumRTTCalculationParams";
+
     // The time interval between recalculating the minimum request round-trip time.
     google.protobuf.Duration interval = 1 [(validate.rules).duration = {
       required: true
@@ -76,6 +89,9 @@ message GradientControllerConfig {
 }
 
 message AdaptiveConcurrency {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency";
+
   oneof concurrency_controller_config {
     option (validate.required) = true;
 
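
A hedged sketch of a gradient controller configuration; the filter name, the v2alpha field spellings (*concurrency_limit_params*, *min_rtt_calc_params*), and the numeric values are assumptions, not taken from this patch.

.. code-block:: yaml

  # Sketch: recalculate the concurrency limit every 100ms from the p90 latency sample,
  # and re-measure minRTT every 30s using 50 requests.
  - name: envoy.filters.http.adaptive_concurrency
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency
      gradient_controller_config:
        sample_aggregate_percentile:
          value: 90
        concurrency_limit_params:
          max_concurrency_limit: 1000
          concurrency_update_interval: 0.1s
        min_rtt_calc_params:
          interval: 30s
          request_count: 50
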
diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto
index f342c72d5b01..909e9a2b4143 100644
--- a/api/envoy/config/filter/http/buffer/v2/buffer.proto
+++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.buffer.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2";
 option java_outer_classname = "BufferProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2";
 
 import "google/protobuf/wrappers.proto";
 
diff --git a/api/envoy/config/filter/http/csrf/v2/csrf.proto b/api/envoy/config/filter/http/csrf/v2/csrf.proto
index acc7718b1e1b..2ef87b1f9d4d 100644
--- a/api/envoy/config/filter/http/csrf/v2/csrf.proto
+++ b/api/envoy/config/filter/http/csrf/v2/csrf.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.csrf.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2";
 option java_outer_classname = "CsrfProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/type/matcher/string.proto";
diff --git a/api/envoy/config/filter/http/csrf/v3alpha/BUILD b/api/envoy/config/filter/http/csrf/v3alpha/BUILD
index 1a4a60504df0..466cdb8d6991 100644
--- a/api/envoy/config/filter/http/csrf/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/csrf/v3alpha/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/filter/http/csrf/v2:pkg",
         "//envoy/type/matcher/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto
index 5d3c3eca6f2e..23ec7be0a302 100644
--- a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto
+++ b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto
@@ -2,13 +2,15 @@ syntax = "proto3";
 
 package envoy.config.filter.http.csrf.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v3alpha";
 option java_outer_classname = "CsrfProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/type/matcher/v3alpha/string.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: CSRF]
@@ -17,6 +19,9 @@ import "validate/validate.proto";
 
 // CSRF filter config.
 message CsrfPolicy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.csrf.v2.CsrfPolicy";
+
   // Specifies the % of requests for which the CSRF filter is enabled.
   //
   // If :ref:`runtime_key <envoy_api_field_core.runtimefractionalpercent.runtime_key>` is specified,
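
A hedged sketch of a v2 *CsrfPolicy*; the filter name, runtime key, and percentage are illustrative assumptions.

.. code-block:: yaml

  # Sketch: enforce CSRF checks on all requests, with a runtime escape hatch.
  - name: envoy.filters.http.csrf
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.csrf.v2.CsrfPolicy
      filter_enabled:
        runtime_key: csrf.enabled
        default_value: {numerator: 100, denominator: HUNDRED}
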
diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto
index d66fa81f8eab..4c4e1a5753cb 100644
--- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto
+++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.dynamic_forward_proxy.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha";
 option java_outer_classname = "DynamicForwardProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha";
 
 import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto";
 
diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD
index d0df543383dc..dd57048b6ceb 100644
--- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/common/dynamic_forward_proxy/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/common/dynamic_forward_proxy/v3alpha:pkg",
+        "//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto
index 62161c25f1d4..e28aabfb06a4 100644
--- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto
+++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.filter.http.dynamic_forward_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v3alpha";
 option java_outer_classname = "DynamicForwardProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v3alpha";
 
 import "envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Dynamic forward proxy]
@@ -16,6 +18,9 @@ import "validate/validate.proto";
 // <arch_overview_http_dynamic_forward_proxy>` for more information.
 // [#extension: envoy.filters.http.dynamic_forward_proxy]
 message FilterConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig";
+
   // The DNS cache configuration that the filter will attach to. Note this configuration must
   // match that of associated :ref:`dynamic forward proxy cluster configuration
   // <envoy_api_field_config.cluster.dynamic_forward_proxy.v3alpha.ClusterConfig.dns_cache_config>`.
@@ -25,6 +30,9 @@ message FilterConfig {
 
 // Per-route configuration for the dynamic forward proxy HTTP filter.
 message PerRouteConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig";
+
   oneof host_rewrite_specifier {
     // Indicates that before DNS lookup, the host header will be swapped with
     // this value. If not set or empty, the original host header value
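
To illustrate the matching requirement called out in *FilterConfig* (and in the dynamic forward proxy cluster and *DnsCacheConfig* messages earlier in this patch), a hedged sketch with the filter and cluster fragments side by side; the cache name, lookup family, and surrounding wiring are assumptions.

.. code-block:: yaml

  # Sketch: the HTTP filter and the custom cluster must carry identical DNS cache configs.
  http_filters:
  - name: envoy.filters.http.dynamic_forward_proxy
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig
      dns_cache_config:
        name: dynamic_forward_proxy_cache_config
        dns_lookup_family: V4_ONLY

  clusters:
  - name: dynamic_forward_proxy_cluster
    connect_timeout: 1s
    lb_policy: CLUSTER_PROVIDED
    cluster_type:
      name: envoy.clusters.dynamic_forward_proxy
      typed_config:
        "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig
        dns_cache_config:
          name: dynamic_forward_proxy_cache_config
          dns_lookup_family: V4_ONLY
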
diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto
index 8e3eb7b4c2da..5b1529795559 100644
--- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto
+++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.ext_authz.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2";
 option java_outer_classname = "ExtAuthzProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/api/v2/core/grpc_service.proto";
diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD b/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD
index 8ab5e755a3f7..cafb58dbcf51 100644
--- a/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/ext_authz/v3alpha/BUILD
@@ -7,7 +7,9 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/filter/http/ext_authz/v2:pkg",
         "//envoy/type/matcher/v3alpha:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto
index 22ccf8b27739..01825760a5f7 100644
--- a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto
+++ b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.ext_authz.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v3alpha";
 option java_outer_classname = "ExtAuthzProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/core/grpc_service.proto";
@@ -12,6 +12,8 @@ import "envoy/api/v3alpha/core/http_uri.proto";
 import "envoy/type/matcher/v3alpha/string.proto";
 import "envoy/type/v3alpha/http_status.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: External Authorization]
@@ -20,6 +22,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 11]
 message ExtAuthz {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ext_authz.v2.ExtAuthz";
+
   reserved 4;
 
   reserved "use_alpha";
@@ -99,6 +104,9 @@ message ExtAuthz {
 
 // Configuration for buffering the request data.
 message BufferSettings {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ext_authz.v2.BufferSettings";
+
   // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return
   // *HTTP 413* and will *not* initiate the authorization process when the buffer reaches the number
   // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow
@@ -137,6 +145,9 @@ message BufferSettings {
 // for details.
 // [#next-free-field: 9]
 message HttpService {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ext_authz.v2.HttpService";
+
   reserved 3, 4, 5, 6;
 
   // Sets the HTTP server URI which the authorization requests must be sent to.
@@ -153,6 +164,9 @@ message HttpService {
 }
 
 message AuthorizationRequest {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ext_authz.v2.AuthorizationRequest";
+
   // Authorization request will include the client request headers that have a corresponding match
   // in the :ref:`list <envoy_api_msg_type.matcher.v3alpha.ListStringMatcher>`. Note that in
   // addition to the user's supplied matchers:
@@ -170,6 +184,9 @@ message AuthorizationRequest {
 }
 
 message AuthorizationResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ext_authz.v2.AuthorizationResponse";
+
   // When this :ref:`list <envoy_api_msg_type.matcher.v3alpha.ListStringMatcher>` is set,
   // authorization response headers that have a corresponding match will be added to the original
   // client request. Note that coexistent headers will be overridden.
@@ -186,6 +203,9 @@ message AuthorizationResponse {
 
 // Extra settings on a per virtualhost/route/weighted-cluster level.
 message ExtAuthzPerRoute {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ext_authz.v2.ExtAuthzPerRoute";
+
   oneof override {
     option (validate.required) = true;
 
@@ -204,6 +224,9 @@ message ExtAuthzPerRoute {
 // host is used without needing to parse the host header. If CheckSettings is specified in multiple
 // per-filter-configs, they will be merged in order, and the result will be used.
 message CheckSettings {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ext_authz.v2.CheckSettings";
+
   // Context extensions to set on the CheckRequest's
   // :ref:`AttributeContext.context_extensions<envoy_api_field_service.auth.v3alpha.AttributeContext.context_extensions>`
   //
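
A hedged sketch combining the gRPC authorization service with the request-body buffering described in *BufferSettings*; the filter name, cluster name, and byte limit are illustrative assumptions.

.. code-block:: yaml

  # Sketch: gRPC external authorization, buffering up to 8 KiB of request body
  # before the check request is sent.
  - name: envoy.filters.http.ext_authz
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz
      grpc_service:
        envoy_grpc:
          cluster_name: ext_authz_service
      with_request_body:
        max_request_bytes: 8192
        allow_partial_message: true
      failure_mode_allow: false
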
diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto
index 15103bfc15a9..972dc704429e 100644
--- a/api/envoy/config/filter/http/fault/v2/fault.proto
+++ b/api/envoy/config/filter/http/fault/v2/fault.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.fault.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2";
 option java_outer_classname = "FaultProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2";
 
 import "envoy/api/v2/route/route.proto";
 import "envoy/config/filter/fault/v2/fault.proto";
diff --git a/api/envoy/config/filter/http/fault/v3alpha/BUILD b/api/envoy/config/filter/http/fault/v3alpha/BUILD
index 30e4502e2b61..94e816939d66 100644
--- a/api/envoy/config/filter/http/fault/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/fault/v3alpha/BUILD
@@ -8,6 +8,8 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/route:pkg",
         "//envoy/config/filter/fault/v3alpha:pkg",
+        "//envoy/config/filter/http/fault/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/http/fault/v3alpha/fault.proto b/api/envoy/config/filter/http/fault/v3alpha/fault.proto
index c85dfd495c1c..d5045f74dbd7 100644
--- a/api/envoy/config/filter/http/fault/v3alpha/fault.proto
+++ b/api/envoy/config/filter/http/fault/v3alpha/fault.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.fault.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v3alpha";
 option java_outer_classname = "FaultProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v3alpha";
 
 import "envoy/api/v3alpha/route/route.proto";
 import "envoy/config/filter/fault/v3alpha/fault.proto";
@@ -12,6 +12,8 @@ import "envoy/type/v3alpha/percent.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Fault Injection]
@@ -19,6 +21,9 @@ import "validate/validate.proto";
 // [#extension: envoy.filters.http.fault]
 
 message FaultAbort {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.fault.v2.FaultAbort";
+
   reserved 1;
 
   oneof error_type {
@@ -35,6 +40,9 @@ message FaultAbort {
 
 // [#next-free-field: 14]
 message HTTPFault {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.fault.v2.HTTPFault";
+
   // If specified, the filter will inject delays based on the values in the
   // object.
   filter.fault.v3alpha.FaultDelay delay = 1;
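
A hedged sketch of the HTTP fault filter tying together the *FaultDelay* and *FaultAbort* messages above; the percentages, delay, and status code are illustrative.

.. code-block:: yaml

  # Sketch: delay 10% of requests by 2s and abort 1% with HTTP 503.
  - name: envoy.filters.http.fault
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault
      delay:
        fixed_delay: 2s
        percentage: {numerator: 10, denominator: HUNDRED}
      abort:
        http_status: 503
        percentage: {numerator: 1, denominator: HUNDRED}
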
diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto
index 2e533e9f2533..9aa6d4dfe57a 100644
--- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto
+++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1";
 option java_outer_classname = "ConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto b/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto
index 609a80327664..680bfa3dc8e7 100644
--- a/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto
+++ b/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.grpc_stats.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha";
 option java_outer_classname = "ConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/http/gzip/v2/gzip.proto b/api/envoy/config/filter/http/gzip/v2/gzip.proto
index dd78214f4e26..16ba4aa6a590 100644
--- a/api/envoy/config/filter/http/gzip/v2/gzip.proto
+++ b/api/envoy/config/filter/http/gzip/v2/gzip.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.gzip.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2";
 option java_outer_classname = "GzipProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2";
 
 import "google/protobuf/wrappers.proto";
 
diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto
index d59ee83e6a23..4571b880c478 100644
--- a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto
+++ b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.header_to_metadata.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2";
 option java_outer_classname = "HeaderToMetadataProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto
index 055920ff3c15..18f1e79baca5 100644
--- a/api/envoy/config/filter/http/health_check/v2/health_check.proto
+++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.health_check.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2";
 option java_outer_classname = "HealthCheckProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2";
 
 import "envoy/api/v2/route/route.proto";
 import "envoy/type/percent.proto";
diff --git a/api/envoy/config/filter/http/health_check/v3alpha/BUILD b/api/envoy/config/filter/http/health_check/v3alpha/BUILD
index 23743719998f..0b37af71b858 100644
--- a/api/envoy/config/filter/http/health_check/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/health_check/v3alpha/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/route:pkg",
+        "//envoy/config/filter/http/health_check/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto
index 680a3fc89c9a..78b11e730bab 100644
--- a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto
+++ b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.health_check.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v3alpha";
 option java_outer_classname = "HealthCheckProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v3alpha";
 
 import "envoy/api/v3alpha/route/route.proto";
 import "envoy/type/v3alpha/percent.proto";
@@ -12,6 +12,8 @@ import "envoy/type/v3alpha/percent.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Health check]
@@ -20,6 +22,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 6]
 message HealthCheck {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.health_check.v2.HealthCheck";
+
   reserved 2;
 
   // Specifies whether the filter operates in pass through mode or not.
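
A hedged sketch of the non-pass-through mode described above; the filter name, */healthz* path, and header match are illustrative assumptions.

.. code-block:: yaml

  # Sketch: answer /healthz locally instead of proxying it upstream.
  - name: envoy.filters.http.health_check
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck
      pass_through_mode: false
      headers:
      - name: ":path"
        exact_match: "/healthz"
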
diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto
index 8e22c906b228..40dc19260bc6 100644
--- a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto
+++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.ip_tagging.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2";
 option java_outer_classname = "IpTaggingProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2";
 
 import "envoy/api/v2/core/address.proto";
 
diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD b/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD
index 4e89d949ab9d..e9430d25de22 100644
--- a/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/filter/http/ip_tagging/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto
index 81ff557bd380..fc6922d05a44 100644
--- a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto
+++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.filter.http.ip_tagging.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v3alpha";
 option java_outer_classname = "IpTaggingProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: IP tagging]
@@ -15,6 +17,9 @@ import "validate/validate.proto";
 // [#extension: envoy.filters.http.ip_tagging]
 
 message IPTagging {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.ip_tagging.v2.IPTagging";
+
   // The type of requests the filter should apply to. The supported types
   // are internal, external or both. The
   // :ref:`x-forwarded-for<config_http_conn_man_headers_x-forwarded-for_internal_origin>` header is
@@ -34,6 +39,9 @@ message IPTagging {
 
   // Supplies the IP tag name and the IP address subnets.
   message IPTag {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.http.ip_tagging.v2.IPTagging.IPTag";
+
     // Specifies the IP tag name to apply.
     string ip_tag_name = 1;
 
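
A hedged sketch of an *IPTagging* filter using the *IPTag* message above; the filter name, tag name, and subnet are illustrative.

.. code-block:: yaml

  # Sketch: tag internal requests originating from 10.0.0.0/8.
  - name: envoy.filters.http.ip_tagging
    typed_config:
      "@type": type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging
      request_type: INTERNAL
      ip_tags:
      - ip_tag_name: internal_request
        ip_list:
        - {address_prefix: 10.0.0.0, prefix_len: 8}
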
diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
index 50105dad447d..42a9f32d3865 100644
--- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
+++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.jwt_authn.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha";
 option java_outer_classname = "ConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/api/v2/core/http_uri.proto";
diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD b/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD
index 900a0545c66e..c3be36b5be95 100644
--- a/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/BUILD
@@ -8,5 +8,7 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/route:pkg",
+        "//envoy/config/filter/http/jwt_authn/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto
index 4efefb373eae..e4e1f59fc827 100644
--- a/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto
+++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.jwt_authn.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v3alpha";
 option java_outer_classname = "ConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/core/http_uri.proto";
@@ -13,6 +13,8 @@ import "envoy/api/v3alpha/route/route.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/empty.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: JWT Authentication]
@@ -50,6 +52,9 @@ import "validate/validate.proto";
 //
 // [#next-free-field: 10]
 message JwtProvider {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider";
+
   // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued
   // the JWT, usually a URL or an email address.
   //
@@ -188,6 +193,9 @@ message JwtProvider {
 
 // This message specifies how to fetch JWKS from remote and how to cache it.
 message RemoteJwks {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.RemoteJwks";
+
   // The HTTP URI to fetch the JWKS. For example:
   //
   // .. code-block:: yaml
@@ -205,6 +213,9 @@ message RemoteJwks {
 
 // This message specifies a header location to extract JWT token.
 message JwtHeader {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.JwtHeader";
+
   // The HTTP header name.
   string name = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -216,6 +227,9 @@ message JwtHeader {
 
 // Specify a required provider with audiences.
 message ProviderWithAudiences {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.ProviderWithAudiences";
+
   // Specify a required provider name.
   string provider_name = 1;
 
@@ -265,6 +279,9 @@ message ProviderWithAudiences {
 //
 // [#next-free-field: 6]
 message JwtRequirement {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirement";
+
   oneof requires_type {
     // Specify a required provider name.
     string provider_name = 1;
@@ -291,6 +308,9 @@ message JwtRequirement {
 // This message specifies a list of RequiredProvider.
 // Their results are OR-ed; if any one of them passes, the result is passed
 message JwtRequirementOrList {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementOrList";
+
   // Specify a list of JwtRequirement.
   repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];
 }
@@ -298,6 +318,9 @@ message JwtRequirementOrList {
 // This message specifies a list of RequiredProvider.
 // Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails.
 message JwtRequirementAndList {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementAndList";
+
   // Specify a list of JwtRequirement.
   repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];
 }
@@ -324,6 +347,9 @@ message JwtRequirementAndList {
 // In above example, all requests matched the path prefix require jwt authentication
 // from "provider-A".
 message RequirementRule {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.RequirementRule";
+
   // The route matching parameter. Only when the match is satisfied, the "requires" field will
   // apply.
   //
@@ -358,6 +384,9 @@ message RequirementRule {
 // If a filter set "jwt_selector" with "issuer_1" to FilterState for a request,
 // jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify.
 message FilterStateRule {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule";
+
   // The filter state name to retrieve the `Router::StringAccessor` object.
   string name = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -408,6 +437,9 @@ message FilterStateRule {
 //              - provider_name: provider2
 //
 message JwtAuthentication {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.jwt_authn.v2alpha.JwtAuthentication";
+
   // Map of provider names to JwtProviders.
   //
   // .. code-block:: yaml
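The v3alpha proto changes above add an import of udpa/annotations/versioning.proto and stamp each upgraded message with the fully qualified name of its v2 predecessor. A minimal sketch of the pattern, with a hypothetical Example message standing in for the real filter messages:

    syntax = "proto3";

    package envoy.config.filter.http.example.v3alpha;

    import "udpa/annotations/versioning.proto";

    // The annotation records which v2 message this v3alpha message was
    // upgraded from, so tooling can map between API versions.
    message Example {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.filter.http.example.v2alpha.Example";

      string name = 1;
    }

The matching BUILD hunks make this resolve by adding the v2 package (for example //envoy/config/filter/http/jwt_authn/v2alpha:pkg) and @com_github_cncf_udpa//udpa/annotations:pkg to the api_proto_package deps.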
diff --git a/api/envoy/config/filter/http/lua/v2/lua.proto b/api/envoy/config/filter/http/lua/v2/lua.proto
index 1bf16f3c86c7..b1f427cdf3c4 100644
--- a/api/envoy/config/filter/http/lua/v2/lua.proto
+++ b/api/envoy/config/filter/http/lua/v2/lua.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.lua.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2";
 option java_outer_classname = "LuaProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto
index ad0082694a38..2244cfc847dc 100644
--- a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto
+++ b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.original_src.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1";
 option java_outer_classname = "OriginalSrcProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto
index 0b0e1edfd5fa..0ccce68ce6d0 100644
--- a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto
+++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.rate_limit.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2";
 option java_outer_classname = "RateLimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2";
 
 import "envoy/config/ratelimit/v2/rls.proto";
 
diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD b/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD
index bdfffc73a735..f88b85e2bc4f 100644
--- a/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/rate_limit/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/ratelimit/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/filter/http/rate_limit/v2:pkg",
+        "//envoy/config/ratelimit/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto
index 7380d261cf87..3a41cb2786aa 100644
--- a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto
+++ b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.http.rate_limit.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v3alpha";
 option java_outer_classname = "RateLimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v3alpha";
 
 import "envoy/config/ratelimit/v3alpha/rls.proto";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Rate limit]
@@ -18,6 +20,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 8]
 message RateLimit {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.rate_limit.v2.RateLimit";
+
   // The rate limit domain to use when calling the rate limit service.
   string domain = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/config/filter/http/rbac/v2/rbac.proto b/api/envoy/config/filter/http/rbac/v2/rbac.proto
index c497aa2fa645..baebccaf4a6b 100644
--- a/api/envoy/config/filter/http/rbac/v2/rbac.proto
+++ b/api/envoy/config/filter/http/rbac/v2/rbac.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.rbac.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2";
 option java_outer_classname = "RbacProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2";
 
 import "envoy/config/rbac/v2/rbac.proto";
 
diff --git a/api/envoy/config/filter/http/rbac/v3alpha/BUILD b/api/envoy/config/filter/http/rbac/v3alpha/BUILD
index 886af1e0d81b..406ba80429e6 100644
--- a/api/envoy/config/filter/http/rbac/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/rbac/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/rbac/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/filter/http/rbac/v2:pkg",
+        "//envoy/config/rbac/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto
index 994eec6e849c..b5c23c7d4f32 100644
--- a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto
+++ b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.filter.http.rbac.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v3alpha";
 option java_outer_classname = "RbacProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v3alpha";
 
 import "envoy/config/rbac/v3alpha/rbac.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: RBAC]
@@ -16,6 +18,9 @@ import "validate/validate.proto";
 
 // RBAC filter config.
 message RBAC {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.rbac.v2.RBAC";
+
   // Specify the RBAC rules to be applied globally.
   // If absent, no enforcing RBAC policy will be applied.
   config.rbac.v3alpha.RBAC rules = 1;
@@ -27,6 +32,9 @@ message RBAC {
 }
 
 message RBACPerRoute {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.rbac.v2.RBACPerRoute";
+
   reserved 1;
 
   // Override the global configuration of the filter with this new config.
diff --git a/api/envoy/config/filter/http/router/v2/router.proto b/api/envoy/config/filter/http/router/v2/router.proto
index d8a329be25db..db02de41a253 100644
--- a/api/envoy/config/filter/http/router/v2/router.proto
+++ b/api/envoy/config/filter/http/router/v2/router.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.router.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2";
 option java_outer_classname = "RouterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2";
 
 import "envoy/config/filter/accesslog/v2/accesslog.proto";
 
diff --git a/api/envoy/config/filter/http/router/v3alpha/BUILD b/api/envoy/config/filter/http/router/v3alpha/BUILD
index f2a38cd5405c..c60964768673 100644
--- a/api/envoy/config/filter/http/router/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/router/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/filter/accesslog/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/filter/accesslog/v3alpha:pkg",
+        "//envoy/config/filter/http/router/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/http/router/v3alpha/router.proto b/api/envoy/config/filter/http/router/v3alpha/router.proto
index 59c541ccb4d9..6ea99f4256d5 100644
--- a/api/envoy/config/filter/http/router/v3alpha/router.proto
+++ b/api/envoy/config/filter/http/router/v3alpha/router.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.http.router.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.router.v3alpha";
 option java_outer_classname = "RouterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.router.v3alpha";
 
 import "envoy/config/filter/accesslog/v3alpha/accesslog.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Router]
@@ -18,6 +20,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 7]
 message Router {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.router.v2.Router";
+
   // Whether the router generates dynamic cluster statistics. Defaults to
   // true. Can be disabled in high performance scenarios.
   google.protobuf.BoolValue dynamic_stats = 1;
diff --git a/api/envoy/config/filter/http/squash/v2/squash.proto b/api/envoy/config/filter/http/squash/v2/squash.proto
index 8d4f352ef570..a864f4533346 100644
--- a/api/envoy/config/filter/http/squash/v2/squash.proto
+++ b/api/envoy/config/filter/http/squash/v2/squash.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.squash.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2";
 option java_outer_classname = "SquashProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
diff --git a/api/envoy/config/filter/http/tap/v2alpha/tap.proto b/api/envoy/config/filter/http/tap/v2alpha/tap.proto
index 4dc3b33bb31a..247e898af8bb 100644
--- a/api/envoy/config/filter/http/tap/v2alpha/tap.proto
+++ b/api/envoy/config/filter/http/tap/v2alpha/tap.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha";
 
 import "envoy/config/common/tap/v2alpha/common.proto";
 
diff --git a/api/envoy/config/filter/http/tap/v3alpha/BUILD b/api/envoy/config/filter/http/tap/v3alpha/BUILD
index 73167eb9145d..a8b5442dd587 100644
--- a/api/envoy/config/filter/http/tap/v3alpha/BUILD
+++ b/api/envoy/config/filter/http/tap/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/common/tap/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/common/tap/v3alpha:pkg",
+        "//envoy/config/filter/http/tap/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/http/tap/v3alpha/tap.proto b/api/envoy/config/filter/http/tap/v3alpha/tap.proto
index 064307cba0c5..f2a1f2560fad 100644
--- a/api/envoy/config/filter/http/tap/v3alpha/tap.proto
+++ b/api/envoy/config/filter/http/tap/v3alpha/tap.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.filter.http.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v3alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v3alpha";
 
 import "envoy/config/common/tap/v3alpha/common.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Tap]
@@ -16,6 +18,9 @@ import "validate/validate.proto";
 
 // Top level configuration for the tap filter.
 message Tap {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.http.tap.v2alpha.Tap";
+
   // Common configuration for the HTTP tap filter.
   common.tap.v3alpha.CommonExtensionConfig common_config = 1
       [(validate.rules).message = {required: true}];
diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto
index af5ea5a18db4..c88b18541b50 100644
--- a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto
+++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.http.transcoder.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2";
 option java_outer_classname = "TranscoderProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto
index f6aa6b5a0e3f..6259f376f5ed 100644
--- a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto
+++ b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.listener.original_src.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1";
 option java_outer_classname = "OriginalSrcProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto
index d9819c9e8ca8..cec44c94f986 100644
--- a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto
+++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.client_ssl_auth.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2";
 option java_outer_classname = "ClientSslAuthProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2";
 
 import "envoy/api/v2/core/address.proto";
 
diff --git a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD
index 4e89d949ab9d..90b4664eb53c 100644
--- a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/filter/network/client_ssl_auth/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto
index 6bfe225a1496..2a6b6f10e664 100644
--- a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto
+++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.network.client_ssl_auth.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v3alpha";
 option java_outer_classname = "ClientSslAuthProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Client TLS authentication]
@@ -18,6 +20,9 @@ import "validate/validate.proto";
 // [#extension: envoy.filters.network.client_ssl_auth]
 
 message ClientSSLAuth {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.client_ssl_auth.v2.ClientSSLAuth";
+
   // The :ref:`cluster manager <arch_overview_cluster_manager>` cluster that runs
   // the authentication service. The filter will connect to the service every 60s to fetch the list
   // of principals. The service must support the expected :ref:`REST API
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto
index 8a5ede0b6703..16308e28e31c 100644
--- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto
+++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.dubbo_proxy.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1";
 option java_outer_classname = "DubboProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1";
 
 import "envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto";
 
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto
index 9f76a007443b..b38089d985d6 100644
--- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto
+++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.dubbo_proxy.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1";
 option java_outer_classname = "RouteProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1";
 
 import "envoy/api/v2/route/route.proto";
 import "envoy/type/matcher/string.proto";
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/BUILD
index a5d71eaba79d..9517625bffc9 100644
--- a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/BUILD
@@ -7,7 +7,9 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/route:pkg",
+        "//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg",
         "//envoy/type/matcher/v3alpha:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto
index ccb885d33006..ad1a3d5ad536 100644
--- a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto
+++ b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.network.dubbo_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v3alpha";
 option java_outer_classname = "DubboProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v3alpha";
 
 import "envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto";
 
 import "google/protobuf/any.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Dubbo Proxy]
@@ -30,6 +32,9 @@ enum SerializationType {
 
 // [#next-free-field: 6]
 message DubboProxy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy";
+
   // The human readable prefix to use when emitting statistics.
   string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -51,6 +56,9 @@ message DubboProxy {
 
 // DubboFilter configures a Dubbo filter.
 message DubboFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter";
+
   // The name of the filter to instantiate. The name must match a supported
   // filter.
   string name = 1 [(validate.rules).string = {min_bytes: 1}];
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto
index 066e6158b86d..eef19b6adce5 100644
--- a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto
+++ b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.network.dubbo_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v3alpha";
 option java_outer_classname = "RouteProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v3alpha";
 
 import "envoy/api/v3alpha/route/route.proto";
 import "envoy/type/matcher/v3alpha/string.proto";
 import "envoy/type/v3alpha/range.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Dubbo Proxy Route Configuration]
@@ -17,6 +19,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 6]
 message RouteConfiguration {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteConfiguration";
+
   // The name of the route configuration. Reserved for future use in asynchronous route discovery.
   string name = 1;
 
@@ -35,6 +40,9 @@ message RouteConfiguration {
 }
 
 message Route {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.dubbo_proxy.v2alpha1.Route";
+
   // Route matching parameters.
   RouteMatch match = 1 [(validate.rules).message = {required: true}];
 
@@ -43,6 +51,9 @@ message Route {
 }
 
 message RouteMatch {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteMatch";
+
   // Method level routing matching.
   MethodMatch method = 1;
 
@@ -54,6 +65,9 @@ message RouteMatch {
 }
 
 message RouteAction {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteAction";
+
   oneof cluster_specifier {
     option (validate.required) = true;
 
@@ -69,8 +83,14 @@ message RouteAction {
 }
 
 message MethodMatch {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch";
+
   // The parameter matching type.
   message ParameterMatchSpecifier {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch.ParameterMatchSpecifier";
+
     oneof parameter_match_specifier {
       // If specified, header match will be performed based on the value of the header.
       string exact_match = 3;
diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto
index 795607fcf226..3690bdc89697 100644
--- a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto
+++ b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.ext_authz.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2";
 option java_outer_classname = "ExtAuthzProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2";
 
 import "envoy/api/v2/core/grpc_service.proto";
 
diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD b/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD
index 4e89d949ab9d..2f529c2c8ecd 100644
--- a/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/ext_authz/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/filter/network/ext_authz/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto
index 373034886c07..215b9a598903 100644
--- a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto
+++ b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.filter.network.ext_authz.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v3alpha";
 option java_outer_classname = "ExtAuthzProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v3alpha";
 
 import "envoy/api/v3alpha/core/grpc_service.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Network External Authorization ]
@@ -20,6 +22,9 @@ import "validate/validate.proto";
 // :ref:`CheckRequest <envoy_api_msg_service.auth.v3alpha.CheckRequest>`.
 // A failed check will cause this filter to close the TCP connection.
 message ExtAuthz {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.ext_authz.v2.ExtAuthz";
+
   // The prefix to use when emitting statistics.
   string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD
index a9f3622df0d0..3984f8d77a8d 100644
--- a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD
+++ b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD
@@ -10,5 +10,6 @@ api_proto_package(
         "//envoy/api/v2/core:pkg",
         "//envoy/config/filter/accesslog/v2:pkg",
         "//envoy/type:pkg",
+        "//envoy/type/tracing/v2:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
index 5a826725e07f..f818506993fc 100644
--- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
+++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.http_connection_manager.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2";
 option java_outer_classname = "HttpConnectionManagerProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2";
 
 import "envoy/api/v2/core/config_source.proto";
 import "envoy/api/v2/core/protocol.proto";
@@ -12,6 +12,7 @@ import "envoy/api/v2/rds.proto";
 import "envoy/api/v2/srds.proto";
 import "envoy/config/filter/accesslog/v2/accesslog.proto";
 import "envoy/type/percent.proto";
+import "envoy/type/tracing/v2/custom_tag.proto";
 
 import "google/protobuf/any.proto";
 import "google/protobuf/duration.proto";
@@ -84,7 +85,7 @@ message HttpConnectionManager {
     ALWAYS_FORWARD_ONLY = 4;
   }
 
-  // [#next-free-field: 8]
+  // [#next-free-field: 9]
   message Tracing {
     enum OperationName {
       // The HTTP listener is used for ingress/incoming requests.
@@ -101,12 +102,16 @@ message HttpConnectionManager {
     // .. attention::
     //  This field has been deprecated in favor of `traffic_direction`.
     OperationName operation_name = 1
-        [(validate.rules).enum = {defined_only: true}, deprecated = true];
+        [deprecated = true, (validate.rules).enum = {defined_only: true}];
 
     // A list of header names used to create tags for the active span. The header name is used to
     // populate the tag name, and the header value is used to populate the tag value. The tag is
     // created if the specified header name is present in the request's headers.
-    repeated string request_headers_for_tags = 2;
+    //
+    // .. attention::
+    //  This field has been deprecated in favor of :ref:`custom_tags
+    //  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>`.
+    repeated string request_headers_for_tags = 2 [deprecated = true];
 
     // Target percentage of requests managed by this HTTP connection manager that will be force
     // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
@@ -141,6 +146,9 @@ message HttpConnectionManager {
     // truncate lengthy request paths to meet the needs of a tracing backend.
     // Default: 256
     google.protobuf.UInt32Value max_path_tag_length = 7;
+
+    // A list of custom tags, each with a unique tag name, used to create tags for the active span.

+    repeated type.tracing.v2.CustomTag custom_tags = 8;
   }
 
   message InternalAddressConfig {
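In the v2 HttpConnectionManager.Tracing hunk above, request_headers_for_tags gains a deprecated marker and an attention note, and a new repeated custom_tags field is added at the next free field number (8), with the [#next-free-field] annotation bumped from 8 to 9. A minimal sketch of the resulting field shape, assuming only these two fields matter for the illustration and using a hypothetical package:

    syntax = "proto3";

    package envoy.config.filter.network.example.v2;

    import "envoy/type/tracing/v2/custom_tag.proto";

    message Tracing {
      // Deprecated header-based tags: still accepted, but flagged so that
      // generated docs and tooling point users at custom_tags instead.
      repeated string request_headers_for_tags = 2 [deprecated = true];

      // Replacement: one CustomTag per unique tag name on the active span.
      repeated envoy.type.tracing.v2.CustomTag custom_tags = 8;
    }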
diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD b/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD
index e0509485cd77..20cc473c3b94 100644
--- a/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/BUILD
@@ -9,6 +9,9 @@ api_proto_package(
         "//envoy/api/v3alpha:pkg",
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/config/filter/accesslog/v3alpha:pkg",
+        "//envoy/config/filter/network/http_connection_manager/v2:pkg",
+        "//envoy/type/tracing/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto
index 0cdef53dc548..f8880d1de10d 100644
--- a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto
+++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto
@@ -2,15 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.network.http_connection_manager.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v3alpha";
 option java_outer_classname = "HttpConnectionManagerProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v3alpha";
 
 import "envoy/api/v3alpha/core/config_source.proto";
 import "envoy/api/v3alpha/core/protocol.proto";
 import "envoy/api/v3alpha/rds.proto";
 import "envoy/api/v3alpha/srds.proto";
 import "envoy/config/filter/accesslog/v3alpha/accesslog.proto";
+import "envoy/type/tracing/v2/custom_tag.proto";
 import "envoy/type/v3alpha/percent.proto";
 
 import "google/protobuf/any.proto";
@@ -18,6 +19,8 @@ import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: HTTP connection manager]
@@ -26,6 +29,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 36]
 message HttpConnectionManager {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager";
+
   enum CodecType {
     // For every new connection, the connection manager will determine which
     // codec to use. This mode supports both ALPN for TLS listeners as well as
@@ -84,8 +90,11 @@ message HttpConnectionManager {
     ALWAYS_FORWARD_ONLY = 4;
   }
 
-  // [#next-free-field: 8]
+  // [#next-free-field: 9]
   message Tracing {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing";
+
     enum OperationName {
       // The HTTP listener is used for ingress/incoming requests.
       INGRESS = 0;
@@ -94,14 +103,9 @@ message HttpConnectionManager {
       EGRESS = 1;
     }
 
-    reserved 1;
+    reserved 1, 2;
 
-    reserved "operation_name";
-
-    // A list of header names used to create tags for the active span. The header name is used to
-    // populate the tag name, and the header value is used to populate the tag value. The tag is
-    // created if the specified header name is present in the request's headers.
-    repeated string request_headers_for_tags = 2;
+    reserved "operation_name", "request_headers_for_tags";
 
     // Target percentage of requests managed by this HTTP connection manager that will be force
     // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
@@ -136,15 +140,26 @@ message HttpConnectionManager {
     // truncate lengthy request paths to meet the needs of a tracing backend.
     // Default: 256
     google.protobuf.UInt32Value max_path_tag_length = 7;
+
+    // A list of custom tags, each with a unique tag name, used to create tags for the active span.
+    repeated type.tracing.v2.CustomTag custom_tags = 8;
   }
 
   message InternalAddressConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager."
+        "InternalAddressConfig";
+
     // Whether unix socket addresses should be considered internal.
     bool unix_sockets = 1;
   }
 
   // [#next-free-field: 7]
   message SetCurrentClientCertDetails {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager."
+        "SetCurrentClientCertDetails";
+
     reserved 2;
 
     // Whether to forward the subject of the client cert. Defaults to false.
@@ -183,6 +198,10 @@ message HttpConnectionManager {
   //    The current implementation of upgrade headers does not work with HTTP/2
   //    upstreams.
   message UpgradeConfig {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager."
+        "UpgradeConfig";
+
     // The case-insensitive name of this upgrade, e.g. "websocket".
     // For each upgrade type present in upgrade_configs, requests with
     // Upgrade: [upgrade_type]
@@ -458,6 +477,9 @@ message HttpConnectionManager {
 }
 
 message Rds {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.http_connection_manager.v2.Rds";
+
   // Configuration source specifier for RDS.
   api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];
 
@@ -470,12 +492,18 @@ message Rds {
 
 // This message is used to work around the limitations with 'oneof' and repeated fields.
 message ScopedRouteConfigurationsList {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.http_connection_manager.v2.ScopedRouteConfigurationsList";
+
   repeated api.v3alpha.ScopedRouteConfiguration scoped_route_configurations = 1
       [(validate.rules).repeated = {min_items: 1}];
 }
 
 // [#next-free-field: 6]
 message ScopedRoutes {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes";
+
   // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These
   // keys are matched against a set of
   // :ref:`Key<envoy_api_msg_api.v3alpha.ScopedRouteConfiguration.Key>` objects assembled from
@@ -488,8 +516,15 @@ message ScopedRoutes {
   // :ref:`RouteConfiguration<envoy_api_msg_api.v3alpha.RouteConfiguration>`) to use for the
   // request.
   message ScopeKeyBuilder {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder";
+
     // Specifies the mechanism for constructing key fragments which are composed into scope keys.
     message FragmentBuilder {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
+          "FragmentBuilder";
+
       // Specifies how the value of a header should be extracted.
       // The following example maps the structure of a header to the fields in this message.
       //
@@ -508,8 +543,16 @@ message ScopedRoutes {
       //
       //    Each 'a=b' key-value pair constitutes an 'element' of the header field.
       message HeaderValueExtractor {
+        option (udpa.annotations.versioning).previous_message_type =
+            "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
+            "FragmentBuilder.HeaderValueExtractor";
+
         // Specifies a header field's key value pair to match on.
         message KvElement {
+          option (udpa.annotations.versioning).previous_message_type =
+              "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
+              "FragmentBuilder.HeaderValueExtractor.KvElement";
+
           // The separator between key and value (e.g., '=' separates 'k=v;...').
           // If an element is an empty string, the element is ignored.
           // If an element contains no separator, the whole element is parsed as key and the
@@ -584,12 +627,18 @@ message ScopedRoutes {
 }
 
 message ScopedRds {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.http_connection_manager.v2.ScopedRds";
+
   // Configuration source specifier for scoped RDS.
   api.v3alpha.core.ConfigSource scoped_rds_config_source = 1
       [(validate.rules).message = {required: true}];
 }
 
 message HttpFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.http_connection_manager.v2.HttpFilter";
+
   reserved 3, 2;
 
   reserved "config";
diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto
index aee2936e99a2..2a1fa76ef729 100644
--- a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto
+++ b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.mongo_proxy.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2";
 option java_outer_classname = "MongoProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2";
 
 import "envoy/config/filter/fault/v2/fault.proto";
 
diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD
index 0ef883d1e76a..8b697bdd43b4 100644
--- a/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/filter/fault/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/filter/fault/v3alpha:pkg",
+        "//envoy/config/filter/network/mongo_proxy/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto
index cee198dbc12e..35be47ad393b 100644
--- a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto
+++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.filter.network.mongo_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v3alpha";
 option java_outer_classname = "MongoProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v3alpha";
 
 import "envoy/config/filter/fault/v3alpha/fault.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Mongo proxy]
@@ -15,6 +17,9 @@ import "validate/validate.proto";
 // [#extension: envoy.filters.network.mongo_proxy]
 
 message MongoProxy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.mongo_proxy.v2.MongoProxy";
+
   // The human readable prefix to use when emitting :ref:`statistics
   // <config_network_filters_mongo_proxy_stats>`.
   string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
diff --git a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto
index 4665bbfa7ba0..cf4e28422ce7 100644
--- a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto
+++ b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.mysql_proxy.v1alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1";
 option java_outer_classname = "MysqlProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto
index d65797ea5126..d9f4d11c7e21 100644
--- a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto
+++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.rate_limit.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2";
 option java_outer_classname = "RateLimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2";
 
 import "envoy/api/v2/ratelimit/ratelimit.proto";
 import "envoy/config/ratelimit/v2/rls.proto";
diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD b/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD
index 7d49a9f51282..e4ed664716fb 100644
--- a/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/rate_limit/v3alpha/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/ratelimit:pkg",
+        "//envoy/config/filter/network/rate_limit/v2:pkg",
         "//envoy/config/ratelimit/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto
index d16715013198..50c36c1fdf2b 100644
--- a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto
+++ b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.config.filter.network.rate_limit.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v3alpha";
 option java_outer_classname = "RateLimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v3alpha";
 
 import "envoy/api/v3alpha/ratelimit/ratelimit.proto";
 import "envoy/config/ratelimit/v3alpha/rls.proto";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Rate limit]
@@ -19,6 +21,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 7]
 message RateLimit {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.rate_limit.v2.RateLimit";
+
   // The prefix to use when emitting :ref:`statistics <config_network_filters_rate_limit_stats>`.
   string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/config/filter/network/rbac/v2/rbac.proto b/api/envoy/config/filter/network/rbac/v2/rbac.proto
index 4d6cb00c62d7..de96f28cf668 100644
--- a/api/envoy/config/filter/network/rbac/v2/rbac.proto
+++ b/api/envoy/config/filter/network/rbac/v2/rbac.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.rbac.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2";
 option java_outer_classname = "RbacProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2";
 
 import "envoy/config/rbac/v2/rbac.proto";
 
diff --git a/api/envoy/config/filter/network/rbac/v3alpha/BUILD b/api/envoy/config/filter/network/rbac/v3alpha/BUILD
index 886af1e0d81b..b119e7563310 100644
--- a/api/envoy/config/filter/network/rbac/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/rbac/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/rbac/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/filter/network/rbac/v2:pkg",
+        "//envoy/config/rbac/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto
index febc45062f90..4dbdec4d8c62 100644
--- a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto
+++ b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.filter.network.rbac.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v3alpha";
 option java_outer_classname = "RbacProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v3alpha";
 
 import "envoy/config/rbac/v3alpha/rbac.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: RBAC]
@@ -19,6 +21,9 @@ import "validate/validate.proto";
 // Header should not be used in rules/shadow_rules in RBAC network filter as
 // this information is only available in :ref:`RBAC http filter <config_http_filters_rbac>`.
 message RBAC {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.rbac.v2.RBAC";
+
   enum EnforcementType {
     // Apply RBAC policies when the first byte of data arrives on the connection.
     ONE_TIME_ON_FIRST_BYTE = 0;
diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto
index da1acdb75c58..9dbf9af696ed 100644
--- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto
+++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.redis_proxy.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2";
 option java_outer_classname = "RedisProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2";
 
 import "envoy/api/v2/core/base.proto";
 
diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD
index 4e89d949ab9d..c95bb7faa07b 100644
--- a/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/filter/network/redis_proxy/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto
index 9c04dbacdc3f..26ea0f6c4d9a 100644
--- a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto
+++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.config.filter.network.redis_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v3alpha";
 option java_outer_classname = "RedisProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Redis Proxy]
@@ -19,9 +21,15 @@ import "validate/validate.proto";
 
 // [#next-free-field: 7]
 message RedisProxy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.redis_proxy.v2.RedisProxy";
+
   // Redis connection pool settings.
   // [#next-free-field: 9]
   message ConnPoolSettings {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings";
+
     // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently
     // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data
     // because replication is asynchronous and requires some delay. You need to ensure that your
@@ -110,12 +118,22 @@ message RedisProxy {
   }
 
   message PrefixRoutes {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes";
+
     message Route {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route";
+
       // The router is capable of shadowing traffic from one cluster to another. The current
       // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to
       // respond before returning the response from the primary cluster. All normal statistics are
       // collected for the shadow cluster making this feature useful for testing.
       message RequestMirrorPolicy {
+        option (udpa.annotations.versioning).previous_message_type =
+            "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route."
+            "RequestMirrorPolicy";
+
         // Specifies the cluster that requests will be mirrored to. The cluster must
         // exist in the cluster manager configuration.
         string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
@@ -219,6 +237,9 @@ message RedisProxy {
 // :ref:`extension_protocol_options<envoy_api_field_api.v3alpha.Cluster.extension_protocol_options>`,
 // keyed by the name `envoy.redis_proxy`.
 message RedisProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions";
+
   // Upstream server password as defined by the `requirepass` directive
   // <https://redis.io/topics/config>`_ in the server's configuration file.
   api.v3alpha.core.DataSource auth_password = 1;
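For deeply nested messages such as RedisProxy.PrefixRoutes.Route.RequestMirrorPolicy, the previous_message_type value no longer fits on one line, so the patch splits it across adjacent string literals, which protoc concatenates into a single string. A minimal sketch with hypothetical Outer and Inner names:

    syntax = "proto3";

    package envoy.config.filter.network.example.v3alpha;

    import "udpa/annotations/versioning.proto";

    message Outer {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.filter.network.example.v2.Outer";

      message Inner {
        // Adjacent string literals are concatenated, so this is equivalent to
        // "envoy.config.filter.network.example.v2.Outer.Inner".
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.config.filter.network.example.v2.Outer."
            "Inner";

        string cluster = 1;
      }
    }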
diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto
index c958ca680733..eacd7a7bcdad 100644
--- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto
+++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.tcp_proxy.v2;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2";
 option java_outer_classname = "TcpProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD
index bd4f949a67eb..06a5bb290366 100644
--- a/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/BUILD
@@ -8,6 +8,8 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/config/filter/accesslog/v3alpha:pkg",
+        "//envoy/config/filter/network/tcp_proxy/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto
index d56c4c41facb..d78bc780f0da 100644
--- a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto
+++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.tcp_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v3alpha";
 option java_outer_classname = "TcpProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/core/base.proto";
@@ -14,6 +14,8 @@ import "envoy/type/v3alpha/hash_policy.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: TCP Proxy]
@@ -22,12 +24,20 @@ import "validate/validate.proto";
 
 // [#next-free-field: 12]
 message TcpProxy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.tcp_proxy.v2.TcpProxy";
 
   // Allows for specification of multiple upstream clusters along with weights
   // that indicate the percentage of traffic to be forwarded to each cluster.
   // The router selects an upstream cluster based on these weights.
   message WeightedCluster {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster";
+
     message ClusterWeight {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight";
+
       // Name of the upstream cluster.
       string name = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto
index 8f85fab5c582..d2983a131e2a 100644
--- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto
+++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.thrift_proxy.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1";
 option java_outer_classname = "RouteProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/api/v2/route/route.proto";
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto
index a857592373c4..c99d2eea0384 100644
--- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto
+++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.thrift_proxy.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1";
 option java_outer_classname = "ThriftProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1";
 
 import "envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto";
 
diff --git a/api/envoy/config/filter/network/thrift_proxy/v3alpha/BUILD b/api/envoy/config/filter/network/thrift_proxy/v3alpha/BUILD
index 900a0545c66e..3b50876a6330 100644
--- a/api/envoy/config/filter/network/thrift_proxy/v3alpha/BUILD
+++ b/api/envoy/config/filter/network/thrift_proxy/v3alpha/BUILD
@@ -8,5 +8,7 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/route:pkg",
+        "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto b/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto
index f7dc521959f2..afe789a8b7bd 100644
--- a/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto
+++ b/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto
@@ -2,21 +2,26 @@ syntax = "proto3";
 
 package envoy.config.filter.network.thrift_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v3alpha";
 option java_outer_classname = "RouteProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/route/route.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Thrift Proxy Route Configuration]
 // Thrift Proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.
 
 message RouteConfiguration {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteConfiguration";
+
   // The name of the route configuration. Reserved for future use in asynchronous route discovery.
   string name = 1;
 
@@ -26,6 +31,9 @@ message RouteConfiguration {
 }
 
 message Route {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.Route";
+
   // Route matching parameters.
   RouteMatch match = 1 [(validate.rules).message = {required: true}];
 
@@ -34,6 +42,9 @@ message Route {
 }
 
 message RouteMatch {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch";
+
   oneof match_specifier {
     option (validate.required) = true;
 
@@ -71,6 +82,9 @@ message RouteMatch {
 
 // [#next-free-field: 7]
 message RouteAction {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction";
+
   oneof cluster_specifier {
     option (validate.required) = true;
 
@@ -113,7 +127,13 @@ message RouteAction {
 // percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster
 // based on these weights.
 message WeightedCluster {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster";
+
   message ClusterWeight {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight";
+
     // Name of the upstream cluster.
     string name = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto
index 9c5f5d3966b8..73c5288500a5 100644
--- a/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto
+++ b/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.config.filter.network.thrift_proxy.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v3alpha";
 option java_outer_classname = "ThriftProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v3alpha";
 
 import "envoy/config/filter/network/thrift_proxy/v3alpha/route.proto";
 
 import "google/protobuf/any.proto";
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Thrift Proxy]
@@ -57,6 +59,9 @@ enum ProtocolType {
 
 // [#next-free-field: 6]
 message ThriftProxy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy";
+
   // Supplies the type of transport that the Thrift proxy should use. Defaults to
   // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_config.filter.network.thrift_proxy.v3alpha.TransportType.AUTO_TRANSPORT>`.
   TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];
@@ -80,6 +85,9 @@ message ThriftProxy {
 
 // ThriftFilter configures a Thrift filter.
 message ThriftFilter {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftFilter";
+
   reserved 2;
 
   reserved "config";
@@ -104,6 +112,9 @@ message ThriftFilter {
 // :ref:`extension_protocol_options<envoy_api_field_api.v3alpha.Cluster.extension_protocol_options>`,
 // keyed by the name `envoy.filters.network.thrift_proxy`.
 message ThriftProtocolOptions {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProtocolOptions";
+
   // Supplies the type of transport that the Thrift proxy should use for upstream connections.
   // Selecting
   // :ref:`AUTO_TRANSPORT<envoy_api_enum_value_config.filter.network.thrift_proxy.v3alpha.TransportType.AUTO_TRANSPORT>`,
diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto
index cfe2a1075d86..11cb6a2ffb00 100644
--- a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto
+++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.network.zookeeper_proxy.v1alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1";
 option java_outer_classname = "ZookeeperProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1";
 
 import "google/protobuf/wrappers.proto";
 
diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto
index bf3cb83dcae2..7a057fa49c84 100644
--- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto
+++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.thrift.rate_limit.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1";
 option java_outer_classname = "RateLimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1";
 
 import "envoy/config/ratelimit/v2/rls.proto";
 
diff --git a/api/envoy/config/filter/thrift/rate_limit/v3alpha/BUILD b/api/envoy/config/filter/thrift/rate_limit/v3alpha/BUILD
index bdfffc73a735..48be9ec51841 100644
--- a/api/envoy/config/filter/thrift/rate_limit/v3alpha/BUILD
+++ b/api/envoy/config/filter/thrift/rate_limit/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/config/ratelimit/v3alpha:pkg"],
+    deps = [
+        "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg",
+        "//envoy/config/ratelimit/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto
index 0365f343fa84..c111454e1da9 100644
--- a/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto
+++ b/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.filter.thrift.rate_limit.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v3alpha";
 option java_outer_classname = "RateLimitProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v3alpha";
 
 import "envoy/config/ratelimit/v3alpha/rls.proto";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Rate limit]
@@ -18,6 +20,9 @@ import "validate/validate.proto";
 
 // [#next-free-field: 6]
 message RateLimit {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit";
+
   // The rate limit domain to use in the rate limit service request.
   string domain = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto
index e5dd085569bc..8661675ce364 100644
--- a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto
+++ b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.filter.thrift.router.v2alpha1;
 
+option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1";
 option java_outer_classname = "RouterProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1";
 
 // [#protodoc-title: Router]
 // Thrift router :ref:`configuration overview <config_thrift_filters_router>`.
diff --git a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto
index 68603f38d327..4a8b079f286d 100644
--- a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto
+++ b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.config.filter.udp.udp_proxy.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha";
 option java_outer_classname = "UdpProxyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha";
 
 import "google/protobuf/duration.proto";
 
 import "validate/validate.proto";
 
-// TODO(mattklein123): docs
+// [#protodoc-title: UDP proxy]
+// UDP proxy :ref:`configuration overview <config_udp_listener_filters_udp_proxy>`.
+// [#extension: envoy.filters.udp_listener.udp_proxy]
 
 // Configuration for the UDP proxy filter.
 message UdpProxyConfig {
diff --git a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto
index 353843f28c8c..7a2566285863 100644
--- a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto
+++ b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.grpc_credential.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha";
 option java_outer_classname = "AwsIamProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto
index 3b814ee850e9..b7fccf2f3fbe 100644
--- a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto
+++ b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.grpc_credential.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha";
 option java_outer_classname = "FileBasedMetadataProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha";
 
 import "envoy/api/v2/core/base.proto";
 
diff --git a/api/envoy/config/grpc_credential/v3alpha/BUILD b/api/envoy/config/grpc_credential/v3alpha/BUILD
index 4e89d949ab9d..3888939e2e6b 100644
--- a/api/envoy/config/grpc_credential/v3alpha/BUILD
+++ b/api/envoy/config/grpc_credential/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/grpc_credential/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto
index 84c60b6f7adc..7841e3d7ac18 100644
--- a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto
+++ b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto
@@ -2,9 +2,11 @@ syntax = "proto3";
 
 package envoy.config.grpc_credential.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha";
 option java_outer_classname = "AwsIamProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -13,6 +15,9 @@ import "validate/validate.proto";
 // [#extension: envoy.grpc_credentials.aws_iam]
 
 message AwsIamConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.grpc_credential.v2alpha.AwsIamConfig";
+
   // The `service namespace
   // <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_
   // of the Grpc endpoint.
diff --git a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto
index 27cc077fd9b7..a1166235c910 100644
--- a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto
+++ b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto
@@ -2,17 +2,22 @@ syntax = "proto3";
 
 package envoy.config.grpc_credential.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha";
 option java_outer_classname = "FileBasedMetadataProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Grpc Credentials File Based Metadata]
 // Configuration for File Based Metadata Grpc Credentials Plugin
 // [#extension: envoy.grpc_credentials.file_based_metadata]
 
 message FileBasedMetadataConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig";
+
   // Location or inline data of the secret to use for authentication of the Google gRPC
   // connection; this secret will be attached to a header of the gRPC connection.
   api.v3alpha.core.DataSource secret_data = 1;
diff --git a/api/envoy/config/health_checker/redis/v2/redis.proto b/api/envoy/config/health_checker/redis/v2/redis.proto
index 5df588427daa..3f7e15d80d02 100644
--- a/api/envoy/config/health_checker/redis/v2/redis.proto
+++ b/api/envoy/config/health_checker/redis/v2/redis.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.health_checker.redis.v2;
 
+option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2";
 option java_outer_classname = "RedisProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2";
 
 // [#protodoc-title: Redis]
 // Redis health checker :ref:`configuration overview <config_health_checkers_redis>`.
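Note: for the files in this change that only touch Java options, the edit is a pure reordering that moves java_package ahead of the other options; a minimal sketch with a hypothetical package:

    syntax = "proto3";

    package envoy.config.example.v2;

    // java_package is now declared before java_outer_classname and java_multiple_files.
    option java_package = "io.envoyproxy.envoy.config.example.v2";
    option java_outer_classname = "ExampleProto";
    option java_multiple_files = true;
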
diff --git a/api/envoy/config/listener/v2/api_listener.proto b/api/envoy/config/listener/v2/api_listener.proto
index 0c2253596e43..5a497cd17d9d 100644
--- a/api/envoy/config/listener/v2/api_listener.proto
+++ b/api/envoy/config/listener/v2/api_listener.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.listener.v2;
 
+option java_package = "io.envoyproxy.envoy.config.listener.v2";
 option java_outer_classname = "ApiListenerProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.listener.v2";
 
 import "google/protobuf/any.proto";
 
diff --git a/api/envoy/config/metrics/v2/metrics_service.proto b/api/envoy/config/metrics/v2/metrics_service.proto
index 5fd045e47135..d23de6c1a01a 100644
--- a/api/envoy/config/metrics/v2/metrics_service.proto
+++ b/api/envoy/config/metrics/v2/metrics_service.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.metrics.v2;
 
+option java_package = "io.envoyproxy.envoy.config.metrics.v2";
 option java_outer_classname = "MetricsServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.metrics.v2";
 
 import "envoy/api/v2/core/grpc_service.proto";
 
diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto
index b46301b2783a..716c9c2eb9c2 100644
--- a/api/envoy/config/metrics/v2/stats.proto
+++ b/api/envoy/config/metrics/v2/stats.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.metrics.v2;
 
+option java_package = "io.envoyproxy.envoy.config.metrics.v2";
 option java_outer_classname = "StatsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.metrics.v2";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/type/matcher/string.proto";
diff --git a/api/envoy/config/metrics/v3alpha/BUILD b/api/envoy/config/metrics/v3alpha/BUILD
index 1a4a60504df0..90fc98bea25b 100644
--- a/api/envoy/config/metrics/v3alpha/BUILD
+++ b/api/envoy/config/metrics/v3alpha/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/metrics/v2:pkg",
         "//envoy/type/matcher/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/metrics/v3alpha/metrics_service.proto b/api/envoy/config/metrics/v3alpha/metrics_service.proto
index 2a3fbfb85e9c..d389be5bc68f 100644
--- a/api/envoy/config/metrics/v3alpha/metrics_service.proto
+++ b/api/envoy/config/metrics/v3alpha/metrics_service.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.config.metrics.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.metrics.v3alpha";
 option java_outer_classname = "MetricsServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.metrics.v3alpha";
 
 import "envoy/api/v3alpha/core/grpc_service.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Metrics service]
@@ -17,6 +19,9 @@ import "validate/validate.proto";
 // create Metrics Service.
 // [#extension: envoy.stat_sinks.metrics_service]
 message MetricsServiceConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.metrics.v2.MetricsServiceConfig";
+
   // The upstream gRPC cluster that hosts the metrics service.
   api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];
 }
diff --git a/api/envoy/config/metrics/v3alpha/stats.proto b/api/envoy/config/metrics/v3alpha/stats.proto
index 12e15390e772..5d69dd122060 100644
--- a/api/envoy/config/metrics/v3alpha/stats.proto
+++ b/api/envoy/config/metrics/v3alpha/stats.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.metrics.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.metrics.v3alpha";
 option java_outer_classname = "StatsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.metrics.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/type/matcher/v3alpha/string.proto";
@@ -13,6 +13,8 @@ import "google/protobuf/any.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Stats]
@@ -20,6 +22,8 @@ import "validate/validate.proto";
 
 // Configuration for pluggable stats sinks.
 message StatsSink {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsSink";
+
   reserved 2;
 
   reserved "config";
@@ -44,6 +48,9 @@ message StatsSink {
 
 // Statistics configuration such as tagging.
 message StatsConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.metrics.v2.StatsConfig";
+
   // Each stat name is iteratively processed through these tag specifiers.
   // When a tag is matched, the first capture group is removed from the name so
   // later :ref:`TagSpecifiers <envoy_api_msg_config.metrics.v3alpha.TagSpecifier>` cannot match
@@ -142,6 +149,9 @@ message StatsMatcher {
   //   }
   //
 
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.metrics.v2.StatsMatcher";
+
   oneof stats_matcher {
     option (validate.required) = true;
 
@@ -164,6 +174,9 @@ message StatsMatcher {
 // unconditionally set if a fixed value, otherwise it will only be set if one
 // or more capture groups in the regex match.
 message TagSpecifier {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.metrics.v2.TagSpecifier";
+
   // Attaches an identifier to the tag values to identify the tag being in the
   // sink. Envoy has a set of default names and regexes to extract dynamic
   // portions of existing stats, which can be found in :repo:`well_known_names.h
@@ -245,6 +258,8 @@ message TagSpecifier {
 // tagged metrics.
 // [#extension: envoy.stat_sinks.statsd]
 message StatsdSink {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsdSink";
+
   oneof statsd_specifier {
     option (validate.required) = true;
 
@@ -294,6 +309,9 @@ message StatsdSink {
 // <envoy_api_msg_config.metrics.v3alpha.StatsConfig>`.
 // [#extension: envoy.stat_sinks.dog_statsd]
 message DogStatsdSink {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.metrics.v2.DogStatsdSink";
+
   reserved 2;
 
   oneof dog_statsd_specifier {
@@ -320,6 +338,9 @@ message DogStatsdSink {
 // Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.
 // [#extension: envoy.stat_sinks.hystrix]
 message HystrixSink {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.metrics.v2.HystrixSink";
+
   // The number of buckets the rolling statistical window is divided into.
   //
   // Each time the sink is flushed, all relevant Envoy statistics are sampled and
diff --git a/api/envoy/config/overload/v2alpha/overload.proto b/api/envoy/config/overload/v2alpha/overload.proto
index 889b3683454a..427245354193 100644
--- a/api/envoy/config/overload/v2alpha/overload.proto
+++ b/api/envoy/config/overload/v2alpha/overload.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.overload.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.overload.v2alpha";
 option java_outer_classname = "OverloadProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.overload.v2alpha";
 
 import "google/protobuf/any.proto";
 import "google/protobuf/duration.proto";
diff --git a/api/envoy/config/overload/v3alpha/BUILD b/api/envoy/config/overload/v3alpha/BUILD
index 5dc095ade27a..075af0ad55f5 100644
--- a/api/envoy/config/overload/v3alpha/BUILD
+++ b/api/envoy/config/overload/v3alpha/BUILD
@@ -4,4 +4,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 
 licenses(["notice"])  # Apache 2
 
-api_proto_package()
+api_proto_package(
+    deps = [
+        "//envoy/config/overload/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
+)
diff --git a/api/envoy/config/overload/v3alpha/overload.proto b/api/envoy/config/overload/v3alpha/overload.proto
index 0cc5c720240c..7e52a31ce24c 100644
--- a/api/envoy/config/overload/v3alpha/overload.proto
+++ b/api/envoy/config/overload/v3alpha/overload.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.config.overload.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.overload.v3alpha";
 option java_outer_classname = "OverloadProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.overload.v3alpha";
 
 import "google/protobuf/any.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Overload Manager]
@@ -20,6 +22,9 @@ import "validate/validate.proto";
 // when triggers related to those resources fire.
 
 message ResourceMonitor {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.overload.v2alpha.ResourceMonitor";
+
   reserved 2;
 
   reserved "config";
@@ -40,12 +45,18 @@ message ResourceMonitor {
 }
 
 message ThresholdTrigger {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.overload.v2alpha.ThresholdTrigger";
+
   // If the resource pressure is greater than or equal to this value, the trigger
   // will fire.
   double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];
 }
 
 message Trigger {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.overload.v2alpha.Trigger";
+
   // The name of the resource this is a trigger for.
   string name = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -57,6 +68,9 @@ message Trigger {
 }
 
 message OverloadAction {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.overload.v2alpha.OverloadAction";
+
   // The name of the overload action. This is just a well-known string that listeners can
   // use for registering callbacks. Custom overload actions should be named using reverse
   // DNS to ensure uniqueness.
@@ -69,6 +83,9 @@ message OverloadAction {
 }
 
 message OverloadManager {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.overload.v2alpha.OverloadManager";
+
   // The interval for refreshing resource usage.
   google.protobuf.Duration refresh_interval = 1;
 
diff --git a/api/envoy/config/ratelimit/v2/rls.proto b/api/envoy/config/ratelimit/v2/rls.proto
index 184b3ec081e1..618ecf7c718a 100644
--- a/api/envoy/config/ratelimit/v2/rls.proto
+++ b/api/envoy/config/ratelimit/v2/rls.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.ratelimit.v2;
 
+option java_package = "io.envoyproxy.envoy.config.ratelimit.v2";
 option java_outer_classname = "RlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.ratelimit.v2";
 
 import "envoy/api/v2/core/grpc_service.proto";
 
diff --git a/api/envoy/config/ratelimit/v3alpha/BUILD b/api/envoy/config/ratelimit/v3alpha/BUILD
index 4e89d949ab9d..4b7fbe183c43 100644
--- a/api/envoy/config/ratelimit/v3alpha/BUILD
+++ b/api/envoy/config/ratelimit/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/ratelimit/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/config/ratelimit/v3alpha/rls.proto b/api/envoy/config/ratelimit/v3alpha/rls.proto
index be651d40e440..73da57d1df8a 100644
--- a/api/envoy/config/ratelimit/v3alpha/rls.proto
+++ b/api/envoy/config/ratelimit/v3alpha/rls.proto
@@ -2,18 +2,23 @@ syntax = "proto3";
 
 package envoy.config.ratelimit.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.ratelimit.v3alpha";
 option java_outer_classname = "RlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.ratelimit.v3alpha";
 
 import "envoy/api/v3alpha/core/grpc_service.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Rate limit service]
 
 // Rate limit :ref:`configuration overview <config_rate_limit_service>`.
 message RateLimitServiceConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.ratelimit.v2.RateLimitServiceConfig";
+
   reserved 1, 3;
 
   // Specifies the gRPC service that hosts the rate limit service. The client
diff --git a/api/envoy/config/rbac/v2/rbac.proto b/api/envoy/config/rbac/v2/rbac.proto
index 90dfb07bb1da..adf2b320cdb9 100644
--- a/api/envoy/config/rbac/v2/rbac.proto
+++ b/api/envoy/config/rbac/v2/rbac.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.rbac.v2;
 
+option java_package = "io.envoyproxy.envoy.config.rbac.v2";
 option java_outer_classname = "RbacProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.rbac.v2";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/api/v2/route/route.proto";
diff --git a/api/envoy/config/rbac/v3alpha/BUILD b/api/envoy/config/rbac/v3alpha/BUILD
index a0aa5e55c246..d41458951c47 100644
--- a/api/envoy/config/rbac/v3alpha/BUILD
+++ b/api/envoy/config/rbac/v3alpha/BUILD
@@ -8,7 +8,9 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/route:pkg",
+        "//envoy/config/rbac/v2:pkg",
         "//envoy/type/matcher/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
         "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto",
     ],
 )
diff --git a/api/envoy/config/rbac/v3alpha/rbac.proto b/api/envoy/config/rbac/v3alpha/rbac.proto
index 398b5f099f72..ed704d65f7d6 100644
--- a/api/envoy/config/rbac/v3alpha/rbac.proto
+++ b/api/envoy/config/rbac/v3alpha/rbac.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.rbac.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.rbac.v3alpha";
 option java_outer_classname = "RbacProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.rbac.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/route/route.proto";
@@ -13,6 +13,8 @@ import "envoy/type/matcher/v3alpha/string.proto";
 
 import "google/api/expr/v1alpha1/syntax.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Role Based Access Control (RBAC)]
@@ -57,6 +59,8 @@ import "validate/validate.proto";
 //         - any: true
 //
 message RBAC {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.RBAC";
+
   // Should we do safe-list or block-list style access control?
   enum Action {
     // The policies grant access to principals. The rest is denied. This is safe-list style
@@ -82,6 +86,8 @@ message RBAC {
 // and only if at least one of its permissions match the action taking place AND at least one of its
 // principals match the downstream AND the condition is true if specified.
 message Policy {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy";
+
   // Required. The set of permissions that define a role. Each permission is matched with OR
   // semantics. To match all actions for this policy, a single Permission with the `any` field set
   // to true should be used.
@@ -101,9 +107,14 @@ message Policy {
 // Permission defines an action (or actions) that a principal can take.
 // [#next-free-field: 10]
 message Permission {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission";
+
   // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context,
   // each are applied with the associated behavior.
   message Set {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.rbac.v2.Permission.Set";
+
     repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}];
   }
 
@@ -163,14 +174,22 @@ message Permission {
 // Principal defines an identity or a group of identities for a downstream subject.
 // [#next-free-field: 9]
 message Principal {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal";
+
   // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context,
   // each are applied with the associated behavior.
   message Set {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.rbac.v2.Principal.Set";
+
     repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}];
   }
 
   // Authentication attributes for a downstream.
   message Authenticated {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.rbac.v2.Principal.Authenticated";
+
     reserved 1;
 
     // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the
diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto
index 2f9b035f574b..42a946f43166 100644
--- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto
+++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.resource_monitor.fixed_heap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha";
 option java_outer_classname = "FixedHeapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto
index e31c6f5f328b..12fd97a541b8 100644
--- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto
+++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.resource_monitor.injected_resource.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha";
 option java_outer_classname = "InjectedResourceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto b/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto
index ee92a2e49f05..c4df166e69cd 100644
--- a/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto
+++ b/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.retry.previous_priorities;
 
+option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities";
 option java_outer_classname = "PreviousPrioritiesConfigProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities";
 
 // A retry host selector that attempts to spread retries between priorities, even if certain
 // priorities would not normally be attempted due to higher priorities being available.
diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto
index c9eec1db7ec4..b419ae72f71e 100644
--- a/api/envoy/config/trace/v2/trace.proto
+++ b/api/envoy/config/trace/v2/trace.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.trace.v2;
 
+option java_package = "io.envoyproxy.envoy.config.trace.v2";
 option java_outer_classname = "TraceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.trace.v2";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/api/v2/core/grpc_service.proto";
diff --git a/api/envoy/config/trace/v3alpha/BUILD b/api/envoy/config/trace/v3alpha/BUILD
index 6fb1ebd57dbc..ce97eccf9dea 100644
--- a/api/envoy/config/trace/v3alpha/BUILD
+++ b/api/envoy/config/trace/v3alpha/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"])  # Apache 2
 api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/config/trace/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
         "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto",
     ],
 )
diff --git a/api/envoy/config/trace/v3alpha/trace.proto b/api/envoy/config/trace/v3alpha/trace.proto
index f586d15b83e2..9827738a0d16 100644
--- a/api/envoy/config/trace/v3alpha/trace.proto
+++ b/api/envoy/config/trace/v3alpha/trace.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.trace.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.trace.v3alpha";
 option java_outer_classname = "TraceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.trace.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/core/grpc_service.proto";
@@ -14,6 +14,7 @@ import "google/protobuf/struct.proto";
 import "google/protobuf/wrappers.proto";
 
 import "opencensus/proto/trace/v1/trace_config.proto";
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -26,7 +27,12 @@ import "validate/validate.proto";
 // <envoy_api_field_config.bootstrap.v3alpha.Bootstrap.tracing>` field. Envoy may support other
 // tracers in the future, but right now the HTTP tracer is the only one supported.
 message Tracing {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.Tracing";
+
   message Http {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.config.trace.v2.Tracing.Http";
+
     reserved 2;
 
     reserved "config";
@@ -64,6 +70,9 @@ message Tracing {
 // Configuration for the LightStep tracer.
 // [#extension: envoy.tracers.lightstep]
 message LightstepConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.trace.v2.LightstepConfig";
+
   // The cluster manager cluster that hosts the LightStep collectors.
   string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -76,6 +85,8 @@ message LightstepConfig {
 // [#extension: envoy.tracers.zipkin]
 // [#next-free-field: 6]
 message ZipkinConfig {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.ZipkinConfig";
+
   // Available Zipkin collector endpoint versions.
   enum CollectorEndpointVersion {
     // Zipkin API v1, JSON over HTTP.
@@ -125,6 +136,9 @@ message ZipkinConfig {
 // <https://github.com/opentracing/opentracing-cpp>`_.
 // [#extension: envoy.tracers.dynamic_ot]
 message DynamicOtConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.trace.v2.DynamicOtConfig";
+
   // Dynamic library implementing the `OpenTracing API
   // <https://github.com/opentracing/opentracing-cpp>`_.
   string library = 1 [(validate.rules).string = {min_bytes: 1}];
@@ -137,6 +151,9 @@ message DynamicOtConfig {
 // Configuration for the Datadog tracer.
 // [#extension: envoy.tracers.datadog]
 message DatadogConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.trace.v2.DatadogConfig";
+
   // The cluster to use for submitting traces to the Datadog agent.
   string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
 
@@ -148,6 +165,9 @@ message DatadogConfig {
 // [#next-free-field: 13]
 // [#extension: envoy.tracers.opencensus]
 message OpenCensusConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.trace.v2.OpenCensusConfig";
+
   enum TraceContext {
     // No-op default, no trace context is utilized.
     NONE = 0;
@@ -213,6 +233,8 @@ message OpenCensusConfig {
 // [#not-implemented-hide:]
 // Configuration for AWS X-Ray tracer.
 message XRayConfig {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.XRayConfig";
+
   // The endpoint of the X-Ray Daemon where the spans will be sent. The daemon listens on
   // localhost:2000 by default, so the default value is 127.0.0.1:2000.
   string daemon_endpoint = 1 [(validate.rules).string = {min_bytes: 1}];
@@ -226,6 +248,9 @@ message XRayConfig {
 
 // Configuration structure.
 message TraceServiceConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.trace.v2.TraceServiceConfig";
+
   // The upstream gRPC cluster that hosts the metrics service.
   api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];
 }
diff --git a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto
index 668facfc61dc..6141595acac3 100644
--- a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto
+++ b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.transport_socket.alts.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha";
 option java_outer_classname = "AltsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto
index 418116336572..8f3a51ff4489 100644
--- a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto
+++ b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.config.transport_socket.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/config/common/tap/v2alpha/common.proto";
diff --git a/api/envoy/config/transport_socket/tap/v3alpha/BUILD b/api/envoy/config/transport_socket/tap/v3alpha/BUILD
index e582af3881c0..1c789c4ac58c 100644
--- a/api/envoy/config/transport_socket/tap/v3alpha/BUILD
+++ b/api/envoy/config/transport_socket/tap/v3alpha/BUILD
@@ -8,5 +8,7 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/config/common/tap/v3alpha:pkg",
+        "//envoy/config/transport_socket/tap/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto
index 6a6fd972bd4b..56648883b801 100644
--- a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto
+++ b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto
@@ -2,13 +2,15 @@ syntax = "proto3";
 
 package envoy.config.transport_socket.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v3alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/config/common/tap/v3alpha/common.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Tap]
@@ -17,6 +19,9 @@ import "validate/validate.proto";
 // Configuration for tap transport socket. This wraps another transport socket, providing the
 // ability to interpose and record in plain text any traffic that is surfaced to Envoy.
 message Tap {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.config.transport_socket.tap.v2alpha.Tap";
+
   // Common configuration for the tap transport socket.
   common.tap.v3alpha.CommonExtensionConfig common_config = 1
       [(validate.rules).message = {required: true}];
diff --git a/api/envoy/data/accesslog/v2/accesslog.proto b/api/envoy/data/accesslog/v2/accesslog.proto
index 00a2e9ccab18..5d2c75d7a1d3 100644
--- a/api/envoy/data/accesslog/v2/accesslog.proto
+++ b/api/envoy/data/accesslog/v2/accesslog.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.accesslog.v2;
 
+option java_package = "io.envoyproxy.envoy.data.accesslog.v2";
 option java_outer_classname = "AccesslogProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.accesslog.v2";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/data/accesslog/v3alpha/BUILD b/api/envoy/data/accesslog/v3alpha/BUILD
index 4e89d949ab9d..92881925708c 100644
--- a/api/envoy/data/accesslog/v3alpha/BUILD
+++ b/api/envoy/data/accesslog/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/data/accesslog/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/data/accesslog/v3alpha/accesslog.proto b/api/envoy/data/accesslog/v3alpha/accesslog.proto
index f32307ddaf42..68980cb845ef 100644
--- a/api/envoy/data/accesslog/v3alpha/accesslog.proto
+++ b/api/envoy/data/accesslog/v3alpha/accesslog.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.accesslog.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.data.accesslog.v3alpha";
 option java_outer_classname = "AccesslogProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.accesslog.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/core/base.proto";
@@ -14,6 +14,8 @@ import "google/protobuf/duration.proto";
 import "google/protobuf/timestamp.proto";
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: gRPC access logs]
@@ -28,6 +30,9 @@ import "validate/validate.proto";
 // in their name.
 
 message TCPAccessLogEntry {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.TCPAccessLogEntry";
+
   // Common properties shared by all Envoy access logs.
   AccessLogCommon common_properties = 1;
 
@@ -36,6 +41,9 @@ message TCPAccessLogEntry {
 }
 
 message HTTPAccessLogEntry {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.HTTPAccessLogEntry";
+
   // HTTP version
   enum HTTPVersion {
     PROTOCOL_UNSPECIFIED = 0;
@@ -59,6 +67,9 @@ message HTTPAccessLogEntry {
 
 // Defines fields for a connection
 message ConnectionProperties {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.ConnectionProperties";
+
   // Number of bytes received from downstream.
   uint64 received_bytes = 1;
 
@@ -69,6 +80,9 @@ message ConnectionProperties {
 // Defines fields that are shared by all Envoy access logs.
 // [#next-free-field: 22]
 message AccessLogCommon {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.AccessLogCommon";
+
   // [#not-implemented-hide:]
   // This field indicates the rate at which this log entry was sampled.
   // Valid range is (0.0, 1.0].
@@ -173,7 +187,13 @@ message AccessLogCommon {
 // Flags indicating occurrences during request/response processing.
 // [#next-free-field: 20]
 message ResponseFlags {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.ResponseFlags";
+
   message Unauthorized {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.data.accesslog.v2.ResponseFlags.Unauthorized";
+
     // Reasons why the request was unauthorized
     enum Reason {
       REASON_UNSPECIFIED = 0;
@@ -247,6 +267,9 @@ message ResponseFlags {
 // Properties of a negotiated TLS connection.
 // [#next-free-field: 7]
 message TLSProperties {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.TLSProperties";
+
   enum TLSVersion {
     VERSION_UNSPECIFIED = 0;
     TLSv1 = 1;
@@ -256,7 +279,13 @@ message TLSProperties {
   }
 
   message CertificateProperties {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.data.accesslog.v2.TLSProperties.CertificateProperties";
+
     message SubjectAltName {
+      option (udpa.annotations.versioning).previous_message_type =
+          "envoy.data.accesslog.v2.TLSProperties.CertificateProperties.SubjectAltName";
+
       oneof san {
         string uri = 1;
 
@@ -297,6 +326,9 @@ message TLSProperties {
 
 // [#next-free-field: 14]
 message HTTPRequestProperties {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.HTTPRequestProperties";
+
   // The request method (RFC 7231/2616).
   api.v3alpha.core.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}];
 
@@ -350,6 +382,9 @@ message HTTPRequestProperties {
 
 // [#next-free-field: 7]
 message HTTPResponseProperties {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.accesslog.v2.HTTPResponseProperties";
+
   // The HTTP response code returned by Envoy.
   google.protobuf.UInt32Value response_code = 1;
 
diff --git a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto
index db3fa1ee9442..80afadd92b82 100644
--- a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto
+++ b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.cluster.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.data.cluster.v2alpha";
 option java_outer_classname = "OutlierDetectionEventProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.cluster.v2alpha";
 
 import "google/protobuf/timestamp.proto";
 import "google/protobuf/wrappers.proto";
diff --git a/api/envoy/data/core/v2alpha/health_check_event.proto b/api/envoy/data/core/v2alpha/health_check_event.proto
index f384f9f4bc86..f5a4533e6155 100644
--- a/api/envoy/data/core/v2alpha/health_check_event.proto
+++ b/api/envoy/data/core/v2alpha/health_check_event.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.core.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.data.core.v2alpha";
 option java_outer_classname = "HealthCheckEventProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.core.v2alpha";
 
 import "envoy/api/v2/core/address.proto";
 
diff --git a/api/envoy/data/core/v3alpha/BUILD b/api/envoy/data/core/v3alpha/BUILD
index 4e89d949ab9d..d84e9a54c8ed 100644
--- a/api/envoy/data/core/v3alpha/BUILD
+++ b/api/envoy/data/core/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/data/core/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/data/core/v3alpha/health_check_event.proto b/api/envoy/data/core/v3alpha/health_check_event.proto
index 886e96df6783..3ae7b40d199a 100644
--- a/api/envoy/data/core/v3alpha/health_check_event.proto
+++ b/api/envoy/data/core/v3alpha/health_check_event.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.data.core.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.data.core.v3alpha";
 option java_outer_classname = "HealthCheckEventProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.core.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 
 import "google/protobuf/timestamp.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Health check logging events]
@@ -30,6 +32,9 @@ enum HealthCheckerType {
 
 // [#next-free-field: 10]
 message HealthCheckEvent {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.core.v2alpha.HealthCheckEvent";
+
   HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}];
 
   api.v3alpha.core.Address host = 2;
@@ -60,11 +65,17 @@ message HealthCheckEvent {
 }
 
 message HealthCheckEjectUnhealthy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.core.v2alpha.HealthCheckEjectUnhealthy";
+
   // The type of failure that caused this ejection.
   HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];
 }
 
 message HealthCheckAddHealthy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.core.v2alpha.HealthCheckAddHealthy";
+
   // Whether this addition is the result of the first ever health check on a host, in which case
   // the configured :ref:`healthy threshold
   // <envoy_api_field_api.v3alpha.core.HealthCheck.healthy_threshold>` is bypassed and the host is
@@ -73,6 +84,9 @@ message HealthCheckAddHealthy {
 }
 
 message HealthCheckFailure {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.core.v2alpha.HealthCheckFailure";
+
   // The type of failure that caused this event.
   HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}];
 
@@ -81,7 +95,11 @@ message HealthCheckFailure {
 }
 
 message DegradedHealthyHost {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.core.v2alpha.DegradedHealthyHost";
 }
 
 message NoLongerDegradedHost {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.core.v2alpha.NoLongerDegradedHost";
 }
diff --git a/api/envoy/data/tap/v2alpha/common.proto b/api/envoy/data/tap/v2alpha/common.proto
index d913311842bb..93e33a2ea8d2 100644
--- a/api/envoy/data/tap/v2alpha/common.proto
+++ b/api/envoy/data/tap/v2alpha/common.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 option java_outer_classname = "CommonProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 
 // [#protodoc-title: Tap common data]
 
diff --git a/api/envoy/data/tap/v2alpha/http.proto b/api/envoy/data/tap/v2alpha/http.proto
index d42e6ad54839..849538b892d9 100644
--- a/api/envoy/data/tap/v2alpha/http.proto
+++ b/api/envoy/data/tap/v2alpha/http.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 option java_outer_classname = "HttpProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/data/tap/v2alpha/common.proto";
diff --git a/api/envoy/data/tap/v2alpha/transport.proto b/api/envoy/data/tap/v2alpha/transport.proto
index a34518f30a36..7d2f2c4f1a41 100644
--- a/api/envoy/data/tap/v2alpha/transport.proto
+++ b/api/envoy/data/tap/v2alpha/transport.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 option java_outer_classname = "TransportProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/data/tap/v2alpha/common.proto";
diff --git a/api/envoy/data/tap/v2alpha/wrapper.proto b/api/envoy/data/tap/v2alpha/wrapper.proto
index 597b22f014df..3445e67e9d28 100644
--- a/api/envoy/data/tap/v2alpha/wrapper.proto
+++ b/api/envoy/data/tap/v2alpha/wrapper.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.data.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 option java_outer_classname = "WrapperProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
 
 import "envoy/data/tap/v2alpha/http.proto";
 import "envoy/data/tap/v2alpha/transport.proto";
diff --git a/api/envoy/data/tap/v3alpha/BUILD b/api/envoy/data/tap/v3alpha/BUILD
index 4e89d949ab9d..84c57e6541ad 100644
--- a/api/envoy/data/tap/v3alpha/BUILD
+++ b/api/envoy/data/tap/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/api/v3alpha/core:pkg"],
+    deps = [
+        "//envoy/api/v3alpha/core:pkg",
+        "//envoy/data/tap/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/data/tap/v3alpha/common.proto b/api/envoy/data/tap/v3alpha/common.proto
index 21da336e7485..148e48e2b717 100644
--- a/api/envoy/data/tap/v3alpha/common.proto
+++ b/api/envoy/data/tap/v3alpha/common.proto
@@ -2,15 +2,19 @@ syntax = "proto3";
 
 package envoy.data.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
 option java_outer_classname = "CommonProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 // [#protodoc-title: Tap common data]
 
 // Wrapper for tapped body data. This includes HTTP request/response body, transport socket received
 // and transmitted data, etc.
 message Body {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Body";
+
   oneof body_type {
     // Body data as bytes. By default, tap body data will be present in this field, as the proto
     // `bytes` type can contain any valid byte.
diff --git a/api/envoy/data/tap/v3alpha/http.proto b/api/envoy/data/tap/v3alpha/http.proto
index 61329f03a207..f09b2c148f5e 100644
--- a/api/envoy/data/tap/v3alpha/http.proto
+++ b/api/envoy/data/tap/v3alpha/http.proto
@@ -2,19 +2,27 @@ syntax = "proto3";
 
 package envoy.data.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
 option java_outer_classname = "HttpProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/data/tap/v3alpha/common.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: HTTP tap data]
 
 // A fully buffered HTTP trace message.
 message HttpBufferedTrace {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.tap.v2alpha.HttpBufferedTrace";
+
   // HTTP message wrapper.
   message Message {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.data.tap.v2alpha.HttpBufferedTrace.Message";
+
     // Message headers.
     repeated api.v3alpha.core.HeaderValue headers = 1;
 
@@ -35,6 +43,9 @@ message HttpBufferedTrace {
 // A streamed HTTP trace segment. Multiple segments make up a full trace.
 // [#next-free-field: 8]
 message HttpStreamedTraceSegment {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.tap.v2alpha.HttpStreamedTraceSegment";
+
   // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
   // for long term stable uniqueness.
   uint64 trace_id = 1;
diff --git a/api/envoy/data/tap/v3alpha/transport.proto b/api/envoy/data/tap/v3alpha/transport.proto
index cf2612c1ce47..2fe147e573f7 100644
--- a/api/envoy/data/tap/v3alpha/transport.proto
+++ b/api/envoy/data/tap/v3alpha/transport.proto
@@ -2,21 +2,25 @@ syntax = "proto3";
 
 package envoy.data.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
 option java_outer_classname = "TransportProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/data/tap/v3alpha/common.proto";
 
 import "google/protobuf/timestamp.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Transport tap data]
 // Trace format for the tap transport socket extension. This dumps plain text read/write
 // sequences on a socket.
 
 // Connection properties.
 message Connection {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Connection";
+
   // Local address.
   api.v3alpha.core.Address local_address = 2;
 
@@ -26,16 +30,24 @@ message Connection {
 
 // Event in a socket trace.
 message SocketEvent {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.SocketEvent";
+
   // Data read by Envoy from the transport socket.
   message Read {
     // TODO(htuch): Half-close for reads.
 
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.data.tap.v2alpha.SocketEvent.Read";
+
     // Binary data read.
     Body data = 1;
   }
 
   // Data written by Envoy to the transport socket.
   message Write {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.data.tap.v2alpha.SocketEvent.Write";
+
     // Binary data written.
     Body data = 1;
 
@@ -46,6 +58,9 @@ message SocketEvent {
   // The connection was closed.
   message Closed {
     // TODO(mattklein123): Close event type.
+
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.data.tap.v2alpha.SocketEvent.Closed";
   }
 
   // Timestamp for event.
@@ -64,6 +79,9 @@ message SocketEvent {
 // Sequence of read/write events that constitute a buffered trace on a socket.
 // [#next-free-field: 6]
 message SocketBufferedTrace {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.tap.v2alpha.SocketBufferedTrace";
+
   // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
   // for long term stable uniqueness. Matches connection IDs used in Envoy logs.
   uint64 trace_id = 1;
@@ -85,6 +103,9 @@ message SocketBufferedTrace {
 
 // A streamed socket trace segment. Multiple segments make up a full trace.
 message SocketStreamedTraceSegment {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.tap.v2alpha.SocketStreamedTraceSegment";
+
   // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
   // for long term stable uniqueness. Matches connection IDs used in Envoy logs.
   uint64 trace_id = 1;
diff --git a/api/envoy/data/tap/v3alpha/wrapper.proto b/api/envoy/data/tap/v3alpha/wrapper.proto
index de5cdc0f15b5..bcad27e6f7a2 100644
--- a/api/envoy/data/tap/v3alpha/wrapper.proto
+++ b/api/envoy/data/tap/v3alpha/wrapper.proto
@@ -2,13 +2,15 @@ syntax = "proto3";
 
 package envoy.data.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
 option java_outer_classname = "WrapperProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.data.tap.v3alpha";
 
 import "envoy/data/tap/v3alpha/http.proto";
 import "envoy/data/tap/v3alpha/transport.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Tap data wrappers]
@@ -16,6 +18,9 @@ import "validate/validate.proto";
 // Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for
 // sending traces over gRPC APIs or more easily persisting binary messages to files.
 message TraceWrapper {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.data.tap.v2alpha.TraceWrapper";
+
   oneof trace {
     option (validate.required) = true;
 
diff --git a/api/envoy/service/accesslog/v2/als.proto b/api/envoy/service/accesslog/v2/als.proto
index e3022af83bc9..fffe668b9835 100644
--- a/api/envoy/service/accesslog/v2/als.proto
+++ b/api/envoy/service/accesslog/v2/als.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.accesslog.v2;
 
+option java_package = "io.envoyproxy.envoy.service.accesslog.v2";
 option java_outer_classname = "AlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.accesslog.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/accesslog/v3alpha/BUILD b/api/envoy/service/accesslog/v3alpha/BUILD
index 7a7083bcbd5c..c778bda9d4fd 100644
--- a/api/envoy/service/accesslog/v3alpha/BUILD
+++ b/api/envoy/service/accesslog/v3alpha/BUILD
@@ -9,5 +9,7 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/data/accesslog/v3alpha:pkg",
+        "//envoy/service/accesslog/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/service/accesslog/v3alpha/als.proto b/api/envoy/service/accesslog/v3alpha/als.proto
index aafabec3e17e..113e1adff07c 100644
--- a/api/envoy/service/accesslog/v3alpha/als.proto
+++ b/api/envoy/service/accesslog/v3alpha/als.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.service.accesslog.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.accesslog.v3alpha";
 option java_outer_classname = "AlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.accesslog.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/data/accesslog/v3alpha/accesslog.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: gRPC Access Log Service (ALS)]
@@ -28,12 +30,20 @@ service AccessLogService {
 
 // Empty response for the StreamAccessLogs API. Will never be sent. See below.
 message StreamAccessLogsResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.accesslog.v2.StreamAccessLogsResponse";
 }
 
 // Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream
 // access logs without ever expecting a response.
 message StreamAccessLogsMessage {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.accesslog.v2.StreamAccessLogsMessage";
+
   message Identifier {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier";
+
     // The node sending the access log messages over the stream.
     api.v3alpha.core.Node node = 1 [(validate.rules).message = {required: true}];
 
@@ -44,12 +54,18 @@ message StreamAccessLogsMessage {
 
   // Wrapper for batches of HTTP access log entries.
   message HTTPAccessLogEntries {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries";
+
     repeated data.accesslog.v3alpha.HTTPAccessLogEntry log_entry = 1
         [(validate.rules).repeated = {min_items: 1}];
   }
 
   // Wrapper for batches of TCP access log entries.
   message TCPAccessLogEntries {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries";
+
     repeated data.accesslog.v3alpha.TCPAccessLogEntry log_entry = 1
         [(validate.rules).repeated = {min_items: 1}];
   }
diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto
index a694b13763f0..82b2065f80f4 100644
--- a/api/envoy/service/auth/v2/attribute_context.proto
+++ b/api/envoy/service/auth/v2/attribute_context.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.auth.v2;
 
+option java_package = "io.envoyproxy.envoy.service.auth.v2";
 option java_outer_classname = "AttributeContextProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.auth.v2";
 
 import "envoy/api/v2/core/address.proto";
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto
index 7b2144cf8b8c..b9da8d6dad25 100644
--- a/api/envoy/service/auth/v2/external_auth.proto
+++ b/api/envoy/service/auth/v2/external_auth.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.auth.v2;
 
+option java_package = "io.envoyproxy.envoy.service.auth.v2";
 option java_outer_classname = "ExternalAuthProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.auth.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/auth/v3alpha/BUILD b/api/envoy/service/auth/v3alpha/BUILD
index aee908d7d475..e78836008e08 100644
--- a/api/envoy/service/auth/v3alpha/BUILD
+++ b/api/envoy/service/auth/v3alpha/BUILD
@@ -8,6 +8,8 @@ api_proto_package(
     has_services = True,
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/service/auth/v2:pkg",
         "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/service/auth/v3alpha/attribute_context.proto b/api/envoy/service/auth/v3alpha/attribute_context.proto
index 22e059598b93..e50f6d6381e6 100644
--- a/api/envoy/service/auth/v3alpha/attribute_context.proto
+++ b/api/envoy/service/auth/v3alpha/attribute_context.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.service.auth.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.auth.v3alpha";
 option java_outer_classname = "AttributeContextProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.auth.v3alpha";
 
 import "envoy/api/v3alpha/core/address.proto";
 import "envoy/api/v3alpha/core/base.proto";
 
 import "google/protobuf/timestamp.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#protodoc-title: Attribute Context ]
 
 // See :ref:`network filter configuration overview <config_network_filters_ext_authz>`
@@ -35,12 +37,18 @@ import "google/protobuf/timestamp.proto";
 // - which return values are copied into request_headers]
 // [#next-free-field: 12]
 message AttributeContext {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.auth.v2.AttributeContext";
+
   // This message defines attributes for a node that handles a network request.
   // The node can be either a service or an application that sends, forwards,
   // or receives the request. Service peers should fill in the `service`,
   // `principal`, and `labels` as appropriate.
   // [#next-free-field: 6]
   message Peer {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.auth.v2.AttributeContext.Peer";
+
     // The address of the peer, this is typically the IP address.
     // It can also be UDS path, or others.
     api.v3alpha.core.Address address = 1;
@@ -75,6 +83,9 @@ message AttributeContext {
 
   // Represents a network request, such as an HTTP request.
   message Request {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.auth.v2.AttributeContext.Request";
+
     // The timestamp when the proxy receives the first byte of the request.
     google.protobuf.Timestamp time = 1;
 
@@ -86,6 +97,9 @@ message AttributeContext {
   // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
   // [#next-free-field: 12]
   message HttpRequest {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.auth.v2.AttributeContext.HttpRequest";
+
     // The unique ID for a request, which can be propagated to downstream
     // systems. The ID should have low probability of collision
     // within a single day for a specific service.
diff --git a/api/envoy/service/auth/v3alpha/external_auth.proto b/api/envoy/service/auth/v3alpha/external_auth.proto
index 7de354566fb2..8daf10f01a44 100644
--- a/api/envoy/service/auth/v3alpha/external_auth.proto
+++ b/api/envoy/service/auth/v3alpha/external_auth.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.auth.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.auth.v3alpha";
 option java_outer_classname = "ExternalAuthProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.auth.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
@@ -13,6 +13,8 @@ import "envoy/type/v3alpha/http_status.proto";
 
 import "google/rpc/status.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Authorization Service ]
@@ -30,12 +32,17 @@ service Authorization {
 }
 
 message CheckRequest {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckRequest";
+
   // The request attributes.
   AttributeContext attributes = 1;
 }
 
 // HTTP attributes for a denied response.
 message DeniedHttpResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.auth.v2.DeniedHttpResponse";
+
   // This field allows the authorization service to send a HTTP response status
   // code to the downstream client other than 403 (Forbidden).
   type.v3alpha.HttpStatus status = 1 [(validate.rules).message = {required: true}];
@@ -51,6 +58,9 @@ message DeniedHttpResponse {
 
 // HTTP attributes for an ok response.
 message OkHttpResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.auth.v2.OkHttpResponse";
+
   // HTTP entity headers in addition to the original request headers. This allows the authorization
   // service to append, to add or to override headers from the original request before
   // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`,
@@ -62,6 +72,9 @@ message OkHttpResponse {
 
 // Intended for gRPC and Network Authorization servers `only`.
 message CheckResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.auth.v2.CheckResponse";
+
   // Status `OK` allows the request. Any other status indicates the request should be denied.
   google.rpc.Status status = 1;
 
diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto
index 63b129069ede..40945e1b4d10 100644
--- a/api/envoy/service/discovery/v2/ads.proto
+++ b/api/envoy/service/discovery/v2/ads.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.discovery.v2;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_outer_classname = "AdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/discovery.proto";
diff --git a/api/envoy/service/discovery/v2/hds.proto b/api/envoy/service/discovery/v2/hds.proto
index fe96f06a8977..1045d865042c 100644
--- a/api/envoy/service/discovery/v2/hds.proto
+++ b/api/envoy/service/discovery/v2/hds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.discovery.v2;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_outer_classname = "HdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
@@ -62,10 +62,8 @@ service HealthDiscoveryService {
   // request/response. Should we add an identifier to the HealthCheckSpecifier
   // to bind with the response?
   rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {
-    option (google.api.http) = {
-      post: "/v2/discovery:health_check"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:health_check";
+    option (google.api.http).body = "*";
   }
 }
 
diff --git a/api/envoy/service/discovery/v2/rtds.proto b/api/envoy/service/discovery/v2/rtds.proto
index 4dfe6f2a6645..eebf9e20e8a7 100644
--- a/api/envoy/service/discovery/v2/rtds.proto
+++ b/api/envoy/service/discovery/v2/rtds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.discovery.v2;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_outer_classname = "RtdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/discovery.proto";
@@ -27,10 +27,8 @@ service RuntimeDiscoveryService {
   }
 
   rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:runtime"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:runtime";
+    option (google.api.http).body = "*";
   }
 }
 
diff --git a/api/envoy/service/discovery/v2/sds.proto b/api/envoy/service/discovery/v2/sds.proto
index 3a61947ed2bb..df517f06eed2 100644
--- a/api/envoy/service/discovery/v2/sds.proto
+++ b/api/envoy/service/discovery/v2/sds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.discovery.v2;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_outer_classname = "SdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/discovery.proto";
@@ -20,10 +20,8 @@ service SecretDiscoveryService {
   }
 
   rpc FetchSecrets(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:secrets"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:secrets";
+    option (google.api.http).body = "*";
   }
 }
 
diff --git a/api/envoy/service/discovery/v3alpha/BUILD b/api/envoy/service/discovery/v3alpha/BUILD
index 1e8e081ae295..82ceae783174 100644
--- a/api/envoy/service/discovery/v3alpha/BUILD
+++ b/api/envoy/service/discovery/v3alpha/BUILD
@@ -10,5 +10,7 @@ api_proto_package(
         "//envoy/api/v3alpha:pkg",
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/endpoint:pkg",
+        "//envoy/service/discovery/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/service/discovery/v3alpha/ads.proto b/api/envoy/service/discovery/v3alpha/ads.proto
index 2c31af80c4dd..a89bf61bdf57 100644
--- a/api/envoy/service/discovery/v3alpha/ads.proto
+++ b/api/envoy/service/discovery/v3alpha/ads.proto
@@ -2,13 +2,15 @@ syntax = "proto3";
 
 package envoy.service.discovery.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_outer_classname = "AdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/discovery.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes,
 // and listeners are retained in the package `envoy.api.v2` for backwards
 // compatibility with existing management servers. New development in discovery
@@ -34,4 +36,6 @@ service AggregatedDiscoveryService {
 // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing
 // services: https://github.com/google/protobuf/issues/4221
 message AdsDummy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.AdsDummy";
 }
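Note: the pattern applied throughout these v3alpha hunks is uniform: the file imports udpa/annotations/versioning.proto, each upgraded message records its v2(alpha) predecessor via the previous_message_type option, and the package's api_proto_package gains the matching "@com_github_cncf_udpa//udpa/annotations:pkg" and v2 package deps. The following is a minimal sketch only, using a hypothetical envoy.example.v3alpha.Widget message (not part of this change) to show the shape in one place:

syntax = "proto3";

package envoy.example.v3alpha;

import "udpa/annotations/versioning.proto";

// Hypothetical message used purely to illustrate the annotation; the real
// messages in this diff (e.g. AdsDummy above) follow the same shape.
message Widget {
  // Records the v2 message this v3alpha message was upgraded from, so that
  // version-mapping tooling can relate the two types.
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.example.v2alpha.Widget";

  string name = 1;
}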
diff --git a/api/envoy/service/discovery/v3alpha/hds.proto b/api/envoy/service/discovery/v3alpha/hds.proto
index 242ae6f36b5f..ca472f02c6e7 100644
--- a/api/envoy/service/discovery/v3alpha/hds.proto
+++ b/api/envoy/service/discovery/v3alpha/hds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.discovery.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_outer_classname = "HdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
@@ -14,6 +14,8 @@ import "envoy/api/v3alpha/endpoint/endpoint.proto";
 import "google/api/annotations.proto";
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 // HDS is Health Discovery Service. It compliments Envoy’s health checking
 // service by designating this Envoy to be a healthchecker for a subset of hosts
 // in the cluster. The status of these health checks will be reported to the
@@ -62,16 +64,17 @@ service HealthDiscoveryService {
   // request/response. Should we add an identifier to the HealthCheckSpecifier
   // to bind with the response?
   rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:health_check"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:health_check";
+    option (google.api.http).body = "*";
   }
 }
 
 // Defines supported protocols etc, so the management server can assign proper
 // endpoints to healthcheck.
 message Capability {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.Capability";
+
   // Different Envoy instances may have different capabilities (e.g. Redis)
   // and/or have ports enabled for different protocols.
   enum Protocol {
@@ -84,22 +87,34 @@ message Capability {
 }
 
 message HealthCheckRequest {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.HealthCheckRequest";
+
   api.v3alpha.core.Node node = 1;
 
   Capability capability = 2;
 }
 
 message EndpointHealth {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.EndpointHealth";
+
   api.v3alpha.endpoint.Endpoint endpoint = 1;
 
   api.v3alpha.core.HealthStatus health_status = 2;
 }
 
 message EndpointHealthResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.EndpointHealthResponse";
+
   repeated EndpointHealth endpoints_health = 1;
 }
 
 message HealthCheckRequestOrEndpointHealthResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.HealthCheckRequestOrEndpointHealthResponse";
+
   oneof request_type {
     HealthCheckRequest health_check_request = 1;
 
@@ -108,6 +123,9 @@ message HealthCheckRequestOrEndpointHealthResponse {
 }
 
 message LocalityEndpoints {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.LocalityEndpoints";
+
   api.v3alpha.core.Locality locality = 1;
 
   repeated api.v3alpha.endpoint.Endpoint endpoints = 2;
@@ -118,6 +136,9 @@ message LocalityEndpoints {
 // Envoy instance (outside of HDS). For maximum usefulness, it should match the
 // same cluster structure as that provided by EDS.
 message ClusterHealthCheck {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.ClusterHealthCheck";
+
   string cluster_name = 1;
 
   repeated api.v3alpha.core.HealthCheck health_checks = 2;
@@ -126,6 +147,9 @@ message ClusterHealthCheck {
 }
 
 message HealthCheckSpecifier {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.HealthCheckSpecifier";
+
   repeated ClusterHealthCheck cluster_health_checks = 1;
 
   // The default is 1 second.
diff --git a/api/envoy/service/discovery/v3alpha/rtds.proto b/api/envoy/service/discovery/v3alpha/rtds.proto
index c0ad2b9c41bc..ff1bc04de76e 100644
--- a/api/envoy/service/discovery/v3alpha/rtds.proto
+++ b/api/envoy/service/discovery/v3alpha/rtds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.discovery.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_outer_classname = "RtdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/discovery.proto";
@@ -12,6 +12,8 @@ import "envoy/api/v3alpha/discovery.proto";
 import "google/api/annotations.proto";
 import "google/protobuf/struct.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Runtime Discovery Service (RTDS)]
@@ -28,20 +30,22 @@ service RuntimeDiscoveryService {
   }
 
   rpc FetchRuntime(api.v3alpha.DiscoveryRequest) returns (api.v3alpha.DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:runtime"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:runtime";
+    option (google.api.http).body = "*";
   }
 }
 
 // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing
 // services: https://github.com/google/protobuf/issues/4221
 message RtdsDummy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.RtdsDummy";
 }
 
 // RTDS resource type. This describes a layer in the runtime virtual filesystem.
 message Runtime {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.Runtime";
+
   // Runtime resource name. This makes the Runtime a self-describing xDS
   // resource.
   string name = 1 [(validate.rules).string = {min_bytes: 1}];
diff --git a/api/envoy/service/discovery/v3alpha/sds.proto b/api/envoy/service/discovery/v3alpha/sds.proto
index df3dee17d867..6cdbd26a4d06 100644
--- a/api/envoy/service/discovery/v3alpha/sds.proto
+++ b/api/envoy/service/discovery/v3alpha/sds.proto
@@ -2,15 +2,17 @@ syntax = "proto3";
 
 package envoy.service.discovery.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_outer_classname = "SdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.discovery.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/discovery.proto";
 
 import "google/api/annotations.proto";
 
+import "udpa/annotations/versioning.proto";
+
 service SecretDiscoveryService {
   rpc DeltaSecrets(stream api.v3alpha.DeltaDiscoveryRequest)
       returns (stream api.v3alpha.DeltaDiscoveryResponse) {
@@ -21,14 +23,14 @@ service SecretDiscoveryService {
   }
 
   rpc FetchSecrets(api.v3alpha.DiscoveryRequest) returns (api.v3alpha.DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:secrets"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:secrets";
+    option (google.api.http).body = "*";
   }
 }
 
 // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing
 // services: https://github.com/google/protobuf/issues/4221
 message SdsDummy {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.discovery.v2.SdsDummy";
 }
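Note: the google.api.http rewrites in this and the other *ds.proto hunks swap protobuf's aggregate (text-format) option syntax for per-field option assignment; both forms describe the same HttpRule. A minimal sketch, assuming a hypothetical WidgetDiscoveryService that is not part of this change:

syntax = "proto3";

package envoy.example.v3alpha;

import "google/api/annotations.proto";

message WidgetsRequest {
}

message WidgetsResponse {
}

service WidgetDiscoveryService {
  // Field-path form, as added above; equivalent to the removed aggregate form:
  //   option (google.api.http) = {
  //     post: "/v3alpha/discovery:widgets"
  //     body: "*"
  //   };
  rpc FetchWidgets(WidgetsRequest) returns (WidgetsResponse) {
    option (google.api.http).post = "/v3alpha/discovery:widgets";
    option (google.api.http).body = "*";
  }
}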
diff --git a/api/envoy/service/load_stats/v2/lrs.proto b/api/envoy/service/load_stats/v2/lrs.proto
index c2f7b5a0e5a2..fe3e7e977ae5 100644
--- a/api/envoy/service/load_stats/v2/lrs.proto
+++ b/api/envoy/service/load_stats/v2/lrs.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.load_stats.v2;
 
+option java_package = "io.envoyproxy.envoy.service.load_stats.v2";
 option java_outer_classname = "LrsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.load_stats.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/load_stats/v3alpha/BUILD b/api/envoy/service/load_stats/v3alpha/BUILD
index 826dda1511e6..9e0349a44da6 100644
--- a/api/envoy/service/load_stats/v3alpha/BUILD
+++ b/api/envoy/service/load_stats/v3alpha/BUILD
@@ -9,5 +9,7 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/endpoint:pkg",
+        "//envoy/service/load_stats/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/service/load_stats/v3alpha/lrs.proto b/api/envoy/service/load_stats/v3alpha/lrs.proto
index 0a2f603ec0d0..7718b168fb60 100644
--- a/api/envoy/service/load_stats/v3alpha/lrs.proto
+++ b/api/envoy/service/load_stats/v3alpha/lrs.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.load_stats.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.load_stats.v3alpha";
 option java_outer_classname = "LrsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.load_stats.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
@@ -12,6 +12,8 @@ import "envoy/api/v3alpha/endpoint/load_report.proto";
 
 import "google/protobuf/duration.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Load reporting service]
@@ -52,6 +54,9 @@ service LoadReportingService {
 // A load report Envoy sends to the management server.
 // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
 message LoadStatsRequest {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.load_stats.v2.LoadStatsRequest";
+
   // Node identifier for Envoy instance.
   api.v3alpha.core.Node node = 1;
 
@@ -63,6 +68,9 @@ message LoadStatsRequest {
 // is interested in learning load stats about.
 // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
 message LoadStatsResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.load_stats.v2.LoadStatsResponse";
+
   // Clusters to report stats for.
   repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}];
 
diff --git a/api/envoy/service/metrics/v2/metrics_service.proto b/api/envoy/service/metrics/v2/metrics_service.proto
index dc7a53787fbe..28484b22417f 100644
--- a/api/envoy/service/metrics/v2/metrics_service.proto
+++ b/api/envoy/service/metrics/v2/metrics_service.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.metrics.v2;
 
+option java_package = "io.envoyproxy.envoy.service.metrics.v2";
 option java_outer_classname = "MetricsServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.metrics.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/metrics/v3alpha/BUILD b/api/envoy/service/metrics/v3alpha/BUILD
index 1ea4f2b0b07b..7ac7ae3268c6 100644
--- a/api/envoy/service/metrics/v3alpha/BUILD
+++ b/api/envoy/service/metrics/v3alpha/BUILD
@@ -8,6 +8,8 @@ api_proto_package(
     has_services = True,
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/service/metrics/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
         "@prometheus_metrics_model//:client_model",
     ],
 )
diff --git a/api/envoy/service/metrics/v3alpha/metrics_service.proto b/api/envoy/service/metrics/v3alpha/metrics_service.proto
index c2e3523dbcd0..98d7a5a90ed2 100644
--- a/api/envoy/service/metrics/v3alpha/metrics_service.proto
+++ b/api/envoy/service/metrics/v3alpha/metrics_service.proto
@@ -2,14 +2,15 @@ syntax = "proto3";
 
 package envoy.service.metrics.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.metrics.v3alpha";
 option java_outer_classname = "MetricsServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.metrics.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
 
 import "metrics.proto";
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -23,10 +24,18 @@ service MetricsService {
 }
 
 message StreamMetricsResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.metrics.v2.StreamMetricsResponse";
 }
 
 message StreamMetricsMessage {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.metrics.v2.StreamMetricsMessage";
+
   message Identifier {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.metrics.v2.StreamMetricsMessage.Identifier";
+
     // The node sending metrics over the stream.
     api.v3alpha.core.Node node = 1 [(validate.rules).message = {required: true}];
   }
diff --git a/api/envoy/service/ratelimit/v2/rls.proto b/api/envoy/service/ratelimit/v2/rls.proto
index 8dd4460fa265..d184657ab8b4 100644
--- a/api/envoy/service/ratelimit/v2/rls.proto
+++ b/api/envoy/service/ratelimit/v2/rls.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.ratelimit.v2;
 
+option java_package = "io.envoyproxy.envoy.service.ratelimit.v2";
 option java_outer_classname = "RlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.ratelimit.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/ratelimit/v3alpha/BUILD b/api/envoy/service/ratelimit/v3alpha/BUILD
index 054af9a28b5b..2881cd28e2b4 100644
--- a/api/envoy/service/ratelimit/v3alpha/BUILD
+++ b/api/envoy/service/ratelimit/v3alpha/BUILD
@@ -9,5 +9,7 @@ api_proto_package(
     deps = [
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/ratelimit:pkg",
+        "//envoy/service/ratelimit/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/service/ratelimit/v3alpha/rls.proto b/api/envoy/service/ratelimit/v3alpha/rls.proto
index d6c2921a81e3..5c1430350a24 100644
--- a/api/envoy/service/ratelimit/v3alpha/rls.proto
+++ b/api/envoy/service/ratelimit/v3alpha/rls.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.service.ratelimit.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.ratelimit.v3alpha";
 option java_outer_classname = "RlsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.ratelimit.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/ratelimit/ratelimit.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Rate Limit Service (RLS)]
@@ -28,6 +30,9 @@ service RateLimitService {
 // of them are over limit. This enables more complex application level rate limiting scenarios
 // if desired.
 message RateLimitRequest {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.ratelimit.v2.RateLimitRequest";
+
   // All rate limit requests must specify a domain. This enables the configuration to be per
   // application without fear of overlap. E.g., "envoy".
   string domain = 1;
@@ -44,6 +49,9 @@ message RateLimitRequest {
 
 // A response from a ShouldRateLimit call.
 message RateLimitResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.ratelimit.v2.RateLimitResponse";
+
   enum Code {
     // The response code is not known.
     UNKNOWN = 0;
@@ -57,6 +65,9 @@ message RateLimitResponse {
 
   // Defines an actual rate limit in terms of requests per unit of time and the unit itself.
   message RateLimit {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit";
+
     enum Unit {
       // The time unit is not known.
       UNKNOWN = 0;
@@ -82,6 +93,9 @@ message RateLimitResponse {
   }
 
   message DescriptorStatus {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.ratelimit.v2.RateLimitResponse.DescriptorStatus";
+
     // The response code for an individual descriptor.
     Code code = 1;
 
diff --git a/api/envoy/service/tap/v2alpha/common.proto b/api/envoy/service/tap/v2alpha/common.proto
index 66ac87cc28ae..098ee6c67298 100644
--- a/api/envoy/service/tap/v2alpha/common.proto
+++ b/api/envoy/service/tap/v2alpha/common.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.service.tap.v2alpha";
 option java_outer_classname = "CommonProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.tap.v2alpha";
 
 import "envoy/api/v2/core/base.proto";
 import "envoy/api/v2/core/grpc_service.proto";
diff --git a/api/envoy/service/tap/v2alpha/tap.proto b/api/envoy/service/tap/v2alpha/tap.proto
index 8294f254d224..bdf461508cbc 100644
--- a/api/envoy/service/tap/v2alpha/tap.proto
+++ b/api/envoy/service/tap/v2alpha/tap.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.service.tap.v2alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.tap.v2alpha";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/tap/v2alpha/tapds.proto b/api/envoy/service/tap/v2alpha/tapds.proto
index 246d9d17ae2d..280d848f6a03 100644
--- a/api/envoy/service/tap/v2alpha/tapds.proto
+++ b/api/envoy/service/tap/v2alpha/tapds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.tap.v2alpha;
 
+option java_package = "io.envoyproxy.envoy.service.tap.v2alpha";
 option java_outer_classname = "TapdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.tap.v2alpha";
 option java_generic_services = true;
 
 import "envoy/api/v2/discovery.proto";
@@ -26,10 +26,8 @@ service TapDiscoveryService {
   }
 
   rpc FetchTapConfigs(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v2/discovery:tap_configs"
-      body: "*"
-    };
+    option (google.api.http).post = "/v2/discovery:tap_configs";
+    option (google.api.http).body = "*";
   }
 }
 
diff --git a/api/envoy/service/tap/v3alpha/BUILD b/api/envoy/service/tap/v3alpha/BUILD
index 1db858abe610..bd221e80f9ca 100644
--- a/api/envoy/service/tap/v3alpha/BUILD
+++ b/api/envoy/service/tap/v3alpha/BUILD
@@ -11,5 +11,7 @@ api_proto_package(
         "//envoy/api/v3alpha/core:pkg",
         "//envoy/api/v3alpha/route:pkg",
         "//envoy/data/tap/v3alpha:pkg",
+        "//envoy/service/tap/v2alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
     ],
 )
diff --git a/api/envoy/service/tap/v3alpha/common.proto b/api/envoy/service/tap/v3alpha/common.proto
index 67b3b221e691..dcb1f5ce083e 100644
--- a/api/envoy/service/tap/v3alpha/common.proto
+++ b/api/envoy/service/tap/v3alpha/common.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.tap.v3alpha";
 option java_outer_classname = "CommonProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.tap.v3alpha";
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/api/v3alpha/core/grpc_service.proto";
@@ -12,6 +12,8 @@ import "envoy/api/v3alpha/route/route.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Common tap configuration]
@@ -20,6 +22,9 @@ import "validate/validate.proto";
 message TapConfig {
   // [#comment:TODO(mattklein123): Rate limiting]
 
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.TapConfig";
+
   // The match configuration. If the configuration matches the data source being tapped, a tap will
   // occur, with the result written to the configured output.
   MatchPredicate match_config = 1 [(validate.rules).message = {required: true}];
@@ -43,8 +48,14 @@ message TapConfig {
 // configurations to be built using various logical operators.
 // [#next-free-field: 9]
 message MatchPredicate {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.MatchPredicate";
+
   // A set of match configurations used for logical operations.
   message MatchSet {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.tap.v2alpha.MatchPredicate.MatchSet";
+
     // The list of rules that make up the set.
     repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];
   }
@@ -82,12 +93,18 @@ message MatchPredicate {
 
 // HTTP headers match configuration.
 message HttpHeadersMatch {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.HttpHeadersMatch";
+
   // HTTP headers to match.
   repeated api.v3alpha.route.HeaderMatcher headers = 1;
 }
 
 // Tap output configuration.
 message OutputConfig {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.OutputConfig";
+
   // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple
   // sink types are supported this constraint will be relaxed.
   repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];
@@ -115,6 +132,9 @@ message OutputConfig {
 
 // Tap output sink configuration.
 message OutputSink {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.OutputSink";
+
   // Output format. All output is in the form of one or more :ref:`TraceWrapper
   // <envoy_api_msg_data.tap.v3alpha.TraceWrapper>` messages. This enumeration indicates
   // how those messages are written. Note that not all sinks support all output formats. See
@@ -180,10 +200,15 @@ message OutputSink {
 
 // Streaming admin sink configuration.
 message StreamingAdminSink {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.StreamingAdminSink";
 }
 
 // The file per tap sink outputs a discrete file for every tapped stream.
 message FilePerTapSink {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.FilePerTapSink";
+
   // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an
   // identifier distinguishing the recorded trace for stream instances (the Envoy
   // connection ID, HTTP stream ID, etc.).
@@ -193,6 +218,9 @@ message FilePerTapSink {
 // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC
 // server.
 message StreamingGrpcSink {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.StreamingGrpcSink";
+
   // Opaque identifier, that will be sent back to the streaming grpc server.
   string tap_id = 1;
 
diff --git a/api/envoy/service/tap/v3alpha/tap.proto b/api/envoy/service/tap/v3alpha/tap.proto
index b3808eb9aeca..4bb11675eed9 100644
--- a/api/envoy/service/tap/v3alpha/tap.proto
+++ b/api/envoy/service/tap/v3alpha/tap.proto
@@ -2,14 +2,16 @@ syntax = "proto3";
 
 package envoy.service.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.tap.v3alpha";
 option java_outer_classname = "TapProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.tap.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
 import "envoy/data/tap/v3alpha/wrapper.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Tap Sink Service]
@@ -27,7 +29,13 @@ service TapSinkService {
 // [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server
 // and stream taps without ever expecting a response.
 message StreamTapsRequest {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.StreamTapsRequest";
+
   message Identifier {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.tap.v2alpha.StreamTapsRequest.Identifier";
+
     // The node sending taps over the stream.
     api.v3alpha.core.Node node = 1 [(validate.rules).message = {required: true}];
 
@@ -50,4 +58,6 @@ message StreamTapsRequest {
 
 // [#not-implemented-hide:]
 message StreamTapsResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.StreamTapsResponse";
 }
diff --git a/api/envoy/service/tap/v3alpha/tapds.proto b/api/envoy/service/tap/v3alpha/tapds.proto
index 68d00b4f7892..c4c5962e5bad 100644
--- a/api/envoy/service/tap/v3alpha/tapds.proto
+++ b/api/envoy/service/tap/v3alpha/tapds.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.tap.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.tap.v3alpha";
 option java_outer_classname = "TapdsProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.tap.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/discovery.proto";
@@ -12,6 +12,8 @@ import "envoy/service/tap/v3alpha/common.proto";
 
 import "google/api/annotations.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: Tap discovery service]
@@ -27,16 +29,17 @@ service TapDiscoveryService {
   }
 
   rpc FetchTapConfigs(api.v3alpha.DiscoveryRequest) returns (api.v3alpha.DiscoveryResponse) {
-    option (google.api.http) = {
-      post: "/v3alpha/discovery:tap_configs"
-      body: "*"
-    };
+    option (google.api.http).post = "/v3alpha/discovery:tap_configs";
+    option (google.api.http).body = "*";
   }
 }
 
 // [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name
 // The filter TapDS config references this name.
 message TapResource {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.tap.v2alpha.TapResource";
+
   // The name of the tap configuration.
   string name = 1 [(validate.rules).string = {min_bytes: 1}];
 
diff --git a/api/envoy/service/trace/v2/trace_service.proto b/api/envoy/service/trace/v2/trace_service.proto
index 00b1a5b7dbd8..7396d9752c08 100644
--- a/api/envoy/service/trace/v2/trace_service.proto
+++ b/api/envoy/service/trace/v2/trace_service.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.trace.v2;
 
+option java_package = "io.envoyproxy.envoy.service.trace.v2";
 option java_outer_classname = "TraceServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.trace.v2";
 option java_generic_services = true;
 
 import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/service/trace/v3alpha/BUILD b/api/envoy/service/trace/v3alpha/BUILD
index 85bf5d4a8744..e896be494369 100644
--- a/api/envoy/service/trace/v3alpha/BUILD
+++ b/api/envoy/service/trace/v3alpha/BUILD
@@ -8,6 +8,8 @@ api_proto_package(
     has_services = True,
     deps = [
         "//envoy/api/v3alpha/core:pkg",
+        "//envoy/service/trace/v2:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
         "@opencensus_proto//opencensus/proto/trace/v1:trace_proto",
     ],
 )
diff --git a/api/envoy/service/trace/v3alpha/trace_service.proto b/api/envoy/service/trace/v3alpha/trace_service.proto
index 343e76bbb342..fbbe174d7f08 100644
--- a/api/envoy/service/trace/v3alpha/trace_service.proto
+++ b/api/envoy/service/trace/v3alpha/trace_service.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.service.trace.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.service.trace.v3alpha";
 option java_outer_classname = "TraceServiceProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.service.trace.v3alpha";
 option java_generic_services = true;
 
 import "envoy/api/v3alpha/core/base.proto";
@@ -12,6 +12,7 @@ import "envoy/api/v3alpha/core/base.proto";
 import "google/api/annotations.proto";
 
 import "opencensus/proto/trace/v1/trace.proto";
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -26,10 +27,18 @@ service TraceService {
 }
 
 message StreamTracesResponse {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.trace.v2.StreamTracesResponse";
 }
 
 message StreamTracesMessage {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.service.trace.v2.StreamTracesMessage";
+
   message Identifier {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.service.trace.v2.StreamTracesMessage.Identifier";
+
     // The node sending the access log messages over the stream.
     api.v3alpha.core.Node node = 1 [(validate.rules).message = {required: true}];
   }
diff --git a/api/envoy/type/hash_policy.proto b/api/envoy/type/hash_policy.proto
index 077e2c2528df..29950a6599fb 100644
--- a/api/envoy/type/hash_policy.proto
+++ b/api/envoy/type/hash_policy.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type;
 
+option java_package = "io.envoyproxy.envoy.type";
 option java_outer_classname = "HashPolicyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/type/http.proto b/api/envoy/type/http.proto
index 2d0f5161d175..89944fd22838 100644
--- a/api/envoy/type/http.proto
+++ b/api/envoy/type/http.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type;
 
+option java_package = "io.envoyproxy.envoy.type";
 option java_outer_classname = "HttpProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type";
 
 enum CodecClientType {
   HTTP1 = 0;
diff --git a/api/envoy/type/http_status.proto b/api/envoy/type/http_status.proto
index acde59c49eb2..3b99cf0cfe67 100644
--- a/api/envoy/type/http_status.proto
+++ b/api/envoy/type/http_status.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type;
 
+option java_package = "io.envoyproxy.envoy.type";
 option java_outer_classname = "HttpStatusProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto
index d2936a5c4317..65e478c6a7e8 100644
--- a/api/envoy/type/matcher/metadata.proto
+++ b/api/envoy/type/matcher/metadata.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type.matcher;
 
+option java_package = "io.envoyproxy.envoy.type.matcher";
 option java_outer_classname = "MetadataProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher";
 
 import "envoy/type/matcher/value.proto";
 
diff --git a/api/envoy/type/matcher/number.proto b/api/envoy/type/matcher/number.proto
index 09eb811606aa..de00fbed3981 100644
--- a/api/envoy/type/matcher/number.proto
+++ b/api/envoy/type/matcher/number.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type.matcher;
 
+option java_package = "io.envoyproxy.envoy.type.matcher";
 option java_outer_classname = "NumberProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher";
 
 import "envoy/type/range.proto";
 
diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto
index 98819364d9e2..e3a9d8e23b20 100644
--- a/api/envoy/type/matcher/regex.proto
+++ b/api/envoy/type/matcher/regex.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type.matcher;
 
+option java_package = "io.envoyproxy.envoy.type.matcher";
 option java_outer_classname = "RegexProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher";
 
 import "google/protobuf/wrappers.proto";
 
diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto
index 8837fab8ee71..ab252d6fa62d 100644
--- a/api/envoy/type/matcher/string.proto
+++ b/api/envoy/type/matcher/string.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type.matcher;
 
+option java_package = "io.envoyproxy.envoy.type.matcher";
 option java_outer_classname = "StringProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher";
 
 import "envoy/type/matcher/regex.proto";
 
@@ -54,7 +54,7 @@ message StringMatcher {
     // .. attention::
     //   This field has been deprecated in favor of `safe_regex` as it is not safe for use with
     //   untrusted input in all cases.
-    string regex = 4 [(validate.rules).string = {max_bytes: 1024}, deprecated = true];
+    string regex = 4 [deprecated = true, (validate.rules).string = {max_bytes: 1024}];
 
     // The input string must match the regular expression specified here.
     RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];
diff --git a/api/envoy/type/matcher/v3alpha/BUILD b/api/envoy/type/matcher/v3alpha/BUILD
index 30e23239cc1b..6bf7c97b6bc4 100644
--- a/api/envoy/type/matcher/v3alpha/BUILD
+++ b/api/envoy/type/matcher/v3alpha/BUILD
@@ -5,5 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 licenses(["notice"])  # Apache 2
 
 api_proto_package(
-    deps = ["//envoy/type/v3alpha:pkg"],
+    deps = [
+        "//envoy/type/matcher:pkg",
+        "//envoy/type/v3alpha:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
 )
diff --git a/api/envoy/type/matcher/v3alpha/metadata.proto b/api/envoy/type/matcher/v3alpha/metadata.proto
index 253015823abe..5c00876d57bb 100644
--- a/api/envoy/type/matcher/v3alpha/metadata.proto
+++ b/api/envoy/type/matcher/v3alpha/metadata.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.type.matcher.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 option java_outer_classname = "MetadataProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 
 import "envoy/type/matcher/v3alpha/value.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: MetadataMatcher]
@@ -71,10 +73,15 @@ import "validate/validate.proto";
 // <envoy_api_msg_config.rbac.v3alpha.Permission>` and :ref:`Principal
 // <envoy_api_msg_config.rbac.v3alpha.Principal>`.
 message MetadataMatcher {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.MetadataMatcher";
+
   // Specifies the segment in a path to retrieve value from Metadata.
   // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that
   // if the segment key refers to a list, it has to be the last segment in a path.
   message PathSegment {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.type.matcher.MetadataMatcher.PathSegment";
+
     oneof segment {
       option (validate.required) = true;
 
diff --git a/api/envoy/type/matcher/v3alpha/number.proto b/api/envoy/type/matcher/v3alpha/number.proto
index 5888cf69497d..da26510385b6 100644
--- a/api/envoy/type/matcher/v3alpha/number.proto
+++ b/api/envoy/type/matcher/v3alpha/number.proto
@@ -2,18 +2,22 @@ syntax = "proto3";
 
 package envoy.type.matcher.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 option java_outer_classname = "NumberProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 
 import "envoy/type/v3alpha/range.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: NumberMatcher]
 
 // Specifies the way to match a double value.
 message DoubleMatcher {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.DoubleMatcher";
+
   oneof match_pattern {
     option (validate.required) = true;
 
diff --git a/api/envoy/type/matcher/v3alpha/regex.proto b/api/envoy/type/matcher/v3alpha/regex.proto
index c4c97b22f5e6..51f296f15bb8 100644
--- a/api/envoy/type/matcher/v3alpha/regex.proto
+++ b/api/envoy/type/matcher/v3alpha/regex.proto
@@ -2,22 +2,29 @@ syntax = "proto3";
 
 package envoy.type.matcher.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 option java_outer_classname = "RegexProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 
 import "google/protobuf/wrappers.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: RegexMatcher]
 
 // A regex matcher designed for safety when used with untrusted input.
 message RegexMatcher {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher";
+
   // Google's `RE2 <https://github.com/google/re2>`_ regex engine. The regex string must adhere to
   // the documented `syntax <https://github.com/google/re2/wiki/Syntax>`_. The engine is designed
   // to complete execution in linear time as well as limit the amount of memory used.
   message GoogleRE2 {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.type.matcher.RegexMatcher.GoogleRE2";
+
     // This field controls the RE2 "program size" which is a rough estimate of how complex a
     // compiled regex is to evaluate. A regex that has a program size greater than the configured
     // value will fail to compile. In this case, the configured max program size can be increased
diff --git a/api/envoy/type/matcher/v3alpha/string.proto b/api/envoy/type/matcher/v3alpha/string.proto
index 99d83433e7fa..bba1b6b62685 100644
--- a/api/envoy/type/matcher/v3alpha/string.proto
+++ b/api/envoy/type/matcher/v3alpha/string.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
 
 package envoy.type.matcher.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 option java_outer_classname = "StringProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 
 import "envoy/type/matcher/v3alpha/regex.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: StringMatcher]
@@ -15,6 +17,8 @@ import "validate/validate.proto";
 // Specifies the way to match a string.
 // [#next-free-field: 6]
 message StringMatcher {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher";
+
   reserved 4;
 
   reserved "regex";
@@ -52,5 +56,8 @@ message StringMatcher {
 
 // Specifies a list of ways to match a string.
 message ListStringMatcher {
+  option (udpa.annotations.versioning).previous_message_type =
+      "envoy.type.matcher.ListStringMatcher";
+
   repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}];
 }
diff --git a/api/envoy/type/matcher/v3alpha/value.proto b/api/envoy/type/matcher/v3alpha/value.proto
index 652cdebc70b0..7268df471665 100644
--- a/api/envoy/type/matcher/v3alpha/value.proto
+++ b/api/envoy/type/matcher/v3alpha/value.proto
@@ -2,13 +2,15 @@ syntax = "proto3";
 
 package envoy.type.matcher.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 option java_outer_classname = "ValueProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher.v3alpha";
 
 import "envoy/type/matcher/v3alpha/number.proto";
 import "envoy/type/matcher/v3alpha/string.proto";
 
+import "udpa/annotations/versioning.proto";
+
 import "validate/validate.proto";
 
 // [#protodoc-title: ValueMatcher]
@@ -17,8 +19,12 @@ import "validate/validate.proto";
 // StructValue is not supported and is always not matched.
 // [#next-free-field: 7]
 message ValueMatcher {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ValueMatcher";
+
   // NullMatch is an empty message to specify a null value.
   message NullMatch {
+    option (udpa.annotations.versioning).previous_message_type =
+        "envoy.type.matcher.ValueMatcher.NullMatch";
   }
 
   // Specifies how to match a value.
@@ -53,6 +59,8 @@ message ValueMatcher {
 
 // Specifies the way to match a list value.
 message ListMatcher {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ListMatcher";
+
   oneof match_pattern {
     option (validate.required) = true;
 
diff --git a/api/envoy/type/matcher/value.proto b/api/envoy/type/matcher/value.proto
index 71e02310ea12..f68d49afc0fc 100644
--- a/api/envoy/type/matcher/value.proto
+++ b/api/envoy/type/matcher/value.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type.matcher;
 
+option java_package = "io.envoyproxy.envoy.type.matcher";
 option java_outer_classname = "ValueProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.matcher";
 
 import "envoy/type/matcher/number.proto";
 import "envoy/type/matcher/string.proto";
diff --git a/api/envoy/type/metadata/v2/BUILD b/api/envoy/type/metadata/v2/BUILD
new file mode 100644
index 000000000000..5dc095ade27a
--- /dev/null
+++ b/api/envoy/type/metadata/v2/BUILD
@@ -0,0 +1,7 @@
+# DO NOT EDIT. This file is generated by tools/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"])  # Apache 2
+
+api_proto_package()
diff --git a/api/envoy/type/metadata/v2/metadata.proto b/api/envoy/type/metadata/v2/metadata.proto
new file mode 100644
index 000000000000..b1a92beff48c
--- /dev/null
+++ b/api/envoy/type/metadata/v2/metadata.proto
@@ -0,0 +1,95 @@
+syntax = "proto3";
+
+package envoy.type.metadata.v2;
+
+option java_package = "io.envoyproxy.envoy.type.metadata.v2";
+option java_outer_classname = "MetadataProto";
+option java_multiple_files = true;
+
+import "validate/validate.proto";
+
+// [#protodoc-title: Metadata]
+
+// MetadataKey provides a general interface using `key` and `path` to retrieve a value from
+// :ref:`Metadata <envoy_api_msg_core.Metadata>`.
+//
+// For example, for the following Metadata:
+//
+// .. code-block:: yaml
+//
+//    filter_metadata:
+//      envoy.xxx:
+//        prop:
+//          foo: bar
+//          xyz:
+//            hello: envoy
+//
+// The following MetadataKey will retrieve a string value "bar" from the Metadata.
+//
+// .. code-block:: yaml
+//
+//    key: envoy.xxx
+//    path:
+//    - key: prop
+//    - key: foo
+//
+message MetadataKey {
+  // Specifies the segment in a path to retrieve value from Metadata.
+  // Currently, only the key, i.e. the field name, can be specified as a segment of a path.
+  message PathSegment {
+    oneof segment {
+      option (validate.required) = true;
+
+      // If specified, use the key to retrieve the value in a Struct.
+      string key = 1 [(validate.rules).string = {min_bytes: 1}];
+    }
+  }
+
+  // The key name used to retrieve the Struct from the Metadata.
+  // Typically, it represents a builtin subsystem or custom extension.
+  string key = 1 [(validate.rules).string = {min_bytes: 1}];
+
+  // The path to retrieve the Value from the Struct. It can be a prefix or a full path,
+  // e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example above,
+  // depending on the particular scenario.
+  //
+  // Note: Because only key-type segments are supported, the path cannot specify a list
+  // unless the list is the last segment.
+  repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];
+}
+
+// Describes the kind of metadata.
+message MetadataKind {
+  // Represents dynamic metadata associated with the request.
+  message Request {
+  }
+
+  // Represents metadata from :ref:`the route<envoy_api_field_route.Route.metadata>`.
+  message Route {
+  }
+
+  // Represents metadata from :ref:`the upstream cluster<envoy_api_field_Cluster.metadata>`.
+  message Cluster {
+  }
+
+  // Represents metadata from :ref:`the upstream
+  // host<envoy_api_field_endpoint.LbEndpoint.metadata>`.
+  message Host {
+  }
+
+  oneof kind {
+    option (validate.required) = true;
+
+    // Request kind of metadata.
+    Request request = 1;
+
+    // Route kind of metadata.
+    Route route = 2;
+
+    // Cluster kind of metadata.
+    Cluster cluster = 3;
+
+    // Host kind of metadata.
+    Host host = 4;
+  }
+}
diff --git a/api/envoy/type/percent.proto b/api/envoy/type/percent.proto
index 6d0868fd0ede..9c0aa65ad836 100644
--- a/api/envoy/type/percent.proto
+++ b/api/envoy/type/percent.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type;
 
+option java_package = "io.envoyproxy.envoy.type";
 option java_outer_classname = "PercentProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type";
 
 import "validate/validate.proto";
 
diff --git a/api/envoy/type/range.proto b/api/envoy/type/range.proto
index f31cf32f07c4..e000f93d5af8 100644
--- a/api/envoy/type/range.proto
+++ b/api/envoy/type/range.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type;
 
+option java_package = "io.envoyproxy.envoy.type";
 option java_outer_classname = "RangeProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type";
 
 // [#protodoc-title: Range]
 
diff --git a/api/envoy/type/tracing/v2/BUILD b/api/envoy/type/tracing/v2/BUILD
new file mode 100644
index 000000000000..7088ddfe0dad
--- /dev/null
+++ b/api/envoy/type/tracing/v2/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"])  # Apache 2
+
+api_proto_package(
+    deps = ["//envoy/type/metadata/v2:pkg"],
+)
diff --git a/api/envoy/type/tracing/v2/custom_tag.proto b/api/envoy/type/tracing/v2/custom_tag.proto
new file mode 100644
index 000000000000..de3b550d6034
--- /dev/null
+++ b/api/envoy/type/tracing/v2/custom_tag.proto
@@ -0,0 +1,83 @@
+syntax = "proto3";
+
+package envoy.type.tracing.v2;
+
+option java_package = "io.envoyproxy.envoy.type.tracing.v2";
+option java_outer_classname = "CustomTagProto";
+option java_multiple_files = true;
+
+import "envoy/type/metadata/v2/metadata.proto";
+
+import "validate/validate.proto";
+
+// [#protodoc-title: Custom Tag]
+
+// Describes custom tags for the active span.
+// [#next-free-field: 6]
+message CustomTag {
+  // Literal type custom tag with static value for the tag value.
+  message Literal {
+    // Static literal value to populate the tag value.
+    string value = 1 [(validate.rules).string = {min_bytes: 1}];
+  }
+
+  // Environment type custom tag with environment name and default value.
+  message Environment {
+    // Environment variable name to obtain the value to populate the tag value.
+    string name = 1 [(validate.rules).string = {min_bytes: 1}];
+
+    // When the environment variable is not found,
+    // the tag value will be populated with this default value if specified;
+    // otherwise, no tag will be populated.
+    string default_value = 2;
+  }
+
+  // Header type custom tag with header name and default value.
+  message Header {
+    // Header name to obtain the value to populate the tag value.
+    string name = 1 [(validate.rules).string = {min_bytes: 1}];
+
+    // When the header does not exist,
+    // the tag value will be populated with this default value if specified;
+    // otherwise, no tag will be populated.
+    string default_value = 2;
+  }
+
+  // Metadata type custom tag using
+  // :ref:`MetadataKey <envoy_api_msg_type.metadata.v2.MetadataKey>` to retrieve the protobuf value
+  // from :ref:`Metadata <envoy_api_msg_core.Metadata>`, and populate the tag value with
+  // `the canonical JSON <https://developers.google.com/protocol-buffers/docs/proto3#json>`_
+  // representation of it.
+  message Metadata {
+    // Specifies what kind of metadata to obtain the tag value from.
+    metadata.v2.MetadataKind kind = 1;
+
+    // Metadata key to define the path to retrieve the tag value.
+    metadata.v2.MetadataKey metadata_key = 2;
+
+    // When no valid metadata is found,
+    // the tag value will be populated with this default value if specified;
+    // otherwise, no tag will be populated.
+    string default_value = 3;
+  }
+
+  // Used to populate the tag name.
+  string tag = 1 [(validate.rules).string = {min_bytes: 1}];
+
+  // Used to specify what kind of custom tag.
+  oneof type {
+    option (validate.required) = true;
+
+    // A literal custom tag.
+    Literal literal = 2;
+
+    // An environment custom tag.
+    Environment environment = 3;
+
+    // A request header custom tag.
+    Header request_header = 4;
+
+    // A custom tag to obtain the tag value from the metadata.
+    Metadata metadata = 5;
+  }
+}
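
As an illustrative aside (not part of the change above), the YAML sketch below shows how a list of `CustomTag` messages could be populated. The field that embeds such a list, e.g. a tracing configuration, is assumed here, and all tag names and values are hypothetical; the `metadata` entry also exercises the `MetadataKind`/`MetadataKey` messages added in `envoy/type/metadata/v2/metadata.proto` above.

```yaml
# Hypothetical list of CustomTag entries; the field embedding this list is assumed.
- tag: env                    # Literal: always attach the static value "prod".
  literal:
    value: prod
- tag: request-id             # Header: copy x-request-id, falling back to "none".
  request_header:
    name: x-request-id
    default_value: none
- tag: route-prop             # Metadata: read a value from the route's filter metadata.
  metadata:
    kind:
      route: {}
    metadata_key:
      key: envoy.xxx
      path:
      - key: prop
      - key: foo
    default_value: unknown
```
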
diff --git a/api/envoy/type/v3alpha/BUILD b/api/envoy/type/v3alpha/BUILD
index 5dc095ade27a..3535fe17b682 100644
--- a/api/envoy/type/v3alpha/BUILD
+++ b/api/envoy/type/v3alpha/BUILD
@@ -4,4 +4,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
 
 licenses(["notice"])  # Apache 2
 
-api_proto_package()
+api_proto_package(
+    deps = [
+        "//envoy/type:pkg",
+        "@com_github_cncf_udpa//udpa/annotations:pkg",
+    ],
+)
diff --git a/api/envoy/type/v3alpha/hash_policy.proto b/api/envoy/type/v3alpha/hash_policy.proto
index 88cabee2540c..06d98be442d9 100644
--- a/api/envoy/type/v3alpha/hash_policy.proto
+++ b/api/envoy/type/v3alpha/hash_policy.proto
@@ -2,9 +2,11 @@ syntax = "proto3";
 
 package envoy.type.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.v3alpha";
 option java_outer_classname = "HashPolicyProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -12,9 +14,12 @@ import "validate/validate.proto";
 
 // Specifies the hash policy
 message HashPolicy {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy";
+
   // The source IP will be used to compute the hash used by hash-based load balancing
   // algorithms.
   message SourceIp {
+    option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy.SourceIp";
   }
 
   oneof policy_specifier {
diff --git a/api/envoy/type/v3alpha/http.proto b/api/envoy/type/v3alpha/http.proto
index 2d0b19e5e0a6..799cbe13be12 100644
--- a/api/envoy/type/v3alpha/http.proto
+++ b/api/envoy/type/v3alpha/http.proto
@@ -2,9 +2,9 @@ syntax = "proto3";
 
 package envoy.type.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.v3alpha";
 option java_outer_classname = "HttpProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.v3alpha";
 
 enum CodecClientType {
   HTTP1 = 0;
diff --git a/api/envoy/type/v3alpha/http_status.proto b/api/envoy/type/v3alpha/http_status.proto
index 76b3a995ff2a..1a29e03dad89 100644
--- a/api/envoy/type/v3alpha/http_status.proto
+++ b/api/envoy/type/v3alpha/http_status.proto
@@ -2,9 +2,11 @@ syntax = "proto3";
 
 package envoy.type.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.v3alpha";
 option java_outer_classname = "HttpStatusProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -130,6 +132,8 @@ enum StatusCode {
 
 // HTTP status.
 message HttpStatus {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.HttpStatus";
+
   // Supplies HTTP response code.
   StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}];
 }
diff --git a/api/envoy/type/v3alpha/percent.proto b/api/envoy/type/v3alpha/percent.proto
index 241d8a5027a4..56941bff55f8 100644
--- a/api/envoy/type/v3alpha/percent.proto
+++ b/api/envoy/type/v3alpha/percent.proto
@@ -2,9 +2,11 @@ syntax = "proto3";
 
 package envoy.type.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.v3alpha";
 option java_outer_classname = "PercentProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 import "validate/validate.proto";
 
@@ -12,6 +14,8 @@ import "validate/validate.proto";
 
 // Identifies a percentage, in the range [0.0, 100.0].
 message Percent {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.Percent";
+
   double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}];
 }
 
@@ -22,6 +26,8 @@ message Percent {
 // * **Example**: 1/100 = 1%.
 // * **Example**: 3/10000 = 0.03%.
 message FractionalPercent {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.FractionalPercent";
+
   // Fraction percentages support several fixed denominator values.
   enum DenominatorType {
     // 100.
diff --git a/api/envoy/type/v3alpha/range.proto b/api/envoy/type/v3alpha/range.proto
index a7452adf4792..f8fda4cc2878 100644
--- a/api/envoy/type/v3alpha/range.proto
+++ b/api/envoy/type/v3alpha/range.proto
@@ -2,15 +2,19 @@ syntax = "proto3";
 
 package envoy.type.v3alpha;
 
+option java_package = "io.envoyproxy.envoy.type.v3alpha";
 option java_outer_classname = "RangeProto";
 option java_multiple_files = true;
-option java_package = "io.envoyproxy.envoy.type.v3alpha";
+
+import "udpa/annotations/versioning.proto";
 
 // [#protodoc-title: Range]
 
 // Specifies the int64 start and end of the range using half-open interval semantics [start,
 // end).
 message Int64Range {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int64Range";
+
   // start of the range (inclusive)
   int64 start = 1;
 
@@ -21,6 +25,8 @@ message Int64Range {
 // Specifies the double start and end of the range using half-open interval semantics [start,
 // end).
 message DoubleRange {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.type.DoubleRange";
+
   // start of the range (inclusive)
   double start = 1;
 
diff --git a/api/test/build/BUILD b/api/test/build/BUILD
index 3e4a9bd8e940..2dae9fa0de03 100644
--- a/api/test/build/BUILD
+++ b/api/test/build/BUILD
@@ -11,7 +11,7 @@ api_cc_test(
         "//envoy/service/discovery/v2:pkg_cc_proto",
         "//envoy/service/metrics/v2:pkg_cc_proto",
         "//envoy/service/ratelimit/v2:pkg_cc_proto",
-        "@com_github_cncf_udpa//udpa/service/orca/v1:orca_export_cc",
+        "@com_github_cncf_udpa//udpa/service/orca/v1:pkg_cc_proto",
     ],
 )
 
diff --git a/api/xds_protocol.rst b/api/xds_protocol.rst
index 2634bf126d4d..21fc64d019de 100644
--- a/api/xds_protocol.rst
+++ b/api/xds_protocol.rst
@@ -7,7 +7,7 @@ Envoy discovers its various dynamic resources via the filesystem or by
 querying one or more management servers. Collectively, these discovery
 services and their corresponding APIs are referred to as *xDS*.
 Resources are requested via *subscriptions*, by specifying a filesystem
-path to watch, initiating gRPC streams or polling a REST-JSON URL. The
+path to watch, initiating gRPC streams, or polling a REST-JSON URL. The
 latter two methods involve sending requests with a :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>`
 proto payload. Resources are delivered in a
 :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`
@@ -20,10 +20,10 @@ Filesystem subscriptions
 The simplest approach to delivering dynamic configuration is to place it
 at a well known path specified in the :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`.
 Envoy will use `inotify` (`kqueue` on macOS) to monitor the file for
-changes and parse the 
+changes and parse the
 :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` proto in the file on update.
 Binary protobufs, JSON, YAML and proto text are supported formats for
-the 
+the
 :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`.
 
 There is no mechanism available for filesystem subscriptions to ACK/NACK
@@ -36,43 +36,180 @@ occurs.
 Streaming gRPC subscriptions
 ----------------------------
 
-Singleton resource type discovery
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A gRPC 
-:ref:`ApiConfigSource <envoy_api_msg_core.ApiConfigSource>`
-can be specified independently for each xDS API, pointing at an upstream
-cluster corresponding to a management server. This will initiate an
-independent bidirectional gRPC stream for each xDS resource type,
-potentially to distinct management servers. API delivery is eventually
-consistent. See :ref:`Aggregated Discovery Service <xds_protocol_ads>`
-below for situations in which explicit control of sequencing is required.
-
-Type URLs
-^^^^^^^^^
-
-Each xDS API is concerned with resources of a given type. There is a 1:1
-correspondence between an xDS API and a resource type. That is:
-
--  LDS: :ref:`envoy.api.v2.Listener <envoy_api_msg_Listener>`
--  RDS: :ref:`envoy.api.v2.RouteConfiguration <envoy_api_msg_RouteConfiguration>`
--  VHDS: :ref:`envoy.api.v2.Vhds <envoy_api_msg_RouteConfiguration>`
--  CDS: :ref:`envoy.api.v2.Cluster <envoy_api_msg_Cluster>`
--  EDS: :ref:`envoy.api.v2.ClusterLoadAssignment <envoy_api_msg_ClusterLoadAssignment>`
--  SDS: :ref:`envoy.api.v2.Auth.Secret <envoy_api_msg_Auth.Secret>`
--  RTDS: :ref:`envoy.service.discovery.v2.Runtime <envoy_api_msg_service.discovery.v2.Runtime>`
-
-The concept of `type URLs <https://developers.google.com/protocol-buffers/docs/proto3#any>`_ appears below, and takes the form
-`type.googleapis.com/<resource type>`, e.g.
-`type.googleapis.com/envoy.api.v2.Cluster` for CDS. In various
-requests from Envoy and responses by the management server, the resource
-type URL is stated.
+Resource Types
+~~~~~~~~~~~~~~
+
+Every configuration resource in the xDS API has a type associated with it. The following types are
+supported:
+
+-  :ref:`envoy.api.v2.Listener <envoy_api_msg_Listener>`
+-  :ref:`envoy.api.v2.RouteConfiguration <envoy_api_msg_RouteConfiguration>`
+-  :ref:`envoy.api.v2.ScopedRouteConfiguration <envoy_api_msg_ScopedRouteConfiguration>`
+-  :ref:`envoy.api.v2.route.VirtualHost <envoy_api_msg_route.VirtualHost>`
+-  :ref:`envoy.api.v2.Cluster <envoy_api_msg_Cluster>`
+-  :ref:`envoy.api.v2.ClusterLoadAssignment <envoy_api_msg_ClusterLoadAssignment>`
+-  :ref:`envoy.api.v2.Auth.Secret <envoy_api_msg_Auth.Secret>`
+-  :ref:`envoy.service.discovery.v2.Runtime <envoy_api_msg_service.discovery.v2.Runtime>`
+
+The concept of `type URLs <https://developers.google.com/protocol-buffers/docs/proto3#any>`_
+appears below, and takes the form `type.googleapis.com/<resource type>` -- e.g.,
+`type.googleapis.com/envoy.api.v2.Cluster` for a `Cluster` resource. In various requests from
+Envoy and responses by the management server, the resource type URL is stated.
+
+API flow
+~~~~~~~~
+
+For typical HTTP routing scenarios, the core resource types for the client's configuration are
+`Listener`, `RouteConfiguration`, `Cluster`, and `ClusterLoadAssignment`. Each `Listener` resource
+may point to a `RouteConfiguration` resource, which may point to one or more `Cluster` resources,
+and each `Cluster` resource may point to a `ClusterLoadAssignment` resource.
+
+Envoy fetches all `Listener` and `Cluster` resources at startup. It then fetches whatever
+`RouteConfiguration` and `ClusterLoadAssignment` resources are required by the `Listener` and
+`Cluster` resources. In effect, every `Listener` or `Cluster` resource is a root to part of Envoy's
+configuration tree.
+
+A non-proxy client such as gRPC might start by fetching only the specific `Listener` resources
+that it is interested in. It then fetches the `RouteConfiguration` resources required by those
+`Listener` resources, followed by whichever `Cluster` resources are required by those
+`RouteConfiguration` resources, followed by the `ClusterLoadAssignment` resources required
+by the `Cluster` resources. In effect, the original `Listener` resources are the roots to
+the client's configuration tree.
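
To make the reference chain concrete, the abridged YAML fragments below sketch the pointers
described above (field names follow the v2 API; the names `local_route` and `some_service` are
hypothetical): an HTTP connection manager inside a `Listener` referencing a `RouteConfiguration`
via RDS, and an EDS `Cluster` whose `ClusterLoadAssignment` is fetched via EDS.

.. code-block:: yaml

  # Abridged http_connection_manager config inside a Listener, pointing at a
  # RouteConfiguration named "local_route" to be fetched over RDS.
  rds:
    route_config_name: local_route
    config_source:
      ads: {}
  ---
  # Abridged Cluster whose endpoints (a ClusterLoadAssignment) are fetched over EDS.
  name: some_service
  type: EDS
  eds_cluster_config:
    eds_config:
      ads: {}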
+
+Variants of the xDS Transport Protocol
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Four Variants
+^^^^^^^^^^^^^
+
+There are four variants of the xDS transport protocol used via streaming gRPC, which cover all
+combinations of two dimensions.
+
+The first dimension is State of the World (SotW) vs. incremental. The SotW approach was the
+original mechanism used by xDS, in which the client must specify all resource names it is
+interested in with each request (except when making a wildcard request in LDS/CDS), and the server
+must return all resources the client has subscribed to in each response (in LDS/CDS). This means
+that if the client is already subscribing to 99 resources and wants to add an additional one, it
+must send a request with all 100 resource names, rather than just the one new one. And the server
+must then respond by sending all 100 resources, even if the 99 that were already subscribed to have
+not changed (in LDS/CDS). This mechanism can be a scalability limitation, which is why the
+incremental protocol variant was introduced. The incremental approach allows both the client and
+server to indicate only deltas relative to their previous state -- i.e., the client can say that
+it wants to add or remove its subscription to a particular resource name without resending those
+that have not changed, and the server can send updates only for those resources that have changed.
+The incremental protocol also provides a mechanism for lazy loading of resources. For details on
+the incremental protocol, see :ref:`Incremental xDS <xds_protocol_delta>` below.
+
+The second dimension is using a separate gRPC stream for each resource type vs. aggregating all
+resource types onto a single gRPC stream. The former approach was the original mechanism used by
+xDS, and it offers an eventual consistency model. The latter approach was added for environments
+in which explicit control of sequencing is required. For details, see :ref:`Eventual consistency
+considerations <xds_protocol_eventual_consistency_considerations>` below.
+
+So, the four variants of the xDS transport protocol are:
+
+1. State of the World (Basic xDS): SotW, separate gRPC stream for each resource type
+2. Incremental xDS: incremental, separate gRPC stream for each resource type
+3. Aggregated Discovery Service (ADS): SotW, aggregate stream for all resource types
+4. Incremental ADS: incremental, aggregate stream for all resource types
+
+RPC Services and Methods for Each Variant
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For the non-aggregated protocol variants, there is a separate RPC service for each resource type.
+Each of these RPC services can provide a method for each of the SotW and Incremental protocol
+variants. Here are the RPC services and methods for each resource type:
+
+-  Listener: Listener Discovery Service (LDS)
+   -  SotW: ListenerDiscoveryService.StreamListeners
+   -  Incremental: ListenerDiscoveryService.DeltaListeners
+-  RouteConfiguration: Route Discovery Service (RDS)
+   -  SotW: RouteDiscoveryService.StreamRoutes
+   -  Incremental: RouteDiscoveryService.DeltaRoutes
+-  ScopedRouteConfiguration: Scoped Route Discovery Service (SRDS)
+   -  SotW: ScopedRouteDiscoveryService.StreamScopedRoutes
+   -  Incremental: ScopedRouteDiscoveryService.DeltaScopedRoutes
+-  VirtualHost: Virtual Host Discovery Service (VHDS)
+   -  SotW: N/A
+   -  Incremental: VirtualHostDiscoveryService.DeltaVirtualHosts
+-  Cluster: Cluster Discovery Service (CDS)
+   -  SotW: ClusterDiscoveryService.StreamClusters
+   -  Incremental: ClusterDiscoveryService.DeltaClusters
+-  ClusterLoadAssignment: Endpoint Discovery Service (EDS)
+   -  SotW: EndpointDiscoveryService.StreamEndpoints
+   -  Incremental: EndpointDiscoveryService.DeltaEndpoints
+-  Secret: Secret Discovery Service (SDS)
+   -  SotW: SecretDiscoveryService.StreamSecrets
+   -  Incremental: SecretDiscoveryService.DeltaSecrets
+-  Runtime: Runtime Discovery Service (RTDS)
+   -  SotW: RuntimeDiscoveryService.StreamRuntime
+   -  Incremental: RuntimeDiscoveryService.DeltaRuntime
+
+In the aggregated protocol variants, all resource types are multiplexed on a single gRPC stream,
+where each resource type is treated as a separate logical stream within the aggregated stream.
+In effect, it simply combines all of the above separate APIs into a single stream by treating
+requests and responses for each resource type as a separate sub-stream on the single aggregated
+stream. The RPC service and methods for the aggregated protocol variants are:
+
+-  SotW: AggregatedDiscoveryService.StreamAggregatedResources
+-  Incremental: AggregatedDiscoveryService.DeltaAggregatedResources
+
+For all of the SotW methods, the request type is :ref:`DiscoveryRequest
+<envoy_api_msg_DiscoveryRequest>` and the response type is :ref:`DiscoveryResponse
+<envoy_api_msg_DiscoveryResponse>`.
+
+For all of the incremental methods, the request type is :ref:`DeltaDiscoveryRequest
+<envoy_api_msg_DeltaDiscoveryRequest>` and the response type is :ref:`DeltaDiscoveryResponse
+<envoy_api_msg_DeltaDiscoveryResponse>`.
+
+Configuring Which Variant to Use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the xDS API, the :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` message indicates how to
+obtain resources of a particular type. If the :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`
+contains a gRPC :ref:`ApiConfigSource <envoy_api_msg_core.ApiConfigSource>`, it points to an
+upstream cluster for the management server; this will initiate an independent bidirectional gRPC
+stream for each xDS resource type, potentially to distinct management servers. If the
+:ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` contains an :ref:`AggregatedConfigSource
+<envoy_api_msg_core.AggregatedConfigSource>`, it tells the client to use :ref:`ADS
+<xds_protocol_ads>`.
+
+Currently, the client is expected to be given some local configuration that tells it how to obtain
+the :ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resources.
+:ref:`Listener <envoy_api_msg_Listener>` resources may include a
+:ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` that indicates how the
+:ref:`RouteConfiguration <envoy_api_msg_RouteConfiguration>` resources are obtained, and
+:ref:`Cluster <envoy_api_msg_Cluster>` resources may include a
+:ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` that indicates how the
+:ref:`ClusterLoadAssignment <envoy_api_msg_ClusterLoadAssignment>` resources are obtained.
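
For illustration only, the two styles of :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`
message could look like the following sketch, where `xds_cluster` is an assumed name for a
statically defined cluster pointing at the management server:

.. code-block:: yaml

  # A ConfigSource that opens a separate gRPC stream for this resource type:
  api_config_source:
    api_type: GRPC
    grpc_services:
    - envoy_grpc:
        cluster_name: xds_cluster
  ---
  # A ConfigSource that tells the client to use the aggregated (ADS) stream:
  ads: {}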
+
+Client Configuration
+""""""""""""""""""""
+
+In Envoy, the bootstrap file contains two :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>`
+messages, one indicating how :ref:`Listener <envoy_api_msg_Listener>` resources are obtained and
+another indicating how :ref:`Cluster <envoy_api_msg_Cluster>` resources are obtained. It also
+contains a separate :ref:`ApiConfigSource <envoy_api_msg_core.ApiConfigSource>` message indicating
+how to contact the ADS server, which will be used whenever a :ref:`ConfigSource
+<envoy_api_msg_core.ConfigSource>` message (either in the bootstrap file or in a :ref:`Listener
+<envoy_api_msg_Listener>` or :ref:`Cluster <envoy_api_msg_Cluster>` resource obtained from a
+management server) contains an :ref:`AggregatedConfigSource
+<envoy_api_msg_core.AggregatedConfigSource>` message.
+
+In a gRPC client that uses xDS, only ADS is supported, and the bootstrap file contains the name of
+the ADS server, which will be used for all resources. The :ref:`ConfigSource
+<envoy_api_msg_core.ConfigSource>` messages in the :ref:`Listener <envoy_api_msg_Listener>` and
+:ref:`Cluster <envoy_api_msg_Cluster>` resources must contain :ref:`AggregatedConfigSource
+<envoy_api_msg_core.AggregatedConfigSource>` messages.
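
As a rough sketch of the Envoy case described above (the cluster name and management server
address are assumptions), a bootstrap that sources `Listener` and `Cluster` resources over ADS
might look like:

.. code-block:: yaml

  dynamic_resources:
    lds_config:
      ads: {}
    cds_config:
      ads: {}
    ads_config:
      api_type: GRPC
      grpc_services:
      - envoy_grpc:
          cluster_name: xds_cluster
  static_resources:
    clusters:
    - name: xds_cluster
      type: STRICT_DNS
      connect_timeout: 1s
      http2_protocol_options: {}
      load_assignment:
        cluster_name: xds_cluster
        endpoints:
        - lb_endpoints:
          - endpoint:
              address:
                socket_address:
                  address: xds.example.com
                  port_value: 18000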
+
+The xDS Protocol
+~~~~~~~~~~~~~~~~
 
 ACK/NACK and versioning
 ^^^^^^^^^^^^^^^^^^^^^^^
 
-Each stream begins with a 
-:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` from Envoy, specifying
+Each xDS stream begins with a
+:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` from the client, specifying
 the list of resources to subscribe to, the type URL corresponding to the
 subscribed resources, the node identifier and an empty :ref:`version_info <envoy_api_field_DiscoveryRequest.version_info>`.
 An example EDS request might be:
@@ -105,10 +242,10 @@ and the nonce provided by the management server. If the update was
 successfully applied, the :ref:`version_info <envoy_api_field_DiscoveryResponse.version_info>` will be **X**, as indicated
 in the sequence diagram:
 
-.. figure:: diagrams/simple-ack.svg 
+.. figure:: diagrams/simple-ack.svg
    :alt: Version update after ACK
 
-In this sequence diagram, and below, the following format is used to abbreviate messages: 
+In this sequence diagram, and below, the following format is used to abbreviate messages:
 
 - *DiscoveryRequest*: (V=version_info,R=resource_names,N=response_nonce,T=type_url)
 - *DiscoveryResponse*: (V=version_info,R=resources,N=nonce,T=type_url)
@@ -144,9 +281,6 @@ message for the node identifier as a result.
 
 .. _xds_protocol_resource_update:
 
-Resource Update
-~~~~~~~~~~~~~~~
-
 When to send an update
 ^^^^^^^^^^^^^^^^^^^^^^
 
@@ -155,45 +289,149 @@ the resources in the :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>`
 to any :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` with a :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` containing the
 ACK/NACK immediately after it has been either accepted or rejected. If
 the management server provides the same set of resources rather than
-waiting for a change to occur, it will cause Envoy and the management
-server to spin and have a severe performance impact.
+waiting for a change to occur, it will cause needless work on both the client and the management
+server, which could have a severe performance impact.
 
 Within a stream, new :ref:`DiscoveryRequests <envoy_api_msg_DiscoveryRequest>` supersede any prior
 :ref:`DiscoveryRequests <envoy_api_msg_DiscoveryRequest>` having the same resource type. This means that
 the management server only needs to respond to the latest
 :ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>` on each stream for any given resource type.
 
-Resource hints
-^^^^^^^^^^^^^^
+.. _xds_protocol_resource_hints:
+
+How the client specifies what resources to return
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+xDS requests allow the client to specify a set of resource names as a hint to the server about
+which resources the client is interested in. In the SotW protocol variants, this is done via the
+:ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` specified in the
+:ref:`DiscoveryRequest <envoy_api_msg_DiscoveryRequest>`; in the incremental protocol variants,
+this is done via the :ref:`resource_names_subscribe
+<envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>` and
+:ref:`resource_names_unsubscribe
+<envoy_api_field_DeltaDiscoveryRequest.resource_names_unsubscribe>` fields in the
+:ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>`.
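
For illustration (the resource names and type below are hypothetical), a spontaneous incremental
request that adds one subscription and drops another might look like:

.. code-block:: yaml

  type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment
  resource_names_subscribe:
  - foo_service
  resource_names_unsubscribe:
  - bar_service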
+
+Normally (see below for exceptions), requests must specify the set of resource names that the
+client is interested in. The management server must supply the requested resources if they exist.
+The client will silently ignore any supplied resources that were not explicitly requested. When
+the client sends a new request that changes the set of resources being requested, the server must
+resend any newly requested resources, even if it previously sent those resources without having
+been asked for them and the resources have not changed since that time. If the list of resource
+names becomes empty, that means that the client is no longer interested in any resources of the
+specified type.
+
+For :ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resource
+types, there is also a "wildcard" mode, which is triggered when the initial request on the stream
+for that resource type contains no resource names. In this case, the server should use
+site-specific business logic to determine the full set of resources that the client is interested
+in, typically based on the client's :ref:`node <envoy_api_msg_core.Node>` identification. Note
+that once a stream has entered wildcard mode for a given resource type, there is no way to change
+the stream out of wildcard mode; resource names specified in any subsequent request on the stream
+will be ignored.
+
+Client Behavior
+"""""""""""""""
+
+Envoy will always use wildcard mode for :ref:`Listener <envoy_api_msg_Listener>` and
+:ref:`Cluster <envoy_api_msg_Cluster>` resources. However, other xDS clients (such as gRPC clients
+that use xDS) may specify explicit resource names for these resource types, for example if they
+only have a singleton listener and already know its name from some out-of-band configuration.
+
+Grouping Resources into Responses
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the incremental protocol variants, the server sends each resource in its own response. This
+means that if the server has previously sent 100 resources and only one of them has changed, it
+may send a response containing only the changed resource; it does not need to resend the 99
+resources that have not changed, and the client must not delete the unchanged resources.
+
+In the SotW protocol variants, all resource types except for :ref:`Listener
+<envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` are grouped into responses
+in the same way as in the incremental protocol variants. However,
+:ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resource types
+are handled differently: the server must include the complete state of the world, meaning that all
+resources of the relevant type that are needed by the client must be included, even if they did
+not change since the last response. This means that if the server has previously sent 100
+resources and only one of them has changed, it must resend all 100 of them, even the 99 that were
+not modified.
+
+Note that all of the protocol variants operate on units of whole named resources. There is
+no mechanism for providing incremental updates of repeated fields within a named resource.
+Most notably, there is currently no mechanism for incrementally updating individual
+endpoints within an EDS response.
+
+Deleting Resources
+^^^^^^^^^^^^^^^^^^
+
+In the incremental protocol variants, the server signals the client that a resource should be
+deleted via the :ref:`removed_resources <envoy_api_field_DeltaDiscoveryResponse.removed_resources>`
+field of the response. This tells the client to remove the resource from its local cache.
+
+In the SotW protocol variants, the criteria for deleting resources is more complex. For
+:ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resource types,
+if a previously seen resource is not present in a new response, that indicates that the resource
+has been removed, and the client must delete it; a response containing no resources means to delete
+all resources of that type. However, for other resource types, the API provides no mechanism for
+the server to tell the client that resources have been deleted; instead, deletions are indicated
+implicitly by parent resources being changed to no longer refer to a child resource. For example,
+when the client receives an LDS update removing a :ref:`Listener <envoy_api_msg_Listener>`
+that was previously pointing to :ref:`RouteConfiguration <envoy_api_msg_RouteConfiguration>` A,
+if no other :ref:`Listener <envoy_api_msg_Listener>` is pointing to :ref:`RouteConfiguration
+<envoy_api_msg_RouteConfiguration>` A, then the client may delete A. For those resource types,
+an empty :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` is effectively a no-op
+from the client's perspective.
+
+Knowing When a Requested Resource Does Not Exist
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the SotW protocol variants, responses for :ref:`Listener <envoy_api_msg_Listener>` and
+:ref:`Cluster <envoy_api_msg_Cluster>` resource types must include all resources requested by the
+client. Therefore, if a client requests a resource that does not exist, it can immediately
+tell this from the response.
+
+However, for other resource types, because each resource can be sent in its own response, there is
+no way to know from the next response whether the newly requested resource exists, because the next
+response could be an unrelated update for another resource that had already been subscribed to
+previously. As a result, clients are expected to use a timeout (recommended duration is 15
+seconds) after sending a request for a new resource, after which they will consider the requested
+resource to not exist if they have not received the resource. In Envoy, this is done for
+:ref:`RouteConfiguration <envoy_api_msg_RouteConfiguration>` and :ref:`ClusterLoadAssignment
+<envoy_api_msg_ClusterLoadAssignment>` resources during :ref:`resource warming
+<xds_protocol_resource_warming>`.
+
+Note that clients may want to use the same timeout even for :ref:`Listener
+<envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>` resources, to protect
+against the case where the management server fails to send a response in a timely manner.
+
+Note that even if a requested resource does not exist at the moment when the client requests it,
+that resource could be created at any time. Management servers must remember the set of resources
+being requested by the client, and if one of those resources springs into existence later, the
+server must send an update to the client informing it of the new resource. Clients that initially
+see a resource that does not exist must be prepared for the resource to be created at any time.
+
+Unsubscribing From Resources
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the incremental protocol variants, resources can be unsubscribed from via the
+:ref:`resource_names_unsubscribe
+<envoy_api_field_DeltaDiscoveryRequest.resource_names_unsubscribe>` field.
+
+In the SotW protocol variants, each request must contain the full list of resource names being
+subscribed to in the :ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` field,
+so unsubscribing from a set of resources is done by sending a new request containing all resource
+names that are still being subscribed to but not containing the resource names being unsubscribed
+from. For example, if the client had previously been subscribed to resources A and B but wishes to
+unsubscribe from B, it must send a new request containing only resource A.
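
Sketching that example in terms of the request fields (resource names and type are hypothetical):

.. code-block:: yaml

  # Earlier request on the stream, subscribed to A and B:
  resource_names:
  - A
  - B
  type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment
  ---
  # Later request listing only A, which unsubscribes from B:
  resource_names:
  - A
  type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment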
+
+Note that for :ref:`Listener <envoy_api_msg_Listener>` and :ref:`Cluster <envoy_api_msg_Cluster>`
+resource types where the stream is in "wildcard" mode (see :ref:`How the client specifies what
+resources to return <xds_protocol_resource_hints>` for details), the set of resources being
+subscribed to is determined by the server instead of the client, so there is no mechanism
+for the client to unsubscribe from resources.
 
-The :ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` specified in the :ref:`DiscoveryRequest
-<envoy_api_msg_DiscoveryRequest>` are a hint about what resources the client is interested in.
-
-For :ref:`Listener Discovery Service (LDS) <envoy_api_msg_Listener>` and :ref:`Cluster Discovery Service (CDS)
-<envoy_api_msg_Cluster>`, Envoy will always set the resource hints to empty, in which case the server should use
-site-specific business logic to determine the full set of resources that the client is interested in, typically based on
-the client's node identification. However, other xDS clients may specify explicit LDS/CDS resources as resource hints, for
-example if they only have a singleton listener and already know its name from some out-of-band configuration. For EDS/RDS, the
-resource hints are required.
-
-When the resource hints are specified, the management server must supply the requested resources if they exist. The client will
-silently ignore any supplied resources that were not explicitly requested. When the client sends a new request that changes
-the *resource_names* list, the server must resend any newly requested resource, even if it previously sent it without having
-been asked for it and the resource has not changed since that time.
-
-For LDS and CDS, it is expected that the management server will provide the complete state of the LDS/CDS resources in each
-response; an absent `Listener` or `Cluster` will be deleted. For RDS or EDS, when a requested resource is
-missing, Envoy will retain the last known value for this resource except in the case where the `Cluster` or `Listener` is being
-warmed. See :ref:`Resource Warming <xds_protocol_resource_warming>` section below on the expectations during warming.
-An empty EDS/RDS :ref:`DiscoveryResponse <envoy_api_msg_DiscoveryResponse>` is effectively a nop from the perspective of the
-respective resources in the Envoy.
-
-When a `Listener` or `Cluster` is deleted, its corresponding EDS and
-RDS resources are also deleted inside the Envoy instance. In order for
-EDS resources to be known or tracked by Envoy, there must exist an
-applied `Cluster` definition (e.g. sourced via CDS). A similar
-relationship exists between RDS and `Listeners` (e.g. sourced via
-LDS).
+Requesting Multiple Resources on a Single Stream
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 For EDS/RDS, Envoy may either generate a distinct stream for each
 resource of a given type (e.g. if each :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` has its own
@@ -291,11 +529,11 @@ CDS/EDS update dropping **X**.
 In general, to avoid traffic drop, sequencing of updates should follow a
 make before break model, wherein:
 
-- CDS updates (if any) must always be pushed first. 
-- EDS updates (if any) must arrive after CDS updates for the respective clusters. 
-- LDS updates must arrive after corresponding CDS/EDS updates. 
-- RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates. 
-- VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates. 
+- CDS updates (if any) must always be pushed first.
+- EDS updates (if any) must arrive after CDS updates for the respective clusters.
+- LDS updates must arrive after corresponding CDS/EDS updates.
+- RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates.
+- VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates.
 - Stale CDS clusters and related EDS endpoints (ones no longer being referenced) can then be removed.
 
 xDS updates can be pushed independently if no new
@@ -387,10 +625,10 @@ to a :ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>`
 ACK or NACK. Optionally, a response message level :ref:`system_version_info <envoy_api_field_DeltaDiscoveryResponse.system_version_info>`
 is present for debugging purposes only.
 
-:ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>` can be sent in the following situations: 
+:ref:`DeltaDiscoveryRequest <envoy_api_msg_DeltaDiscoveryRequest>` can be sent in the following situations:
 
-- Initial message in a xDS bidirectional gRPC stream. 
-- As an ACK or NACK response to a previous :ref:`DeltaDiscoveryResponse <envoy_api_msg_DeltaDiscoveryResponse>`. In this case the :ref:`response_nonce <envoy_api_field_DiscoveryRequest.response_nonce>` is set to the nonce value in the Response. ACK or NACK is determined by the absence or presence of :ref:`error_detail <envoy_api_field_DiscoveryRequest.error_detail>`. 
+- Initial message in an xDS bidirectional gRPC stream.
+- As an ACK or NACK response to a previous :ref:`DeltaDiscoveryResponse <envoy_api_msg_DeltaDiscoveryResponse>`. In this case the :ref:`response_nonce <envoy_api_field_DiscoveryRequest.response_nonce>` is set to the nonce value in the Response. ACK or NACK is determined by the absence or presence of :ref:`error_detail <envoy_api_field_DiscoveryRequest.error_detail>`.
 - Spontaneous :ref:`DeltaDiscoveryRequests <envoy_api_msg_DeltaDiscoveryRequest>` from the client. This can be done to dynamically add or remove elements from the tracked :ref:`resource_names <envoy_api_field_DiscoveryRequest.resource_names>` set. In this case :ref:`response_nonce <envoy_api_field_DiscoveryRequest.response_nonce>` must be omitted.
 
 In this first example the client connects and receives a first update
diff --git a/bazel/BUILD b/bazel/BUILD
index 7d5ab93f6c3c..4d4cf2db4f07 100644
--- a/bazel/BUILD
+++ b/bazel/BUILD
@@ -116,6 +116,11 @@ config_setting(
     values = {"define": "ENVOY_CONFIG_TSAN=1"},
 )
 
+config_setting(
+    name = "msan_build",
+    values = {"define": "ENVOY_CONFIG_MSAN=1"},
+)
+
 config_setting(
     name = "coverage_build",
     values = {"define": "ENVOY_CONFIG_COVERAGE=1"},
diff --git a/bazel/PPROF.md b/bazel/PPROF.md
index 888b7810d616..97e1c0541181 100644
--- a/bazel/PPROF.md
+++ b/bazel/PPROF.md
@@ -1,12 +1,10 @@
-# Memory consumption testing with `pprof`
+# CPU or memory consumption testing with `pprof`
 
 To use `pprof` to analyze performance and memory consumption in Envoy, you can
 use the built-in statically linked profiler, or dynamically link it in to a
 specific place yourself.
 
-# Linking
-
-## Static Linking
+## Collecting a CPU or heap profile for a full execution of Envoy
 
 Static linking is already available (because of a `HeapProfilerDump()` call
 inside
@@ -18,80 +16,79 @@ Build the static binary using bazel:
 
     $ bazel build //source/exe:envoy-static
 
-### Running a statically-linked Envoy with `pprof`
+### Collecting the profile
 
-And run the binary with a `HEAPPROFILE` environment variable, like so:
+To collect a CPU or heap profile, run the statically-linked Envoy built above.
 
-    $ HEAPPROFILE=/tmp/mybin.hprof bazel-bin/source/exe/envoy-static <args>
+Set a `CPUPROFILE` or `HEAPPROFILE` environment variable when running the binary, like so:
 
-`HEAPPROFILE` sets a location for the profiler output. A statically-linked
-binary must be run with this environment variable; a dynamically-linked binary
-will populate the working directory by default. (See *Methodology*.)
+    $ CPUPROFILE=/tmp/mybin.cpuprof bazel-bin/source/exe/envoy-static <args>
+    $ HEAPPROFILE=/tmp/mybin.heapprof bazel-bin/source/exe/envoy-static <args>
 
-## Dynamic Linking
+`CPUPROFILE` or `HEAPPROFILE` sets a location for the profiler output. (See *Methodology*.)
 
-### Adding `tcmalloc_dep` to Envoy
+There are several other environment variables that can be set to tweak the behavior of gperftools. See https://gperftools.github.io/gperftools/ for more details.
 
-A statically-linked Envoy will profile everything. In a dynamically-linked
-Envoy, you must add the HeapProfiler instructions yourself.
-`HeapProfilerStart()` will start recording allocations, `HeapProfilerStop()`
-will stop recording, and `HeapProfilerDump()` will dump an output to the
-specified directory. (See [Gperftools Heap
-Profiler](https://gperftools.github.io/gperftools/heapprofile.html).)
+### Analyzing the profile
 
-To add a `HeapProfiler` breakpoint yourself, add `tcmalloc` as a
-dependency under the `envoy_cc_library` rule:
+[pprof](https://github.com/google/pprof) can be used to symbolize CPU and heap profiles. For example:
 
-`source/exe/BUILD`
+    $ pprof -text bazel-bin/source/exe/envoy-static /tmp/mybin.cpuprof
 
-```c++
-    envoy_cc_library(
-       name = "envoy_common_lib",
-+      tcmalloc_dep = 1,
-       deps = [
-       ...
-    )
-```
+## Collecting CPU or heap profile for the full execution of a test target
 
-It is then necessary to add `HeapProfilerStart()` and `HeapProfilerDump()`
-breakpoints somewhere in Envoy. One place to start profiling is at the
-instantiation of `MainCommonBase::MainCommonBase`:
+The profiler library is automatically linked into envoy_cc_test targets.
 
-`source/exe/main_common.cc`
+Run a test with heap profiling enabled, like so:
 
-```c++
-    // includes
-    #include "gperftools/heap-profiler.h"
-    ...
-    MainCommonBase::MainCommonBase(...) : ... {
-+       HeapProfilerStart("main_common_base"); // first line
-        ...
-    }
-```
+    $ bazel test --test_env=HEAPPROFILE=/tmp/heapprof <test target>
+
+Run a test with CPU profiling enabled, like so:
+
+    $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof <test target>
+
+Note that heap checks and heap profile collection in tests have noticeable performance implications. Use the following command to collect a CPU profile from a test target with heap check and heap profile collection disabled:
+
+    $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof --test_env=HEAPPROFILE= --test_env=HEAPCHECK= <test target>
 
-`source/server/server.cc`
+## Starting and stopping profiling programmatically
+
+### Add `tcmalloc_dep` dependency to envoy_cc_library rules
+
+It is possible to start/stop the CPU or heap profiler programmatically.
+The [Gperftools CPU Profiler](https://gperftools.github.io/gperftools/cpuprofile.html)
+is controlled by `ProfilerStart()`/`ProfilerStop()`, and the
+[Gperftools Heap Profiler](https://gperftools.github.io/gperftools/heapprofile.html)
+is controlled by `HeapProfilerStart()`, `HeapProfilerStop()` and `HeapProfilerDump()`.
+
+These functions are wrapped by Envoy objects defined in [`source/common/profiler/profiler.h`](https://github.com/envoyproxy/envoy/blob/master/source/common/profiler/profiler.h).
+
+To enable profiling programmatically:
+
+1. Add a library dependency on "//source/common/profiler:profiler_lib" to your envoy_cc_library build rule.
+2. Use the `startProfiler`/`stopProfiler` methods of `Envoy::Profiler::Cpu` or `Envoy::Profiler::Heap` to collect a profile.
+
+Note that `startProfiler` should only be called if no other profile of that type is currently active (i.e. `profilerEnabled()` returns false).
+
+Example:
 
 ```c++
     // includes
-    #include "gperftools/heap-profiler.h"
+    #include "common/profiler/profiler.h"
     ...
-    void InstanceImpl::Initialize(...) : ... {
+    Function(...) {
+        if (!Profiler::Cpu::startProfiler(profile_path)) {
+           // Error handling
+        }
         ...
-+       HeapProfilerDump("main_common_base"); // last line
+        // Do expensive stuff in one or more threads.
+        ...
+
+        // Stop the profiler and dump output to the `profile_path` specified when the profile was started.
+        Profiler::Cpu::stopProfiler();
     }
 ```
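+
+A corresponding minimal sketch for the heap profiler, assuming `Envoy::Profiler::Heap` exposes the
+same `profilerEnabled`/`startProfiler`/`stopProfiler` interface described above:
+
+```c++
+    // includes
+    #include "common/profiler/profiler.h"
+    ...
+    Function(...) {
+        // Only start if no other heap profile is currently active (see the note above).
+        if (!Profiler::Heap::profilerEnabled() && Profiler::Heap::startProfiler(profile_path)) {
+            ...
+            // Do allocation-heavy work in one or more threads.
+            ...
+
+            // Stop the profiler and dump the heap profile to `profile_path`.
+            Profiler::Heap::stopProfiler();
+        }
+    }
+```
+
+The resulting heap profile can be symbolized with `pprof` as shown in *Analyzing the profile* above.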
 
-Once these changes have been made in your working directory, it might make sense to
-save the diff as a patch (`git diff > file`), which can then be quickly
-applied/unapplied for testing and committing. (`git apply`, `git apply -R`)
-
-Build the binary using bazel, and run the binary without any environment variables:
-
-    $ bazel build //source/exe:envoy
-    $ bazel-bin/source/exe/envoy <args>
-
-This will dump your profiler output to the working directory.
-
 ## Memory Profiling in Tests
 To support memory leaks detection, tests are built with gperftools dependencies enabled by default.
 
diff --git a/bazel/README.md b/bazel/README.md
index ff50a66da8bb..14ffa1c463a0 100644
--- a/bazel/README.md
+++ b/bazel/README.md
@@ -397,6 +397,13 @@ Similarly, for [thread sanitizer (TSAN)](https://github.com/google/sanitizers/wi
 bazel test -c dbg --config=clang-tsan //test/...
 ```
 
+For [memory sanitizer (MSAN)](https://github.com/google/sanitizers/wiki/MemorySanitizer) testing,
+run the tests under the docker sandbox, which comes with an MSAN-instrumented libc++:
+
+```
+bazel test -c dbg --config=docker-msan //test/...
+```
+
 To run the sanitizers on OS X, prefix `macos-` to the config option, e.g.:
 
 ```
diff --git a/bazel/dev_binding.bzl b/bazel/dev_binding.bzl
new file mode 100644
index 000000000000..a56777ad5629
--- /dev/null
+++ b/bazel/dev_binding.bzl
@@ -0,0 +1,40 @@
+def _default_envoy_dev_impl(ctxt):
+    if "LLVM_CONFIG" in ctxt.os.environ:
+        ctxt.file("WORKSPACE", "")
+        ctxt.file("BUILD.bazel", "")
+        ctxt.symlink(ctxt.path(ctxt.attr.envoy_root).dirname.get_child("tools").get_child("clang_tools"), "clang_tools")
+
+_default_envoy_dev = repository_rule(
+    implementation = _default_envoy_dev_impl,
+    attrs = {
+        "envoy_root": attr.label(default = "@envoy//:BUILD"),
+    },
+)
+
+def _clang_tools_impl(ctxt):
+    if "LLVM_CONFIG" in ctxt.os.environ:
+        llvm_config_path = ctxt.os.environ["LLVM_CONFIG"]
+        exec_result = ctxt.execute([llvm_config_path, "--includedir"])
+        if exec_result.return_code != 0:
+            fail(llvm_config_path + " --includedir returned %d" % exec_result.return_code)
+        clang_tools_include_path = exec_result.stdout.rstrip()
+        exec_result = ctxt.execute([llvm_config_path, "--libdir"])
+        if exec_result.return_code != 0:
+            fail(llvm_config_path + " --libdir returned %d" % exec_result.return_code)
+        clang_tools_lib_path = exec_result.stdout.rstrip()
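+        # Symlink the LLVM/Clang headers and libraries reported by llvm-config, plus a prebuilt
+        # BUILD file, so the clang_tools targets can build against the locally installed LLVM.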
+        for include_dir in ["clang", "clang-c", "llvm", "llvm-c"]:
+            ctxt.symlink(clang_tools_include_path + "/" + include_dir, include_dir)
+        ctxt.symlink(clang_tools_lib_path, "lib")
+        ctxt.symlink(Label("@envoy_dev//clang_tools/support:BUILD.prebuilt"), "BUILD")
+
+_clang_tools = repository_rule(
+    implementation = _clang_tools_impl,
+    environ = ["LLVM_CONFIG"],
+)
+
+def envoy_dev_binding():
+    # Treat the Envoy developer tools that require llvm as an external repo; this avoids
+    # breaking `bazel build //...` when llvm is not installed.
+    if "envoy_dev" not in native.existing_rules().keys():
+        _default_envoy_dev(name = "envoy_dev")
+        _clang_tools(name = "clang_tools")
diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl
index 8a8a88ad2d0b..fef96d8b23ee 100644
--- a/bazel/envoy_internal.bzl
+++ b/bazel/envoy_internal.bzl
@@ -96,6 +96,7 @@ def envoy_select_force_libcpp(if_libcpp, default = None):
 def envoy_stdlib_deps():
     return select({
         "@envoy//bazel:asan_build": ["@envoy//bazel:dynamic_stdlib"],
+        "@envoy//bazel:msan_build": ["@envoy//bazel:dynamic_stdlib"],
         "@envoy//bazel:tsan_build": ["@envoy//bazel:dynamic_stdlib"],
         "//conditions:default": ["@envoy//bazel:static_stdlib"],
     })
diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl
index b2fa86ed3c9f..9855f9a7bf56 100644
--- a/bazel/envoy_library.bzl
+++ b/bazel/envoy_library.bzl
@@ -12,7 +12,7 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library")
 # As above, but wrapped in list form for adding to dep lists. This smell seems needed as
 # SelectorValue values have to match the attribute type. See
 # https://github.com/bazelbuild/bazel/issues/2273.
-def _tcmalloc_external_deps(repository):
+def tcmalloc_external_deps(repository):
     return select({
         repository + "//bazel:disable_tcmalloc": [],
         "//conditions:default": [envoy_external_dep_path("gperftools")],
@@ -92,7 +92,7 @@ def envoy_cc_library(
         strip_include_prefix = None,
         textual_hdrs = None):
     if tcmalloc_dep:
-        deps += _tcmalloc_external_deps(repository)
+        deps += tcmalloc_external_deps(repository)
 
     native.cc_library(
         name = name,
diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl
index 77876b7b4c62..439913704b77 100644
--- a/bazel/envoy_test.bzl
+++ b/bazel/envoy_test.bzl
@@ -2,6 +2,7 @@
 # Envoy test targets. This includes both test library and test binary targets.
 load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
 load(":envoy_binary.bzl", "envoy_cc_binary")
+load(":envoy_library.bzl", "tcmalloc_external_deps")
 load(
     ":envoy_internal.bzl",
     "envoy_copts",
@@ -26,6 +27,9 @@ def _envoy_cc_test_infrastructure_library(
         include_prefix = None,
         copts = [],
         **kargs):
+    # Add an implicit tcmalloc external dependency (if available) in order to enable CPU and heap profiling in tests.
+    deps += tcmalloc_external_deps(repository)
+
     native.cc_library(
         name = name,
         srcs = srcs,
diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD
index 9f5c7694fe8d..17214b0546b3 100644
--- a/bazel/external/quiche.BUILD
+++ b/bazel/external/quiche.BUILD
@@ -1034,7 +1034,6 @@ envoy_cc_library(
         "quiche/quic/platform/api/quic_bug_tracker.h",
         "quiche/quic/platform/api/quic_client_stats.h",
         "quiche/quic/platform/api/quic_containers.h",
-        "quiche/quic/platform/api/quic_endian.h",
         "quiche/quic/platform/api/quic_error_code_wrappers.h",
         "quiche/quic/platform/api/quic_estimate_memory_usage.h",
         "quiche/quic/platform/api/quic_exported_stats.h",
@@ -1198,6 +1197,29 @@ envoy_cc_test_library(
     deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_thread_impl_lib"],
 )
 
+envoy_cc_library(
+    name = "quiche_common_platform_endian",
+    hdrs = ["quiche/common/platform/api/quiche_endian.h"],
+    repository = "@envoy",
+    tags = ["nofips"],
+    visibility = ["//visibility:public"],
+    deps =
+        [
+            ":quiche_common_platform_export",
+            "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_endian_impl_lib",
+        ],
+)
+
+envoy_cc_library(
+    name = "quiche_common_platform_export",
+    hdrs = ["quiche/common/platform/api/quiche_export.h"],
+    repository = "@envoy",
+    tags = ["nofips"],
+    visibility = ["//visibility:public"],
+    deps =
+        ["@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_export_impl_lib"],
+)
+
 #TODO(danzh) Figure out why using envoy_proto_library() fails.
 proto_library(
     name = "quic_core_proto_cached_network_parameters_proto",
@@ -1602,7 +1624,6 @@ envoy_cc_library(
         ":quic_core_mtu_discovery_lib",
         ":quic_core_one_block_arena_lib",
         ":quic_core_packet_creator_lib",
-        ":quic_core_packet_generator_lib",
         ":quic_core_packet_writer_interface_lib",
         ":quic_core_packets_lib",
         ":quic_core_proto_cached_network_parameters_proto_header",
@@ -1625,6 +1646,7 @@ envoy_cc_library(
     deps = [
         ":quic_core_bandwidth_lib",
         ":quic_core_packets_lib",
+        ":quic_core_time_accumulator_lib",
         ":quic_core_time_lib",
         ":quic_platform_export",
     ],
@@ -1972,6 +1994,7 @@ envoy_cc_library(
         ":quic_core_constants_lib",
         ":quic_core_error_codes_lib",
         ":quic_core_interval_lib",
+        ":quic_core_interval_set_lib",
         ":quic_core_types_lib",
         ":quic_core_versions_lib",
         ":quic_platform_base",
@@ -2006,6 +2029,7 @@ envoy_cc_library(
     copts = quiche_copts,
     repository = "@envoy",
     tags = ["nofips"],
+    visibility = ["//visibility:public"],
     deps = [
         ":quic_core_alarm_interface_lib",
         ":quic_core_crypto_encryption_lib",
@@ -2141,7 +2165,6 @@ envoy_cc_library(
         ":quic_core_qpack_qpack_encoder_lib",
         ":quic_core_qpack_qpack_encoder_stream_sender_lib",
         ":quic_core_qpack_qpack_streams_lib",
-        ":quic_core_qpack_qpack_utils_lib",
         ":quic_core_session_lib",
         ":quic_core_utils_lib",
         ":quic_core_versions_lib",
@@ -2259,24 +2282,6 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
-    name = "quic_core_packet_generator_lib",
-    srcs = ["quiche/quic/core/quic_packet_generator.cc"],
-    hdrs = ["quiche/quic/core/quic_packet_generator.h"],
-    copts = quiche_copts,
-    repository = "@envoy",
-    tags = ["nofips"],
-    deps = [
-        ":quic_core_crypto_random_lib",
-        ":quic_core_packet_creator_lib",
-        ":quic_core_sent_packet_manager_lib",
-        ":quic_core_types_lib",
-        ":quic_core_utils_lib",
-        ":quic_platform_base",
-        ":quic_platform_mem_slice_span",
-    ],
-)
-
 envoy_cc_library(
     name = "quic_core_packet_number_indexed_queue_lib",
     hdrs = ["quiche/quic/core/packet_number_indexed_queue.h"],
@@ -2364,9 +2369,9 @@ envoy_cc_library(
 )
 
 envoy_cc_library(
-    name = "quic_core_qpack_qpack_constants_lib",
-    srcs = ["quiche/quic/core/qpack/qpack_constants.cc"],
-    hdrs = ["quiche/quic/core/qpack/qpack_constants.h"],
+    name = "quic_core_qpack_qpack_instructions_lib",
+    srcs = ["quiche/quic/core/qpack/qpack_instructions.cc"],
+    hdrs = ["quiche/quic/core/qpack/qpack_instructions.h"],
     copts = quiche_copts,
     repository = "@envoy",
     tags = ["nofips"],
@@ -2399,12 +2404,12 @@ envoy_cc_library(
     tags = ["nofips"],
     deps = [
         ":quic_core_qpack_blocking_manager_lib",
-        ":quic_core_qpack_qpack_constants_lib",
         ":quic_core_qpack_qpack_decoder_stream_receiver_lib",
         ":quic_core_qpack_qpack_encoder_stream_sender_lib",
         ":quic_core_qpack_qpack_header_table_lib",
         ":quic_core_qpack_qpack_index_conversions_lib",
         ":quic_core_qpack_qpack_instruction_encoder_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_core_qpack_qpack_required_insert_count_lib",
         ":quic_core_qpack_value_splitting_header_list_lib",
         ":quic_core_types_lib",
@@ -2436,7 +2441,7 @@ envoy_cc_library(
     deps = [
         ":http2_hpack_huffman_hpack_huffman_decoder_lib",
         ":http2_hpack_varint_hpack_varint_decoder_lib",
-        ":quic_core_qpack_qpack_constants_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_platform_base",
     ],
 )
@@ -2451,7 +2456,7 @@ envoy_cc_library(
     deps = [
         ":http2_hpack_huffman_hpack_huffman_encoder_lib",
         ":http2_hpack_varint_hpack_varint_encoder_lib",
-        ":quic_core_qpack_qpack_constants_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_platform",
     ],
 )
@@ -2464,12 +2469,12 @@ envoy_cc_library(
     repository = "@envoy",
     tags = ["nofips"],
     deps = [
-        ":quic_core_qpack_qpack_constants_lib",
         ":quic_core_qpack_qpack_decoder_stream_sender_lib",
         ":quic_core_qpack_qpack_encoder_stream_receiver_lib",
         ":quic_core_qpack_qpack_header_table_lib",
         ":quic_core_qpack_qpack_index_conversions_lib",
         ":quic_core_qpack_qpack_instruction_decoder_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_core_qpack_qpack_required_insert_count_lib",
         ":quic_core_types_lib",
         ":quic_platform_base",
@@ -2486,15 +2491,6 @@ envoy_cc_library(
     deps = [":quic_platform_base"],
 )
 
-envoy_cc_library(
-    name = "quic_core_qpack_qpack_utils_lib",
-    hdrs = ["quiche/quic/core/qpack/qpack_utils.h"],
-    copts = quiche_copts,
-    repository = "@envoy",
-    tags = ["nofips"],
-    deps = [":quic_core_qpack_qpack_stream_sender_delegate_lib"],
-)
-
 envoy_cc_library(
     name = "quic_core_qpack_qpack_encoder_stream_sender_lib",
     srcs = ["quiche/quic/core/qpack/qpack_encoder_stream_sender.cc"],
@@ -2503,8 +2499,8 @@ envoy_cc_library(
     repository = "@envoy",
     tags = ["nofips"],
     deps = [
-        ":quic_core_qpack_qpack_constants_lib",
         ":quic_core_qpack_qpack_instruction_encoder_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_core_qpack_qpack_stream_sender_delegate_lib",
         ":quic_core_types_lib",
         ":quic_platform_base",
@@ -2521,8 +2517,8 @@ envoy_cc_library(
     deps = [
         ":http2_decoder_decode_buffer_lib",
         ":http2_decoder_decode_status_lib",
-        ":quic_core_qpack_qpack_constants_lib",
         ":quic_core_qpack_qpack_instruction_decoder_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_core_qpack_qpack_stream_receiver_lib",
         ":quic_platform_base",
     ],
@@ -2536,8 +2532,8 @@ envoy_cc_library(
     repository = "@envoy",
     tags = ["nofips"],
     deps = [
-        ":quic_core_qpack_qpack_constants_lib",
         ":quic_core_qpack_qpack_instruction_encoder_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_core_qpack_qpack_stream_sender_delegate_lib",
         ":quic_core_types_lib",
         ":quic_platform_base",
@@ -2554,8 +2550,8 @@ envoy_cc_library(
     deps = [
         ":http2_decoder_decode_buffer_lib",
         ":http2_decoder_decode_status_lib",
-        ":quic_core_qpack_qpack_constants_lib",
         ":quic_core_qpack_qpack_instruction_decoder_lib",
+        ":quic_core_qpack_qpack_instructions_lib",
         ":quic_core_qpack_qpack_stream_receiver_lib",
         ":quic_core_types_lib",
         ":quic_platform_base",
@@ -2767,6 +2763,7 @@ envoy_cc_library(
         "quiche/quic/core/uber_quic_stream_id_manager.cc",
     ],
     hdrs = [
+        "quiche/quic/core/handshaker_delegate_interface.h",
         "quiche/quic/core/legacy_quic_stream_id_manager.h",
         "quiche/quic/core/quic_control_frame_manager.h",
         "quiche/quic/core/quic_crypto_client_handshaker.h",
@@ -2919,6 +2916,15 @@ envoy_cc_library(
     deps = [":quic_platform_base"],
 )
 
+envoy_cc_library(
+    name = "quic_core_time_accumulator_lib",
+    hdrs = ["quiche/quic/core/quic_time_accumulator.h"],
+    repository = "@envoy",
+    tags = ["nofips"],
+    visibility = ["//visibility:public"],
+    deps = [],
+)
+
 envoy_cc_library(
     name = "quic_core_time_wait_list_manager_lib",
     srcs = ["quiche/quic/core/quic_time_wait_list_manager.cc"],
@@ -2976,6 +2982,7 @@ envoy_cc_library(
         ":quic_core_error_codes_lib",
         ":quic_core_time_lib",
         ":quic_platform_base",
+        ":quiche_common_platform_endian",
     ],
 )
 
@@ -3056,6 +3063,7 @@ envoy_cc_library(
         ":quic_core_tag_lib",
         ":quic_core_types_lib",
         ":quic_platform_base",
+        ":quiche_common_platform_endian",
     ],
 )
 
@@ -3111,20 +3119,6 @@ envoy_cc_test_library(
     deps = [":quic_core_crypto_random_lib"],
 )
 
-envoy_cc_test_library(
-    name = "quic_test_tools_packet_generator_peer_lib",
-    srcs = ["quiche/quic/test_tools/quic_packet_generator_peer.cc"],
-    hdrs = ["quiche/quic/test_tools/quic_packet_generator_peer.h"],
-    copts = quiche_copts,
-    repository = "@envoy",
-    tags = ["nofips"],
-    deps = [
-        ":quic_core_packet_creator_lib",
-        ":quic_core_packet_generator_lib",
-        ":quic_core_packets_lib",
-    ],
-)
-
 envoy_cc_test_library(
     name = "quic_test_tools_sent_packet_manager_peer_lib",
     srcs = ["quiche/quic/test_tools/quic_sent_packet_manager_peer.cc"],
@@ -3246,7 +3240,6 @@ envoy_cc_test_library(
         ":quic_test_tools_framer_peer_lib",
         ":quic_test_tools_mock_clock_lib",
         ":quic_test_tools_mock_random_lib",
-        ":quic_test_tools_packet_generator_peer_lib",
         ":quic_test_tools_sent_packet_manager_peer_lib",
         ":quic_test_tools_simple_quic_framer_lib",
         ":quic_test_tools_stream_peer_lib",
@@ -3308,7 +3301,6 @@ envoy_cc_library(
     name = "quiche_common_platform",
     hdrs = [
         "quiche/common/platform/api/quiche_logging.h",
-        "quiche/common/platform/api/quiche_ptr_util.h",
         "quiche/common/platform/api/quiche_unordered_containers.h",
     ],
     repository = "@envoy",
@@ -3319,10 +3311,15 @@ envoy_cc_library(
 
 envoy_cc_test_library(
     name = "quiche_common_platform_test",
+    srcs = ["quiche/common/platform/api/quiche_endian_test.cc"],
     hdrs = ["quiche/common/platform/api/quiche_test.h"],
     repository = "@envoy",
     tags = ["nofips"],
-    deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quiche_common_platform_test_impl_lib"],
+    deps =
+        [
+            ":quiche_common_platform_endian",
+            "@envoy//test/extensions/quic_listeners/quiche/platform:quiche_common_platform_test_impl_lib",
+        ],
 )
 
 envoy_cc_library(
@@ -3427,7 +3424,6 @@ envoy_cc_test(
     name = "quic_platform_api_test",
     srcs = [
         "quiche/quic/platform/api/quic_containers_test.cc",
-        "quiche/quic/platform/api/quic_endian_test.cc",
         "quiche/quic/platform/api/quic_mem_slice_span_test.cc",
         "quiche/quic/platform/api/quic_mem_slice_storage_test.cc",
         "quiche/quic/platform/api/quic_mem_slice_test.cc",
diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd
index 742527cabfd3..323402b49392 100644
--- a/bazel/external/wee8.genrule_cmd
+++ b/bazel/external/wee8.genrule_cmd
@@ -40,6 +40,7 @@ if [[ $${ENVOY_UBSAN_VPTR-} == "1" ]]; then
 fi
 if [[ $${ENVOY_MSAN-} == "1" ]]; then
   WEE8_BUILD_ARGS+=" is_msan=true"
+  export LDFLAGS="$${LDFLAGS} -L/opt/libcxx_msan/lib -Wl,-rpath,/opt/libcxx_msan/lib"
 fi
 if [[ $${ENVOY_TSAN-} == "1" ]]; then
   WEE8_BUILD_ARGS+=" is_tsan=true"
diff --git a/bazel/external/wee8.patch b/bazel/external/wee8.patch
index 97f0e14b3b9b..3f95bc83926a 100644
--- a/bazel/external/wee8.patch
+++ b/bazel/external/wee8.patch
@@ -1,5 +1,6 @@
 # 1. Fix linking with unbundled toolchain on macOS.
 # 2. Increase VSZ limit to 4TiB (allows us to start up to 370 VMs).
+# 3. Fix MSAN linking.
 --- wee8/build/toolchain/gcc_toolchain.gni
 +++ wee8/build/toolchain/gcc_toolchain.gni
 @@ -355,6 +355,8 @@ template("gcc_toolchain") {
@@ -31,3 +32,24 @@
  #else
  constexpr size_t kAddressSpaceLimit = 0xC0000000;  // 3 GiB
  #endif
+--- wee8/build/config/sanitizers/sanitizers.gni
++++ wee8/build/config/sanitizers/sanitizers.gni
+@@ -145,7 +145,7 @@ if (current_toolchain != default_toolchain) {
+ # standard system libraries. We have instrumented system libraries for msan,
+ # which requires them to prevent false positives.
+ # TODO(thakis): Maybe remove this variable.
+-use_prebuilt_instrumented_libraries = is_msan
++use_prebuilt_instrumented_libraries = false
+
+ # Whether we are doing a fuzzer build. Normally this should be checked instead
+ # of checking "use_libfuzzer || use_afl" because often developers forget to
+@@ -185,8 +185,7 @@ assert(!using_sanitizer || is_clang,
+ assert(!is_cfi || is_clang,
+        "is_cfi requires setting is_clang = true in 'gn args'")
+
+-prebuilt_instrumented_libraries_available =
+-    is_msan && (msan_track_origins == 0 || msan_track_origins == 2)
++prebuilt_instrumented_libraries_available = false
+
+ if (use_libfuzzer && is_linux) {
+   if (is_asan) {
diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD
index 82994cbd4434..f820d83f134f 100644
--- a/bazel/foreign_cc/BUILD
+++ b/bazel/foreign_cc/BUILD
@@ -155,8 +155,8 @@ envoy_cmake_external(
     defines = ["NGHTTP2_STATICLIB"],
     lib_source = "@com_github_nghttp2_nghttp2//:all",
     static_libraries = select({
-        "//bazel:windows_x86_64": ["nghttp2.lib"],
-        "//conditions:default": ["libnghttp2.a"],
+        "//bazel:windows_x86_64": ["nghttp2_static.lib"],
+        "//conditions:default": ["libnghttp2_static.a"],
     }),
 )
 
diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch
index c39f116aabbb..82e8733a70fc 100644
--- a/bazel/foreign_cc/luajit.patch
+++ b/bazel/foreign_cc/luajit.patch
@@ -48,7 +48,7 @@ new file mode 100755
 index 0000000..9c71271
 --- /dev/null
 +++ b/build.py
-@@ -0,0 +1,35 @@
+@@ -0,0 +1,39 @@
 +#!/usr/bin/env python
 +
 +import argparse
@@ -73,6 +73,10 @@ index 0000000..9c71271
 +    # fail on it.
 +    os.environ["LSAN_OPTIONS"] = "exitcode=0"
 +
++    if "ENVOY_MSAN" in os.environ:
++      os.environ["HOST_CFLAGS"] = "-fno-sanitize=memory"
++      os.environ["HOST_LDFLAGS"] = "-fno-sanitize=memory"
++
 +    # Blacklist LuaJIT from ASAN for now.
 +    # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved.
 +    if "ENVOY_CONFIG_ASAN" in os.environ:
diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl
index 67c289b0f0e3..0f4135964129 100644
--- a/bazel/repositories.bzl
+++ b/bazel/repositories.bzl
@@ -1,4 +1,5 @@
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load(":dev_binding.bzl", "envoy_dev_binding")
 load(":genrule_repository.bzl", "genrule_repository")
 load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive")
 load(":repository_locations.bzl", "REPOSITORY_LOCATIONS")
@@ -90,6 +91,9 @@ def _go_deps(skip_targets):
         _repository_impl("bazel_gazelle")
 
 def envoy_dependencies(skip_targets = []):
+    # Setup Envoy developer tools.
+    envoy_dev_binding()
+
     # Treat Envoy's overall build config as an external repo, so projects that
     # build Envoy as a subcomponent can easily override the config.
     if "envoy_build_config" not in native.existing_rules().keys():
diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl
index b47007af1ea6..b9eaa2099831 100644
--- a/bazel/repository_locations.bzl
+++ b/bazel/repository_locations.bzl
@@ -94,10 +94,10 @@ REPOSITORY_LOCATIONS = dict(
         urls = ["https://github.com/gabime/spdlog/archive/v1.3.1.tar.gz"],
     ),
     com_github_google_libprotobuf_mutator = dict(
-        sha256 = "f45c3ad82376d891cd0bcaa7165e83efd90e0014b00aebf0cbaf07eb05a1d3f9",
-        strip_prefix = "libprotobuf-mutator-d1fe8a7d8ae18f3d454f055eba5213c291986f21",
-        # 2019-07-10
-        urls = ["https://github.com/google/libprotobuf-mutator/archive/d1fe8a7d8ae18f3d454f055eba5213c291986f21.tar.gz"],
+        sha256 = "54597f640c0ab5e5d783d2f3d3cfe8ad6da999ef1a194d89c2c5ab89a1fd8e13",
+        strip_prefix = "libprotobuf-mutator-dd89da92b59b1714bab6e2a135093948a1cf1c6a",
+        # 2019-10-08
+        urls = ["https://github.com/google/libprotobuf-mutator/archive/dd89da92b59b1714bab6e2a135093948a1cf1c6a.tar.gz"],
     ),
     com_github_gperftools_gperftools = dict(
         # TODO(cmluciano): Bump to release 2.8
@@ -118,9 +118,9 @@ REPOSITORY_LOCATIONS = dict(
         urls = ["https://github.com/LuaJIT/LuaJIT/archive/v2.1.0-beta3.tar.gz"],
     ),
     com_github_nghttp2_nghttp2 = dict(
-        sha256 = "25b623cd04dc6a863ca3b34ed6247844effe1aa5458229590b3f56a6d53cd692",
-        strip_prefix = "nghttp2-1.39.1",
-        urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.39.1/nghttp2-1.39.1.tar.gz"],
+        sha256 = "eb9d9046495a49dd40c7ef5d6c9907b51e5a6b320ea6e2add11eb8b52c982c47",
+        strip_prefix = "nghttp2-1.40.0",
+        urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.40.0/nghttp2-1.40.0.tar.gz"],
     ),
     io_opentracing_cpp = dict(
         sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301",
@@ -261,10 +261,10 @@ REPOSITORY_LOCATIONS = dict(
         urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-1.12.0.tar.gz"],
     ),
     io_opencensus_cpp = dict(
-        sha256 = "8078195ce90925c142f5c030b9681771db7b7554ebe2156b08848adeb006c40e",
-        strip_prefix = "opencensus-cpp-d8ec569c6a9157e9cbc2c25021d1c61812891f91",
-        # 2019-11-04
-        urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/d8ec569c6a9157e9cbc2c25021d1c61812891f91.tar.gz"],
+        sha256 = "a9ba6027436cfa1264860c6be602da7633d9a1f9abcb8838f2ae6bda8c2c14f6",
+        strip_prefix = "opencensus-cpp-13b1a2f29f541b6b2c4cb8bc3f6fbf3589d44227",
+        # 2019-12-01
+        urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/13b1a2f29f541b6b2c4cb8bc3f6fbf3589d44227.tar.gz"],
     ),
     com_github_curl = dict(
         sha256 = "d0393da38ac74ffac67313072d7fe75b1fa1010eb5987f63f349b024a36b7ffb",
@@ -278,15 +278,15 @@ REPOSITORY_LOCATIONS = dict(
         urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-7.9.317.14.tar.gz"],
     ),
     com_googlesource_quiche = dict(
-        # Static snapshot of https://quiche.googlesource.com/quiche/+archive/9711a9e74b43c7390ef7bb66c75561ff796900bf.tar.gz
-        sha256 = "2346dada2c1af2d5703a0d7350aa82465cd564fd15de115a1377792e771c43b4",
-        urls = ["https://storage.googleapis.com/quiche-envoy-integration/9711a9e74b43c7390ef7bb66c75561ff796900bf.tar.gz"],
+        # Static snapshot of https://quiche.googlesource.com/quiche/+archive/0ce303d8f1a774c821f4370d4b84d02e488ce332.tar.gz
+        sha256 = "7648f75631623bd98859b9be068cb857b3e83ce67ebcdf8123ad5667eb00da02",
+        urls = ["https://storage.googleapis.com/quiche-envoy-integration/0ce303d8f1a774c821f4370d4b84d02e488ce332.tar.gz"],
     ),
     com_google_cel_cpp = dict(
-        sha256 = "6b056207f6a069ee6e28f31010262585cf6090e6c889cb98da29715cf544ac7d",
-        strip_prefix = "cel-cpp-750fd9a3cbf4470ee46c8deef0a4701b4cc8b1ce",
-        # 2019-11-12
-        urls = ["https://github.com/google/cel-cpp/archive/750fd9a3cbf4470ee46c8deef0a4701b4cc8b1ce.tar.gz"],
+        sha256 = "b4eaf871d4910c599bb70eaef2eec852747989f15f26885353b7c5188a940ca8",
+        strip_prefix = "cel-cpp-4767e5de36c5701fa8ea46d7de3765161ef98353",
+        # 2019-11-28
+        urls = ["https://github.com/google/cel-cpp/archive/4767e5de36c5701fa8ea46d7de3765161ef98353.tar.gz"],
     ),
     com_googlesource_code_re2 = dict(
         sha256 = "b0382aa7369f373a0148218f2df5a6afd6bfa884ce4da2dfb576b979989e615e",
diff --git a/bazel/setup_clang.sh b/bazel/setup_clang.sh
index 2539c2c1e82c..5c69f58dcda5 100755
--- a/bazel/setup_clang.sh
+++ b/bazel/setup_clang.sh
@@ -17,8 +17,9 @@ echo "# Generated file, do not edit. If you want to disable clang, just delete t
 build:clang --action_env=PATH=${PATH}
 build:clang --action_env=CC=clang
 build:clang --action_env=CXX=clang++
-build:clang --action_env=LD_LIBRARY_PATH=$(llvm-config --libdir)
-build:clang --test_env=LD_LIBRARY_PATH=$(llvm-config --libdir)
+build:clang --action_env=LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config
+build:clang --linkopt=-L$(llvm-config --libdir)
+build:clang --linkopt=-Wl,-rpath,$(llvm-config --libdir)
 
 build:clang-asan --action_env=ENVOY_UBSAN_VPTR=1
 build:clang-asan --copt=-fsanitize=vptr,function
diff --git a/ci/README.md b/ci/README.md
index 0fad59a89b48..deecdb41cc1f 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -106,6 +106,8 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh <TARGET>'` targets are:
 * `bazel.coverage` &mdash; build and run tests under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`.
 * `bazel.coverage <test>` &mdash; build and run a specified test or test dir under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`.
 * `bazel.coverity` &mdash; build Envoy static binary and run Coverity Scan static analysis.
+* `bazel.msan` &mdash; build and run tests under `-c dbg --config=clang-msan` with clang.
+* `bazel.msan <test>` &mdash; build and run a specified test or test dir under `-c dbg --config=clang-msan` with clang.
 * `bazel.tsan` &mdash; build and run tests under `-c dbg --config=clang-tsan` with clang.
 * `bazel.tsan <test>` &mdash; build and run a specified test or test dir under `-c dbg --config=clang-tsan` with clang.
 * `bazel.fuzz` &mdash; build and run fuzz tests under `-c dbg --config=asan-fuzzer` with clang.
diff --git a/ci/do_ci.sh b/ci/do_ci.sh
index 78a1a78d1dc6..9c765d3b5c9f 100755
--- a/ci/do_ci.sh
+++ b/ci/do_ci.sh
@@ -180,6 +180,14 @@ elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then
   bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan \
     //:echo2_integration_test //http-filter-example:http_filter_integration_test //:envoy_binary_test
   exit 0
+elif [[ "$CI_TARGET" == "bazel.msan" ]]; then
+  ENVOY_STDLIB=libc++
+  setup_clang_toolchain
+  # rbe-toolchain-msan must come first so that it wins the library link order.
+  BAZEL_BUILD_OPTIONS="--config=rbe-toolchain-msan ${BAZEL_BUILD_OPTIONS} -c dbg --build_tests_only"
+  echo "bazel MSAN debug build with tests"
+  echo "Building and testing envoy tests ${TEST_TARGETS}"
+  bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS}
 elif [[ "$CI_TARGET" == "bazel.dev" ]]; then
   setup_clang_toolchain
   # This doesn't go into CI but is available for developer convenience.
diff --git a/ci/envoy_build_sha.sh b/ci/envoy_build_sha.sh
index 6ccd9848f8d1..0a81b9fe8d35 100644
--- a/ci/envoy_build_sha.sh
+++ b/ci/envoy_build_sha.sh
@@ -1,2 +1,2 @@
 ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu@sha256 $(dirname $0)/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu@sha256:\(.*\)#\1#' | uniq)
-[[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".circleci/config.yml hashes are inconsistent!" && exit 1)
+[[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".bazelrc envoyproxy/envoy-build-ubuntu hashes are inconsistent!" && exit 1)
diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh
index 94169f036ed4..0bdec2c20030 100755
--- a/ci/run_clang_tidy.sh
+++ b/ci/run_clang_tidy.sh
@@ -1,10 +1,10 @@
 #!/bin/bash
 
-set -e
+set -eo pipefail
 
 ENVOY_SRCDIR=${ENVOY_SRCDIR:-$(cd $(dirname $0)/.. && pwd)}
 
-LLVM_CONFIG=${LLVM_CONFIG:-llvm-config}
+export LLVM_CONFIG=${LLVM_CONFIG:-llvm-config}
 LLVM_PREFIX=${LLVM_PREFIX:-$(${LLVM_CONFIG} --prefix)}
 CLANG_TIDY=${CLANG_TIDY:-$(${LLVM_CONFIG} --bindir)/clang-tidy}
 CLANG_APPLY_REPLACEMENTS=${CLANG_APPLY_REPLACEMENTS:-$(${LLVM_CONFIG} --bindir)/clang-apply-replacements}
@@ -69,8 +69,7 @@ elif [[ "${BUILD_REASON}" != "PullRequest" ]]; then
       -p 1
 else
   echo "Running clang-tidy-diff against master branch..."
-  git fetch https://github.com/envoyproxy/envoy.git master
-  git diff "${SYSTEM_PULLREQUEST_TARGETBRANCH:-refs/heads/master}..HEAD" | filter_excludes | \
+  git diff "remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}" | filter_excludes | \
     "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \
       -clang-tidy-binary=${CLANG_TIDY} \
       -p 1
diff --git a/docs/root/api-v2/config/cluster/cluster.rst b/docs/root/api-v2/config/cluster/cluster.rst
index d76baee4aeb3..18c82eb940e9 100644
--- a/docs/root/api-v2/config/cluster/cluster.rst
+++ b/docs/root/api-v2/config/cluster/cluster.rst
@@ -5,5 +5,6 @@ Cluster
   :glob:
   :maxdepth: 2
 
+  aggregate/v2alpha/*
   dynamic_forward_proxy/v2alpha/*
   redis/*
diff --git a/docs/root/api-v2/config/filter/filter.rst b/docs/root/api-v2/config/filter/filter.rst
index 6ddd5e15abf3..e7b8d2ff6f8e 100644
--- a/docs/root/api-v2/config/filter/filter.rst
+++ b/docs/root/api-v2/config/filter/filter.rst
@@ -5,10 +5,11 @@ Filters
   :glob:
   :maxdepth: 2
 
+  listener/listener
   network/network
+  udp/udp
   http/http
-  thrift/thrift
   accesslog/v2/accesslog.proto
   fault/v2/fault.proto
-  listener/listener
   dubbo/dubbo
+  thrift/thrift
diff --git a/docs/root/api-v2/config/filter/udp/udp.rst b/docs/root/api-v2/config/filter/udp/udp.rst
new file mode 100644
index 000000000000..9728ddad1497
--- /dev/null
+++ b/docs/root/api-v2/config/filter/udp/udp.rst
@@ -0,0 +1,8 @@
+UDP listener filters
+====================
+
+.. toctree::
+  :glob:
+  :maxdepth: 2
+
+  */v2alpha/*
diff --git a/docs/root/api-v2/types/types.rst b/docs/root/api-v2/types/types.rst
index 4d6c8ef2edd6..c8a39c59b55d 100644
--- a/docs/root/api-v2/types/types.rst
+++ b/docs/root/api-v2/types/types.rst
@@ -15,3 +15,5 @@ Types
   ../type/matcher/regex.proto
   ../type/matcher/string.proto
   ../type/matcher/value.proto
+  ../type/metadata/v2/metadata.proto
+  ../type/tracing/v2/custom_tag.proto
diff --git a/docs/root/configuration/listeners/listeners.rst b/docs/root/configuration/listeners/listeners.rst
index 73605a853658..9b3e2161ef0c 100644
--- a/docs/root/configuration/listeners/listeners.rst
+++ b/docs/root/configuration/listeners/listeners.rst
@@ -10,4 +10,5 @@ Listeners
   stats
   listener_filters/listener_filters
   network_filters/network_filters
+  udp_filters/udp_filters
   lds
diff --git a/docs/root/configuration/listeners/udp_filters/udp_filters.rst b/docs/root/configuration/listeners/udp_filters/udp_filters.rst
new file mode 100644
index 000000000000..1665052de2b6
--- /dev/null
+++ b/docs/root/configuration/listeners/udp_filters/udp_filters.rst
@@ -0,0 +1,11 @@
+.. _config_udp_listener_filters:
+
+UDP listener filters
+====================
+
+Envoy has the following builtin UDP listener filters.
+
+.. toctree::
+  :maxdepth: 2
+
+  udp_proxy
diff --git a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst
new file mode 100644
index 000000000000..e5a4bfdb245e
--- /dev/null
+++ b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst
@@ -0,0 +1,134 @@
+.. _config_udp_listener_filters_udp_proxy:
+
+UDP proxy
+=========
+
+.. attention::
+
+  UDP proxy support should be considered alpha and not production ready.
+
+* :ref:`v2 API reference <envoy_api_msg_config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig>`
+* This filter should be configured with the name *envoy.filters.udp_listener.udp_proxy*
+
+Overview
+--------
+
+The UDP proxy listener filter allows Envoy to operate as a *non-transparent* proxy between a
+UDP client and server. The lack of transparency means that the upstream server will see the
+source IP and port of the Envoy instance rather than those of the client. All datagrams flow from
+the client, to Envoy, to the upstream server, back to Envoy, and back to the client.
+
+Because UDP is not a connection oriented protocol, Envoy must keep track of a client's *session*
+such that the response datagrams from an upstream server can be routed back to the correct client.
+Each session is indexed by the 4-tuple consisting of the source IP/port and local IP/port that the
+datagram is received on. Sessions last until the :ref:`idle timeout
+<envoy_api_field_config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig.idle_timeout>` is reached.
+
+Load balancing and unhealthy host handling
+------------------------------------------
+
+Envoy will fully utilize the configured load balancer for the configured upstream cluster when
+load balancing UDP datagrams. When a new session is created, Envoy will associate the session
+with an upstream host selected using the configured load balancer. All future datagrams that
+belong to the session will be routed to the same upstream host.
+
+When an upstream host becomes unhealthy (due to :ref:`active health checking
+<arch_overview_health_checking>`), Envoy will attempt to create a new session to a healthy host
+when the next datagram is received.
+
+Circuit breaking
+----------------
+
+The number of sessions that can be created per upstream cluster is limited by the cluster's
+:ref:`maximum connection circuit breaker <arch_overview_circuit_break_cluster_maximum_connections>`.
+By default this is 1024.
+
+Example configuration
+---------------------
+
+The following example configuration will cause Envoy to listen on UDP port 1234 and proxy to a UDP
+server listening on port 1235.
+
+  .. code-block:: yaml
+
+    admin:
+      access_log_path: /tmp/admin_access.log
+      address:
+        socket_address:
+          protocol: TCP
+          address: 127.0.0.1
+          port_value: 9901
+    static_resources:
+      listeners:
+      - name: listener_0
+        address:
+          socket_address:
+            protocol: UDP
+            address: 127.0.0.1
+            port_value: 1234
+        listener_filters:
+          name: envoy.filters.udp_listener.udp_proxy
+          typed_config:
+            '@type': type.googleapis.com/envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig
+            stat_prefix: service
+            cluster: service_udp
+      clusters:
+      - name: service_udp
+        connect_timeout: 0.25s
+        type: STATIC
+        lb_policy: ROUND_ROBIN
+        load_assignment:
+          cluster_name: service_udp
+          endpoints:
+          - lb_endpoints:
+            - endpoint:
+                address:
+                  socket_address:
+                    address: 127.0.0.1
+                    port_value: 1235
+
+Statistics
+----------
+
+The UDP proxy filter emits both its own downstream statistics and many of the :ref:`cluster
+upstream statistics <config_cluster_manager_cluster_stats>` where applicable. The downstream
+statistics are rooted at *udp.<stat_prefix>.* with the following statistics:
+
+.. csv-table::
+  :header: Name, Type, Description
+  :widths: 1, 1, 2
+
+  downstream_sess_no_route, Counter, Number of datagrams not routed due to no cluster
+  downstream_sess_rx_bytes, Counter, Number of bytes received
+  downstream_sess_rx_datagrams, Counter, Number of datagrams received
+  downstream_sess_rx_errors, Counter, Number of datagram receive errors
+  downstream_sess_total, Counter, Number of sessions created in total
+  downstream_sess_tx_bytes, Counter, Number of bytes transmitted
+  downstream_sess_tx_datagrams, Counter, Number of datagrams transmitted
+  downstream_sess_tx_errors, Counter, Number of datagram transmission errors
+  idle_timeout, Counter, Number of sessions destroyed due to idle timeout
+  downstream_sess_active, Gauge, Number of sessions currently active
+
+The following standard :ref:`upstream cluster stats <config_cluster_manager_cluster_stats>` are used
+by the UDP proxy:
+
+.. csv-table::
+  :header: Name, Type, Description
+  :widths: 1, 1, 2
+
+  upstream_cx_none_healthy, Counter, Number of datagrams dropped due to no healthy hosts
+  upstream_cx_overflow, Counter, Number of datagrams dropped due to hitting the session circuit breaker
+  upstream_cx_rx_bytes_total, Counter, Number of bytes received
+  upstream_cx_tx_bytes_total, Counter, Number of bytes transmitted
+
+The UDP proxy filter also emits custom upstream cluster stats prefixed with
+*cluster.<cluster_name>.udp.*:
+
+.. csv-table::
+  :header: Name, Type, Description
+  :widths: 1, 1, 2
+
+  sess_rx_datagrams, Counter, Number of datagrams received
+  sess_rx_errors, Counter, Number of datagram receive errors
+  sess_tx_datagrams, Counter, Number of datagrams transmitted
+  sess_tx_errors, Counter, Number of datagram transmission errors
diff --git a/docs/root/configuration/observability/access_log.rst b/docs/root/configuration/observability/access_log.rst
index 337ceacd600b..0c1195002eea 100644
--- a/docs/root/configuration/observability/access_log.rst
+++ b/docs/root/configuration/observability/access_log.rst
@@ -60,12 +60,11 @@ Format Dictionaries
 -------------------
 
 Format dictionaries are dictionaries that specify a structured access log output format,
-specified using the ``json_format`` key. This allows logs to be output in a structured format
-such as JSON.
-Similar to format strings, command operators are evaluated and their values inserted into the format
-dictionary to construct the log output.
+specified using the ``json_format`` or ``typed_json_format`` keys. This allows logs to be output in
+a structured format such as JSON. Similar to format strings, command operators are evaluated and
+their values inserted into the format dictionary to construct the log output.
 
-For example, with the following format provided in the configuration:
+For example, with the following format provided in the configuration as ``json_format``:
 
 .. code-block:: json
 
@@ -87,9 +86,23 @@ The following JSON object would be written to the log file:
 
 This allows you to specify a custom key for each command operator.
 
+The ``typed_json_format`` differs from ``json_format`` in that values are rendered as JSON numbers,
+booleans, and nested objects or lists where applicable. In the example, the request duration
+would be rendered as the number ``123``.
+
 Format dictionaries have the following restrictions:
 
-* The dictionary must map strings to strings (specifically, strings to command operators). Nesting is not currently supported.
+* The dictionary must map strings to strings (specifically, strings to command operators). Nesting
+  is not currently supported.
+* When using the ``typed_json_format``, command operators will only produce typed output if the
+  command operator is the only string that appears in the dictionary value. For example,
+  ``"%DURATION%"`` will log a numeric duration value, but ``"%DURATION%.0"`` will log a string
+  value.
+
+.. note::
+
+  When using the ``typed_json_format``, integer values that exceed :math:`2^{53}` will be
+  represented with reduced precision as they must be converted to floating point numbers.
 
 Command Operators
 -----------------
@@ -99,7 +112,11 @@ The same operators are used by different types of access logs (such as HTTP and
 fields may have slightly different meanings, depending on what type of log it is. Differences
 are noted.
 
-Note that if a value is not set/empty, the logs will contain a '-' character.
+Note that if a value is not set/empty, the logs will contain a ``-`` character or, for JSON logs,
+the string ``"-"``. For typed JSON logs unset values are represented as ``null`` values and empty
+strings are rendered as ``""``.
+
+Unless otherwise noted, command operators produce string outputs for typed JSON logs.
 
 The following command operators are supported:
 
@@ -140,6 +157,8 @@ The following command operators are supported:
 
     %START_TIME(%s.%9f)%
 
+  In typed JSON logs, START_TIME is always rendered as a string.
+
 %BYTES_RECEIVED%
   HTTP
     Body bytes received.
@@ -147,6 +166,8 @@ The following command operators are supported:
   TCP
     Downstream bytes received on connection.
 
+  Renders a numeric value in typed JSON logs.
+
 %PROTOCOL%
   HTTP
     Protocol. Currently either *HTTP/1.1* or *HTTP/2*.
@@ -154,6 +175,9 @@ The following command operators are supported:
   TCP
     Not implemented ("-").
 
+  In typed JSON logs, PROTOCOL will render the string ``"-"`` if the protocol is not
+  available (e.g. in TCP logs).
+
 %RESPONSE_CODE%
   HTTP
     HTTP response code. Note that a response code of '0' means that the server never sent the
@@ -162,6 +186,8 @@ The following command operators are supported:
   TCP
     Not implemented ("-").
 
+  Renders a numeric value in typed JSON logs.
+
 .. _config_access_log_format_response_code_details:
 
 %RESPONSE_CODE_DETAILS%
@@ -179,6 +205,8 @@ The following command operators are supported:
   TCP
     Downstream bytes sent on connection.
 
+  Renders a numeric value in typed JSON logs.
+
 %DURATION%
   HTTP
     Total duration in milliseconds of the request from the start time to the last byte out.
@@ -186,6 +214,8 @@ The following command operators are supported:
   TCP
     Total duration in milliseconds of the downstream connection.
 
+  Renders a numeric value in typed JSON logs.
+
 %RESPONSE_DURATION%
   HTTP
     Total duration in milliseconds of the request from the start time to the first byte read from the
@@ -194,6 +224,8 @@ The following command operators are supported:
   TCP
     Not implemented ("-").
 
+  Renders a numeric value in typed JSON logs.
+
 .. _config_access_log_format_response_flags:
 
 %RESPONSE_FLAGS%
@@ -231,6 +263,8 @@ The following command operators are supported:
   TCP
     Not implemented ("-").
 
+  Renders a numeric value in typed JSON logs.
+
 %ROUTE_NAME%
   Name of the route.
 
@@ -281,7 +315,7 @@ The following command operators are supported:
 
   .. note::
 
-    This is always the physical remote address of the peer even if the downstream remote address has 
+    This is always the physical remote address of the peer even if the downstream remote address has
     been inferred from :ref:`proxy proto <envoy_api_field_listener.FilterChain.use_proxy_proto>`
     or :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`.
 
@@ -291,7 +325,7 @@ The following command operators are supported:
 
   .. note::
 
-    This is always the physical remote address of the peer even if the downstream remote address has 
+    This is always the physical remote address of the peer even if the downstream remote address has
     been inferred from :ref:`proxy proto <envoy_api_field_listener.FilterChain.use_proxy_proto>`
     or :ref:`x-forwarded-for <config_http_conn_man_headers_x-forwarded-for>`.
 
@@ -353,6 +387,13 @@ The following command operators are supported:
   TCP
     Not implemented ("-").
 
+  .. note::
+
+    For typed JSON logs, this operator renders a single value with string, numeric, or boolean type
+    when the referenced key is a simple value. If the referenced key is a struct or list value, a
+    JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum
+    length is ignored.
+
 %FILTER_STATE(KEY):Z%
   HTTP
     :ref:`Filter State <arch_overview_data_sharing_between_filters>` info, where the KEY is required to
@@ -363,6 +404,13 @@ The following command operators are supported:
   TCP
     Same as HTTP, the filter state is from connection instead of a L7 request.
 
+  .. note::
+
+    For typed JSON logs, this operator renders a single value with string, numeric, or boolean type
+    when the referenced key is a simple value. If the referenced key is a struct or list value, a
+    JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum
+    length is ignored.
+
 %REQUESTED_SERVER_NAME%
   HTTP
     String value set on ssl connection socket for Server Name Indication (SNI)
@@ -446,4 +494,3 @@ The following command operators are supported:
     The validity end date of the client certificate used to establish the downstream TLS connection.
   TCP
     The validity end date of the client certificate used to establish the downstream TLS connection.
-
diff --git a/docs/root/configuration/operations/tools/router_check.rst b/docs/root/configuration/operations/tools/router_check.rst
index 1d4dd2482757..650ed2820830 100644
--- a/docs/root/configuration/operations/tools/router_check.rst
+++ b/docs/root/configuration/operations/tools/router_check.rst
@@ -92,7 +92,10 @@ input
   random_value
     *(optional, integer)* An integer used to identify the target for weighted cluster selection
     and as a factor for the routing engine to decide whether a runtime based route takes effect.
-    The default value of random_value is 0.
+    The default value of random_value is 0. For routes with a runtime fraction numerator of 0,
+    the route checker tool changes the numerator to 1 so the route can be tested with random_value
+    set to 0 to simulate the route being enabled, and with random_value set to any integer >= 1 to
+    simulate the route being disabled.
 
   ssl
     *(optional, boolean)* A flag that determines whether to set x-forwarded-proto to https or http.
diff --git a/docs/root/intro/arch_overview/listeners/listeners.rst b/docs/root/intro/arch_overview/listeners/listeners.rst
index 2f841e220f4b..01dca7c998e4 100644
--- a/docs/root/intro/arch_overview/listeners/listeners.rst
+++ b/docs/root/intro/arch_overview/listeners/listeners.rst
@@ -5,9 +5,12 @@ Listeners
 
 The Envoy configuration supports any number of listeners within a single process. Generally we
 recommend running a single Envoy per machine regardless of the number of configured listeners. This
-allows for easier operation and a single source of statistics. Currently Envoy only supports TCP
+allows for easier operation and a single source of statistics. Envoy supports both TCP and UDP
 listeners.
 
+TCP
+---
+
 Each listener is independently configured with some number :ref:`filter chains
 <envoy_api_msg_listener.FilterChain>`, where an individual chain is selected based on its
 :ref:`match criteria <envoy_api_msg_listener.FilterChainMatch>`. An individual filter chain is
@@ -29,3 +32,14 @@ Listeners can also be fetched dynamically via the :ref:`listener discovery servi
 <config_listeners_lds>`.
 
 Listener :ref:`configuration <config_listeners>`.
+
+UDP
+---
+
+Envoy also supports UDP listeners and specifically :ref:`UDP listener filters
+<config_udp_listener_filters>`. UDP listener filters are instantiated once per worker and are global
+to that worker. Each listener filter processes each UDP datagram that is received by the worker
+listening on the port. In practice, UDP listeners are configured with the SO_REUSEPORT kernel option
+which will cause the kernel to consistently hash each UDP 4-tuple to the same worker. This allows a
+UDP listener filter to be "session" oriented if it so desires. A built-in example of this
+functionality is the :ref:`UDP proxy <config_udp_listener_filters_udp_proxy>` listener filter.
diff --git a/docs/root/intro/arch_overview/listeners/listeners_toc.rst b/docs/root/intro/arch_overview/listeners/listeners_toc.rst
index 5b488ad2488f..922cb3e72447 100644
--- a/docs/root/intro/arch_overview/listeners/listeners_toc.rst
+++ b/docs/root/intro/arch_overview/listeners/listeners_toc.rst
@@ -8,3 +8,4 @@ Listeners
   listener_filters
   network_filters
   tcp_proxy
+  udp_proxy
diff --git a/docs/root/intro/arch_overview/listeners/udp_proxy.rst b/docs/root/intro/arch_overview/listeners/udp_proxy.rst
new file mode 100644
index 000000000000..ea886a59fb18
--- /dev/null
+++ b/docs/root/intro/arch_overview/listeners/udp_proxy.rst
@@ -0,0 +1,5 @@
+UDP proxy
+=========
+
+Envoy supports UDP proxy via the :ref:`UDP proxy listener filter
+<config_udp_listener_filters_udp_proxy>`.
diff --git a/docs/root/intro/arch_overview/observability/tracing.rst b/docs/root/intro/arch_overview/observability/tracing.rst
index 24072465e26b..effccb636b84 100644
--- a/docs/root/intro/arch_overview/observability/tracing.rst
+++ b/docs/root/intro/arch_overview/observability/tracing.rst
@@ -94,11 +94,12 @@ associated with it. Each span generated by Envoy contains the following data:
 * Downstream cluster set via the :ref:`config_http_conn_man_headers_downstream-service-cluster`
   header.
 * HTTP request URL, method, protocol and user-agent.
-* Additional HTTP request headers set via :ref:`request_headers_for_tags
-  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.request_headers_for_tags>`
+* Additional custom tags set via :ref:`custom_tags
+  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>`.
+* Upstream cluster name and address.
 * HTTP response status code.
 * GRPC response status and message (if available).
-* An error tag when HTTP status is 5xx or GRPC status is not "OK"
+* An error tag when HTTP status is 5xx or GRPC status is not "OK".
 * Tracing system-specific metadata.
 
 The span also includes a name (or operation) which by default is defined as the host of the invoked
diff --git a/docs/root/intro/arch_overview/security/rbac_filter.rst b/docs/root/intro/arch_overview/security/rbac_filter.rst
index 96685df1ac07..d01dc3a1103d 100644
--- a/docs/root/intro/arch_overview/security/rbac_filter.rst
+++ b/docs/root/intro/arch_overview/security/rbac_filter.rst
@@ -77,6 +77,7 @@ The following attributes are exposed to the language runtime:
    request.id, string, Request ID
    request.size, int, Size of the request body
    request.total_size, int, Total size of the request including the headers
+   request.protocol, string, Request protocol e.g. "HTTP/2"
    response.code, int, Response HTTP status code
    response.headers, string map, All response headers
    response.trailers, string map, All response trailers
diff --git a/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst b/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst
new file mode 100644
index 000000000000..15b54befd0f2
--- /dev/null
+++ b/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst
@@ -0,0 +1,145 @@
+.. _arch_overview_aggregate_cluster:
+
+Aggregate Cluster
+=================
+
+Aggregate cluster is used for failover between clusters with different configurations, e.g., from an
+EDS upstream cluster to a STRICT_DNS upstream cluster, from a cluster using the ROUND_ROBIN load
+balancing policy to a cluster using MAGLEV, from a cluster with a 0.1s connection timeout to a
+cluster with a 1s connection timeout, etc. Aggregate cluster loosely couples multiple clusters by
+referencing their names in the :ref:`configuration <envoy_api_msg_config.cluster.aggregate.v2alpha.ClusterConfig>`.
+The fallback priority is defined implicitly by the ordering in the :ref:`clusters list <envoy_api_field_config.cluster.aggregate.v2alpha.ClusterConfig.clusters>`.
+Aggregate cluster uses tiered load balancing: the load balancer chooses the cluster and priority
+first and then delegates load balancing to the load balancer of the selected cluster. The top level
+load balancer reuses the existing load balancing algorithm by linearizing the priority sets of
+multiple clusters into one.
+
+Linearize Priority Set
+----------------------
+
+Upstream hosts are divided into multiple :ref:`priority levels <arch_overview_load_balancing_priority_levels>`
+and each priority level contains a list of healthy, degraded and unhealthy hosts. Linearization
+simplifies host selection during load balancing by merging the priority levels from multiple
+clusters. For example, suppose the primary cluster has 3 priority levels, the secondary has 2, the
+tertiary has 2, and the failover ordering is primary, secondary, tertiary:
+
++-----------+----------------+-------------------------------------+
+| Cluster   | Priority Level |  Priority Level after Linearization |
++===========+================+=====================================+
+| Primary   | 0              |  0                                  |
++-----------+----------------+-------------------------------------+
+| Primary   | 1              |  1                                  |
++-----------+----------------+-------------------------------------+
+| Primary   | 2              |  2                                  |
++-----------+----------------+-------------------------------------+
+| Secondary | 0              |  3                                  |
++-----------+----------------+-------------------------------------+
+| Secondary | 1              |  4                                  |
++-----------+----------------+-------------------------------------+
+| Tertiary  | 0              |  5                                  |
++-----------+----------------+-------------------------------------+
+| Tertiary  | 1              |  6                                  |
++-----------+----------------+-------------------------------------+
+
+Example
+-------
+
+A sample aggregate cluster configuration could be:
+
+.. code-block:: yaml
+
+  name: aggregate_cluster
+  connect_timeout: 0.25s
+  lb_policy: CLUSTER_PROVIDED
+  cluster_type:
+    name: envoy.clusters.aggregate
+    typed_config:
+      "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig
+      clusters:
+      # The clusters primary, secondary and tertiary must be defined elsewhere in the configuration.
+      - primary
+      - secondary
+      - tertiary
+
+Note: :ref:`PriorityLoad retry plugins <envoy_api_field_route.RetryPolicy.retry_priority>` won't
+work for the aggregate cluster because the aggregate load balancer overrides the *PriorityLoad*
+during load balancing.
+
+
+Load Balancing Example
+----------------------
+
+The aggregate cluster uses a tiered load balancing algorithm: the top tier distributes traffic to
+the clusters according to the health scores across all :ref:`priorities <arch_overview_load_balancing_priority_levels>`
+in each cluster. The aggregate cluster in this section includes two clusters, which is different from
+what the configuration above describes.
+
++-----------------------------------------------------------------------------------------------------------------------+--------------------+----------------------+
+| Cluster                                                                                                               | Traffic to Primary | Traffic to Secondary |                                                
++=======================================================================+===============================================+====================+======================+
+| Primary                                                               | Secondary                                     |                                           |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+                                           +
+| P=0 Healthy Endpoints | P=1 Healthy Endpoints | P=2 Healthy Endpoints | P=0 Healthy Endpoints | P=1 Healthy Endpoints |                                           |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 100%                  | 100%                  | 100%                  | 100%                  | 100%                  | 100%               | 0%                   |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 72%                   | 100%                  | 100%                  | 100%                  | 100%                  | 100%               | 0%                   |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 71%                   | 1%                    | 0%                    | 100%                  | 100%                  | 100%               | 0%                   |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 71%                   | 0%                    | 0%                    | 100%                  | 100%                  | 99%                | 1%                   |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 50%                   | 0%                    | 0%                    | 50%                   | 0%                    | 70%                | 30%                  |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 20%                   | 20%                   | 10%                   | 25%                   | 25%                   | 70%                | 30%                  |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 20%                   | 0%                    | 0%                    | 20%                   | 0%                    | 50%                | 50%                  |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 0%                    | 0%                    | 0%                    | 100%                  | 0%                    | 0%                 | 100%                 |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+| 0%                    | 0%                    | 0%                    | 72%                   | 0%                    | 0%                 | 100%                 |
++-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+--------------------+----------------------+
+
+Note: The load balancing above uses the default :ref:`overprovisioning factor <arch_overview_load_balancing_overprovisioning_factor>`
+of 1.4, which means that if 80% of the endpoints in a priority level are healthy, that level is
+still considered fully healthy because 80 * 1.4 > 100.
+
+The example shows how the aggregate cluster level load balancer selects the cluster. E.g., healths
+of {{20, 20, 10}, {25, 25}} would result in a priority load of {{28%, 28%, 14%}, {30%, 0%}} of
+traffic. When the normalized total health drops below 100, traffic is distributed after normalizing
+the levels' health scores to that sub-100 total. E.g., healths of {{20, 0, 0}, {20, 0}} (yielding a
+normalized total health of 56) would be normalized and each cluster will receive 20 * 1.4 / 56 = 50%
+of the traffic, which results in a priority load of {{50%, 0%, 0%}, {50%, 0%}} of traffic.
+
+The load balancer reuses the priority level logic to help with cluster selection. The priority level
+logic works with integer health scores. The health score of a level is (percent of healthy hosts in
+the level) * (overprovisioning factor), capped at 100%. P=0 endpoints receive level 0's health
+score percent of the traffic, with the rest flowing to P=1 (assuming P=1 is 100% healthy - more on
+that later). The integer percents of traffic that each cluster receives are collectively called the
+system's "cluster priority load". For instance, suppose that for the primary cluster 20% of P=0
+endpoints are healthy, 20% of P=1 endpoints are healthy, and 10% of P=2 endpoints are healthy, and
+that for the secondary cluster 25% of P=0 endpoints are healthy and 25% of P=1 endpoints are
+healthy. The primary cluster will receive 20% * 1.4 + 20% * 1.4 + 10% * 1.4 = 70% of the traffic.
+The secondary cluster will receive min(100 - 70, 25% * 1.4 + 25% * 1.4) = 30% of the traffic. The
+traffic to all clusters sums up to 100. The normalized health score and priority load are
+pre-computed before selecting the cluster and priority.
+
+To sum this up as a pseudo algorithm:
+
+::
+
+  health(P_X) = min(100, 1.4 * 100 * healthy_P_X_backends / total_P_X_backends), where
+                  total_P_X_backends is the number of backends for priority P_X after linearization
+  normalized_total_health = min(100, Σ(health(P_0)...health(P_X)))
+  cluster_priority_load(C_0) = min(100, Σ(health(P_0)...health(P_k)) * 100 / normalized_total_health),
+                  where P_0...P_k belong to C_0
+  cluster_priority_load(C_X) = min(100 - Σ(cluster_priority_load(C_0)..cluster_priority_load(C_X-1)),
+                           Σ(health(P_x)...health(P_X)) * 100 / normalized_total_health),
+                           where P_x...P_X belong to C_X
+  map from priorities to clusters:
+    P_0 ... P_k ... ...P_x ... P_X
+    ^       ^          ^       ^
+    cluster C_0        cluster C_X
+
+The second tier delegates load balancing to the cluster selected in the first step, and that
+cluster may use any load balancing algorithm specified by its :ref:`load balancer type <arch_overview_load_balancing_types>`.
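+
+To make the first tier concrete, the following is a minimal sketch of the pseudo algorithm above.
+It is illustrative only, not Envoy's implementation: clusters are represented simply as lists of
+per-level healthy-host percentages, and the default overprovisioning factor of 1.4 is assumed.
+
+.. code-block:: cpp
+
+  #include <algorithm>
+  #include <cstdint>
+  #include <iostream>
+  #include <vector>
+
+  // Health score of one linearized priority level: the healthy-host percentage scaled by the
+  // overprovisioning factor and capped at 100.
+  uint32_t levelHealth(uint32_t healthy_percent, double overprovisioning_factor = 1.4) {
+    return std::min<uint32_t>(100, static_cast<uint32_t>(healthy_percent * overprovisioning_factor));
+  }
+
+  // First tier: given the per-level healthy percentages of each cluster in failover order, return
+  // the percentage of traffic routed to each cluster (the "cluster priority load").
+  std::vector<uint32_t> clusterPriorityLoad(const std::vector<std::vector<uint32_t>>& clusters) {
+    uint32_t normalized_total_health = 0;
+    for (const auto& levels : clusters) {
+      for (uint32_t healthy_percent : levels) {
+        normalized_total_health += levelHealth(healthy_percent);
+      }
+    }
+    normalized_total_health = std::min<uint32_t>(100, normalized_total_health);
+
+    std::vector<uint32_t> load;
+    uint32_t assigned = 0;
+    for (const auto& levels : clusters) {
+      uint32_t cluster_health = 0;
+      for (uint32_t healthy_percent : levels) {
+        cluster_health += levelHealth(healthy_percent);
+      }
+      // Each cluster gets its share of the normalized health, but never more than what is left.
+      const uint32_t share =
+          normalized_total_health == 0
+              ? 0
+              : std::min<uint32_t>(100 - assigned, cluster_health * 100 / normalized_total_health);
+      load.push_back(share);
+      assigned += share;
+    }
+    return load;
+  }
+
+  int main() {
+    // Primary {20%, 20%, 10%} healthy, secondary {25%, 25%} healthy -> prints "70% 30%".
+    for (uint32_t share : clusterPriorityLoad({{20, 20, 10}, {25, 25}})) {
+      std::cout << share << "% ";
+    }
+    std::cout << "\n";
+  }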
diff --git a/docs/root/intro/arch_overview/upstream/upstream.rst b/docs/root/intro/arch_overview/upstream/upstream.rst
index da1887087bff..112dc7885446 100644
--- a/docs/root/intro/arch_overview/upstream/upstream.rst
+++ b/docs/root/intro/arch_overview/upstream/upstream.rst
@@ -9,6 +9,7 @@ Upstream clusters
   health_checking
   connection_pooling
   load_balancing/load_balancing
+  aggregate_cluster
   outlier
   circuit_breaking
   upstream_filters
diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst
index e1a4fb34f982..a64b11ec440e 100644
--- a/docs/root/intro/deprecated.rst
+++ b/docs/root/intro/deprecated.rst
@@ -12,6 +12,10 @@ Deprecated items below are listed in chronological order.
 
 Version 1.13.0 (pending)
 ========================
+* The `request_headers_for_tags` field in :ref:`HTTP connection manager
+  <envoy_api_msg_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing>`
+  has been deprecated in favor of the :ref:`custom_tags
+  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing.custom_tags>` field.
 
 
 Version 1.12.0 (October 31, 2019)
diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst
index 849ccaaef0ff..1e804c25c1a6 100644
--- a/docs/root/intro/version_history.rst
+++ b/docs/root/intro/version_history.rst
@@ -4,26 +4,39 @@ Version history
 1.13.0 (pending)
 ================
 * access log: added FILTER_STATE :ref:`access log formatters <config_access_log_format>` and gRPC access logger.
+* access log: added a :ref:`typed JSON logging mode <config_access_log_format_dictionaries>` to output access logs in JSON format with non-string values.
 * api: remove all support for v1
 * buffer: remove old implementation
 * build: official released binary is now built against libc++.
+* cluster: added :ref:`aggregate cluster <arch_overview_aggregate_cluster>` that allows load balancing between clusters.
 * ext_authz: added :ref:`configurable ability<envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.include_peer_certificate>` to send the :ref:`certificate<envoy_api_field_service.auth.v2.AttributeContext.Peer.certificate>` to the `ext_authz` service.
 * health check: gRPC health checker sets the gRPC deadline to the configured timeout duration.
 * http: added the ability to sanitize headers nominated by the Connection header. This new behavior is guarded by envoy.reloadable_features.connection_header_sanitization which defaults to true.
 * http: support :ref:`auto_host_rewrite_header<envoy_api_field_config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig.auto_host_rewrite_header>` in the dynamic forward proxy.
 * jwt_authn: added :ref:`bypass_cors_preflight<envoy_api_field_config.filter.http.jwt_authn.v2alpha.JwtAuthentication.bypass_cors_preflight>` to allow bypassing the CORS preflight request.
 * lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`
+* listeners: added :ref:`reuse_port<envoy_api_field_Listener.reuse_port>` option.
 * logger: added :ref:`--log-format-escaped <operations_cli>` command line option to escape newline characters in application logs.
+* rbac: added support for matching all subject alt names instead of first in :ref:`principal_name <envoy_api_field_config.rbac.v2.Principal.Authenticated.principal_name>`.
 * redis: performance improvement for larger split commands by avoiding string copies.
+* redis: correctly follow MOVE/ASK redirection for mirrored clusters.
 * router: added support for REQ(header-name) :ref:`header formatter <config_http_conn_man_headers_custom_request_headers>`.
+* router: allow using a :ref:`query parameter
+  <envoy_api_field_route.RouteAction.HashPolicy.query_parameter>` for HTTP consistent hashing.
 * router: skip the Location header when the response code is not a 201 or a 3xx.
+* router check tool: added support for testing and marking coverage for routes of runtime fraction 0.
 * server: fixed a bug in config validation for configs with runtime layers
 * tcp_proxy: added :ref:`ClusterWeight.metadata_match<envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight.metadata_match>`
 * tcp_proxy: added :ref:`hash_policy<envoy_api_field_config.filter.network.tcp_proxy.v2.TcpProxy.hash_policy>`
 * thrift_proxy: added support for cluster header based routing.
+* thrift_proxy: added stats to the router filter.
 * tls: remove TLS 1.0 and 1.1 from client defaults
 * router: exposed DOWNSTREAM_REMOTE_ADDRESS as custom HTTP request/response headers.
 * api: added ability to specify `mode` for :ref:`Pipe <envoy_api_field_core.Pipe.mode>`.
+* tracing: added the ability to set custom tags on both the :ref:`HTTP connection manager<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.tracing>` and the :ref:`HTTP route <envoy_api_field_route.Route.tracing>`.
+* tracing: added upstream_address tag.
+* udp: added initial support for :ref:`UDP proxy <config_udp_listener_filters_udp_proxy>`.
 
 1.12.0 (October 31, 2019)
 =========================
diff --git a/docs/root/operations/hot_restarter.rst b/docs/root/operations/hot_restarter.rst
index 67609b169680..72e09b097686 100644
--- a/docs/root/operations/hot_restarter.rst
+++ b/docs/root/operations/hot_restarter.rst
@@ -20,8 +20,13 @@ The restarter is invoked like so:
   #!/bin/bash
 
   ulimit -n {{ pillar.get('envoy_max_open_files', '102400') }}
+  sysctl fs.inotify.max_user_watches={{ pillar.get('envoy_max_inotify_watches', '524288') }}
+
   exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch $RESTART_EPOCH --service-cluster {{ grains['cluster_name'] }} --service-node {{ grains['service_node'] }} --service-zone {{ grains.get('ec2_availability-zone', 'unknown') }}
 
+Note on ``fs.inotify.max_user_watches``: if Envoy is configured to watch many configuration files in
+a directory on a Linux machine, increase this value, as Linux enforces a limit on the number of
+files that can be watched.
+
 The *RESTART_EPOCH* environment variable is set by the restarter on each restart and can be passed
 to the :option:`--restart-epoch` option.
 
diff --git a/examples/zipkin-tracing/front-envoy-zipkin.yaml b/examples/zipkin-tracing/front-envoy-zipkin.yaml
index c5ae662492fa..0199f9b969a8 100644
--- a/examples/zipkin-tracing/front-envoy-zipkin.yaml
+++ b/examples/zipkin-tracing/front-envoy-zipkin.yaml
@@ -27,6 +27,13 @@ static_resources:
                   cluster: service1
                 decorator:
                   operation: checkAvailability
+              response_headers_to_add:
+              - header:
+                  key: "x-b3-traceid"
+                  value: "%REQ(x-b3-traceid)%"
+              - header:
+                  key: "x-request-id"
+                  value: "%REQ(x-request-id)%"
           http_filters:
           - name: envoy.router
             typed_config: {}
diff --git a/include/envoy/access_log/BUILD b/include/envoy/access_log/BUILD
index da0469451a0b..9dc6453cf8f6 100644
--- a/include/envoy/access_log/BUILD
+++ b/include/envoy/access_log/BUILD
@@ -14,5 +14,6 @@ envoy_cc_library(
     deps = [
         "//include/envoy/http:header_map_interface",
         "//include/envoy/stream_info:stream_info_interface",
+        "//source/common/protobuf",
     ],
 )
diff --git a/include/envoy/access_log/access_log.h b/include/envoy/access_log/access_log.h
index 394df8f26ccb..25d5c72e659b 100644
--- a/include/envoy/access_log/access_log.h
+++ b/include/envoy/access_log/access_log.h
@@ -7,6 +7,8 @@
 #include "envoy/http/header_map.h"
 #include "envoy/stream_info/stream_info.h"
 
+#include "common/protobuf/protobuf.h"
+
 namespace Envoy {
 namespace AccessLog {
 
@@ -78,10 +80,6 @@ class Instance {
 
   /**
    * Log a completed request.
-   * Prior to logging, call refreshByteSize() on HeaderMaps to ensure that an accurate byte size
-   * count is logged.
-   * TODO(asraa): Remove refreshByteSize() requirement when entries in HeaderMap can no longer be
-   * modified by reference and headerMap holds an accurate internal byte size count.
    * @param request_headers supplies the incoming request headers after filtering.
    * @param response_headers supplies response headers.
    * @param response_trailers supplies response trailers.
@@ -139,6 +137,19 @@ class FormatterProvider {
                              const Http::HeaderMap& response_headers,
                              const Http::HeaderMap& response_trailers,
                              const StreamInfo::StreamInfo& stream_info) const PURE;
+  /**
+   * Extract a value from the provided headers/trailers/stream, preserving the value's type.
+   * @param request_headers supplies the request headers.
+   * @param response_headers supplies the response headers.
+   * @param response_trailers supplies the response trailers.
+   * @param stream_info supplies the stream info.
+   * @return ProtobufWkt::Value containing a single value extracted from the given
+   *         headers/trailers/stream.
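+   *
+   * For example, in the formatters in this codebase a duration field is returned as a number
+   * value and a missing field as a null value, rather than the "-" string that format() produces.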
+   */
+  virtual ProtobufWkt::Value formatValue(const Http::HeaderMap& request_headers,
+                                         const Http::HeaderMap& response_headers,
+                                         const Http::HeaderMap& response_trailers,
+                                         const StreamInfo::StreamInfo& stream_info) const PURE;
 };
 
 using FormatterProviderPtr = std::unique_ptr<FormatterProvider>;
diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h
index ee2354a22b92..823e38a514b4 100644
--- a/include/envoy/event/dispatcher.h
+++ b/include/envoy/event/dispatcher.h
@@ -28,11 +28,9 @@ namespace Event {
 /**
  * All dispatcher stats. @see stats_macros.h
  */
-// clang-format off
 #define ALL_DISPATCHER_STATS(HISTOGRAM)                                                            \
   HISTOGRAM(loop_duration_us, Microseconds)                                                        \
   HISTOGRAM(poll_delay_us, Microseconds)
-// clang-format on
 
 /**
  * Struct definition for all dispatcher stats. @see stats_macros.h
@@ -104,10 +102,13 @@ class Dispatcher {
    * dispatcher.
    * @param resolvers supplies the addresses of DNS resolvers that this resolver should use. If left
    * empty, it will not use any specific resolvers, but use defaults (/etc/resolv.conf)
+   * @param use_tcp_for_dns_lookups if set to true, TCP will be used to perform DNS lookups.
+   * Otherwise, UDP is used.
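+   *
+   * An illustrative call site, requesting TCP-based lookups with no custom resolvers:
+   *   dispatcher.createDnsResolver({}, true);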
    * @return Network::DnsResolverSharedPtr that is owned by the caller.
    */
   virtual Network::DnsResolverSharedPtr
-  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers) PURE;
+  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,
+                    bool use_tcp_for_dns_lookups) PURE;
 
   /**
    * Creates a file event that will signal when a file is readable or writable. On UNIX systems this
diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h
index 53703e1fd5b1..9c65dfa1bc9b 100644
--- a/include/envoy/http/header_map.h
+++ b/include/envoy/http/header_map.h
@@ -113,7 +113,7 @@ class HeaderString {
    * @param ref_value MUST point to data that will live beyond the lifetime of any request/response
    *        using the string (since a codec may optimize for zero copy).
    */
-  explicit HeaderString(const std::string& ref_value);
+  explicit HeaderString(absl::string_view ref_value);
 
   HeaderString(HeaderString&& move_value) noexcept;
   ~HeaderString();
@@ -224,13 +224,6 @@ class HeaderEntry {
    */
   virtual const HeaderString& key() const PURE;
 
-  /**
-   * Set the header value by copying data into it (deprecated, use absl::string_view variant
-   * instead).
-   * TODO(htuch): Cleanup deprecated call sites.
-   */
-  virtual void value(const char* value, uint32_t size) PURE;
-
   /**
    * Set the header value by copying data into it.
    */
@@ -345,19 +338,23 @@ class HeaderEntry {
   HEADER_FUNC(Via)
 
 /**
- * The following functions are defined for each inline header above. E.g., for ContentLength we
- * have:
+ * The following functions are defined for each inline header above.
+ *
+ * E.g., for path we have:
+ * Path() -> returns the header entry if it exists or nullptr.
+ * appendPath(path, "/") -> appends the string path with delimiter "/" to the header value.
+ * setReferencePath(PATH) -> sets header value to reference string PATH.
+ * setPath(path_string) -> sets the header value to the string path_string by copying the data.
+ * removePath() -> removes the header if it exists.
  *
- * ContentLength() -> returns the header entry if it exists or nullptr.
- * insertContentLength() -> inserts the header if it does not exist, and returns a reference to it.
- * removeContentLength() -> removes the header if it exists.
+ * For inline headers that use integers, we have:
+ * setContentLength(5) -> sets the header value to the integer 5.
  *
- * TODO(asraa): Remove functions with a non-const HeaderEntry return value.
+ * TODO(asraa): Remove the integer set for inline headers that do not take integer values.
  */
 #define DEFINE_INLINE_HEADER(name)                                                                 \
   virtual const HeaderEntry* name() const PURE;                                                    \
-  virtual HeaderEntry* name() PURE;                                                                \
-  virtual HeaderEntry& insert##name() PURE;                                                        \
+  virtual void append##name(absl::string_view data, absl::string_view delimiter) PURE;             \
   virtual void setReference##name(absl::string_view value) PURE;                                   \
   virtual void set##name(absl::string_view value) PURE;                                            \
   virtual void set##name(uint64_t value) PURE;                                                     \
@@ -381,12 +378,11 @@ class HeaderMap {
    * Calling addReference multiple times for the same header will result in:
    * - Comma concatenation for predefined inline headers.
    * - Multiple headers being present in the HeaderMap for other headers.
-   * TODO(asraa): Replace const std::string& param with an absl::string_view.
    *
    * @param key specifies the name of the header to add; it WILL NOT be copied.
    * @param value specifies the value of the header to add; it WILL NOT be copied.
    */
-  virtual void addReference(const LowerCaseString& key, const std::string& value) PURE;
+  virtual void addReference(const LowerCaseString& key, absl::string_view value) PURE;
 
   /**
    * Add a header with a reference key to the map. The key MUST point to data that will live beyond
@@ -414,7 +410,7 @@ class HeaderMap {
    * @param key specifies the name of the header to add; it WILL NOT be copied.
    * @param value specifies the value of the header to add; it WILL be copied.
    */
-  virtual void addReferenceKey(const LowerCaseString& key, const std::string& value) PURE;
+  virtual void addReferenceKey(const LowerCaseString& key, absl::string_view value) PURE;
 
   /**
    * Add a header by copying both the header key and the value.
@@ -438,7 +434,20 @@ class HeaderMap {
    * @param key specifies the name of the header to add; it WILL be copied.
    * @param value specifies the value of the header to add; it WILL be copied.
    */
-  virtual void addCopy(const LowerCaseString& key, const std::string& value) PURE;
+  virtual void addCopy(const LowerCaseString& key, absl::string_view value) PURE;
+
+  /**
+   * Appends data to the header. If the header already has a value, the string "," is added
+   * between the existing value and the new data.
+   *
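+   * An illustrative example of the append behavior (header name chosen arbitrarily):
+   *   headers.appendCopy(LowerCaseString("x-custom"), "foo");
+   *   headers.appendCopy(LowerCaseString("x-custom"), "bar"); // Value is now "foo,bar".
+   *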
+   * @param key specifies the name of the header to append; it WILL be copied.
+   * @param value specifies the value of the header to add; it WILL be copied.
+   *
+   * Caution: This iterates over the HeaderMap to find the header to append. This will modify only
+   * the first occurrence of the header.
+   * TODO(asraa): Investigate whether necessary to append to all headers with the key.
+   */
+  virtual void appendCopy(const LowerCaseString& key, absl::string_view value) PURE;
 
   /**
    * Set a reference header in the map. Both key and value MUST point to data that will live beyond
@@ -447,12 +456,11 @@ class HeaderMap {
    *
    * Calling setReference multiple times for the same header will result in only the last header
    * being present in the HeaderMap.
-   * TODO(asraa): Replace const std::string& param with an absl::string_view.
    *
    * @param key specifies the name of the header to set; it WILL NOT be copied.
    * @param value specifies the value of the header to set; it WILL NOT be copied.
    */
-  virtual void setReference(const LowerCaseString& key, const std::string& value) PURE;
+  virtual void setReference(const LowerCaseString& key, absl::string_view value) PURE;
 
   /**
    * Set a header with a reference key in the map. The key MUST point to point to data that will
@@ -465,44 +473,28 @@ class HeaderMap {
    * @param key specifies the name of the header to set; it WILL NOT be copied.
    * @param value specifies the value of the header to set; it WILL be copied.
    */
-  virtual void setReferenceKey(const LowerCaseString& key, const std::string& value) PURE;
+  virtual void setReferenceKey(const LowerCaseString& key, absl::string_view value) PURE;
 
   /**
-   * HeaderMap contains an internal byte size count, updated as entries are added, removed, or
-   * modified through the HeaderMap interface. However, HeaderEntries can be accessed and modified
-   * by reference so that the HeaderMap can no longer accurately update the internal byte size
-   * count.
-   *
-   * Calling byteSize before a HeaderEntry is accessed will return the internal byte size count. The
-   * value is cleared when a HeaderEntry is accessed, and the value is updated and set again when
-   * refreshByteSize is called.
-   *
-   * To guarantee an accurate byte size count, call refreshByteSize.
+   * Replaces a header value by copying the value. Copies the key if the key does not exist.
    *
-   * @return uint64_t the approximate size of the header map in bytes if valid.
-   */
-  virtual absl::optional<uint64_t> byteSize() const PURE;
-
-  /**
-   * This returns the sum of the byte sizes of the keys and values in the HeaderMap. This also
-   * updates and sets the byte size count.
+   * Calling setCopy multiple times for the same header will result in only the last header
+   * being present in the HeaderMap.
    *
-   * To guarantee an accurate byte size count, use this. If it is known HeaderEntries have not been
-   * manipulated since a call to refreshByteSize, it is safe to use byteSize.
+   * @param key specifies the name of the header to set; it WILL be copied.
+   * @param value specifies the value of the header to set; it WILL be copied.
    *
-   * @return uint64_t the approximate size of the header map in bytes.
+   * Caution: This iterates over the HeaderMap to find the header to set. This will modify only the
+   * first occurrence of the header.
+   * TODO(asraa): Investigate whether necessary to set all headers with the key.
    */
-  virtual uint64_t refreshByteSize() PURE;
+  virtual void setCopy(const LowerCaseString& key, absl::string_view value) PURE;
 
   /**
-   * This returns the sum of the byte sizes of the keys and values in the HeaderMap.
-   *
-   * This iterates over the HeaderMap to calculate size and should only be called directly when the
-   * user wants an explicit recalculation of the byte size.
-   *
-   * @return uint64_t the approximate size of the header map in bytes.
+   * @return uint64_t the size of the header map in bytes. This is the sum of the header keys and
+   * values and does not account for data structure overhead.
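+   *
+   * For example (an illustrative value), a map holding only the header ":path" with value "/foo"
+   * reports byteSize() == 9 (5 key bytes + 4 value bytes).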
    */
-  virtual uint64_t byteSizeInternal() const PURE;
+  virtual uint64_t byteSize() const PURE;
 
   /**
    * Get a header by key.
@@ -510,7 +502,6 @@ class HeaderMap {
    * @return the header entry if it exists otherwise nullptr.
    */
   virtual const HeaderEntry* get(const LowerCaseString& key) const PURE;
-  virtual HeaderEntry* get(const LowerCaseString& key) PURE;
 
   // aliases to make iterate() and iterateReverse() callbacks easier to read
   enum class Iterate { Continue, Break };
@@ -549,6 +540,11 @@ class HeaderMap {
    */
   virtual Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const PURE;
 
+  /**
+   * Clears the headers in the map.
+   */
+  virtual void clear() PURE;
+
   /**
    * Remove all instances of a header by key.
    * @param key supplies the header key to remove.
diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h
index 162c79aae80f..f5538c15c983 100644
--- a/include/envoy/network/listener.h
+++ b/include/envoy/network/listener.h
@@ -44,8 +44,7 @@ class ListenSocketFactory {
   virtual const Address::InstanceConstSharedPtr& localAddress() const PURE;
 
   /**
-   * @return the socket if getListenSocket() returns a shared socket among each call,
-   * nullopt otherwise.
+   * @return the socket shared by worker threads if one exists; absl::nullopt otherwise.
    */
   virtual absl::optional<std::reference_wrapper<Socket>> sharedSocket() const PURE;
 };
diff --git a/include/envoy/registry/registry.h b/include/envoy/registry/registry.h
index 3f788abbb5c5..f02f10d84b7c 100644
--- a/include/envoy/registry/registry.h
+++ b/include/envoy/registry/registry.h
@@ -9,6 +9,7 @@
 
 #include "common/common/assert.h"
 #include "common/common/fmt.h"
+#include "common/common/logger.h"
 
 #include "absl/base/attributes.h"
 #include "absl/container/flat_hash_map.h"
@@ -105,7 +106,7 @@ template <typename T> class InjectFactory;
  * Example lookup: BaseFactoryType *factory =
  * FactoryRegistry<BaseFactoryType>::getFactory("example_factory_name");
  */
-template <class Base> class FactoryRegistry {
+template <class Base> class FactoryRegistry : public Logger::Loggable<Logger::Id::config> {
 public:
   /**
    * Return a sorted vector of registered factory names.
@@ -132,17 +133,30 @@ template <class Base> class FactoryRegistry {
     return *factories;
   }
 
-  static void registerFactory(Base& factory, absl::string_view name) {
+  static absl::flat_hash_map<std::string, std::string>& deprecatedFactoryNames() {
+    static auto* deprecated_factory_names = new absl::flat_hash_map<std::string, std::string>;
+    return *deprecated_factory_names;
+  }
+
+  /**
+   * @param instead_value the replacement name to suggest when the passed name is deprecated; if
+   * non-empty, lookups by the deprecated name will log a warning pointing at this value.
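+   *
+   * For example (hypothetical names):
+   *   registerFactory(factory, "envoy.old_filter", "envoy.filters.http.new_filter");
+   * After this, getFactory("envoy.old_filter") logs "envoy.old_filter is deprecated, use
+   * envoy.filters.http.new_filter instead." and still returns the factory.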
+   */
+  static void registerFactory(Base& factory, absl::string_view name,
+                              absl::string_view instead_value = "") {
     auto result = factories().emplace(std::make_pair(name, &factory));
     if (!result.second) {
       throw EnvoyException(fmt::format("Double registration for name: '{}'", factory.name()));
     }
+    if (!instead_value.empty()) {
+      deprecatedFactoryNames().emplace(std::make_pair(name, instead_value));
+    }
   }
 
   /**
    * Gets a factory by name. If the name isn't found in the registry, returns nullptr.
    */
   static Base* getFactory(absl::string_view name) {
+    checkDeprecated(name);
     auto it = factories().find(name);
     if (it == factories().end()) {
       return nullptr;
@@ -150,6 +164,14 @@ template <class Base> class FactoryRegistry {
     return it->second;
   }
 
+  static void checkDeprecated(absl::string_view name) {
+    auto it = deprecatedFactoryNames().find(name);
+    if (it != deprecatedFactoryNames().end()) {
+      ENVOY_LOG(warn, "{} is deprecated, use {} instead.", it->first, it->second);
+    }
+  }
+
 private:
   // Allow factory injection only in tests.
   friend class InjectFactory<Base>;
@@ -227,7 +249,7 @@ template <class T, class Base> class RegisterFactory {
 
     for (auto deprecated_name : deprecated_names) {
       ASSERT(!deprecated_name.empty());
-      FactoryRegistry<Base>::registerFactory(instance_, deprecated_name);
+      FactoryRegistry<Base>::registerFactory(instance_, deprecated_name, instance_.name());
     }
 
     if (!FactoryCategoryRegistry::isRegistered(Base::category())) {
diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h
index 952fba669f11..fb398503f7b7 100644
--- a/include/envoy/router/router.h
+++ b/include/envoy/router/router.h
@@ -823,6 +823,12 @@ class RouteTracing {
    * @return the overall sampling percentage
    */
   virtual const envoy::type::FractionalPercent& getOverallSampling() const PURE;
+
+  /**
+   * This method returns the route level tracing custom tags.
+   * @return the tracing custom tags.
+   */
+  virtual const Tracing::CustomTagMap& getCustomTags() const PURE;
 };
 
 using RouteTracingConstPtr = std::unique_ptr<const RouteTracing>;
diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h
index 6494289402df..7eda91473c90 100644
--- a/include/envoy/runtime/runtime.h
+++ b/include/envoy/runtime/runtime.h
@@ -1,6 +1,7 @@
 #pragma once
 
 #include <cstdint>
+#include <limits>
 #include <memory>
 #include <string>
 #include <unordered_map>
@@ -30,10 +31,30 @@ class RandomGenerator {
 public:
   virtual ~RandomGenerator() = default;
 
+  using result_type = uint64_t; // NOLINT(readability-identifier-naming)
+
   /**
    * @return uint64_t a new random number.
    */
-  virtual uint64_t random() PURE;
+  virtual result_type random() PURE;
+
+  /**
+   * @return the smallest value that `operator()` may return. The value is
+   * strictly less than `max()`.
+   */
+  constexpr static result_type min() noexcept { return std::numeric_limits<result_type>::min(); }
+
+  /**
+   * @return the largest value that `operator()` may return. The value is
+   * strictly greater than `min()`.
+   */
+  constexpr static result_type max() noexcept { return std::numeric_limits<result_type>::max(); }
+
+  /**
+   * @return a value in the closed interval `[min(), max()]`. Has amortized
+   * constant complexity.
+   */
+  result_type operator()() { return result_type(random()); }
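+
+  // NOTE: with result_type, min(), max() and operator() defined above, a RandomGenerator satisfies
+  // the C++ UniformRandomBitGenerator requirements, so it can be passed to standard facilities such
+  // as std::shuffle or std::uniform_int_distribution (illustrative uses, not required by Envoy).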
 
   /**
    * @return std::string containing uuid4 of 36 char length.
diff --git a/include/envoy/server/listener_manager.h b/include/envoy/server/listener_manager.h
index bc978047c12b..2e093fb8f556 100644
--- a/include/envoy/server/listener_manager.h
+++ b/include/envoy/server/listener_manager.h
@@ -31,6 +31,20 @@ class LdsApi {
 
 using LdsApiPtr = std::unique_ptr<LdsApi>;
 
+struct ListenSocketCreationParams {
+  ListenSocketCreationParams(bool bind_to_port, bool duplicate_parent_socket = true)
+      : bind_to_port(bind_to_port), duplicate_parent_socket(duplicate_parent_socket) {}
+
+  // For testing.
+  bool operator==(const ListenSocketCreationParams& rhs) const;
+  bool operator!=(const ListenSocketCreationParams& rhs) const;
+
+  // Whether to actually bind the socket.
+  bool bind_to_port;
+  // Whether to duplicate the socket from a hot restart parent.
+  bool duplicate_parent_socket;
+};
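+
+// An illustrative construction: ListenSocketCreationParams(true) requests a socket that is actually
+// bound and, by default, may be duplicated from the hot restart parent.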
+
 /**
  * Factory for creating listener components.
  */
@@ -49,13 +63,14 @@ class ListenerComponentFactory {
    * @param address supplies the socket's address.
    * @param socket_type the type of socket (stream or datagram) to create.
    * @param options to be set on the created socket just before calling 'bind()'.
-   * @param bind_to_port supplies whether to actually bind the socket.
+   * @param params used to control how the socket is created.
    * @return Network::SocketSharedPtr an initialized and potentially bound socket.
    */
   virtual Network::SocketSharedPtr
   createListenSocket(Network::Address::InstanceConstSharedPtr address,
                      Network::Address::SocketType socket_type,
-                     const Network::Socket::OptionsSharedPtr& options, bool bind_to_port) PURE;
+                     const Network::Socket::OptionsSharedPtr& options,
+                     const ListenSocketCreationParams& params) PURE;
 
   /**
    * Creates a list of filter factories.
diff --git a/include/envoy/tracing/http_tracer.h b/include/envoy/tracing/http_tracer.h
index ec53d00e4bcc..09ed75b206aa 100644
--- a/include/envoy/tracing/http_tracer.h
+++ b/include/envoy/tracing/http_tracer.h
@@ -11,6 +11,9 @@
 namespace Envoy {
 namespace Tracing {
 
+class Span;
+using SpanPtr = std::unique_ptr<Span>;
+
 constexpr uint32_t DefaultMaxPathTagLength = 256;
 
 enum class OperationName { Ingress, Egress };
@@ -39,6 +42,38 @@ struct Decision {
   bool traced;
 };
 
+/**
+ * The context for the custom tag to obtain the tag value.
+ */
+struct CustomTagContext {
+  const Http::HeaderMap* request_headers;
+  const StreamInfo::StreamInfo& stream_info;
+};
+
+/**
+ * Tracing custom tag, carrying the tag name and the logic for applying it to the span.
+ */
+class CustomTag {
+public:
+  virtual ~CustomTag() = default;
+
+  /**
+   * @return the tag name view.
+   */
+  virtual absl::string_view tag() const PURE;
+
+  /**
+   * Applies the custom tag to the span; generally this obtains the tag value from the context
+   * and attaches it to the span.
+   * @param span the active span.
+   * @param ctx the custom tag context.
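+   *
+   * A typical implementation (illustrative; the exact span call is an assumption) reads the value
+   * from ctx.request_headers or ctx.stream_info and attaches it via something like
+   * span.setTag(tag(), value).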
+   */
+  virtual void apply(Span& span, const CustomTagContext& ctx) const PURE;
+};
+
+using CustomTagConstSharedPtr = std::shared_ptr<const CustomTag>;
+using CustomTagMap = absl::flat_hash_map<std::string, CustomTagConstSharedPtr>;
+
 /**
  * Tracing configuration, it carries additional data needed to populate the span.
  */
@@ -52,9 +87,9 @@ class Config {
   virtual OperationName operationName() const PURE;
 
   /**
-   * @return list of headers to populate tags on the active span.
+   * @return custom tags to be attached to the active span.
    */
-  virtual const std::vector<Http::LowerCaseString>& requestHeadersForTags() const PURE;
+  virtual const CustomTagMap* customTags() const PURE;
 
   /**
    * @return true if spans should be annotated with more detailed information.
@@ -67,9 +102,6 @@ class Config {
   virtual uint32_t maxPathTagLength() const PURE;
 };
 
-class Span;
-using SpanPtr = std::unique_ptr<Span>;
-
 /**
  * Basic abstraction for span.
  */
diff --git a/include/envoy/upstream/resource_manager.h b/include/envoy/upstream/resource_manager.h
index 4fe7681aaaa3..3571b64e4111 100644
--- a/include/envoy/upstream/resource_manager.h
+++ b/include/envoy/upstream/resource_manager.h
@@ -73,7 +73,7 @@ class ResourceManager {
   virtual ~ResourceManager() = default;
 
   /**
-   * @return Resource& active TCP connections.
+   * @return Resource& active TCP connections and UDP sessions.
    */
   virtual Resource& connections() PURE;
 
diff --git a/repokitteh.star b/repokitteh.star
index 2e3ea49218d8..1a204feb1bee 100644
--- a/repokitteh.star
+++ b/repokitteh.star
@@ -1,9 +1,9 @@
-use("github.com/repokitteh/modules/assign.star")
-use("github.com/repokitteh/modules/review.star")
-use("github.com/repokitteh/modules/wait.star")
-use("github.com/repokitteh/modules/circleci.star", secret_token=get_secret('circle_token'))
+use("github.com/repokitteh/modules/assign.star#22520d03464dd9503e036c7fa365c427723c4aaf")
+use("github.com/repokitteh/modules/review.star#22520d03464dd9503e036c7fa365c427723c4aaf")
+use("github.com/repokitteh/modules/wait.star#22520d03464dd9503e036c7fa365c427723c4aaf")
+use("github.com/repokitteh/modules/circleci.star#22520d03464dd9503e036c7fa365c427723c4aaf", secret_token=get_secret('circle_token'))
 use(
-  "github.com/repokitteh/modules/ownerscheck.star",
+  "github.com/repokitteh/modules/ownerscheck.star#22520d03464dd9503e036c7fa365c427723c4aaf",
   paths=[
     {
       "owner": "envoyproxy/api-shepherds!",
diff --git a/security/email-templates.md b/security/email-templates.md
index 654524b765f6..15888c3035a3 100644
--- a/security/email-templates.md
+++ b/security/email-templates.md
@@ -11,7 +11,7 @@ Cc: envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com
 
 Hello Envoy Community,
 
-The Envoy maintainers would like to announce the forthcoming release of Envoy
+The Envoy security team would like to announce the forthcoming release of Envoy
 $VERSION.
 
 This release will be made available on the $ORDINALDAY of $MONTH $YEAR at
@@ -21,7 +21,7 @@ defect(s). The highest rated security defect is considered $SEVERITY severity.
 No further details or patches will be made available in advance of the release.
 
 Thanks,
-$PERSON (on behalf of the Envoy maintainers)
+$PERSON (on behalf of the Envoy security team and maintainers)
 ```
 
 ## Upcoming security release to cncf-envoy-distributors-announce@lists.cncf.io
@@ -61,7 +61,7 @@ or to envoy-security@googlegroups.com for direct communication with the Envoy
 security team.
 
 Thanks,
-$PERSON (on behalf of the Envoy security team)
+$PERSON (on behalf of the Envoy security team and maintainers)
 ```
 
 ## Candidate release patches to cncf-envoy-distributors-announce@lists.cncf.io
@@ -98,7 +98,7 @@ envoy-security@googlegroups.com for direct communication with the Envoy
 security team.
 
 Thanks,
-$PERSON (on behalf of the Envoy security team)
+$PERSON (on behalf of the Envoy security team and maintainers)
 ```
 
 ## Security Fix Announcement
@@ -110,7 +110,7 @@ Cc: envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com
 
 Hello Envoy Community,
 
-The Envoy maintainers would like to announce the availability of Envoy $VERSION.
+The Envoy security team would like to announce the availability of Envoy $VERSION.
 This addresses the following CVE(s):
 
 * CVE-YEAR-ABCDEF (CVSS score $CVSS): $CVESUMMARY
@@ -164,5 +164,5 @@ coordination in making this release.
 
 Thanks,
 
-$PERSON (on behalf of the Envoy maintainers)
+$PERSON (on behalf of the Envoy security team and maintainers)
 ```
diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD
index 46403d19b8d3..aa3267ebf869 100644
--- a/source/common/access_log/BUILD
+++ b/source/common/access_log/BUILD
@@ -39,6 +39,7 @@ envoy_cc_library(
     name = "access_log_formatter_lib",
     srcs = ["access_log_formatter.cc"],
     hdrs = ["access_log_formatter.h"],
+    external_deps = ["abseil_str_format"],
     deps = [
         "//include/envoy/access_log:access_log_interface",
         "//include/envoy/stream_info:stream_info_interface",
@@ -46,6 +47,7 @@ envoy_cc_library(
         "//source/common/common:utility_lib",
         "//source/common/config:metadata_lib",
         "//source/common/http:utility_lib",
+        "//source/common/protobuf:message_validator_lib",
         "//source/common/stream_info:utility_lib",
     ],
 )
diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc
index 1b73d4e8a4b7..83de403e3b5d 100644
--- a/source/common/access_log/access_log_formatter.cc
+++ b/source/common/access_log/access_log_formatter.cc
@@ -10,6 +10,7 @@
 #include "common/common/utility.h"
 #include "common/config/metadata.h"
 #include "common/http/utility.h"
+#include "common/protobuf/message_validator_impl.h"
 #include "common/stream_info/utility.h"
 
 #include "absl/strings/str_split.h"
@@ -21,48 +22,45 @@ namespace Envoy {
 namespace AccessLog {
 
 static const std::string UnspecifiedValueString = "-";
+static const std::string EmptyString = "";
 
 namespace {
 
-// Matches newline pattern in a StartTimeFormatter format string.
-const std::regex& getStartTimeNewlinePattern() {
-  CONSTRUCT_ON_FIRST_USE(std::regex, "%[-_0^#]*[1-9]*n");
+const ProtobufWkt::Value& unspecifiedValue() {
+  static const auto* v = []() -> ProtobufWkt::Value* {
+    auto* vv = new ProtobufWkt::Value();
+    vv->set_null_value(ProtobufWkt::NULL_VALUE);
+    return vv;
+  }();
+  return *v;
 }
-const std::regex& getNewlinePattern() { CONSTRUCT_ON_FIRST_USE(std::regex, "\n"); }
 
-// Helper that handles the case when the ConnectionInfo is missing or if the desired value is
-// empty.
-StreamInfoFormatter::FieldExtractor sslConnectionInfoStringExtractor(
-    std::function<std::string(const Ssl::ConnectionInfo& connection_info)> string_extractor) {
-  return [string_extractor](const StreamInfo::StreamInfo& stream_info) {
-    if (stream_info.downstreamSslConnection() == nullptr) {
-      return UnspecifiedValueString;
-    }
+ProtobufWkt::Value stringValue(const std::string& str) {
+  ProtobufWkt::Value val;
+  val.set_string_value(str);
+  return val;
+}
 
-    const auto value = string_extractor(*stream_info.downstreamSslConnection());
-    if (value.empty()) {
-      return UnspecifiedValueString;
-    } else {
-      return value;
-    }
-  };
-}
-
-// Helper that handles the case when the desired time field is empty.
-StreamInfoFormatter::FieldExtractor sslConnectionInfoStringTimeExtractor(
-    std::function<absl::optional<SystemTime>(const Ssl::ConnectionInfo& connection_info)>
-        time_extractor) {
-  return sslConnectionInfoStringExtractor(
-      [time_extractor](const Ssl::ConnectionInfo& connection_info) {
-        absl::optional<SystemTime> time = time_extractor(connection_info);
-        if (!time.has_value()) {
-          return UnspecifiedValueString;
-        }
+template <typename T> ProtobufWkt::Value numberValue(const T num) {
+  ProtobufWkt::Value val;
+  val.set_number_value(static_cast<double>(num));
+  return val;
+}
 
-        return AccessLogDateTimeFormatter::fromTime(time.value());
-      });
+void truncate(std::string& str, absl::optional<uint32_t> max_length) {
+  if (!max_length) {
+    return;
+  }
+
+  str = str.substr(0, max_length.value());
 }
 
+// Matches newline pattern in a StartTimeFormatter format string.
+const std::regex& getStartTimeNewlinePattern() {
+  CONSTRUCT_ON_FIRST_USE(std::regex, "%[-_0^#]*[1-9]*n");
+}
+const std::regex& getNewlinePattern() { CONSTRUCT_ON_FIRST_USE(std::regex, "\n"); }
+
 } // namespace
 
 const std::string AccessLogFormatUtils::DEFAULT_FORMAT =
@@ -76,19 +74,6 @@ FormatterPtr AccessLogFormatUtils::defaultAccessLogFormatter() {
   return FormatterPtr{new FormatterImpl(DEFAULT_FORMAT)};
 }
 
-std::string
-AccessLogFormatUtils::durationToString(const absl::optional<std::chrono::nanoseconds>& time) {
-  if (time) {
-    return durationToString(time.value());
-  } else {
-    return UnspecifiedValueString;
-  }
-}
-
-std::string AccessLogFormatUtils::durationToString(const std::chrono::nanoseconds& time) {
-  return fmt::format_int(std::chrono::duration_cast<std::chrono::milliseconds>(time).count()).str();
-}
-
 const std::string&
 AccessLogFormatUtils::protocolToString(const absl::optional<Http::Protocol>& protocol) {
   if (protocol) {
@@ -115,10 +100,11 @@ std::string FormatterImpl::format(const Http::HeaderMap& request_headers,
   return log_line;
 }
 
-JsonFormatterImpl::JsonFormatterImpl(std::unordered_map<std::string, std::string>& format_mapping) {
+JsonFormatterImpl::JsonFormatterImpl(std::unordered_map<std::string, std::string>& format_mapping,
+                                     bool preserve_types)
+    : preserve_types_(preserve_types) {
   for (const auto& pair : format_mapping) {
-    auto providers = AccessLogFormatParser::parse(pair.second);
-    json_output_format_.emplace(pair.first, FormatterPtr{new FormatterImpl(pair.second)});
+    json_output_format_.emplace(pair.first, AccessLogFormatParser::parse(pair.second));
   }
 }
 
@@ -126,32 +112,39 @@ std::string JsonFormatterImpl::format(const Http::HeaderMap& request_headers,
                                       const Http::HeaderMap& response_headers,
                                       const Http::HeaderMap& response_trailers,
                                       const StreamInfo::StreamInfo& stream_info) const {
-  const auto output_map = toMap(request_headers, response_headers, response_trailers, stream_info);
-
-  ProtobufWkt::Struct output_struct;
-  for (const auto& pair : output_map) {
-    ProtobufWkt::Value string_value;
-    string_value.set_string_value(pair.second);
-    (*output_struct.mutable_fields())[pair.first] = string_value;
-  }
-
-  std::string log_line;
-  const auto conversion_status = Protobuf::util::MessageToJsonString(output_struct, &log_line);
-  if (!conversion_status.ok()) {
-    log_line =
-        fmt::format("Error serializing access log to JSON: {}", conversion_status.ToString());
-  }
+  const auto output_struct =
+      toStruct(request_headers, response_headers, response_trailers, stream_info);
 
+  std::string log_line = MessageUtil::getJsonStringFromMessage(output_struct, false, true);
   return absl::StrCat(log_line, "\n");
 }
 
-std::unordered_map<std::string, std::string> JsonFormatterImpl::toMap(
-    const Http::HeaderMap& request_headers, const Http::HeaderMap& response_headers,
-    const Http::HeaderMap& response_trailers, const StreamInfo::StreamInfo& stream_info) const {
-  std::unordered_map<std::string, std::string> output;
+ProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::HeaderMap& request_headers,
+                                                const Http::HeaderMap& response_headers,
+                                                const Http::HeaderMap& response_trailers,
+                                                const StreamInfo::StreamInfo& stream_info) const {
+  ProtobufWkt::Struct output;
+  auto* fields = output.mutable_fields();
   for (const auto& pair : json_output_format_) {
-    output.emplace(pair.first, pair.second->format(request_headers, response_headers,
-                                                   response_trailers, stream_info));
+    const auto& providers = pair.second;
+    ASSERT(providers.size() >= 1);
+
+    if (providers.size() == 1) {
+      const auto& provider = providers.front();
+      auto val = preserve_types_ ? provider->formatValue(request_headers, response_headers,
+                                                         response_trailers, stream_info)
+                                 : stringValue(provider->format(request_headers, response_headers,
+                                                                response_trailers, stream_info));
+
+      (*fields)[pair.first] = val;
+    } else {
+      // Multiple providers forces string output.
+      std::string str;
+      for (const auto& provider : providers) {
+        str += provider->format(request_headers, response_headers, response_trailers, stream_info);
+      }
+      (*fields)[pair.first] = stringValue(str);
+    }
   }
   return output;
 }
@@ -326,195 +319,398 @@ std::vector<FormatterProviderPtr> AccessLogFormatParser::parse(const std::string
   return formatters;
 }
 
-StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) {
+// StreamInfo std::string field extractor.
+class StreamInfoStringFieldExtractor : public StreamInfoFormatter::FieldExtractor {
+public:
+  using FieldExtractor = std::function<std::string(const StreamInfo::StreamInfo&)>;
+
+  StreamInfoStringFieldExtractor(FieldExtractor f) : field_extractor_(f) {}
+
+  // StreamInfoFormatter::FieldExtractor
+  std::string extract(const StreamInfo::StreamInfo& stream_info) const override {
+    return field_extractor_(stream_info);
+  }
+  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {
+    return stringValue(field_extractor_(stream_info));
+  }
+
+private:
+  FieldExtractor field_extractor_;
+};
+
+// StreamInfo absl::optional<std::string> field extractor.
+class StreamInfoOptionalStringFieldExtractor : public StreamInfoFormatter::FieldExtractor {
+public:
+  using FieldExtractor = std::function<absl::optional<std::string>(const StreamInfo::StreamInfo&)>;
+
+  StreamInfoOptionalStringFieldExtractor(FieldExtractor f) : field_extractor_(f) {}
+
+  // StreamInfoFormatter::FieldExtractor
+  std::string extract(const StreamInfo::StreamInfo& stream_info) const override {
+    auto str = field_extractor_(stream_info);
+    if (!str) {
+      return UnspecifiedValueString;
+    }
+
+    return str.value();
+  }
+  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {
+    auto str = field_extractor_(stream_info);
+    if (!str) {
+      return unspecifiedValue();
+    }
+
+    return stringValue(str.value());
+  }
 
+private:
+  FieldExtractor field_extractor_;
+};
+
+// StreamInfo std::chrono_nanoseconds field extractor.
+class StreamInfoDurationFieldExtractor : public StreamInfoFormatter::FieldExtractor {
+public:
+  using FieldExtractor =
+      std::function<absl::optional<std::chrono::nanoseconds>(const StreamInfo::StreamInfo&)>;
+
+  StreamInfoDurationFieldExtractor(FieldExtractor f) : field_extractor_(f) {}
+
+  // StreamInfoFormatter::FieldExtractor
+  std::string extract(const StreamInfo::StreamInfo& stream_info) const override {
+    auto millis = extractMillis(stream_info);
+    if (!millis) {
+      return UnspecifiedValueString;
+    }
+
+    return fmt::format_int(millis.value()).str();
+  }
+  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {
+    auto millis = extractMillis(stream_info);
+    if (!millis) {
+      return unspecifiedValue();
+    }
+
+    return numberValue(millis.value());
+  }
+
+private:
+  absl::optional<uint32_t> extractMillis(const StreamInfo::StreamInfo& stream_info) const {
+    auto time = field_extractor_(stream_info);
+    if (time) {
+      return std::chrono::duration_cast<std::chrono::milliseconds>(time.value()).count();
+    }
+    return absl::nullopt;
+  }
+
+  FieldExtractor field_extractor_;
+};
+
+// StreamInfo uint64_t field extractor.
+class StreamInfoUInt64FieldExtractor : public StreamInfoFormatter::FieldExtractor {
+public:
+  using FieldExtractor = std::function<uint64_t(const StreamInfo::StreamInfo&)>;
+
+  StreamInfoUInt64FieldExtractor(FieldExtractor f) : field_extractor_(f) {}
+
+  // StreamInfoFormatter::FieldExtractor
+  std::string extract(const StreamInfo::StreamInfo& stream_info) const override {
+    return fmt::format_int(field_extractor_(stream_info)).str();
+  }
+  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {
+    return numberValue(field_extractor_(stream_info));
+  }
+
+private:
+  FieldExtractor field_extractor_;
+};
+
+// StreamInfo Envoy::Network::Address::InstanceConstSharedPtr field extractor.
+class StreamInfoAddressFieldExtractor : public StreamInfoFormatter::FieldExtractor {
+public:
+  using FieldExtractor =
+      std::function<Network::Address::InstanceConstSharedPtr(const StreamInfo::StreamInfo&)>;
+
+  static std::unique_ptr<StreamInfoAddressFieldExtractor> withPort(FieldExtractor f) {
+    return std::make_unique<StreamInfoAddressFieldExtractor>(f, true);
+  }
+
+  static std::unique_ptr<StreamInfoAddressFieldExtractor> withoutPort(FieldExtractor f) {
+    return std::make_unique<StreamInfoAddressFieldExtractor>(f, false);
+  }
+
+  StreamInfoAddressFieldExtractor(FieldExtractor f, bool include_port)
+      : field_extractor_(f), include_port_(include_port) {}
+
+  // StreamInfoFormatter::FieldExtractor
+  std::string extract(const StreamInfo::StreamInfo& stream_info) const override {
+    Network::Address::InstanceConstSharedPtr address = field_extractor_(stream_info);
+    if (!address) {
+      return UnspecifiedValueString;
+    }
+
+    return toString(*address);
+  }
+  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {
+    Network::Address::InstanceConstSharedPtr address = field_extractor_(stream_info);
+    if (!address) {
+      return unspecifiedValue();
+    }
+
+    return stringValue(toString(*address));
+  }
+
+private:
+  std::string toString(const Network::Address::Instance& address) const {
+    if (include_port_) {
+      return address.asString();
+    }
+
+    return StreamInfo::Utility::formatDownstreamAddressNoPort(address);
+  }
+
+  FieldExtractor field_extractor_;
+  const bool include_port_;
+};
+
+// Ssl::ConnectionInfo std::string field extractor.
+class StreamInfoSslConnectionInfoFieldExtractor : public StreamInfoFormatter::FieldExtractor {
+public:
+  using FieldExtractor = std::function<std::string(const Ssl::ConnectionInfo& connection_info)>;
+
+  StreamInfoSslConnectionInfoFieldExtractor(FieldExtractor f) : field_extractor_(f) {}
+
+  std::string extract(const StreamInfo::StreamInfo& stream_info) const override {
+    if (stream_info.downstreamSslConnection() == nullptr) {
+      return UnspecifiedValueString;
+    }
+
+    const auto value = field_extractor_(*stream_info.downstreamSslConnection());
+    if (value.empty()) {
+      return UnspecifiedValueString;
+    }
+
+    return value;
+  }
+
+  ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override {
+    if (stream_info.downstreamSslConnection() == nullptr) {
+      return unspecifiedValue();
+    }
+
+    const auto value = field_extractor_(*stream_info.downstreamSslConnection());
+    if (value.empty()) {
+      return unspecifiedValue();
+    }
+
+    return stringValue(value);
+  }
+
+private:
+  FieldExtractor field_extractor_;
+};
+
+StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) {
   if (field_name == "REQUEST_DURATION") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return AccessLogFormatUtils::durationToString(stream_info.lastDownstreamRxByteReceived());
-    };
+    field_extractor_ = std::make_unique<StreamInfoDurationFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.lastDownstreamRxByteReceived();
+        });
   } else if (field_name == "RESPONSE_DURATION") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return AccessLogFormatUtils::durationToString(stream_info.firstUpstreamRxByteReceived());
-    };
+    field_extractor_ = std::make_unique<StreamInfoDurationFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.firstUpstreamRxByteReceived();
+        });
   } else if (field_name == "RESPONSE_TX_DURATION") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      auto downstream = stream_info.lastDownstreamTxByteSent();
-      auto upstream = stream_info.firstUpstreamRxByteReceived();
+    field_extractor_ = std::make_unique<StreamInfoDurationFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          auto downstream = stream_info.lastDownstreamTxByteSent();
+          auto upstream = stream_info.firstUpstreamRxByteReceived();
 
-      if (downstream && upstream) {
-        auto val = downstream.value() - upstream.value();
-        return AccessLogFormatUtils::durationToString(val);
-      }
+          absl::optional<std::chrono::nanoseconds> result;
+          if (downstream && upstream) {
+            result = downstream.value() - upstream.value();
+          }
 
-      return UnspecifiedValueString;
-    };
+          return result;
+        });
   } else if (field_name == "BYTES_RECEIVED") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return fmt::format_int(stream_info.bytesReceived()).str();
-    };
+    field_extractor_ = std::make_unique<StreamInfoUInt64FieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) { return stream_info.bytesReceived(); });
   } else if (field_name == "PROTOCOL") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return AccessLogFormatUtils::protocolToString(stream_info.protocol());
-    };
+    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          return AccessLogFormatUtils::protocolToString(stream_info.protocol());
+        });
   } else if (field_name == "RESPONSE_CODE") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return stream_info.responseCode() ? fmt::format_int(stream_info.responseCode().value()).str()
-                                        : "0";
-    };
+    field_extractor_ = std::make_unique<StreamInfoUInt64FieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.responseCode() ? stream_info.responseCode().value() : 0;
+        });
   } else if (field_name == "RESPONSE_CODE_DETAILS") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return stream_info.responseCodeDetails() ? stream_info.responseCodeDetails().value()
-                                               : UnspecifiedValueString;
-    };
+    field_extractor_ = std::make_unique<StreamInfoOptionalStringFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.responseCodeDetails();
+        });
   } else if (field_name == "BYTES_SENT") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return fmt::format_int(stream_info.bytesSent()).str();
-    };
+    field_extractor_ = std::make_unique<StreamInfoUInt64FieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) { return stream_info.bytesSent(); });
   } else if (field_name == "DURATION") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return AccessLogFormatUtils::durationToString(stream_info.requestComplete());
-    };
+    field_extractor_ = std::make_unique<StreamInfoDurationFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) { return stream_info.requestComplete(); });
   } else if (field_name == "RESPONSE_FLAGS") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return StreamInfo::ResponseFlagUtils::toShortString(stream_info);
-    };
+    field_extractor_ = std::make_unique<StreamInfoStringFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          return StreamInfo::ResponseFlagUtils::toShortString(stream_info);
+        });
   } else if (field_name == "UPSTREAM_HOST") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      if (stream_info.upstreamHost()) {
-        return stream_info.upstreamHost()->address()->asString();
-      } else {
-        return UnspecifiedValueString;
-      }
-    };
+    field_extractor_ =
+        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.upstreamHost() ? stream_info.upstreamHost()->address() : nullptr;
+        });
   } else if (field_name == "UPSTREAM_CLUSTER") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      std::string upstream_cluster_name;
-      if (nullptr != stream_info.upstreamHost()) {
-        upstream_cluster_name = stream_info.upstreamHost()->cluster().name();
-      }
-
-      return upstream_cluster_name.empty() ? UnspecifiedValueString : upstream_cluster_name;
-    };
+    field_extractor_ = std::make_unique<StreamInfoOptionalStringFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          std::string upstream_cluster_name;
+          if (nullptr != stream_info.upstreamHost()) {
+            upstream_cluster_name = stream_info.upstreamHost()->cluster().name();
+          }
+
+          return upstream_cluster_name.empty()
+                     ? absl::nullopt
+                     : absl::make_optional<std::string>(upstream_cluster_name);
+        });
   } else if (field_name == "UPSTREAM_LOCAL_ADDRESS") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return stream_info.upstreamLocalAddress() != nullptr
-                 ? stream_info.upstreamLocalAddress()->asString()
-                 : UnspecifiedValueString;
-    };
+    field_extractor_ =
+        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.upstreamLocalAddress();
+        });
   } else if (field_name == "DOWNSTREAM_LOCAL_ADDRESS") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return stream_info.downstreamLocalAddress()->asString();
-    };
+    field_extractor_ =
+        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.downstreamLocalAddress();
+        });
   } else if (field_name == "DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT") {
-    field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) {
-      return StreamInfo::Utility::formatDownstreamAddressNoPort(
-          *stream_info.downstreamLocalAddress());
-    };
+    field_extractor_ = StreamInfoAddressFieldExtractor::withoutPort(
+        [](const Envoy::StreamInfo::StreamInfo& stream_info) {
+          return stream_info.downstreamLocalAddress();
+        });
   } else if (field_name == "DOWNSTREAM_REMOTE_ADDRESS") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return stream_info.downstreamRemoteAddress()->asString();
-    };
+    field_extractor_ =
+        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.downstreamRemoteAddress();
+        });
   } else if (field_name == "DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return StreamInfo::Utility::formatDownstreamAddressNoPort(
-          *stream_info.downstreamRemoteAddress());
-    };
+    field_extractor_ =
+        StreamInfoAddressFieldExtractor::withoutPort([](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.downstreamRemoteAddress();
+        });
   } else if (field_name == "DOWNSTREAM_DIRECT_REMOTE_ADDRESS") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return stream_info.downstreamDirectRemoteAddress()->asString();
-    };
+    field_extractor_ =
+        StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.downstreamDirectRemoteAddress();
+        });
   } else if (field_name == "DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      return StreamInfo::Utility::formatDownstreamAddressNoPort(
-          *stream_info.downstreamDirectRemoteAddress());
-    };
+    field_extractor_ =
+        StreamInfoAddressFieldExtractor::withoutPort([](const StreamInfo::StreamInfo& stream_info) {
+          return stream_info.downstreamDirectRemoteAddress();
+        });
   } else if (field_name == "REQUESTED_SERVER_NAME") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      if (!stream_info.requestedServerName().empty()) {
-        return stream_info.requestedServerName();
-      } else {
-        return UnspecifiedValueString;
-      }
-    };
+    field_extractor_ = std::make_unique<StreamInfoOptionalStringFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          absl::optional<std::string> result;
+          if (!stream_info.requestedServerName().empty()) {
+            result = stream_info.requestedServerName();
+          }
+          return result;
+        });
   } else if (field_name == "ROUTE_NAME") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      std::string route_name = stream_info.getRouteName();
-      return route_name.empty() ? UnspecifiedValueString : route_name;
-    };
+    field_extractor_ = std::make_unique<StreamInfoOptionalStringFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          absl::optional<std::string> result;
+          std::string route_name = stream_info.getRouteName();
+          if (!route_name.empty()) {
+            result = route_name;
+          }
+          return result;
+        });
   } else if (field_name == "DOWNSTREAM_PEER_URI_SAN") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return absl::StrJoin(connection_info.uriSanPeerCertificate(), ",");
         });
   } else if (field_name == "DOWNSTREAM_LOCAL_URI_SAN") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return absl::StrJoin(connection_info.uriSanLocalCertificate(), ",");
         });
   } else if (field_name == "DOWNSTREAM_PEER_SUBJECT") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return connection_info.subjectPeerCertificate();
         });
   } else if (field_name == "DOWNSTREAM_LOCAL_SUBJECT") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return connection_info.subjectLocalCertificate();
         });
   } else if (field_name == "DOWNSTREAM_TLS_SESSION_ID") {
-    field_extractor_ = sslConnectionInfoStringExtractor(
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
         [](const Ssl::ConnectionInfo& connection_info) { return connection_info.sessionId(); });
   } else if (field_name == "DOWNSTREAM_TLS_CIPHER") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return connection_info.ciphersuiteString();
         });
   } else if (field_name == "DOWNSTREAM_TLS_VERSION") {
-    field_extractor_ = sslConnectionInfoStringExtractor(
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
         [](const Ssl::ConnectionInfo& connection_info) { return connection_info.tlsVersion(); });
   } else if (field_name == "DOWNSTREAM_PEER_FINGERPRINT_256") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return connection_info.sha256PeerCertificateDigest();
         });
   } else if (field_name == "DOWNSTREAM_PEER_SERIAL") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return connection_info.serialNumberPeerCertificate();
         });
   } else if (field_name == "DOWNSTREAM_PEER_ISSUER") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return connection_info.issuerPeerCertificate();
         });
-  } else if (field_name == "DOWNSTREAM_PEER_SUBJECT") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
-          return connection_info.subjectPeerCertificate();
-        });
   } else if (field_name == "DOWNSTREAM_PEER_CERT") {
-    field_extractor_ =
-        sslConnectionInfoStringExtractor([](const Ssl::ConnectionInfo& connection_info) {
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
           return connection_info.urlEncodedPemEncodedPeerCertificate();
         });
   } else if (field_name == "DOWNSTREAM_PEER_CERT_V_START") {
-    field_extractor_ =
-        sslConnectionInfoStringTimeExtractor([](const Ssl::ConnectionInfo& connection_info) {
-          return connection_info.validFromPeerCertificate();
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
+          absl::optional<SystemTime> time = connection_info.validFromPeerCertificate();
+          if (!time.has_value()) {
+            return EmptyString;
+          }
+          return AccessLogDateTimeFormatter::fromTime(time.value());
         });
   } else if (field_name == "DOWNSTREAM_PEER_CERT_V_END") {
-    field_extractor_ =
-        sslConnectionInfoStringTimeExtractor([](const Ssl::ConnectionInfo& connection_info) {
-          return connection_info.expirationPeerCertificate();
+    field_extractor_ = std::make_unique<StreamInfoSslConnectionInfoFieldExtractor>(
+        [](const Ssl::ConnectionInfo& connection_info) {
+          absl::optional<SystemTime> time = connection_info.expirationPeerCertificate();
+          if (!time.has_value()) {
+            return EmptyString;
+          }
+          return AccessLogDateTimeFormatter::fromTime(time.value());
         });
   } else if (field_name == "UPSTREAM_TRANSPORT_FAILURE_REASON") {
-    field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) {
-      if (!stream_info.upstreamTransportFailureReason().empty()) {
-        return stream_info.upstreamTransportFailureReason();
-      } else {
-        return UnspecifiedValueString;
-      }
-    };
+    field_extractor_ = std::make_unique<StreamInfoOptionalStringFieldExtractor>(
+        [](const StreamInfo::StreamInfo& stream_info) {
+          absl::optional<std::string> result;
+          if (!stream_info.upstreamTransportFailureReason().empty()) {
+            result = stream_info.upstreamTransportFailureReason();
+          }
+          return result;
+        });
   } else {
     throw EnvoyException(fmt::format("Not supported field in StreamInfo: {}", field_name));
   }
@@ -523,14 +719,27 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) {
 std::string StreamInfoFormatter::format(const Http::HeaderMap&, const Http::HeaderMap&,
                                         const Http::HeaderMap&,
                                         const StreamInfo::StreamInfo& stream_info) const {
-  return field_extractor_(stream_info);
+  return field_extractor_->extract(stream_info);
+}
+
+ProtobufWkt::Value
+StreamInfoFormatter::formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo& stream_info) const {
+  return field_extractor_->extractValue(stream_info);
 }
 
-PlainStringFormatter::PlainStringFormatter(const std::string& str) : str_(str) {}
+PlainStringFormatter::PlainStringFormatter(const std::string& str) { str_.set_string_value(str); }
 
 std::string PlainStringFormatter::format(const Http::HeaderMap&, const Http::HeaderMap&,
                                          const Http::HeaderMap&,
                                          const StreamInfo::StreamInfo&) const {
+  return str_.string_value();
+}
+
+ProtobufWkt::Value PlainStringFormatter::formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                                     const Http::HeaderMap&,
+                                                     const StreamInfo::StreamInfo&) const {
   return str_;
 }
 
@@ -539,25 +748,36 @@ HeaderFormatter::HeaderFormatter(const std::string& main_header,
                                  absl::optional<size_t> max_length)
     : main_header_(main_header), alternative_header_(alternative_header), max_length_(max_length) {}
 
-std::string HeaderFormatter::format(const Http::HeaderMap& headers) const {
+const Http::HeaderEntry* HeaderFormatter::findHeader(const Http::HeaderMap& headers) const {
   const Http::HeaderEntry* header = headers.get(main_header_);
 
   if (!header && !alternative_header_.get().empty()) {
-    header = headers.get(alternative_header_);
+    return headers.get(alternative_header_);
   }
 
-  std::string header_value_string;
+  return header;
+}
+
+std::string HeaderFormatter::format(const Http::HeaderMap& headers) const {
+  const Http::HeaderEntry* header = findHeader(headers);
   if (!header) {
-    header_value_string = UnspecifiedValueString;
-  } else {
-    header_value_string = std::string(header->value().getStringView());
+    return UnspecifiedValueString;
   }
 
-  if (max_length_ && header_value_string.length() > max_length_.value()) {
-    return header_value_string.substr(0, max_length_.value());
+  std::string val = std::string(header->value().getStringView());
+  truncate(val, max_length_);
+  return val;
+}
+
+ProtobufWkt::Value HeaderFormatter::formatValue(const Http::HeaderMap& headers) const {
+  const Http::HeaderEntry* header = findHeader(headers);
+  if (!header) {
+    return unspecifiedValue();
   }
 
-  return header_value_string;
+  std::string val = std::string(header->value().getStringView());
+  truncate(val, max_length_);
+  return stringValue(val);
 }
 
 ResponseHeaderFormatter::ResponseHeaderFormatter(const std::string& main_header,
@@ -572,6 +792,13 @@ std::string ResponseHeaderFormatter::format(const Http::HeaderMap&,
   return HeaderFormatter::format(response_headers);
 }
 
+ProtobufWkt::Value ResponseHeaderFormatter::formatValue(const Http::HeaderMap&,
+                                                        const Http::HeaderMap& response_headers,
+                                                        const Http::HeaderMap&,
+                                                        const StreamInfo::StreamInfo&) const {
+  return HeaderFormatter::formatValue(response_headers);
+}
+
 RequestHeaderFormatter::RequestHeaderFormatter(const std::string& main_header,
                                                const std::string& alternative_header,
                                                absl::optional<size_t> max_length)
@@ -583,6 +810,13 @@ std::string RequestHeaderFormatter::format(const Http::HeaderMap& request_header
   return HeaderFormatter::format(request_headers);
 }
 
+ProtobufWkt::Value RequestHeaderFormatter::formatValue(const Http::HeaderMap& request_headers,
+                                                       const Http::HeaderMap&,
+                                                       const Http::HeaderMap&,
+                                                       const StreamInfo::StreamInfo&) const {
+  return HeaderFormatter::formatValue(request_headers);
+}
+
 ResponseTrailerFormatter::ResponseTrailerFormatter(const std::string& main_header,
                                                    const std::string& alternative_header,
                                                    absl::optional<size_t> max_length)
@@ -594,33 +828,48 @@ std::string ResponseTrailerFormatter::format(const Http::HeaderMap&, const Http:
   return HeaderFormatter::format(response_trailers);
 }
 
+ProtobufWkt::Value ResponseTrailerFormatter::formatValue(const Http::HeaderMap&,
+                                                         const Http::HeaderMap&,
+                                                         const Http::HeaderMap& response_trailers,
+                                                         const StreamInfo::StreamInfo&) const {
+  return HeaderFormatter::formatValue(response_trailers);
+}
+
 MetadataFormatter::MetadataFormatter(const std::string& filter_namespace,
                                      const std::vector<std::string>& path,
                                      absl::optional<size_t> max_length)
     : filter_namespace_(filter_namespace), path_(path), max_length_(max_length) {}
 
-std::string MetadataFormatter::format(const envoy::api::v2::core::Metadata& metadata) const {
-  const Protobuf::Message* data;
+std::string
+MetadataFormatter::formatMetadata(const envoy::api::v2::core::Metadata& metadata) const {
+  ProtobufWkt::Value value = formatMetadataValue(metadata);
+  if (value.kind_case() == ProtobufWkt::Value::kNullValue) {
+    return UnspecifiedValueString;
+  }
+
+  std::string json = MessageUtil::getJsonStringFromMessage(value, false, true);
+  truncate(json, max_length_);
+  return json;
+}
+
+ProtobufWkt::Value
+MetadataFormatter::formatMetadataValue(const envoy::api::v2::core::Metadata& metadata) const {
   if (path_.empty()) {
     const auto filter_it = metadata.filter_metadata().find(filter_namespace_);
     if (filter_it == metadata.filter_metadata().end()) {
-      return UnspecifiedValueString;
-    }
-    data = &(filter_it->second);
-  } else {
-    const ProtobufWkt::Value& val = Metadata::metadataValue(metadata, filter_namespace_, path_);
-    if (val.kind_case() == ProtobufWkt::Value::KindCase::KIND_NOT_SET) {
-      return UnspecifiedValueString;
+      return unspecifiedValue();
     }
-    data = &val;
+    ProtobufWkt::Value output;
+    output.mutable_struct_value()->CopyFrom(filter_it->second);
+    return output;
   }
-  std::string json;
-  const auto status = Protobuf::util::MessageToJsonString(*data, &json);
-  RELEASE_ASSERT(status.ok(), "");
-  if (max_length_ && json.length() > max_length_.value()) {
-    return json.substr(0, max_length_.value());
+
+  const ProtobufWkt::Value& val = Metadata::metadataValue(metadata, filter_namespace_, path_);
+  if (val.kind_case() == ProtobufWkt::Value::KindCase::KIND_NOT_SET) {
+    return unspecifiedValue();
   }
-  return json;
+
+  return val;
 }
 
 // TODO(glicht): Consider adding support for route/listener/cluster metadata as suggested by @htuch.
@@ -633,23 +882,35 @@ DynamicMetadataFormatter::DynamicMetadataFormatter(const std::string& filter_nam
 std::string DynamicMetadataFormatter::format(const Http::HeaderMap&, const Http::HeaderMap&,
                                              const Http::HeaderMap&,
                                              const StreamInfo::StreamInfo& stream_info) const {
-  return MetadataFormatter::format(stream_info.dynamicMetadata());
+  return MetadataFormatter::formatMetadata(stream_info.dynamicMetadata());
+}
+
+ProtobufWkt::Value
+DynamicMetadataFormatter::formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                      const Http::HeaderMap&,
+                                      const StreamInfo::StreamInfo& stream_info) const {
+  return MetadataFormatter::formatMetadataValue(stream_info.dynamicMetadata());
 }
 
 FilterStateFormatter::FilterStateFormatter(const std::string& key,
                                            absl::optional<size_t> max_length)
     : key_(key), max_length_(max_length) {}
 
-std::string FilterStateFormatter::format(const Http::HeaderMap&, const Http::HeaderMap&,
-                                         const Http::HeaderMap&,
-                                         const StreamInfo::StreamInfo& stream_info) const {
+ProtobufTypes::MessagePtr
+FilterStateFormatter::filterState(const StreamInfo::StreamInfo& stream_info) const {
   const StreamInfo::FilterState& filter_state = stream_info.filterState();
   if (!filter_state.hasDataWithName(key_)) {
-    return UnspecifiedValueString;
+    return nullptr;
   }
 
   const auto& object = filter_state.getDataReadOnly<StreamInfo::FilterState::Object>(key_);
-  ProtobufTypes::MessagePtr proto = object.serializeAsProto();
+  return object.serializeAsProto();
+}
+
+std::string FilterStateFormatter::format(const Http::HeaderMap&, const Http::HeaderMap&,
+                                         const Http::HeaderMap&,
+                                         const StreamInfo::StreamInfo& stream_info) const {
+  ProtobufTypes::MessagePtr proto = filterState(stream_info);
   if (proto == nullptr) {
     return UnspecifiedValueString;
   }
@@ -661,12 +922,29 @@ std::string FilterStateFormatter::format(const Http::HeaderMap&, const Http::Hea
     // TODO(lizan): add support of unknown Any.
     return UnspecifiedValueString;
   }
-  if (max_length_.has_value() && value.length() > max_length_.value()) {
-    return value.substr(0, max_length_.value());
-  }
+
+  truncate(value, max_length_);
   return value;
 }
 
+ProtobufWkt::Value
+FilterStateFormatter::formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                  const Http::HeaderMap&,
+                                  const StreamInfo::StreamInfo& stream_info) const {
+  ProtobufTypes::MessagePtr proto = filterState(stream_info);
+  if (proto == nullptr) {
+    return unspecifiedValue();
+  }
+
+  ProtobufWkt::Value val;
+  try {
+    MessageUtil::jsonConvertValue(*proto, val);
+  } catch (EnvoyException& ex) {
+    return unspecifiedValue();
+  }
+  return val;
+}
+
 StartTimeFormatter::StartTimeFormatter(const std::string& format) : date_formatter_(format) {}
 
 std::string StartTimeFormatter::format(const Http::HeaderMap&, const Http::HeaderMap&,
@@ -679,5 +957,11 @@ std::string StartTimeFormatter::format(const Http::HeaderMap&, const Http::Heade
   }
 }
 
+ProtobufWkt::Value StartTimeFormatter::formatValue(
+    const Http::HeaderMap& request_headers, const Http::HeaderMap& response_headers,
+    const Http::HeaderMap& response_trailers, const StreamInfo::StreamInfo& stream_info) const {
+  return stringValue(format(request_headers, response_headers, response_trailers, stream_info));
+}
+
 } // namespace AccessLog
 } // namespace Envoy
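The formatter source above replaces per-field string lambdas with FieldExtractor objects so that each %FIELD% can produce both a log string and a typed value, mapping absent data to the unspecified placeholder ("-" in text logs) or a null value. A simplified, self-contained analogue of the optional-string extractor, using standard-library stand-ins rather than Envoy's StreamInfo and ProtobufWkt::Value types (illustrative only):

#include <functional>
#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <variant>

// Stand-ins used only for this sketch.
struct StreamInfo {
  std::optional<std::string> route_name;
};
using Value = std::variant<std::monostate, std::string>; // monostate ~ null value
constexpr const char* kUnspecified = "-";

// Analogue of StreamInfoFormatter::FieldExtractor.
class FieldExtractor {
public:
  virtual ~FieldExtractor() = default;
  virtual std::string extract(const StreamInfo& info) const = 0;
  virtual Value extractValue(const StreamInfo& info) const = 0;
};

// Analogue of StreamInfoOptionalStringFieldExtractor: absent data becomes
// "-" in text logs and a null value in typed (JSON) logs.
class OptionalStringFieldExtractor : public FieldExtractor {
public:
  using Fn = std::function<std::optional<std::string>(const StreamInfo&)>;
  explicit OptionalStringFieldExtractor(Fn f) : fn_(std::move(f)) {}

  std::string extract(const StreamInfo& info) const override {
    const auto str = fn_(info);
    return str ? *str : kUnspecified;
  }
  Value extractValue(const StreamInfo& info) const override {
    const auto str = fn_(info);
    return str ? Value{*str} : Value{};
  }

private:
  Fn fn_;
};

int main() {
  std::unique_ptr<FieldExtractor> route = std::make_unique<OptionalStringFieldExtractor>(
      [](const StreamInfo& info) { return info.route_name; });

  StreamInfo with_route{std::string{"api_route"}};
  StreamInfo without_route{std::nullopt};

  std::cout << route->extract(with_route) << "\n";    // api_route
  std::cout << route->extract(without_route) << "\n"; // -
  std::cout << std::holds_alternative<std::monostate>(route->extractValue(without_route))
            << "\n"; // 1 (null)
  return 0;
}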
diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h
index 1094ae732f93..741a17272ace 100644
--- a/source/common/access_log/access_log_formatter.h
+++ b/source/common/access_log/access_log_formatter.h
@@ -73,8 +73,6 @@ class AccessLogFormatUtils {
 public:
   static FormatterPtr defaultAccessLogFormatter();
   static const std::string& protocolToString(const absl::optional<Http::Protocol>& protocol);
-  static std::string durationToString(const absl::optional<std::chrono::nanoseconds>& time);
-  static std::string durationToString(const std::chrono::nanoseconds& time);
 
 private:
   AccessLogFormatUtils();
@@ -101,7 +99,8 @@ class FormatterImpl : public Formatter {
 
 class JsonFormatterImpl : public Formatter {
 public:
-  JsonFormatterImpl(std::unordered_map<std::string, std::string>& format_mapping);
+  JsonFormatterImpl(std::unordered_map<std::string, std::string>& format_mapping,
+                    bool preserve_types);
 
   // Formatter::format
   std::string format(const Http::HeaderMap& request_headers,
@@ -110,98 +109,128 @@ class JsonFormatterImpl : public Formatter {
                      const StreamInfo::StreamInfo& stream_info) const override;
 
 private:
-  std::vector<FormatterProviderPtr> providers_;
-  std::map<const std::string, Envoy::AccessLog::FormatterPtr> json_output_format_;
+  const bool preserve_types_;
+  std::map<const std::string, const std::vector<FormatterProviderPtr>> json_output_format_;
 
-  std::unordered_map<std::string, std::string>
-  toMap(const Http::HeaderMap& request_headers, const Http::HeaderMap& response_headers,
-        const Http::HeaderMap& response_trailers, const StreamInfo::StreamInfo& stream_info) const;
+  ProtobufWkt::Struct toStruct(const Http::HeaderMap& request_headers,
+                               const Http::HeaderMap& response_headers,
+                               const Http::HeaderMap& response_trailers,
+                               const StreamInfo::StreamInfo& stream_info) const;
 };
 
 /**
- * Formatter for string literal. It ignores headers and stream info and returns string by which it
- * was initialized.
+ * FormatterProvider for string literals. It ignores headers and stream info and returns the
+ * string with which it was initialized.
  */
 class PlainStringFormatter : public FormatterProvider {
 public:
   PlainStringFormatter(const std::string& str);
 
-  // Formatter::format
+  // FormatterProvider
   std::string format(const Http::HeaderMap&, const Http::HeaderMap&, const Http::HeaderMap&,
                      const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
 
 private:
-  std::string str_;
+  ProtobufWkt::Value str_;
 };
 
+/**
+ * Base formatter for headers.
+ */
 class HeaderFormatter {
 public:
   HeaderFormatter(const std::string& main_header, const std::string& alternative_header,
                   absl::optional<size_t> max_length);
 
+protected:
   std::string format(const Http::HeaderMap& headers) const;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap& headers) const;
 
 private:
+  const Http::HeaderEntry* findHeader(const Http::HeaderMap& headers) const;
+
   Http::LowerCaseString main_header_;
   Http::LowerCaseString alternative_header_;
   absl::optional<size_t> max_length_;
 };
 
 /**
- * Formatter based on request header.
+ * FormatterProvider for request headers.
  */
 class RequestHeaderFormatter : public FormatterProvider, HeaderFormatter {
 public:
   RequestHeaderFormatter(const std::string& main_header, const std::string& alternative_header,
                          absl::optional<size_t> max_length);
 
-  // Formatter::format
+  // FormatterProvider
   std::string format(const Http::HeaderMap& request_headers, const Http::HeaderMap&,
                      const Http::HeaderMap&, const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
 };
 
 /**
- * Formatter based on the response header.
+ * FormatterProvider for response headers.
  */
 class ResponseHeaderFormatter : public FormatterProvider, HeaderFormatter {
 public:
   ResponseHeaderFormatter(const std::string& main_header, const std::string& alternative_header,
                           absl::optional<size_t> max_length);
 
-  // Formatter::format
+  // FormatterProvider
   std::string format(const Http::HeaderMap&, const Http::HeaderMap& response_headers,
                      const Http::HeaderMap&, const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
 };
 
 /**
- * Formatter based on the response trailer.
+ * FormatterProvider for response trailers.
  */
 class ResponseTrailerFormatter : public FormatterProvider, HeaderFormatter {
 public:
   ResponseTrailerFormatter(const std::string& main_header, const std::string& alternative_header,
                            absl::optional<size_t> max_length);
 
-  // Formatter::format
+  // FormatterProvider
   std::string format(const Http::HeaderMap&, const Http::HeaderMap&,
                      const Http::HeaderMap& response_trailers,
                      const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
 };
 
 /**
- * Formatter based on the StreamInfo field.
+ * FormatterProvider based on StreamInfo fields.
  */
 class StreamInfoFormatter : public FormatterProvider {
 public:
   StreamInfoFormatter(const std::string& field_name);
 
-  // Formatter::format
+  // FormatterProvider
   std::string format(const Http::HeaderMap&, const Http::HeaderMap&, const Http::HeaderMap&,
-                     const StreamInfo::StreamInfo& stream_info) const override;
+                     const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
+
+  class FieldExtractor {
+  public:
+    virtual ~FieldExtractor() = default;
 
-  using FieldExtractor = std::function<std::string(const StreamInfo::StreamInfo&)>;
+    virtual std::string extract(const StreamInfo::StreamInfo&) const PURE;
+    virtual ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo&) const PURE;
+  };
+  using FieldExtractorPtr = std::unique_ptr<FieldExtractor>;
 
 private:
-  FieldExtractor field_extractor_;
+  FieldExtractorPtr field_extractor_;
 };
 
 /**
@@ -212,7 +241,9 @@ class MetadataFormatter {
   MetadataFormatter(const std::string& filter_namespace, const std::vector<std::string>& path,
                     absl::optional<size_t> max_length);
 
-  std::string format(const envoy::api::v2::core::Metadata& metadata) const;
+protected:
+  std::string formatMetadata(const envoy::api::v2::core::Metadata& metadata) const;
+  ProtobufWkt::Value formatMetadataValue(const envoy::api::v2::core::Metadata& metadata) const;
 
 private:
   std::string filter_namespace_;
@@ -221,42 +252,55 @@ class MetadataFormatter {
 };
 
 /**
- * Formatter based on the DynamicMetadata from StreamInfo.
+ * FormatterProvider for DynamicMetadata from StreamInfo.
  */
 class DynamicMetadataFormatter : public FormatterProvider, MetadataFormatter {
 public:
   DynamicMetadataFormatter(const std::string& filter_namespace,
                            const std::vector<std::string>& path, absl::optional<size_t> max_length);
 
-  // FormatterProvider::format
+  // FormatterProvider
   std::string format(const Http::HeaderMap&, const Http::HeaderMap&, const Http::HeaderMap&,
-                     const StreamInfo::StreamInfo& stream_info) const override;
+                     const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
 };
 
 /**
- * Formatter based on the FilterState from StreamInfo.
+ * FormatterProvider for FilterState from StreamInfo.
  */
 class FilterStateFormatter : public FormatterProvider {
 public:
   FilterStateFormatter(const std::string& key, absl::optional<size_t> max_length);
 
-  // FormatterProvider::format
+  // FormatterProvider
   std::string format(const Http::HeaderMap&, const Http::HeaderMap&, const Http::HeaderMap&,
-                     const StreamInfo::StreamInfo& stream_info) const override;
+                     const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
 
 private:
+  ProtobufTypes::MessagePtr filterState(const StreamInfo::StreamInfo& stream_info) const;
+
   std::string key_;
   absl::optional<size_t> max_length_;
 };
 
 /**
- * Formatter
+ * FormatterProvider for request start time from StreamInfo.
  */
 class StartTimeFormatter : public FormatterProvider {
 public:
   StartTimeFormatter(const std::string& format);
+
+  // FormatterProvider
   std::string format(const Http::HeaderMap&, const Http::HeaderMap&, const Http::HeaderMap&,
                      const StreamInfo::StreamInfo&) const override;
+  ProtobufWkt::Value formatValue(const Http::HeaderMap&, const Http::HeaderMap&,
+                                 const Http::HeaderMap&,
+                                 const StreamInfo::StreamInfo&) const override;
 
 private:
   const Envoy::DateFormatter date_formatter_;
diff --git a/source/common/common/logger.h b/source/common/common/logger.h
index 810c5fff52fa..9602f955e09f 100644
--- a/source/common/common/logger.h
+++ b/source/common/common/logger.h
@@ -19,52 +19,52 @@
 namespace Envoy {
 namespace Logger {
 
-// clang-format off
 // TODO: find out a way for extensions to register new logger IDs
-#define ALL_LOGGER_IDS(FUNCTION) \
-  FUNCTION(admin)                \
-  FUNCTION(aws)                  \
-  FUNCTION(assert)               \
-  FUNCTION(backtrace)            \
-  FUNCTION(client)               \
-  FUNCTION(config)               \
-  FUNCTION(connection)           \
-  FUNCTION(conn_handler)         \
-  FUNCTION(dubbo)                \
-  FUNCTION(file)                 \
-  FUNCTION(filter)               \
-  FUNCTION(forward_proxy)        \
-  FUNCTION(grpc)                 \
-  FUNCTION(hc)                   \
-  FUNCTION(health_checker)       \
-  FUNCTION(http)                 \
-  FUNCTION(http2)                \
-  FUNCTION(hystrix)              \
-  FUNCTION(init)                 \
-  FUNCTION(io)                   \
-  FUNCTION(jwt)                  \
-  FUNCTION(kafka)                \
-  FUNCTION(lua)                  \
-  FUNCTION(main)                 \
-  FUNCTION(misc)                 \
-  FUNCTION(mongo)                \
-  FUNCTION(quic)                 \
-  FUNCTION(quic_stream)          \
-  FUNCTION(pool)                 \
-  FUNCTION(rbac)                 \
-  FUNCTION(redis)                \
-  FUNCTION(router)               \
-  FUNCTION(runtime)              \
-  FUNCTION(stats)                \
-  FUNCTION(secret)               \
-  FUNCTION(tap)                  \
-  FUNCTION(testing)              \
-  FUNCTION(thrift)               \
-  FUNCTION(tracing)              \
-  FUNCTION(upstream)             \
-  FUNCTION(udp)                  \
+#define ALL_LOGGER_IDS(FUNCTION)                                                                   \
+  FUNCTION(admin)                                                                                  \
+  FUNCTION(aws)                                                                                    \
+  FUNCTION(assert)                                                                                 \
+  FUNCTION(backtrace)                                                                              \
+  FUNCTION(client)                                                                                 \
+  FUNCTION(config)                                                                                 \
+  FUNCTION(connection)                                                                             \
+  FUNCTION(conn_handler)                                                                           \
+  FUNCTION(dubbo)                                                                                  \
+  FUNCTION(file)                                                                                   \
+  FUNCTION(filter)                                                                                 \
+  FUNCTION(forward_proxy)                                                                          \
+  FUNCTION(grpc)                                                                                   \
+  FUNCTION(hc)                                                                                     \
+  FUNCTION(health_checker)                                                                         \
+  FUNCTION(http)                                                                                   \
+  FUNCTION(http2)                                                                                  \
+  FUNCTION(hystrix)                                                                                \
+  FUNCTION(init)                                                                                   \
+  FUNCTION(io)                                                                                     \
+  FUNCTION(jwt)                                                                                    \
+  FUNCTION(kafka)                                                                                  \
+  FUNCTION(lua)                                                                                    \
+  FUNCTION(main)                                                                                   \
+  FUNCTION(misc)                                                                                   \
+  FUNCTION(mongo)                                                                                  \
+  FUNCTION(quic)                                                                                   \
+  FUNCTION(quic_stream)                                                                            \
+  FUNCTION(pool)                                                                                   \
+  FUNCTION(rbac)                                                                                   \
+  FUNCTION(redis)                                                                                  \
+  FUNCTION(router)                                                                                 \
+  FUNCTION(runtime)                                                                                \
+  FUNCTION(stats)                                                                                  \
+  FUNCTION(secret)                                                                                 \
+  FUNCTION(tap)                                                                                    \
+  FUNCTION(testing)                                                                                \
+  FUNCTION(thrift)                                                                                 \
+  FUNCTION(tracing)                                                                                \
+  FUNCTION(upstream)                                                                               \
+  FUNCTION(udp)                                                                                    \
   FUNCTION(wasm)
 
+// clang-format off
 enum class Id {
   ALL_LOGGER_IDS(GENERATE_ENUM)
 };
diff --git a/source/common/config/BUILD b/source/common/config/BUILD
index bbce50a5a4d4..807ba1d47d3b 100644
--- a/source/common/config/BUILD
+++ b/source/common/config/BUILD
@@ -8,6 +8,18 @@ load(
 
 envoy_package()
 
+envoy_cc_library(
+    name = "api_type_oracle_lib",
+    srcs = ["api_type_oracle.cc"],
+    hdrs = ["api_type_oracle.h"],
+    deps = [
+        "//source/common/protobuf",
+        "//source/common/protobuf:utility_lib",
+        "@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto",
+        "@com_github_cncf_udpa//udpa/type/v1:pkg_cc_proto",
+    ],
+)
+
 envoy_cc_library(
     name = "config_provider_lib",
     srcs = ["config_provider_impl.cc"],
@@ -204,6 +216,7 @@ envoy_cc_library(
         "//source/common/protobuf",
         "//source/common/protobuf:utility_lib",
         "@envoy_api//envoy/api/v2/core:pkg_cc_proto",
+        "@envoy_api//envoy/type/metadata/v2:pkg_cc_proto",
     ],
 )
 
@@ -289,7 +302,9 @@ envoy_cc_library(
     srcs = ["utility.cc"],
     hdrs = ["utility.h"],
     deps = [
+        ":api_type_oracle_lib",
         ":resources_lib",
+        ":version_converter_lib",
         "//include/envoy/config:grpc_mux_interface",
         "//include/envoy/config:subscription_interface",
         "//include/envoy/local_info:local_info_interface",
@@ -307,7 +322,7 @@ envoy_cc_library(
         "//source/common/stats:stats_lib",
         "//source/common/stats:stats_matcher_lib",
         "//source/common/stats:tag_producer_lib",
-        "@com_github_cncf_udpa//udpa/type/v1:typed_struct_cc",
+        "@com_github_cncf_udpa//udpa/type/v1:pkg_cc_proto",
         "@envoy_api//envoy/api/v2/core:pkg_cc_proto",
         "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto",
         "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto",
diff --git a/source/common/config/api_type_oracle.cc b/source/common/config/api_type_oracle.cc
new file mode 100644
index 000000000000..3381b4bdd3c7
--- /dev/null
+++ b/source/common/config/api_type_oracle.cc
@@ -0,0 +1,73 @@
+#include "common/config/api_type_oracle.h"
+
+#include "common/protobuf/utility.h"
+
+#include "udpa/annotations/versioning.pb.h"
+#include "udpa/type/v1/typed_struct.pb.h"
+
+namespace Envoy {
+namespace Config {
+
+namespace {
+
+using V2ApiTypeMap = absl::flat_hash_map<std::string, std::string>;
+
+const V2ApiTypeMap& v2ApiTypeMap() {
+  CONSTRUCT_ON_FIRST_USE(V2ApiTypeMap,
+                         {"envoy.ip_tagging", "envoy.config.filter.http.ip_tagging.v2.IPTagging"});
+}
+
+} // namespace
+
+const Protobuf::Descriptor*
+ApiTypeOracle::inferEarlierVersionDescriptor(absl::string_view extension_name,
+                                             const ProtobufWkt::Any& typed_config,
+                                             absl::string_view target_type) {
+  ENVOY_LOG_MISC(trace, "Inferring earlier type for {} (extension {})", target_type,
+                 extension_name);
+  // Determine the type of configuration implied by typed_config.
+  absl::string_view type = TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url());
+  udpa::type::v1::TypedStruct typed_struct;
+  if (type == udpa::type::v1::TypedStruct::default_instance().GetDescriptor()->full_name()) {
+    MessageUtil::unpackTo(typed_config, typed_struct);
+    type = TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url());
+    ENVOY_LOG_MISC(trace, "Extracted embedded type {}", type);
+  }
+
+  // If we can't find an explicit type, this is likely v2, so we need to consult
+  // a static map.
+  if (type.empty()) {
+    auto it = v2ApiTypeMap().find(extension_name);
+    if (it == v2ApiTypeMap().end()) {
+      ENVOY_LOG_MISC(trace, "Missing v2 API type map");
+      return nullptr;
+    }
+    type = it->second;
+  }
+
+  // Determine if there is an earlier API version for target_type.
+  std::string previous_target_type;
+  const Protobuf::Descriptor* desc =
+      Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(std::string{target_type});
+  if (desc == nullptr) {
+    ENVOY_LOG_MISC(trace, "No descriptor found for {}", target_type);
+    return nullptr;
+  }
+  if (desc->options().HasExtension(udpa::annotations::versioning)) {
+    previous_target_type =
+        desc->options().GetExtension(udpa::annotations::versioning).previous_message_type();
+  }
+
+  if (!previous_target_type.empty() && type != target_type) {
+    const Protobuf::Descriptor* desc =
+        Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(previous_target_type);
+    ASSERT(desc != nullptr);
+    ENVOY_LOG_MISC(trace, "Inferred {}", desc->full_name());
+    return desc;
+  }
+
+  return nullptr;
+}
+
+} // namespace Config
+} // namespace Envoy
diff --git a/source/common/config/api_type_oracle.h b/source/common/config/api_type_oracle.h
new file mode 100644
index 000000000000..b1694330fa07
--- /dev/null
+++ b/source/common/config/api_type_oracle.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "common/protobuf/protobuf.h"
+
+#include "absl/strings/string_view.h"
+
+namespace Envoy {
+namespace Config {
+
+class ApiTypeOracle {
+public:
+  /**
+   * Based on the presented extension config and name, determine if this is
+   * configuration for an earlier version than the latest alpha version
+   * supported by Envoy internally. If so, return the descriptor for the earlier
+   * message, to support upgrading via VersionConverter::upgrade().
+   *
+   * @param extension_name name of extension corresponding to config.
+   * @param typed_config opaque config packed in google.protobuf.Any.
+   * @param target_type target type of conversion.
+   * @return const Protobuf::Descriptor* descriptor for earlier message version
+   *         corresponding to config, if any, otherwise nullptr.
+   */
+  static const Protobuf::Descriptor*
+  inferEarlierVersionDescriptor(absl::string_view extension_name,
+                                const ProtobufWkt::Any& typed_config,
+                                absl::string_view target_type);
+};
+
+} // namespace Config
+} // namespace Envoy
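The oracle declared above answers a single question: given the type carried in an extension's typed_config (or, for untyped v2 configs, the extension name), is there an earlier API version of the target message that should be deserialized first? A standalone sketch of that decision, with the descriptor and annotation lookups replaced by plain maps; the v3 type name below is hypothetical and only for illustration:

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Hypothetical stand-in for what udpa.annotations.versioning records on the
// generated descriptors: latest type name -> previous version's type name.
const std::map<std::string, std::string>& previousMessageType() {
  static const std::map<std::string, std::string> m = {
      {"envoy.config.filter.http.ip_tagging.v3.IPTagging",
       "envoy.config.filter.http.ip_tagging.v2.IPTagging"}};
  return m;
}

// Stand-in for the v2ApiTypeMap() fallback keyed by extension name.
const std::map<std::string, std::string>& v2ApiTypeMap() {
  static const std::map<std::string, std::string> m = {
      {"envoy.ip_tagging", "envoy.config.filter.http.ip_tagging.v2.IPTagging"}};
  return m;
}

// Returns the earlier type to unpack into, or nullopt if the config already
// names the target type (or no earlier version is known).
std::optional<std::string> inferEarlierVersion(const std::string& extension_name,
                                               const std::string& config_type,
                                               const std::string& target_type) {
  std::string type = config_type;
  if (type.empty()) {
    // No explicit type: likely a v2 struct config, so consult the static map.
    const auto it = v2ApiTypeMap().find(extension_name);
    if (it == v2ApiTypeMap().end()) {
      return std::nullopt;
    }
    type = it->second;
  }
  const auto prev = previousMessageType().find(target_type);
  if (prev != previousMessageType().end() && type != target_type) {
    return prev->second;
  }
  return std::nullopt;
}

int main() {
  // Untyped v2 config for the ip_tagging filter, latest target type: earlier version inferred.
  std::cout << inferEarlierVersion("envoy.ip_tagging", "",
                                   "envoy.config.filter.http.ip_tagging.v3.IPTagging")
                   .value_or("<none>")
            << "\n";
  // Config already names the latest type: nothing to infer.
  std::cout << inferEarlierVersion("envoy.ip_tagging",
                                   "envoy.config.filter.http.ip_tagging.v3.IPTagging",
                                   "envoy.config.filter.http.ip_tagging.v3.IPTagging")
                   .value_or("<none>")
            << "\n";
  return 0;
}

In the real oracle the previous-type lookup comes from the udpa.annotations.versioning extension on the generated descriptor rather than a hand-written map.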
diff --git a/source/common/config/metadata.cc b/source/common/config/metadata.cc
index fd249e9fdf7d..167057d17ce7 100644
--- a/source/common/config/metadata.cc
+++ b/source/common/config/metadata.cc
@@ -5,6 +5,18 @@
 namespace Envoy {
 namespace Config {
 
+MetadataKey::MetadataKey(const envoy::type::metadata::v2::MetadataKey& metadata_key)
+    : key_(metadata_key.key()) {
+  for (const auto& seg : metadata_key.path()) {
+    path_.push_back(seg.key());
+  }
+}
+
+const ProtobufWkt::Value& Metadata::metadataValue(const envoy::api::v2::core::Metadata& metadata,
+                                                  const MetadataKey& metadata_key) {
+  return metadataValue(metadata, metadata_key.key_, metadata_key.path_);
+}
+
 const ProtobufWkt::Value& Metadata::metadataValue(const envoy::api::v2::core::Metadata& metadata,
                                                   const std::string& filter,
                                                   const std::vector<std::string>& path) {
diff --git a/source/common/config/metadata.h b/source/common/config/metadata.h
index cf8b00a48979..e6894860b06b 100644
--- a/source/common/config/metadata.h
+++ b/source/common/config/metadata.h
@@ -7,12 +7,23 @@
 #include "envoy/api/v2/core/base.pb.h"
 #include "envoy/config/typed_metadata.h"
 #include "envoy/registry/registry.h"
+#include "envoy/type/metadata/v2/metadata.pb.h"
 
 #include "common/protobuf/protobuf.h"
 
 namespace Envoy {
 namespace Config {
 
+/**
+ * MetadataKey represents the key name and path used to retrieve a value from metadata.
+ */
+struct MetadataKey {
+  std::string key_;
+  std::vector<std::string> path_;
+
+  MetadataKey(const envoy::type::metadata::v2::MetadataKey& metadata_key);
+};
+
 /**
  * Config metadata helpers.
  */
@@ -38,6 +49,15 @@ class Metadata {
   static const ProtobufWkt::Value& metadataValue(const envoy::api::v2::core::Metadata& metadata,
                                                  const std::string& filter,
                                                  const std::vector<std::string>& path);
+  /**
+   * Look up the value by a metadata key from a Metadata.
+   * @param metadata reference.
+   * @param metadata_key the key name and path used to retrieve the value.
+   * @return const ProtobufWkt::Value& value if found, empty if not found.
+   */
+  static const ProtobufWkt::Value& metadataValue(const envoy::api::v2::core::Metadata& metadata,
+                                                 const MetadataKey& metadata_key);
+
   /**
    * Obtain mutable reference to metadata value for a given filter and key.
    * @param metadata reference.
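For orientation, a hedged sketch of the new MetadataKey-based lookup follows; it is not part of the patch, and the filter name, path, and helper function are illustrative only.

```cpp
// Hypothetical caller: look up metadata["envoy.lb"]["canary"].
const ProtobufWkt::Value& canaryValue(const envoy::api::v2::core::Metadata& metadata) {
  envoy::type::metadata::v2::MetadataKey key_proto;
  key_proto.set_key("envoy.lb");
  key_proto.add_path()->set_key("canary");
  const Envoy::Config::MetadataKey key(key_proto);
  // Returns the empty Value singleton when the key/path is not present.
  return Envoy::Config::Metadata::metadataValue(metadata, key);
}
```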
diff --git a/source/common/config/remote_data_fetcher.h b/source/common/config/remote_data_fetcher.h
index 6455e44abf1b..2ef6b39a3c4f 100644
--- a/source/common/config/remote_data_fetcher.h
+++ b/source/common/config/remote_data_fetcher.h
@@ -68,7 +68,7 @@ class RemoteDataFetcher : public Logger::Loggable<Logger::Id::config>,
 
 private:
   Upstream::ClusterManager& cm_;
-  const envoy::api::v2::core::HttpUri& uri_;
+  const envoy::api::v2::core::HttpUri uri_;
   const std::string content_hash_;
   RemoteDataFetcherCallback& callback_;
 
diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc
index 88afd9033799..bc4322a15186 100644
--- a/source/common/config/utility.cc
+++ b/source/common/config/utility.cc
@@ -9,7 +9,9 @@
 #include "common/common/fmt.h"
 #include "common/common/hex.h"
 #include "common/common/utility.h"
+#include "common/config/api_type_oracle.h"
 #include "common/config/resources.h"
+#include "common/config/version_converter.h"
 #include "common/config/well_known_names.h"
 #include "common/protobuf/protobuf.h"
 #include "common/protobuf/utility.h"
@@ -240,20 +242,26 @@ envoy::api::v2::ClusterLoadAssignment Utility::translateClusterHosts(
   return load_assignment;
 }
 
-namespace {
-absl::string_view protoTypeUrlToDescriptorFullName(absl::string_view type_url) {
-  size_t pos = type_url.find_last_of('/');
-  if (pos != absl::string_view::npos) {
-    type_url = type_url.substr(pos + 1);
-  }
-  return type_url;
-}
-} // namespace
-
-void Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config,
+void Utility::translateOpaqueConfig(absl::string_view extension_name,
+                                    const ProtobufWkt::Any& typed_config,
                                     const ProtobufWkt::Struct& config,
                                     ProtobufMessage::ValidationVisitor& validation_visitor,
                                     Protobuf::Message& out_proto) {
+  const Protobuf::Descriptor* earlier_version_desc = ApiTypeOracle::inferEarlierVersionDescriptor(
+      extension_name, typed_config, out_proto.GetDescriptor()->full_name());
+
+  if (earlier_version_desc != nullptr) {
+    Protobuf::DynamicMessageFactory dmf;
+    // Create a previous version message.
+    auto message = ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New());
+    ASSERT(message != nullptr);
+    // Recurse and translate the opaque config for the previous version.
+    translateOpaqueConfig(extension_name, typed_config, config, validation_visitor, *message);
+    // Update from previous version to current version.
+    VersionConverter::upgrade(*message, out_proto);
+    return;
+  }
+
   static const std::string struct_type =
       ProtobufWkt::Struct::default_instance().GetDescriptor()->full_name();
   static const std::string typed_struct_type =
@@ -263,16 +271,16 @@ void Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config,
 
     // Unpack methods will only use the fully qualified type name after the last '/'.
     // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87
-    absl::string_view type = protoTypeUrlToDescriptorFullName(typed_config.type_url());
+    absl::string_view type = TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url());
 
     if (type == typed_struct_type) {
       udpa::type::v1::TypedStruct typed_struct;
-      typed_config.UnpackTo(&typed_struct);
+      MessageUtil::unpackTo(typed_config, typed_struct);
       // if out_proto is expecting Struct, return directly
       if (out_proto.GetDescriptor()->full_name() == struct_type) {
         out_proto.CopyFrom(typed_struct.value());
       } else {
-        type = protoTypeUrlToDescriptorFullName(typed_struct.type_url());
+        type = TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url());
         if (type != out_proto.GetDescriptor()->full_name()) {
           throw EnvoyException("Invalid proto type.\nExpected " +
                                out_proto.GetDescriptor()->full_name() +
@@ -282,10 +290,10 @@ void Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config,
       }
     } // out_proto is expecting Struct, unpack directly
     else if (type != struct_type || out_proto.GetDescriptor()->full_name() == struct_type) {
-      typed_config.UnpackTo(&out_proto);
+      MessageUtil::unpackTo(typed_config, out_proto);
     } else {
       ProtobufWkt::Struct struct_config;
-      typed_config.UnpackTo(&struct_config);
+      MessageUtil::unpackTo(typed_config, struct_config);
       MessageUtil::jsonConvert(struct_config, validation_visitor, out_proto);
     }
   }
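The dynamic-message step above is easy to miss, so here is a hedged, isolated restatement; it is not part of the patch and the helper name is hypothetical.

```cpp
// Hypothetical sketch of instantiating the earlier-version message.
void instantiateEarlierVersion(const Protobuf::Descriptor* earlier_version_desc) {
  Protobuf::DynamicMessageFactory dmf;
  // GetPrototype() returns a factory-owned prototype; New() heap-allocates a
  // mutable instance of the earlier-version message.
  auto earlier_message = ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New());
  ASSERT(earlier_message != nullptr);
  // The instance can then be filled from the wire config and upgraded into the
  // latest version via VersionConverter::upgrade().
}
```

One caveat worth keeping in mind: a DynamicMessageFactory must outlive messages created from its prototypes, which is why the factory and the temporary message share a scope in translateOpaqueConfig().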
diff --git a/source/common/config/utility.h b/source/common/config/utility.h
index e67db9713892..8e8c25a52293 100644
--- a/source/common/config/utility.h
+++ b/source/common/config/utility.h
@@ -208,6 +208,7 @@ class Utility {
 
   /**
    * Translate a nested config into a proto message provided by the implementation factory.
+   * @param extension_name name of extension corresponding to config.
    * @param enclosing_message proto that contains a field 'config'. Note: the enclosing proto is
    * provided because for statically registered implementations, a custom config is generally
    * optional, which means the conversion must be done conditionally.
@@ -225,8 +226,8 @@ class Utility {
     // Fail in an obvious way if a plugin does not return a proto.
     RELEASE_ASSERT(config != nullptr, "");
 
-    translateOpaqueConfig(enclosing_message.typed_config(), enclosing_message.config(),
-                          validation_visitor, *config);
+    translateOpaqueConfig(factory.name(), enclosing_message.typed_config(),
+                          enclosing_message.config(), validation_visitor, *config);
 
     return config;
   }
@@ -268,12 +269,14 @@ class Utility {
   /**
    * Translate opaque config from google.protobuf.Any or google.protobuf.Struct to defined proto
    * message.
+   * @param extension_name name of extension corresponding to config.
    * @param typed_config opaque config packed in google.protobuf.Any
    * @param config the deprecated google.protobuf.Struct config, empty struct if doesn't exist.
    * @param validation_visitor message validation visitor instance.
    * @param out_proto the proto message instantiated by extensions
    */
-  static void translateOpaqueConfig(const ProtobufWkt::Any& typed_config,
+  static void translateOpaqueConfig(absl::string_view extension_name,
+                                    const ProtobufWkt::Any& typed_config,
                                     const ProtobufWkt::Struct& config,
                                     ProtobufMessage::ValidationVisitor& validation_visitor,
                                     Protobuf::Message& out_proto);
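A hedged call-site sketch follows; it is not part of the patch, and the extension name and wrapper function are hypothetical. The key point is that translateToFactoryConfig() now forwards factory.name() as the first argument.

```cpp
// Hypothetical direct call: translate an opaque config for a named extension.
void translateExampleConfig(const ProtobufWkt::Any& typed_config,
                            ProtobufMessage::ValidationVisitor& validation_visitor,
                            Protobuf::Message& out_proto) {
  const ProtobufWkt::Struct empty_config; // deprecated Struct form, unused here
  Envoy::Config::Utility::translateOpaqueConfig(
      "envoy.filters.http.example" /* as reported by factory.name() */, typed_config,
      empty_config, validation_visitor, out_proto);
}
```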
diff --git a/source/common/config/watch_map.cc b/source/common/config/watch_map.cc
index adc99f145f55..7cab1efa9887 100644
--- a/source/common/config/watch_map.cc
+++ b/source/common/config/watch_map.cc
@@ -150,6 +150,7 @@ std::set<std::string> WatchMap::findAdditions(const std::vector<std::string>& ne
       newly_added_to_subscription.insert(name);
       watch_interest_[name] = {watch};
     } else {
+      // Add this watch to the already-existing set at watch_interest_[name].
       entry->second.insert(watch);
     }
   }
diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc
index 5e7c6847eb33..85aad9f29a28 100644
--- a/source/common/event/dispatcher_impl.cc
+++ b/source/common/event/dispatcher_impl.cc
@@ -118,9 +118,11 @@ DispatcherImpl::createClientConnection(Network::Address::InstanceConstSharedPtr
 }
 
 Network::DnsResolverSharedPtr DispatcherImpl::createDnsResolver(
-    const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers) {
+    const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,
+    const bool use_tcp_for_dns_lookups) {
   ASSERT(isThreadSafe());
-  return Network::DnsResolverSharedPtr{new Network::DnsResolverImpl(*this, resolvers)};
+  return Network::DnsResolverSharedPtr{
+      new Network::DnsResolverImpl(*this, resolvers, use_tcp_for_dns_lookups)};
 }
 
 FileEventPtr DispatcherImpl::createFileEvent(int fd, FileReadyCb cb, FileTriggerType trigger,
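A hedged caller sketch for the new parameter; it is not part of the patch, the helper is hypothetical, and the assumption that an empty resolver list falls back to the resolver's default configuration is illustrative.

```cpp
// Hypothetical caller: request a resolver that performs lookups over TCP.
Envoy::Network::DnsResolverSharedPtr makeTcpResolver(Envoy::Event::Dispatcher& dispatcher) {
  const std::vector<Envoy::Network::Address::InstanceConstSharedPtr> resolvers; // default config
  return dispatcher.createDnsResolver(resolvers, /*use_tcp_for_dns_lookups=*/true);
}
```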
diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h
index 2728da6fd706..2cfa68f5ad50 100644
--- a/source/common/event/dispatcher_impl.h
+++ b/source/common/event/dispatcher_impl.h
@@ -52,8 +52,9 @@ class DispatcherImpl : Logger::Loggable<Logger::Id::main>,
                          Network::Address::InstanceConstSharedPtr source_address,
                          Network::TransportSocketPtr&& transport_socket,
                          const Network::ConnectionSocket::OptionsSharedPtr& options) override;
-  Network::DnsResolverSharedPtr createDnsResolver(
-      const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers) override;
+  Network::DnsResolverSharedPtr
+  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,
+                    const bool use_tcp_for_dns_lookups) override;
   FileEventPtr createFileEvent(int fd, FileReadyCb cb, FileTriggerType trigger,
                                uint32_t events) override;
   Filesystem::WatcherPtr createFilesystemWatcher() override;
diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc
index 766411f1b728..9b4c67372e4c 100644
--- a/source/common/filesystem/inotify/watcher_impl.cc
+++ b/source/common/filesystem/inotify/watcher_impl.cc
@@ -24,7 +24,8 @@ WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher)
             onInotifyEvent();
           },
           Event::FileTriggerType::Edge, Event::FileReadyType::Read)) {
-  RELEASE_ASSERT(inotify_fd_ >= 0, "");
+  RELEASE_ASSERT(inotify_fd_ >= 0,
+                 "Consider increasing the value of user.max_inotify_watches via sysctl");
 }
 
 WatcherImpl::~WatcherImpl() { close(inotify_fd_); }
diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc
index 7b20c5d2d158..7129cf1f081a 100644
--- a/source/common/grpc/common.cc
+++ b/source/common/grpc/common.cc
@@ -170,7 +170,7 @@ std::chrono::milliseconds Common::getGrpcTimeout(const Http::HeaderMap& request_
   return timeout;
 }
 
-void Common::toGrpcTimeout(const std::chrono::milliseconds& timeout, Http::HeaderString& value) {
+void Common::toGrpcTimeout(const std::chrono::milliseconds& timeout, Http::HeaderMap& headers) {
   uint64_t time = timeout.count();
   static const char units[] = "mSMH";
   const char* unit = units; // start with milliseconds
@@ -187,8 +187,7 @@ void Common::toGrpcTimeout(const std::chrono::milliseconds& timeout, Http::Heade
       unit++;
     }
   }
-  value.setInteger(time);
-  value.append(unit, 1);
+  headers.setGrpcTimeout(absl::StrCat(time, absl::string_view(unit, 1)));
 }
 
 Http::MessagePtr Common::prepareHeaders(const std::string& upstream_cluster,
@@ -203,7 +202,7 @@ Http::MessagePtr Common::prepareHeaders(const std::string& upstream_cluster,
   // before Timeout and ContentType.
   message->headers().setReferenceTE(Http::Headers::get().TEValues.Trailers);
   if (timeout) {
-    toGrpcTimeout(timeout.value(), message->headers().insertGrpcTimeout().value());
+    toGrpcTimeout(timeout.value(), message->headers());
   }
   message->headers().setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc);
 
diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h
index cae1a34b9621..a44117a82161 100644
--- a/source/common/grpc/common.h
+++ b/source/common/grpc/common.h
@@ -81,12 +81,12 @@ class Common {
   static std::chrono::milliseconds getGrpcTimeout(const Http::HeaderMap& request_headers);
 
   /**
-   * Encode 'timeout' into 'grpc-timeout' format.
+   * Encode 'timeout' in the 'grpc-timeout' format and set it as the grpc-timeout header value.
    * @param timeout the duration in std::chrono::milliseconds.
-   * @param value the HeaderString onto which format the timeout in 'grpc-timeout' format, up to
-   *        8 decimal digits and a letter indicating the unit.
+   * @param headers the HeaderMap in which the grpc-timeout header will be set with the timeout in
+   * 'grpc-timeout' format, up to 8 decimal digits and a letter indicating the unit.
    */
-  static void toGrpcTimeout(const std::chrono::milliseconds& timeout, Http::HeaderString& value);
+  static void toGrpcTimeout(const std::chrono::milliseconds& timeout, Http::HeaderMap& headers);
 
   /**
    * Serialize protobuf message with gRPC frame header.
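For reference, here is a hedged, standalone restatement of the 'grpc-timeout' encoding used by Common::toGrpcTimeout(): the value is kept to at most 8 decimal digits by scaling milliseconds to seconds, minutes, and hours, and a unit letter is appended. The function below is illustrative, not part of Envoy.

```cpp
#include <cstdint>
#include <string>

// Sketch of the encoding: e.g. 10 -> "10m", 100000000 (ms) -> "100000S".
std::string encodeGrpcTimeout(uint64_t time_ms) {
  static const char units[] = "mSMH";
  const char* unit = units; // start with milliseconds
  uint64_t time = time_ms;
  if (time > 99999999) {
    time /= 1000; // milliseconds -> seconds
    unit++;
    while (time > 99999999) {
      time /= 60; // seconds -> minutes -> hours
      unit++;
    }
  }
  return std::to_string(time) + *unit;
}
```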
diff --git a/source/common/grpc/context_impl.cc b/source/common/grpc/context_impl.cc
index b47185fc06db..9e478a1e53d3 100644
--- a/source/common/grpc/context_impl.cc
+++ b/source/common/grpc/context_impl.cc
@@ -103,7 +103,9 @@ ContextImpl::resolveServiceAndMethod(const Http::HeaderEntry* path) {
   if (path == nullptr) {
     return request_names;
   }
-  const auto parts = StringUtil::splitToken(path->value().getStringView(), "/");
+  absl::string_view str = path->value().getStringView();
+  str = str.substr(0, str.find('?'));
+  const auto parts = StringUtil::splitToken(str, "/");
   if (parts.size() != 2) {
     return request_names;
   }
diff --git a/source/common/grpc/google_grpc_creds_impl.cc b/source/common/grpc/google_grpc_creds_impl.cc
index 38fc37125219..86187ee3cb4a 100644
--- a/source/common/grpc/google_grpc_creds_impl.cc
+++ b/source/common/grpc/google_grpc_creds_impl.cc
@@ -73,6 +73,21 @@ CredsUtility::callCredentials(const envoy::api::v2::core::GrpcService::GoogleGrp
                                                   credential.google_iam().authority_selector());
       break;
     }
+    case envoy::api::v2::core::GrpcService::GoogleGrpc::CallCredentials::kStsService: {
+      grpc::experimental::StsCredentialsOptions options = {
+          credential.sts_service().token_exchange_service_uri(),
+          credential.sts_service().resource(),
+          credential.sts_service().audience(),
+          credential.sts_service().scope(),
+          credential.sts_service().requested_token_type(),
+          credential.sts_service().subject_token_path(),
+          credential.sts_service().subject_token_type(),
+          credential.sts_service().actor_token_path(),
+          credential.sts_service().actor_token_type(),
+      };
+      new_call_creds = grpc::experimental::StsCredentials(options);
+      break;
+    }
     default:
       // We don't handle plugin credentials here, callers can do so instead if they want.
       continue;
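To show where the StsCredentialsOptions fields come from, here is a hedged configuration sketch; it is not part of the patch, and the URIs, paths, and helper name are examples only.

```cpp
// Hypothetical config: populate the sts_service fields read above.
void fillStsCallCredentials(
    envoy::api::v2::core::GrpcService::GoogleGrpc::CallCredentials& creds) {
  auto* sts = creds.mutable_sts_service();
  sts->set_token_exchange_service_uri("https://sts.example.com/token"); // example URI
  sts->set_subject_token_path("/var/run/secrets/token");                // example path
  sts->set_subject_token_type("urn:ietf:params:oauth:token-type:jwt");
  // resource, audience, scope, requested_token_type, actor_token_path and
  // actor_token_type map onto the remaining StsCredentialsOptions members and
  // can typically be left empty when not needed.
}
```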
diff --git a/source/common/http/BUILD b/source/common/http/BUILD
index d317963b33c1..effd5cfa0d83 100644
--- a/source/common/http/BUILD
+++ b/source/common/http/BUILD
@@ -48,8 +48,11 @@ envoy_cc_library(
         "//source/common/common:enum_to_int",
         "//source/common/common:linked_object",
         "//source/common/common:minimal_logger_lib",
+        "//source/common/config:utility_lib",
         "//source/common/http/http1:codec_lib",
         "//source/common/http/http2:codec_lib",
+        "//source/common/http/http3:quic_codec_factory_lib",
+        "//source/common/http/http3:well_known_names",
         "//source/common/network:filter_lib",
     ],
 )
@@ -180,8 +183,11 @@ envoy_cc_library(
         "//source/common/common:regex_lib",
         "//source/common/common:scope_tracker",
         "//source/common/common:utility_lib",
+        "//source/common/config:utility_lib",
         "//source/common/http/http1:codec_lib",
         "//source/common/http/http2:codec_lib",
+        "//source/common/http/http3:quic_codec_factory_lib",
+        "//source/common/http/http3:well_known_names",
         "//source/common/network:utility_lib",
         "//source/common/router:config_lib",
         "//source/common/runtime:uuid_util_lib",
diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h
index 5f4545604569..57f98a9d9e8f 100644
--- a/source/common/http/async_client_impl.h
+++ b/source/common/http/async_client_impl.h
@@ -144,9 +144,9 @@ class AsyncStreamImpl : public AsyncClient::Stream,
     }
     absl::optional<std::chrono::milliseconds> maxInterval() const override { return absl::nullopt; }
 
-    const std::vector<uint32_t> retriable_status_codes_;
-    const std::vector<Http::HeaderMatcherSharedPtr> retriable_headers_;
-    const std::vector<Http::HeaderMatcherSharedPtr> retriable_request_headers_;
+    const std::vector<uint32_t> retriable_status_codes_{};
+    const std::vector<Http::HeaderMatcherSharedPtr> retriable_headers_{};
+    const std::vector<Http::HeaderMatcherSharedPtr> retriable_request_headers_{};
   };
 
   struct NullShadowPolicy : public Router::ShadowPolicy {
diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc
index c6c3c42f3db3..673212d3e69b 100644
--- a/source/common/http/codec_client.cc
+++ b/source/common/http/codec_client.cc
@@ -6,9 +6,12 @@
 #include "envoy/http/codec.h"
 
 #include "common/common/enum_to_int.h"
+#include "common/config/utility.h"
 #include "common/http/exception.h"
 #include "common/http/http1/codec_impl.h"
 #include "common/http/http2/codec_impl.h"
+#include "common/http/http3/quic_codec_factory.h"
+#include "common/http/http3/well_known_names.h"
 #include "common/http/utility.h"
 
 namespace Envoy {
@@ -158,8 +161,10 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne
     break;
   }
   case Type::HTTP3: {
-    // TODO(danzh) Add QUIC codec;
-    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
+    codec_ = std::unique_ptr<ClientConnection>(
+        Config::Utility::getAndCheckFactory<Http::QuicHttpClientConnectionFactory>(
+            Http::QuicCodecNames::get().Quiche)
+            .createQuicClientConnection(*connection_, *this));
   }
   }
 }
diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h
index 5a3221f358cb..8a8f1661663b 100644
--- a/source/common/http/conn_manager_config.h
+++ b/source/common/http/conn_manager_config.h
@@ -107,7 +107,7 @@ struct ConnectionManagerTracingStats {
  */
 struct TracingConnectionManagerConfig {
   Tracing::OperationName operation_name_;
-  std::vector<Http::LowerCaseString> request_headers_for_tags_;
+  Tracing::CustomTagMap custom_tags_;
   envoy::type::FractionalPercent client_sampling_;
   envoy::type::FractionalPercent random_sampling_;
   envoy::type::FractionalPercent overall_sampling_;
diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc
index b7aac3ae89f7..0feaa12d4e21 100644
--- a/source/common/http/conn_manager_impl.cc
+++ b/source/common/http/conn_manager_impl.cc
@@ -296,7 +296,6 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool
     }
   }
 
-  // TODO(alyssawilk) clean this up after #8352 is well vetted.
   bool redispatch;
   do {
     redispatch = false;
@@ -549,18 +548,6 @@ ConnectionManagerImpl::ActiveStream::~ActiveStream() {
   }
 
   connection_manager_.stats_.named_.downstream_rq_active_.dec();
-  // Refresh byte sizes of the HeaderMaps before logging.
-  // TODO(asraa): Remove this when entries in HeaderMap can no longer be modified by reference and
-  // HeaderMap holds an accurate internal byte size count.
-  if (request_headers_ != nullptr) {
-    request_headers_->refreshByteSize();
-  }
-  if (response_headers_ != nullptr) {
-    response_headers_->refreshByteSize();
-  }
-  if (response_trailers_ != nullptr) {
-    response_trailers_->refreshByteSize();
-  }
   for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) {
     access_log->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(),
                     stream_info_);
@@ -1338,6 +1325,32 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() {
         connection_manager_.cluster_manager_.get(stream_info_.route_entry_->clusterName());
     cached_cluster_info_ = (nullptr == local_cluster) ? nullptr : local_cluster->info();
   }
+
+  refreshCachedTracingCustomTags();
+}
+
+void ConnectionManagerImpl::ActiveStream::refreshCachedTracingCustomTags() {
+  if (!connection_manager_.config_.tracingConfig()) {
+    return;
+  }
+  const Tracing::CustomTagMap& conn_manager_tags =
+      connection_manager_.config_.tracingConfig()->custom_tags_;
+  const Tracing::CustomTagMap* route_tags = nullptr;
+  if (hasCachedRoute() && cached_route_.value()->tracingConfig()) {
+    route_tags = &cached_route_.value()->tracingConfig()->getCustomTags();
+  }
+  const bool configured_in_conn = !conn_manager_tags.empty();
+  const bool configured_in_route = route_tags && !route_tags->empty();
+  if (!configured_in_conn && !configured_in_route) {
+    return;
+  }
+  Tracing::CustomTagMap& custom_tag_map = getOrMakeTracingCustomTagMap();
+  if (configured_in_route) {
+    custom_tag_map.insert(route_tags->begin(), route_tags->end());
+  }
+  if (configured_in_conn) {
+    custom_tag_map.insert(conn_manager_tags.begin(), conn_manager_tags.end());
+  }
 }
 
 void ConnectionManagerImpl::ActiveStream::sendLocalReply(
@@ -1800,9 +1813,8 @@ Tracing::OperationName ConnectionManagerImpl::ActiveStream::operationName() cons
   return connection_manager_.config_.tracingConfig()->operation_name_;
 }
 
-const std::vector<Http::LowerCaseString>&
-ConnectionManagerImpl::ActiveStream::requestHeadersForTags() const {
-  return connection_manager_.config_.tracingConfig()->request_headers_for_tags_;
+const Tracing::CustomTagMap* ConnectionManagerImpl::ActiveStream::customTags() const {
+  return tracing_custom_tags_.get();
 }
 
 bool ConnectionManagerImpl::ActiveStream::verbose() const {
@@ -2075,6 +2087,9 @@ Router::RouteConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::route
 void ConnectionManagerImpl::ActiveStreamFilterBase::clearRouteCache() {
   parent_.cached_route_ = absl::optional<Router::RouteConstSharedPtr>();
   parent_.cached_cluster_info_ = absl::optional<Upstream::ClusterInfoConstSharedPtr>();
+  if (parent_.tracing_custom_tags_) {
+    parent_.tracing_custom_tags_->clear();
+  }
 }
 
 Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamDecoderFilter::createBuffer() {
@@ -2214,6 +2229,7 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() {
   HeaderMapPtr request_headers(std::move(parent_.request_headers_));
   StreamEncoder* response_encoder = parent_.response_encoder_;
   parent_.response_encoder_ = nullptr;
+  response_encoder->getStream().removeCallbacks(parent_);
   // This functionally deletes the stream (via deferred delete) so do not
   // reference anything beyond this point.
   parent_.connection_manager_.doEndStream(this->parent_);
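The merge order in refreshCachedTracingCustomTags() is what gives route-level tags precedence: they are inserted first, and map insertion does not overwrite existing keys. A hedged, standalone illustration (assuming Tracing::CustomTagMap has standard map insert semantics) follows.

```cpp
#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> merged;
  const std::map<std::string, std::string> route_tags{{"env", "route"}};
  const std::map<std::string, std::string> conn_manager_tags{{"env", "hcm"}, {"team", "edge"}};

  merged.insert(route_tags.begin(), route_tags.end());               // route tags first
  merged.insert(conn_manager_tags.begin(), conn_manager_tags.end()); // duplicates are ignored

  assert(merged["env"] == "route"); // route-level value wins on collision
  assert(merged["team"] == "edge"); // connection-manager-only keys are still added
  return 0;
}
```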
diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h
index f5280b078cd8..99ffee1d3b6f 100644
--- a/source/common/http/conn_manager_impl.h
+++ b/source/common/http/conn_manager_impl.h
@@ -477,7 +477,7 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>,
 
     // Tracing::TracingConfig
     Tracing::OperationName operationName() const override;
-    const std::vector<Http::LowerCaseString>& requestHeadersForTags() const override;
+    const Tracing::CustomTagMap* customTags() const override;
     bool verbose() const override;
     uint32_t maxPathTagLength() const override;
 
@@ -503,6 +503,8 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>,
 
     void refreshCachedRoute();
 
+    void refreshCachedTracingCustomTags();
+
     // Pass on watermark callbacks to watermark subscribers. This boils down to passing watermark
     // events for this stream and the downstream connection to the router filter.
     void callHighWatermarkCallbacks();
@@ -588,6 +590,13 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>,
       return request_metadata_map_vector_.get();
     }
 
+    Tracing::CustomTagMap& getOrMakeTracingCustomTagMap() {
+      if (tracing_custom_tags_ == nullptr) {
+        tracing_custom_tags_ = std::make_unique<Tracing::CustomTagMap>();
+      }
+      return *tracing_custom_tags_;
+    }
+
     ConnectionManagerImpl& connection_manager_;
     Router::ConfigConstSharedPtr snapped_route_config_;
     Router::ScopedConfigConstSharedPtr snapped_scoped_routes_config_;
@@ -632,6 +641,7 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>,
     // response.
     bool encoding_headers_only_{};
     Network::Socket::OptionsSharedPtr upstream_options_;
+    std::unique_ptr<Tracing::CustomTagMap> tracing_custom_tags_{nullptr};
   };
 
   using ActiveStreamPtr = std::unique_ptr<ActiveStream>;
diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc
index 5a9f968a7436..69e594bc6998 100644
--- a/source/common/http/conn_manager_utility.cc
+++ b/source/common/http/conn_manager_utility.cc
@@ -88,8 +88,6 @@ Network::Address::InstanceConstSharedPtr ConnectionManagerUtility::mutateRequest
   Network::Address::InstanceConstSharedPtr final_remote_address;
   bool single_xff_address;
   const uint32_t xff_num_trusted_hops = config.xffNumTrustedHops();
-  const bool trusted_forwarded_proto =
-      Runtime::runtimeFeatureEnabled("envoy.reloadable_features.trusted_forwarded_proto");
 
   if (config.useRemoteAddress()) {
     single_xff_address = request_headers.ForwardedFor() == nullptr;
@@ -113,18 +111,9 @@ Network::Address::InstanceConstSharedPtr ConnectionManagerUtility::mutateRequest
         Utility::appendXff(request_headers, *connection.remoteAddress());
       }
     }
-    if (trusted_forwarded_proto) {
-      // If the prior hop is not a trusted proxy, overwrite any x-forwarded-proto value it set as
-      // untrusted. Alternately if no x-forwarded-proto header exists, add one.
-      if (xff_num_trusted_hops == 0 || request_headers.ForwardedProto() == nullptr) {
-        request_headers.setReferenceForwardedProto(connection.ssl()
-                                                       ? Headers::get().SchemeValues.Https
-                                                       : Headers::get().SchemeValues.Http);
-      }
-    } else {
-      // Previously, before the trusted_forwarded_proto logic, Envoy would always overwrite the
-      // x-forwarded-proto header even if it was set by a trusted proxy. This code path is
-      // deprecated and will be removed.
+    // If the prior hop is not a trusted proxy, overwrite any x-forwarded-proto value it set, as it
+    // is untrusted. Alternatively, if no x-forwarded-proto header exists, add one.
+    if (xff_num_trusted_hops == 0 || request_headers.ForwardedProto() == nullptr) {
       request_headers.setReferenceForwardedProto(
           connection.ssl() ? Headers::get().SchemeValues.Https : Headers::get().SchemeValues.Http);
     }
@@ -200,8 +189,8 @@ Network::Address::InstanceConstSharedPtr ConnectionManagerUtility::mutateRequest
 
   if (config.userAgent()) {
     request_headers.setEnvoyDownstreamServiceCluster(config.userAgent().value());
-    const HeaderEntry& user_agent_header = request_headers.insertUserAgent();
-    if (user_agent_header.value().empty()) {
+    const HeaderEntry* user_agent_header = request_headers.UserAgent();
+    if (!user_agent_header || user_agent_header->value().empty()) {
       // Following setReference() is safe because user agent is constant for the life of the
       // listener.
       request_headers.setReferenceUserAgent(config.userAgent().value());
@@ -283,7 +272,7 @@ void ConnectionManagerUtility::mutateTracingRequestHeader(HeaderMap& request_hea
     UuidUtils::setTraceableUuid(x_request_id, UuidTraceStatus::NoTrace);
   }
 
-  request_headers.RequestId()->value(x_request_id);
+  request_headers.setRequestId(x_request_id);
 }
 
 void ConnectionManagerUtility::mutateXfccRequestHeader(HeaderMap& request_headers,
@@ -364,8 +353,7 @@ void ConnectionManagerUtility::mutateXfccRequestHeader(HeaderMap& request_header
 
   const std::string client_cert_details_str = absl::StrJoin(client_cert_details, ";");
   if (config.forwardClientCert() == ForwardClientCertType::AppendForward) {
-    HeaderMapImpl::appendToHeader(request_headers.insertForwardedClientCert().value(),
-                                  client_cert_details_str);
+    request_headers.appendForwardedClientCert(client_cert_details_str, ",");
   } else if (config.forwardClientCert() == ForwardClientCertType::SanitizeSet) {
     request_headers.setForwardedClientCert(client_cert_details_str);
   } else {
@@ -411,11 +399,11 @@ bool ConnectionManagerUtility::maybeNormalizePath(HeaderMap& request_headers,
   ASSERT(request_headers.Path());
   bool is_valid_path = true;
   if (config.shouldNormalizePath()) {
-    is_valid_path = PathUtil::canonicalPath(*request_headers.Path());
+    is_valid_path = PathUtil::canonicalPath(request_headers);
   }
   // Merge slashes after path normalization to catch potential edge cases with percent encoding.
   if (is_valid_path && config.shouldMergeSlashes()) {
-    PathUtil::mergeSlashes(*request_headers.Path());
+    PathUtil::mergeSlashes(request_headers);
   }
   return is_valid_path;
 }
diff --git a/source/common/http/hash_policy.cc b/source/common/http/hash_policy.cc
index ec50a20194d1..b2cc447e4aab 100644
--- a/source/common/http/hash_policy.cc
+++ b/source/common/http/hash_policy.cc
@@ -83,6 +83,31 @@ class IpHashMethod : public HashMethodImplBase {
   }
 };
 
+class QueryParameterHashMethod : public HashMethodImplBase {
+public:
+  QueryParameterHashMethod(const std::string& parameter_name, bool terminal)
+      : HashMethodImplBase(terminal), parameter_name_(parameter_name) {}
+
+  absl::optional<uint64_t> evaluate(const Network::Address::Instance*, const HeaderMap& headers,
+                                    const HashPolicy::AddCookieCallback) const override {
+    absl::optional<uint64_t> hash;
+
+    const HeaderEntry* header = headers.Path();
+    if (header) {
+      Http::Utility::QueryParams query_parameters =
+          Http::Utility::parseQueryString(header->value().getStringView());
+      const auto& iter = query_parameters.find(parameter_name_);
+      if (iter != query_parameters.end()) {
+        hash = HashUtil::xxHash64(iter->second);
+      }
+    }
+    return hash;
+  }
+
+private:
+  const std::string parameter_name_;
+};
+
 HashPolicyImpl::HashPolicyImpl(
     absl::Span<const envoy::api::v2::route::RouteAction::HashPolicy* const> hash_policies) {
   // TODO(htuch): Add support for cookie hash policies, #1295
@@ -109,6 +134,10 @@ HashPolicyImpl::HashPolicyImpl(
         hash_impls_.emplace_back(new IpHashMethod(hash_policy->terminal()));
       }
       break;
+    case envoy::api::v2::route::RouteAction::HashPolicy::kQueryParameter:
+      hash_impls_.emplace_back(new QueryParameterHashMethod(hash_policy->query_parameter().name(),
+                                                            hash_policy->terminal()));
+      break;
     default:
       throw EnvoyException(
           fmt::format("Unsupported hash policy {}", hash_policy->policy_specifier_case()));
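A hedged walk-through of what QueryParameterHashMethod computes for a concrete path; it is not part of the patch and the helper function is illustrative.

```cpp
// Hypothetical: a request to "/search?user=alice&lang=en" under a
// query_parameter hash policy configured with name "user".
absl::optional<uint64_t> hashForExamplePath() {
  const Envoy::Http::Utility::QueryParams params =
      Envoy::Http::Utility::parseQueryString("/search?user=alice&lang=en");
  const auto it = params.find("user");
  if (it == params.end()) {
    return absl::nullopt; // parameter absent: this policy contributes no hash
  }
  return Envoy::HashUtil::xxHash64(it->second); // hash of "alice"
}
```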
diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc
index bd870f6672b5..ecc98017b841 100644
--- a/source/common/http/header_map_impl.cc
+++ b/source/common/http/header_map_impl.cc
@@ -49,8 +49,8 @@ HeaderString::HeaderString(const LowerCaseString& ref_value) : type_(Type::Refer
   ASSERT(valid());
 }
 
-HeaderString::HeaderString(const std::string& ref_value) : type_(Type::Reference) {
-  buffer_.ref_ = ref_value.c_str();
+HeaderString::HeaderString(absl::string_view ref_value) : type_(Type::Reference) {
+  buffer_.ref_ = ref_value.data();
   string_length_ = ref_value.size();
   ASSERT(valid());
 }
@@ -261,13 +261,7 @@ HeaderMapImpl::HeaderEntryImpl::HeaderEntryImpl(const LowerCaseString& key, Head
 HeaderMapImpl::HeaderEntryImpl::HeaderEntryImpl(HeaderString&& key, HeaderString&& value)
     : key_(std::move(key)), value_(std::move(value)) {}
 
-void HeaderMapImpl::HeaderEntryImpl::value(const char* value, uint32_t size) {
-  value_.setCopy(value, size);
-}
-
-void HeaderMapImpl::HeaderEntryImpl::value(absl::string_view value) {
-  this->value(value.data(), static_cast<uint32_t>(value.size()));
-}
+void HeaderMapImpl::HeaderEntryImpl::value(absl::string_view value) { value_.setCopy(value); }
 
 void HeaderMapImpl::HeaderEntryImpl::value(uint64_t value) { value_.setInteger(value); }
 
@@ -295,20 +289,21 @@ struct HeaderMapImpl::StaticLookupTable : public TrieLookupTable<EntryCb> {
   }
 };
 
-uint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data) {
+uint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data,
+                                       absl::string_view delimiter) {
   if (data.empty()) {
     return 0;
   }
   uint64_t byte_size = 0;
   if (!header.empty()) {
-    header.append(",", 1);
-    byte_size += 1;
+    header.append(delimiter.data(), delimiter.size());
+    byte_size += delimiter.size();
   }
   header.append(data.data(), data.size());
   return data.size() + byte_size;
 }
 
-HeaderMapImpl::HeaderMapImpl() { memset(&inline_headers_, 0, sizeof(inline_headers_)); }
+HeaderMapImpl::HeaderMapImpl() { inline_headers_.clear(); }
 
 HeaderMapImpl::HeaderMapImpl(
     const std::initializer_list<std::pair<LowerCaseString, std::string>>& values)
@@ -320,29 +315,20 @@ HeaderMapImpl::HeaderMapImpl(
     value_string.setCopy(value.second.c_str(), value.second.size());
     addViaMove(std::move(key_string), std::move(value_string));
   }
+  verifyByteSize();
 }
 
 void HeaderMapImpl::updateSize(uint64_t from_size, uint64_t to_size) {
-  // Removes from_size from cached_byte_size_ and adds to_size.
-  if (cached_byte_size_.has_value()) {
-    ASSERT(cached_byte_size_ >= from_size);
-    cached_byte_size_.value() -= from_size;
-    cached_byte_size_.value() += to_size;
-  }
+  ASSERT(cached_byte_size_ >= from_size);
+  cached_byte_size_ -= from_size;
+  cached_byte_size_ += to_size;
 }
 
-void HeaderMapImpl::addSize(uint64_t size) {
-  // Adds size to cached_byte_size_ if it exists.
-  if (cached_byte_size_.has_value()) {
-    cached_byte_size_.value() += size;
-  }
-}
+void HeaderMapImpl::addSize(uint64_t size) { cached_byte_size_ += size; }
 
 void HeaderMapImpl::subtractSize(uint64_t size) {
-  if (cached_byte_size_.has_value()) {
-    ASSERT(cached_byte_size_ >= size);
-    cached_byte_size_.value() -= size;
-  }
+  ASSERT(cached_byte_size_ >= size);
+  cached_byte_size_ -= size;
 }
 
 void HeaderMapImpl::copyFrom(const HeaderMap& header_map) {
@@ -359,6 +345,7 @@ void HeaderMapImpl::copyFrom(const HeaderMap& header_map) {
         return HeaderMap::Iterate::Continue;
       },
       this);
+  verifyByteSize();
 }
 
 bool HeaderMapImpl::operator==(const HeaderMapImpl& rhs) const {
@@ -409,12 +396,14 @@ void HeaderMapImpl::addViaMove(HeaderString&& key, HeaderString&& value) {
   } else {
     insertByKey(std::move(key), std::move(value));
   }
+  verifyByteSize();
 }
 
-void HeaderMapImpl::addReference(const LowerCaseString& key, const std::string& value) {
+void HeaderMapImpl::addReference(const LowerCaseString& key, absl::string_view value) {
   HeaderString ref_key(key);
   HeaderString ref_value(value);
   addViaMove(std::move(ref_key), std::move(ref_value));
+  verifyByteSize();
 }
 
 void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, uint64_t value) {
@@ -423,14 +412,16 @@ void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, uint64_t value)
   new_value.setInteger(value);
   insertByKey(std::move(ref_key), std::move(new_value));
   ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)
+  verifyByteSize();
 }
 
-void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, const std::string& value) {
+void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, absl::string_view value) {
   HeaderString ref_key(key);
   HeaderString new_value;
-  new_value.setCopy(value.c_str(), value.size());
+  new_value.setCopy(value);
   insertByKey(std::move(ref_key), std::move(new_value));
   ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)
+  verifyByteSize();
 }
 
 void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) {
@@ -443,15 +434,16 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) {
     return;
   }
   HeaderString new_key;
-  new_key.setCopy(key.get().c_str(), key.get().size());
+  new_key.setCopy(key.get());
   HeaderString new_value;
   new_value.setInteger(value);
   insertByKey(std::move(new_key), std::move(new_value));
   ASSERT(new_key.empty());   // NOLINT(bugprone-use-after-move)
   ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)
+  verifyByteSize();
 }
 
-void HeaderMapImpl::addCopy(const LowerCaseString& key, const std::string& value) {
+void HeaderMapImpl::addCopy(const LowerCaseString& key, absl::string_view value) {
   auto* entry = getExistingInline(key.get());
   if (entry != nullptr) {
     const uint64_t added_size = appendToHeader(entry->value(), value);
@@ -459,41 +451,61 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, const std::string& value
     return;
   }
   HeaderString new_key;
-  new_key.setCopy(key.get().c_str(), key.get().size());
+  new_key.setCopy(key.get());
   HeaderString new_value;
-  new_value.setCopy(value.c_str(), value.size());
+  new_value.setCopy(value);
   insertByKey(std::move(new_key), std::move(new_value));
   ASSERT(new_key.empty());   // NOLINT(bugprone-use-after-move)
   ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)
+  verifyByteSize();
+}
+
+void HeaderMapImpl::appendCopy(const LowerCaseString& key, absl::string_view value) {
+  // TODO(#9221): converge on and document a policy for coalescing multiple headers.
+  auto* entry = getExisting(key);
+  if (entry) {
+    const uint64_t added_size = appendToHeader(entry->value(), value);
+    addSize(added_size);
+  } else {
+    addCopy(key, value);
+  }
+
+  verifyByteSize();
 }
 
-void HeaderMapImpl::setReference(const LowerCaseString& key, const std::string& value) {
+void HeaderMapImpl::setReference(const LowerCaseString& key, absl::string_view value) {
   HeaderString ref_key(key);
   HeaderString ref_value(value);
   remove(key);
   insertByKey(std::move(ref_key), std::move(ref_value));
+  verifyByteSize();
 }
 
-void HeaderMapImpl::setReferenceKey(const LowerCaseString& key, const std::string& value) {
+void HeaderMapImpl::setReferenceKey(const LowerCaseString& key, absl::string_view value) {
   HeaderString ref_key(key);
   HeaderString new_value;
-  new_value.setCopy(value.c_str(), value.size());
+  new_value.setCopy(value);
   remove(key);
   insertByKey(std::move(ref_key), std::move(new_value));
   ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)
+  verifyByteSize();
 }
 
-absl::optional<uint64_t> HeaderMapImpl::byteSize() const { return cached_byte_size_; }
-
-uint64_t HeaderMapImpl::refreshByteSize() {
-  if (!cached_byte_size_.has_value()) {
-    // In this case, the cached byte size is not valid, and the byte size is computed via an
-    // iteration over the HeaderMap. The cached byte size is updated.
-    cached_byte_size_ = byteSizeInternal();
+void HeaderMapImpl::setCopy(const LowerCaseString& key, absl::string_view value) {
+  // Replaces the first occurrence of a header if it exists, otherwise adds by copy.
+  // TODO(#9221): converge on and document a policy for coalescing multiple headers.
+  auto* entry = getExisting(key);
+  if (entry) {
+    updateSize(entry->value().size(), value.size());
+    entry->value(value);
+  } else {
+    addCopy(key, value);
   }
-  return cached_byte_size_.value();
+  verifyByteSize();
 }
 
+uint64_t HeaderMapImpl::byteSize() const { return cached_byte_size_; }
+
 uint64_t HeaderMapImpl::byteSizeInternal() const {
   // Computes the total byte size by summing the byte size of the keys and values.
   uint64_t byte_size = 0;
@@ -514,10 +526,9 @@ const HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) const {
   return nullptr;
 }
 
-HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) {
+HeaderEntry* HeaderMapImpl::getExisting(const LowerCaseString& key) {
   for (HeaderEntryImpl& header : headers_) {
     if (header.key() == key.get().c_str()) {
-      cached_byte_size_.reset();
       return &header;
     }
   }
@@ -564,6 +575,12 @@ HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key,
   }
 }
 
+void HeaderMapImpl::clear() {
+  inline_headers_.clear();
+  headers_.clear();
+  cached_byte_size_ = 0;
+}
+
 void HeaderMapImpl::remove(const LowerCaseString& key) {
   EntryCb cb = ConstSingleton<StaticLookupTable>::get().find(key.get());
   if (cb) {
@@ -579,6 +596,7 @@ void HeaderMapImpl::remove(const LowerCaseString& key) {
       }
     }
   }
+  verifyByteSize();
 }
 
 void HeaderMapImpl::removePrefix(const LowerCaseString& prefix) {
@@ -602,6 +620,7 @@ void HeaderMapImpl::removePrefix(const LowerCaseString& prefix) {
     }
     return to_remove;
   });
+  verifyByteSize();
 }
 
 void HeaderMapImpl::dumpState(std::ostream& os, int indent_level) const {
@@ -665,6 +684,7 @@ void HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) {
   subtractSize(size_to_subtract);
   *ptr_to_entry = nullptr;
   headers_.erase(entry->entry_);
+  verifyByteSize();
 }
 
 } // namespace Http
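A hedged sketch of the new mutation helpers side by side; it is not part of the patch.

```cpp
// Hypothetical usage of addCopy()/appendCopy()/setCopy().
void mutateHeaders() {
  Envoy::Http::HeaderMapImpl headers;
  const Envoy::Http::LowerCaseString key("x-example");

  headers.addCopy(key, "a");    // "x-example: a"
  headers.appendCopy(key, "b"); // coalesces to "x-example: a,b" (default "," delimiter)
  headers.setCopy(key, "c");    // replaces the first occurrence: "x-example: c"
  // byteSize() remains accurate after each call; verifyByteSize() is a no-op
  // outside test header maps.
}
```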
diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h
index b46370ca5b14..bba7221c7d36 100644
--- a/source/common/http/header_map_impl.h
+++ b/source/common/http/header_map_impl.h
@@ -16,38 +16,34 @@ namespace Http {
 
 /**
  * These are definitions of all of the inline header access functions described inside header_map.h
- *
- * When a non-const reference or pointer to a HeaderEntry is returned, the internal byte size count
- * will be cleared, since HeaderMap will no longer be able to accurately update the size of that
- * HeaderEntry.
- * TODO(asraa): Remove functions with a non-const HeaderEntry return value.
+ * TODO(asraa): Simplify code here so macros expand into single virtual calls.
  */
 #define DEFINE_INLINE_HEADER_FUNCS(name)                                                           \
 public:                                                                                            \
   const HeaderEntry* name() const override { return inline_headers_.name##_; }                     \
-  HeaderEntry* name() override {                                                                   \
-    cached_byte_size_.reset();                                                                     \
-    return inline_headers_.name##_;                                                                \
-  }                                                                                                \
-  HeaderEntry& insert##name() override {                                                           \
-    cached_byte_size_.reset();                                                                     \
-    return maybeCreateInline(&inline_headers_.name##_, Headers::get().name);                       \
+  void append##name(absl::string_view data, absl::string_view delimiter) override {                \
+    HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name);         \
+    addSize(HeaderMapImpl::appendToHeader(entry.value(), data, delimiter));                        \
+    verifyByteSize();                                                                              \
   }                                                                                                \
   void setReference##name(absl::string_view value) override {                                      \
     HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name);         \
     updateSize(entry.value().size(), value.size());                                                \
     entry.value().setReference(value);                                                             \
+    verifyByteSize();                                                                              \
   }                                                                                                \
   void set##name(absl::string_view value) override {                                               \
     HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name);         \
     updateSize(entry.value().size(), value.size());                                                \
     entry.value().setCopy(value);                                                                  \
+    verifyByteSize();                                                                              \
   }                                                                                                \
   void set##name(uint64_t value) override {                                                        \
     HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name);         \
     subtractSize(inline_headers_.name##_->value().size());                                         \
     entry.value().setInteger(value);                                                               \
     addSize(inline_headers_.name##_->value().size());                                              \
+    verifyByteSize();                                                                              \
   }                                                                                                \
   void remove##name() override { removeInline(&inline_headers_.name##_); }
 
@@ -62,14 +58,6 @@ public:
  */
 class HeaderMapImpl : public HeaderMap, NonCopyable {
 public:
-  /**
-   * Appends data to header. If header already has a value, the string ',' is added between the
-   * existing value and data.
-   * @param header the header to append to.
-   * @param data to append to the header.
-   */
-  static uint64_t appendToHeader(HeaderString& header, absl::string_view data);
-
   HeaderMapImpl();
   explicit HeaderMapImpl(
       const std::initializer_list<std::pair<LowerCaseString, std::string>>& values);
@@ -89,21 +77,21 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
   bool operator!=(const HeaderMapImpl& rhs) const;
 
   // Http::HeaderMap
-  void addReference(const LowerCaseString& key, const std::string& value) override;
+  void addReference(const LowerCaseString& key, absl::string_view value) override;
   void addReferenceKey(const LowerCaseString& key, uint64_t value) override;
-  void addReferenceKey(const LowerCaseString& key, const std::string& value) override;
+  void addReferenceKey(const LowerCaseString& key, absl::string_view value) override;
   void addCopy(const LowerCaseString& key, uint64_t value) override;
-  void addCopy(const LowerCaseString& key, const std::string& value) override;
-  void setReference(const LowerCaseString& key, const std::string& value) override;
-  void setReferenceKey(const LowerCaseString& key, const std::string& value) override;
-  absl::optional<uint64_t> byteSize() const override;
-  uint64_t refreshByteSize() override;
-  uint64_t byteSizeInternal() const override;
+  void addCopy(const LowerCaseString& key, absl::string_view value) override;
+  void appendCopy(const LowerCaseString& key, absl::string_view value) override;
+  void setReference(const LowerCaseString& key, absl::string_view value) override;
+  void setReferenceKey(const LowerCaseString& key, absl::string_view value) override;
+  void setCopy(const LowerCaseString& key, absl::string_view value) override;
+  uint64_t byteSize() const override;
   const HeaderEntry* get(const LowerCaseString& key) const override;
-  HeaderEntry* get(const LowerCaseString& key) override;
   void iterate(ConstIterateCb cb, void* context) const override;
   void iterateReverse(ConstIterateCb cb, void* context) const override;
   Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override;
+  void clear() override;
   void remove(const LowerCaseString& key) override;
   void removePrefix(const LowerCaseString& key) override;
   size_t size() const override { return headers_.size(); }
@@ -113,7 +101,6 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
 protected:
   // For tests only, unoptimized, they aren't intended for regular HeaderMapImpl users.
   void copyFrom(const HeaderMap& rhs);
-  void clear() { removePrefix(LowerCaseString("")); }
 
   struct HeaderEntryImpl : public HeaderEntry, NonCopyable {
     HeaderEntryImpl(const LowerCaseString& key);
@@ -122,7 +109,6 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
 
     // HeaderEntry
     const HeaderString& key() const override { return key_; }
-    void value(const char* value, uint32_t size) override;
     void value(absl::string_view value) override;
     void value(uint64_t value) override;
     void value(const HeaderEntry& header) override;
@@ -148,6 +134,8 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
   struct StaticLookupTable; // Defined in header_map_impl.cc.
 
   struct AllInlineHeaders {
+    void clear() { memset(this, 0, sizeof(*this)); }
+
     ALL_INLINE_HEADERS(DEFINE_INLINE_HEADER_STRUCT)
   };
 
@@ -209,6 +197,10 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
     std::list<HeaderEntryImpl>::const_reverse_iterator rend() const { return headers_.rend(); }
     size_t size() const { return headers_.size(); }
     bool empty() const { return headers_.empty(); }
+    void clear() {
+      headers_.clear();
+      pseudo_headers_end_ = headers_.end();
+    }
 
   private:
     std::list<HeaderEntryImpl> headers_;
@@ -216,9 +208,12 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
   };
 
   void insertByKey(HeaderString&& key, HeaderString&& value);
+  static uint64_t appendToHeader(HeaderString& header, absl::string_view data,
+                                 absl::string_view delimiter = ",");
   HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key);
   HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key,
                                      HeaderString&& value);
+  HeaderEntry* getExisting(const LowerCaseString& key);
   HeaderEntryImpl* getExistingInline(absl::string_view key);
 
   void removeInline(HeaderEntryImpl** entry);
@@ -229,9 +224,16 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
   AllInlineHeaders inline_headers_;
   HeaderList headers_;
 
-  // When present, this holds the internal byte size of the HeaderMap. The value is removed once an
-  // inline header entry is accessed and updated when refreshByteSize() is called.
-  absl::optional<uint64_t> cached_byte_size_ = 0;
+  // This holds the internal byte size of the HeaderMap.
+  uint64_t cached_byte_size_ = 0;
+  // Performs a manual byte size count.
+  uint64_t byteSizeInternal() const;
+  // In TestHeaderMapImpl and VerifiedHeaderMapImpl, this method is overridden to perform a
+  // time-consuming manual byte size count on each operation to verify the byte size. For prod
+  // HeaderMaps, this verification is skipped.
+  // TODO(asraa): Move this verification out of prod code and wrap virtual Http::HeaderMap methods
+  // in Http::TestHeaderMapImpl with the verification.
+  virtual void verifyByteSize() {}
 
   ALL_INLINE_HEADERS(DEFINE_INLINE_HEADER_FUNCS)
 };
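A hedged sketch of the test-only override described in the comment above; it is not part of the patch and assumes byteSizeInternal() and verifyByteSize() are visible to subclasses.

```cpp
// Hypothetical test-only header map that re-counts bytes after every mutation.
class VerifyingHeaderMapImpl : public Envoy::Http::HeaderMapImpl {
protected:
  void verifyByteSize() override { ASSERT(byteSize() == byteSizeInternal()); }
};
```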
diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc
index c0e1af24a69a..8f2d33d29d37 100644
--- a/source/common/http/http1/codec_impl.cc
+++ b/source/common/http/http1/codec_impl.cc
@@ -44,7 +44,9 @@ const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n\r\n";
 
 StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection,
                                      HeaderKeyFormatter* header_key_formatter)
-    : connection_(connection), header_key_formatter_(header_key_formatter) {
+    : connection_(connection), header_key_formatter_(header_key_formatter), chunk_encoding_(true),
+      processing_100_continue_(false), is_response_to_head_request_(false),
+      is_content_length_allowed_(true) {
   if (connection_.connection().aboveHighWatermark()) {
     runHighWatermarkCallbacks();
   }
@@ -355,14 +357,14 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st
                                const uint32_t max_headers_count,
                                HeaderKeyFormatterPtr&& header_key_formatter)
     : connection_(connection), stats_{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http1."))},
-      header_key_formatter_(std::move(header_key_formatter)),
+      header_key_formatter_(std::move(header_key_formatter)), handling_upgrade_(false),
+      reset_stream_called_(false), strict_header_validation_(Runtime::runtimeFeatureEnabled(
+                                       "envoy.reloadable_features.strict_header_validation")),
+      connection_header_sanitization_(Runtime::runtimeFeatureEnabled(
+          "envoy.reloadable_features.connection_header_sanitization")),
       output_buffer_([&]() -> void { this->onBelowLowWatermark(); },
                      [&]() -> void { this->onAboveHighWatermark(); }),
-      max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count),
-      strict_header_validation_(
-          Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_header_validation")),
-      connection_header_sanitization_(Runtime::runtimeFeatureEnabled(
-          "envoy.reloadable_features.connection_header_sanitization")) {
+      max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) {
   output_buffer_.setWatermarks(connection.bufferLimit());
   http_parser_init(&parser_, type);
   parser_.data = this;
@@ -487,10 +489,8 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) {
   header_parsing_state_ = HeaderParsingState::Value;
   current_header_value_.append(data, length);
 
-  // Verify that the cached value in byte size exists.
-  ASSERT(current_header_map_->byteSize().has_value());
-  const uint32_t total = current_header_field_.size() + current_header_value_.size() +
-                         current_header_map_->byteSize().value();
+  const uint32_t total =
+      current_header_field_.size() + current_header_value_.size() + current_header_map_->byteSize();
   if (total > (max_headers_kb_ * 1024)) {
 
     error_code_ = Http::Code::RequestHeaderFieldsTooLarge;
@@ -502,10 +502,6 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) {
 int ConnectionImpl::onHeadersCompleteBase() {
   ENVOY_CONN_LOG(trace, "headers complete", connection_);
   completeLastHeader();
-  // Validate that the completed HeaderMap's cached byte size exists and is correct.
-  // This assert iterates over the HeaderMap.
-  ASSERT(current_header_map_->byteSize().has_value() &&
-         current_header_map_->byteSize() == current_header_map_->byteSizeInternal());
   if (!(parser_.http_major == 1 && parser_.http_minor == 1)) {
     // This is not necessarily true, but it's good enough since higher layers only care if this is
     // HTTP/1.1 or not.
@@ -526,7 +522,7 @@ int ConnectionImpl::onHeadersCompleteBase() {
         if (new_value.empty()) {
           current_header_map_->removeConnection();
         } else {
-          current_header_map_->Connection()->value(new_value);
+          current_header_map_->setConnection(new_value);
         }
       }
       current_header_map_->remove(Headers::get().Http2Settings);
diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h
index 717e3384e027..25d540000ec7 100644
--- a/source/common/http/http1/codec_impl.h
+++ b/source/common/http/http1/codec_impl.h
@@ -96,11 +96,11 @@ class StreamEncoderImpl : public StreamEncoder,
 
   void encodeFormattedHeader(absl::string_view key, absl::string_view value);
 
-  bool chunk_encoding_{true};
-  bool processing_100_continue_{false};
-  bool is_response_to_head_request_{false};
-  bool is_content_length_allowed_{true};
   const HeaderKeyFormatter* const header_key_formatter_;
+  bool chunk_encoding_ : 1;
+  bool processing_100_continue_ : 1;
+  bool is_response_to_head_request_ : 1;
+  bool is_content_length_allowed_ : 1;
 };
 
 /**
@@ -204,8 +204,11 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable<Log
   http_parser parser_;
   HeaderMapPtr deferred_end_stream_headers_;
   Http::Code error_code_{Http::Code::BadRequest};
-  bool handling_upgrade_{};
   const HeaderKeyFormatterPtr header_key_formatter_;
+  bool handling_upgrade_ : 1;
+  bool reset_stream_called_ : 1;
+  const bool strict_header_validation_ : 1;
+  const bool connection_header_sanitization_ : 1;
 
 private:
   enum class HeaderParsingState { Field, Value, Done };
@@ -300,16 +303,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable<Log
   HeaderParsingState header_parsing_state_{HeaderParsingState::Field};
   HeaderString current_header_field_;
   HeaderString current_header_value_;
-  bool reset_stream_called_{};
   Buffer::WatermarkBuffer output_buffer_;
   Buffer::RawSlice reserved_iovec_;
   char* reserved_current_{};
   Protocol protocol_{Protocol::Http11};
   const uint32_t max_headers_kb_;
   const uint32_t max_headers_count_;
-
-  const bool strict_header_validation_;
-  const bool connection_header_sanitization_;
 };
 
 /**
diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc
index de3f742478fe..f69bb7b4b850 100644
--- a/source/common/http/http2/codec_impl.cc
+++ b/source/common/http/http2/codec_impl.cc
@@ -509,10 +509,6 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) {
 
   switch (frame->hd.type) {
   case NGHTTP2_HEADERS: {
-    // Verify that the final HeaderMap's byte size is under the limit before decoding headers.
-    // This assert iterates over the HeaderMap.
-    ASSERT(stream->headers_->byteSize().has_value() &&
-           stream->headers_->byteSize().value() == stream->headers_->byteSizeInternal());
     stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;
     if (!stream->cookies_.empty()) {
       HeaderString key(Headers::get().Cookie);
@@ -624,12 +620,6 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) {
   case NGHTTP2_HEADERS:
   case NGHTTP2_DATA: {
     StreamImpl* stream = getStream(frame->hd.stream_id);
-    if (stream->headers_) {
-      // Verify that the final HeaderMap's byte size is under the limit before sending frames.
-      // This assert iterates over the HeaderMap.
-      ASSERT(stream->headers_->byteSize().has_value() &&
-             stream->headers_->byteSize().value() == stream->headers_->byteSizeInternal());
-    }
     stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;
     break;
   }
@@ -820,9 +810,7 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name,
   }
   stream->saveHeader(std::move(name), std::move(value));
 
-  // Verify that the cached value in byte size exists.
-  ASSERT(stream->headers_->byteSize().has_value());
-  if (stream->headers_->byteSize().value() > max_headers_kb_ * 1024 ||
+  if (stream->headers_->byteSize() > max_headers_kb_ * 1024 ||
       stream->headers_->size() > max_headers_count_) {
     // This will cause the library to reset/close the stream.
     stats_.header_overflow_.inc();
@@ -1066,6 +1054,13 @@ ConnectionImpl::Http2Options::Http2Options(const Http2Settings& http2_settings)
   if (http2_settings.allow_metadata_) {
     nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE);
   }
+
+  // nghttp2 v1.39.2 lowered the internal flood protection limit from 10K to 1K of ACK frames. This
+  // new limit may cause the internal nghttp2 mitigation to trigger more often (it takes just 9K of
+  // incoming bytes of the smallest 9-byte SETTINGS frames to trigger), bypassing the equivalent
+  // mitigation and its associated behavior in the Envoy HTTP/2 codec. Since Envoy does not rely on
+  // this mitigation, set it back to the old 10K limit to avoid changing the HTTP/2 codec behavior.
+  nghttp2_option_set_max_outbound_ack(options_, 10000);
 }
 
 ConnectionImpl::Http2Options::~Http2Options() { nghttp2_option_del(options_); }
diff --git a/source/common/http/http3/BUILD b/source/common/http/http3/BUILD
new file mode 100644
index 000000000000..086a4f4b902d
--- /dev/null
+++ b/source/common/http/http3/BUILD
@@ -0,0 +1,21 @@
+licenses(["notice"])  # Apache 2
+
+load(
+    "//bazel:envoy_build_system.bzl",
+    "envoy_cc_library",
+    "envoy_package",
+)
+
+envoy_package()
+
+envoy_cc_library(
+    name = "quic_codec_factory_lib",
+    hdrs = ["quic_codec_factory.h"],
+    deps = ["//include/envoy/http:codec_interface"],
+)
+
+envoy_cc_library(
+    name = "well_known_names",
+    hdrs = ["well_known_names.h"],
+    deps = ["//source/common/singleton:const_singleton"],
+)
diff --git a/source/common/http/http3/quic_codec_factory.h b/source/common/http/http3/quic_codec_factory.h
new file mode 100644
index 000000000000..21ca7cab8704
--- /dev/null
+++ b/source/common/http/http3/quic_codec_factory.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <string>
+
+#include "envoy/http/codec.h"
+#include "envoy/network/connection.h"
+
+namespace Envoy {
+namespace Http {
+
+// A factory to create Http::ServerConnection instance for QUIC.
+class QuicHttpServerConnectionFactory {
+public:
+  virtual ~QuicHttpServerConnectionFactory() = default;
+
+  virtual std::string name() const PURE;
+
+  virtual std::unique_ptr<ServerConnection>
+  createQuicServerConnection(Network::Connection& connection, ConnectionCallbacks& callbacks) PURE;
+
+  static std::string category() { return "quic_server_codec"; }
+};
+
+// A factory to create Http::ClientConnection instance for QUIC.
+class QuicHttpClientConnectionFactory {
+public:
+  virtual ~QuicHttpClientConnectionFactory() = default;
+
+  virtual std::string name() const PURE;
+
+  virtual std::unique_ptr<ClientConnection>
+  createQuicClientConnection(Network::Connection& connection, ConnectionCallbacks& callbacks) PURE;
+
+  static std::string category() { return "quic_client_codec"; }
+};
+
+} // namespace Http
+} // namespace Envoy
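
Note (not part of the patch): the header above only defines the factory interfaces; nothing here registers or looks up a concrete implementation. A rough sketch of how a registered factory could be resolved by name, assuming Envoy's usual Registry::FactoryRegistry machinery and the "quiche" constant added in well_known_names.h below:

#include "envoy/registry/registry.h"

#include "common/http/http3/quic_codec_factory.h"
#include "common/http/http3/well_known_names.h"

// Sketch only: resolve a registered QUIC server codec factory by its well-known name and create a
// codec for an established connection. Registration of a concrete factory is not shown.
std::unique_ptr<Envoy::Http::ServerConnection>
makeQuicServerCodec(Envoy::Network::Connection& connection,
                    Envoy::Http::ConnectionCallbacks& callbacks) {
  auto* factory =
      Envoy::Registry::FactoryRegistry<Envoy::Http::QuicHttpServerConnectionFactory>::getFactory(
          Envoy::Http::QuicCodecNames::get().Quiche);
  if (factory == nullptr) {
    return nullptr;
  }
  return factory->createQuicServerConnection(connection, callbacks);
}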
diff --git a/source/common/http/http3/well_known_names.h b/source/common/http/http3/well_known_names.h
new file mode 100644
index 000000000000..aace82a76d55
--- /dev/null
+++ b/source/common/http/http3/well_known_names.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <string>
+
+#include "common/singleton/const_singleton.h"
+
+namespace Envoy {
+namespace Http {
+
+class QuicCodecNameValues {
+public:
+  // QUICHE is the only QUIC implementation for now.
+  const std::string Quiche = "quiche";
+};
+
+using QuicCodecNames = ConstSingleton<QuicCodecNameValues>;
+
+} // namespace Http
+} // namespace Envoy
diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc
index 49372fc50dbf..0266f0b63467 100644
--- a/source/common/http/path_utility.cc
+++ b/source/common/http/path_utility.cc
@@ -29,8 +29,8 @@ absl::optional<std::string> canonicalizePath(absl::string_view original_path) {
 } // namespace
 
 /* static */
-bool PathUtil::canonicalPath(HeaderEntry& path_header) {
-  const auto original_path = path_header.value().getStringView();
+bool PathUtil::canonicalPath(HeaderMap& headers) {
+  const auto original_path = headers.Path()->value().getStringView();
   // canonicalPath is supposed to apply on path component in URL instead of :path header
   const auto query_pos = original_path.find('?');
   auto normalized_path_opt = canonicalizePath(
@@ -50,12 +50,12 @@ bool PathUtil::canonicalPath(HeaderEntry& path_header) {
   if (!query_suffix.empty()) {
     normalized_path.insert(normalized_path.end(), query_suffix.begin(), query_suffix.end());
   }
-  path_header.value(normalized_path);
+  headers.setPath(normalized_path);
   return true;
 }
 
-void PathUtil::mergeSlashes(HeaderEntry& path_header) {
-  const auto original_path = path_header.value().getStringView();
+void PathUtil::mergeSlashes(HeaderMap& headers) {
+  const auto original_path = headers.Path()->value().getStringView();
   // Only operate on path component in URL.
   const absl::string_view::size_type query_start = original_path.find('?');
   const absl::string_view path = original_path.substr(0, query_start);
@@ -65,7 +65,7 @@ void PathUtil::mergeSlashes(HeaderEntry& path_header) {
   }
   const absl::string_view prefix = absl::StartsWith(path, "/") ? "/" : absl::string_view();
   const absl::string_view suffix = absl::EndsWith(path, "/") ? "/" : absl::string_view();
-  path_header.value(absl::StrCat(
+  headers.setPath(absl::StrCat(
       prefix, absl::StrJoin(absl::StrSplit(path, '/', absl::SkipEmpty()), "/"), query, suffix));
 }
 
diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h
index a588f39de46e..8e635ed7d397 100644
--- a/source/common/http/path_utility.h
+++ b/source/common/http/path_utility.h
@@ -11,10 +11,10 @@ namespace Http {
 class PathUtil {
 public:
   // Returns if the normalization succeeds.
-  // If it is successful, the param will be updated with the normalized path.
-  static bool canonicalPath(HeaderEntry& path_header);
+  // If successful, the :path header in the given HeaderMap is updated with the normalized path.
+  static bool canonicalPath(HeaderMap& headers);
   // Merges two or more adjacent slashes in path part of URI into one.
-  static void mergeSlashes(HeaderEntry& path_header);
+  static void mergeSlashes(HeaderMap& headers);
 };
 
 } // namespace Http
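
To make the HeaderEntry-to-HeaderMap signature change above concrete, a small illustrative snippet (the sample path values are made up; it assumes the usual HeaderMapImpl initializer-list constructor and Headers::get().Path):

#include "common/http/header_map_impl.h"
#include "common/http/headers.h"
#include "common/http/path_utility.h"

// Illustrative only: both helpers now take the whole HeaderMap and rewrite :path in place.
void pathUtilityExample() {
  Envoy::Http::HeaderMapImpl headers{{Envoy::Http::Headers::get().Path, "/dir1//dir2/../file?x=1"}};
  Envoy::Http::PathUtil::mergeSlashes(headers);  // :path -> "/dir1/dir2/../file?x=1"
  Envoy::Http::PathUtil::canonicalPath(headers); // :path -> "/dir1/file?x=1"
}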
diff --git a/source/common/http/user_agent.h b/source/common/http/user_agent.h
index bc568a0548dd..6a3ab2f00a06 100644
--- a/source/common/http/user_agent.h
+++ b/source/common/http/user_agent.h
@@ -14,12 +14,10 @@ namespace Envoy {
 /**
  * All stats for user agents. @see stats_macros.h
  */
-// clang-format off
 #define ALL_USER_AGENTS_STATS(COUNTER)                                                             \
   COUNTER(downstream_cx_total)                                                                     \
   COUNTER(downstream_cx_destroy_remote_active_rq)                                                  \
   COUNTER(downstream_rq_total)
-// clang-format on
 
 /**
  * Wrapper struct for user agent stats. @see stats_macros.h
diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc
index c6dad78c8552..2ac769ee2fce 100644
--- a/source/common/http/utility.cc
+++ b/source/common/http/utility.cc
@@ -23,8 +23,10 @@
 #include "common/protobuf/utility.h"
 
 #include "absl/strings/match.h"
+#include "absl/strings/numbers.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
+#include "absl/strings/string_view.h"
 
 namespace Envoy {
 namespace Http {
@@ -73,17 +75,14 @@ void Utility::appendXff(HeaderMap& headers, const Network::Address::Instance& re
     return;
   }
 
-  HeaderString& header = headers.insertForwardedFor().value();
-  const std::string& address_as_string = remote_address.ip()->addressAsString();
-  HeaderMapImpl::appendToHeader(header, address_as_string.c_str());
+  headers.appendForwardedFor(remote_address.ip()->addressAsString(), ",");
 }
 
 void Utility::appendVia(HeaderMap& headers, const std::string& via) {
-  HeaderString& header = headers.insertVia().value();
-  if (!header.empty()) {
-    header.append(", ", 2);
-  }
-  header.append(via.c_str(), via.size());
+  // TODO(asraa): Investigate whether it is necessary to append with whitespace here by:
+  //     (a) Validating that we do not expect whitespace in Via headers.
+  //     (b) Adding runtime guarding in case users have upstreams which expect it.
+  headers.appendVia(via, ", ");
 }
 
 std::string Utility::createSslRedirectPath(const HeaderMap& headers) {
@@ -429,7 +428,7 @@ bool Utility::sanitizeConnectionHeader(Http::HeaderMap& headers) {
     bool keep_header = false;
 
     // Determine whether the nominated header contains invalid values
-    HeaderEntry* nominated_header = NULL;
+    const HeaderEntry* nominated_header = nullptr;
 
     if (lcs_header_to_remove == Http::Headers::get().Connection) {
       // Remove the connection header from the nominated tokens if it's self nominated
@@ -483,8 +482,7 @@ bool Utility::sanitizeConnectionHeader(Http::HeaderMap& headers) {
         }
 
         if (keep_header) {
-          nominated_header->value().setCopy(Http::Headers::get().TEValues.Trailers.data(),
-                                            Http::Headers::get().TEValues.Trailers.size());
+          headers.setTE(Http::Headers::get().TEValues.Trailers);
         }
       }
     }
@@ -504,7 +502,7 @@ bool Utility::sanitizeConnectionHeader(Http::HeaderMap& headers) {
     if (new_value.empty()) {
       headers.removeConnection();
     } else {
-      headers.Connection()->value(new_value);
+      headers.setConnection(new_value);
     }
   }
 
@@ -747,5 +745,49 @@ std::string Utility::PercentEncoding::decode(absl::string_view encoded) {
   return decoded;
 }
 
+Utility::AuthorityAttributes Utility::parseAuthority(absl::string_view host) {
+  // First try to see if there is a port included. This also checks to see that there is not a ']'
+  // as the last character which is indicative of an IPv6 address without a port. This is a best
+  // effort attempt.
+  const auto colon_pos = host.rfind(':');
+  absl::string_view host_to_resolve = host;
+  absl::optional<uint16_t> port;
+  if (colon_pos != absl::string_view::npos && host_to_resolve.back() != ']') {
+    const absl::string_view string_view_host = host;
+    host_to_resolve = string_view_host.substr(0, colon_pos);
+    const auto port_str = string_view_host.substr(colon_pos + 1);
+    uint64_t port64;
+    if (port_str.empty() || !absl::SimpleAtoi(port_str, &port64) || port64 > 65535) {
+      // Just attempt to resolve whatever we were given. This will very likely fail.
+      host_to_resolve = host;
+    } else {
+      port = static_cast<uint16_t>(port64);
+    }
+  }
+
+  // Now see if this is an IP address. We need to know this because some things (such as setting
+  // SNI) are special cased if this is an IP address. Either way, we still go through the normal
+  // resolver flow. We could short-circuit the DNS resolver in this case, but the extra code to do
+  // so is not worth it since the DNS resolver should handle it for us.
+  bool is_ip_address = false;
+  try {
+    absl::string_view potential_ip_address = host_to_resolve;
+    // TODO(mattklein123): Optimally we would support bracket parsing in parseInternetAddress(),
+    // but we still need to trim the brackets to send the IPv6 address into the DNS resolver. For
+    // now, just do all the trimming here, but in the future we should consider whether we can
+    // have unified [] handling as low as possible in the stack.
+    if (potential_ip_address.front() == '[' && potential_ip_address.back() == ']') {
+      potential_ip_address.remove_prefix(1);
+      potential_ip_address.remove_suffix(1);
+    }
+    Network::Utility::parseInternetAddress(std::string(potential_ip_address));
+    is_ip_address = true;
+    host_to_resolve = potential_ip_address;
+  } catch (const EnvoyException&) {
+  }
+
+  return {is_ip_address, host_to_resolve, port};
+}
+
 } // namespace Http
 } // namespace Envoy
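
For reference, the expected behavior of parseAuthority() on a few representative inputs, derived from the implementation above (an illustrative sketch, not test code from this patch):

#include "common/http/utility.h"

// Illustrative expectations, following the implementation above:
//   parseAuthority("example.com")       -> {is_ip_address_=false, host_="example.com", port_=nullopt}
//   parseAuthority("example.com:8080")  -> {is_ip_address_=false, host_="example.com", port_=8080}
//   parseAuthority("10.0.0.1:443")      -> {is_ip_address_=true,  host_="10.0.0.1",    port_=443}
//   parseAuthority("[2001:db8::1]:443") -> {is_ip_address_=true,  host_="2001:db8::1", port_=443}
//   parseAuthority("[2001:db8::1]")     -> {is_ip_address_=true,  host_="2001:db8::1", port_=nullopt}
void parseAuthorityExample() {
  const auto attrs = Envoy::Http::Utility::parseAuthority("[2001:db8::1]:443");
  // attrs.is_ip_address_ == true, attrs.host_ == "2001:db8::1", attrs.port_ == 443
  (void)attrs;
}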
diff --git a/source/common/http/utility.h b/source/common/http/utility.h
index a46bd93a2b64..420f20dad142 100644
--- a/source/common/http/utility.h
+++ b/source/common/http/utility.h
@@ -404,6 +404,25 @@ getMergedPerFilterConfig(const std::string& filter_name, const Router::RouteCons
   return merged;
 }
 
+struct AuthorityAttributes {
+  // Whether the parsed authority is a pure IP address (IPv4/IPv6). If true, the host is an IP
+  // address rather than an FQDN.
+  bool is_ip_address_{};
+
+  // The host portion of the parsed authority.
+  absl::string_view host_;
+
+  // The port of the parsed authority, if one was present.
+  absl::optional<uint16_t> port_;
+};
+
+/**
+ * Parses the given authority (host with optional port) and determines whether it is an IP address
+ * (IPv4/IPv6) or an FQDN, along with the host and the optional port.
+ * @param host the host/authority to parse.
+ * @return AuthorityAttributes whether the host is an IP address, plus the host and optional port.
+ */
+AuthorityAttributes parseAuthority(absl::string_view host);
 } // namespace Utility
 } // namespace Http
 } // namespace Envoy
diff --git a/source/common/network/BUILD b/source/common/network/BUILD
index 4b558ffaea7e..28954514e8f4 100644
--- a/source/common/network/BUILD
+++ b/source/common/network/BUILD
@@ -52,6 +52,7 @@ envoy_cc_library(
         "//source/common/common:assert_lib",
         "//source/common/common:utility_lib",
         "@envoy_api//envoy/api/v2/core:pkg_cc_proto",
+        "@envoy_api//envoy/api/v3alpha/core:pkg_cc_proto",
     ],
 )
 
@@ -64,6 +65,17 @@ envoy_cc_library(
     ],
 )
 
+envoy_cc_library(
+    name = "connection_base_lib",
+    srcs = ["connection_impl_base.cc"],
+    hdrs = ["connection_impl_base.h"],
+    deps = [
+        ":filter_manager_lib",
+        "//include/envoy/event:dispatcher_interface",
+        "//source/common/common:assert_lib",
+    ],
+)
+
 envoy_cc_library(
     name = "connection_lib",
     srcs = ["connection_impl.cc"],
@@ -71,10 +83,9 @@ envoy_cc_library(
     external_deps = ["abseil_optional"],
     deps = [
         ":address_lib",
-        ":filter_manager_lib",
+        ":connection_base_lib",
         ":raw_buffer_socket_lib",
         ":utility_lib",
-        "//include/envoy/event:dispatcher_interface",
         "//include/envoy/event:timer_interface",
         "//include/envoy/network:connection_interface",
         "//include/envoy/network:filter_interface",
diff --git a/source/common/network/cidr_range.cc b/source/common/network/cidr_range.cc
index f59686389a88..1901c267824a 100644
--- a/source/common/network/cidr_range.cc
+++ b/source/common/network/cidr_range.cc
@@ -114,6 +114,10 @@ CidrRange CidrRange::create(const envoy::api::v2::core::CidrRange& cidr) {
   return create(Utility::parseInternetAddress(cidr.address_prefix()), cidr.prefix_len().value());
 }
 
+CidrRange CidrRange::create(const envoy::api::v3alpha::core::CidrRange& cidr) {
+  return create(Utility::parseInternetAddress(cidr.address_prefix()), cidr.prefix_len().value());
+}
+
 // static
 CidrRange CidrRange::create(const std::string& range) {
   const auto parts = StringUtil::splitToken(range, "/");
diff --git a/source/common/network/cidr_range.h b/source/common/network/cidr_range.h
index 44e72585973f..af5371db19f9 100644
--- a/source/common/network/cidr_range.h
+++ b/source/common/network/cidr_range.h
@@ -4,6 +4,7 @@
 #include <vector>
 
 #include "envoy/api/v2/core/address.pb.h"
+#include "envoy/api/v3alpha/core/address.pb.h"
 #include "envoy/json/json_object.h"
 #include "envoy/network/address.h"
 
@@ -99,6 +100,9 @@ class CidrRange {
    * TODO(ccaraman): Update CidrRange::create to support only constructing valid ranges.
    */
   static CidrRange create(const envoy::api::v2::core::CidrRange& cidr);
+  // The ::v2 and ::v3alpha variants will merge once we land API boosting
+  // automation. TODO(htuch): make sure this happens.
+  static CidrRange create(const envoy::api::v3alpha::core::CidrRange& cidr);
 
   /**
    * Given an IP address and a length of high order bits to keep, returns an address
diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc
index 933d2940b440..0c0c2757b092 100644
--- a/source/common/network/connection_impl.cc
+++ b/source/common/network/connection_impl.cc
@@ -42,15 +42,15 @@ std::atomic<uint64_t> ConnectionImpl::next_global_id_;
 
 ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket,
                                TransportSocketPtr&& transport_socket, bool connected)
-    : transport_socket_(std::move(transport_socket)), socket_(std::move(socket)),
+    : ConnectionImplBase(dispatcher, next_global_id_++),
+      transport_socket_(std::move(transport_socket)), socket_(std::move(socket)),
       filter_manager_(*this), stream_info_(dispatcher.timeSource()),
       write_buffer_(
           dispatcher.getWatermarkFactory().create([this]() -> void { this->onLowWatermark(); },
                                                   [this]() -> void { this->onHighWatermark(); })),
-      dispatcher_(dispatcher), id_(next_global_id_++), read_enabled_(true),
-      above_high_watermark_(false), detect_early_close_(true), enable_half_close_(false),
-      read_end_stream_raised_(false), read_end_stream_(false), write_end_stream_(false),
-      current_write_end_stream_(false), dispatch_buffered_data_(false) {
+      read_enabled_(true), above_high_watermark_(false), detect_early_close_(true),
+      enable_half_close_(false), read_end_stream_raised_(false), read_end_stream_(false),
+      write_end_stream_(false), current_write_end_stream_(false), dispatch_buffered_data_(false) {
   // Treat the lack of a valid fd (which in practice only happens if we run out of FDs) as an OOM
   // condition and just crash.
   RELEASE_ASSERT(ioHandle().fd() != -1, "");
@@ -121,7 +121,7 @@ void ConnectionImpl::close(ConnectionCloseType type) {
         file_event_->setEnabled(enable_half_close_ ? 0 : Event::FileReadyType::Closed);
       }
     } else {
-      closeSocket(ConnectionEvent::LocalClose);
+      closeConnectionImmediately();
     }
   } else {
     ASSERT(type == ConnectionCloseType::FlushWrite ||
@@ -175,6 +175,8 @@ Connection::State ConnectionImpl::state() const {
   }
 }
 
+void ConnectionImpl::closeConnectionImmediately() { closeSocket(ConnectionEvent::LocalClose); }
+
 void ConnectionImpl::closeSocket(ConnectionEvent close_type) {
   if (!ioHandle().isOpen()) {
     return;
@@ -200,8 +202,6 @@ void ConnectionImpl::closeSocket(ConnectionEvent close_type) {
   raiseEvent(close_type);
 }
 
-Event::Dispatcher& ConnectionImpl::dispatcher() { return dispatcher_; }
-
 void ConnectionImpl::noDelay(bool enable) {
   // There are cases where a connection to localhost can immediately fail (e.g., if the other end
   // does not have enough fds, reaches a backlog limit, etc.). Because we run with deferred error
@@ -238,8 +238,6 @@ void ConnectionImpl::noDelay(bool enable) {
   RELEASE_ASSERT(0 == rc, "");
 }
 
-uint64_t ConnectionImpl::id() const { return id_; }
-
 void ConnectionImpl::onRead(uint64_t read_buffer_size) {
   if (!read_enabled_ || inDelayedClose()) {
     return;
@@ -331,11 +329,7 @@ void ConnectionImpl::readDisable(bool disable) {
 }
 
 void ConnectionImpl::raiseEvent(ConnectionEvent event) {
-  for (ConnectionCallbacks* callback : callbacks_) {
-    // TODO(mattklein123): If we close while raising a connected event we should not raise further
-    // connected events.
-    callback->onEvent(event);
-  }
+  ConnectionImplBase::raiseConnectionEvent(event);
   // We may have pending data in the write buffer on transport handshake
   // completion, which may also have completed in the context of onReadReady(),
   // where no check of the write buffer is made. Provide an opportunity to flush
@@ -349,8 +343,6 @@ void ConnectionImpl::raiseEvent(ConnectionEvent event) {
 
 bool ConnectionImpl::readEnabled() const { return read_enabled_; }
 
-void ConnectionImpl::addConnectionCallbacks(ConnectionCallbacks& cb) { callbacks_.push_back(&cb); }
-
 void ConnectionImpl::addBytesSentCallback(BytesSentCb cb) {
   bytes_sent_callbacks_.emplace_back(cb);
 }
@@ -588,7 +580,7 @@ void ConnectionImpl::onWriteReady() {
       delayed_close_timer_->enableTimer(delayed_close_timeout_);
     } else {
       ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush);
-      closeSocket(ConnectionEvent::LocalClose);
+      closeConnectionImmediately();
     }
   } else {
     ASSERT(result.action_ == PostIoAction::KeepOpen);
@@ -608,13 +600,6 @@ void ConnectionImpl::onWriteReady() {
   }
 }
 
-void ConnectionImpl::setConnectionStats(const ConnectionStats& stats) {
-  ASSERT(!connection_stats_,
-         "Two network filters are attempting to set connection stats. This indicates an issue "
-         "with the configured filter chain.");
-  connection_stats_ = std::make_unique<ConnectionStats>(stats);
-}
-
 void ConnectionImpl::updateReadBufferStats(uint64_t num_read, uint64_t new_size) {
   if (!connection_stats_) {
     return;
@@ -640,23 +625,6 @@ bool ConnectionImpl::bothSidesHalfClosed() {
   return read_end_stream_ && write_end_stream_ && write_buffer_->length() == 0;
 }
 
-void ConnectionImpl::onDelayedCloseTimeout() {
-  delayed_close_timer_.reset();
-  ENVOY_CONN_LOG(debug, "triggered delayed close", *this);
-  if (connection_stats_ != nullptr && connection_stats_->delayed_close_timeouts_ != nullptr) {
-    connection_stats_->delayed_close_timeouts_->inc();
-  }
-  closeSocket(ConnectionEvent::LocalClose);
-}
-
-void ConnectionImpl::initializeDelayedCloseTimer() {
-  const auto timeout = delayed_close_timeout_.count();
-  ASSERT(delayed_close_timer_ == nullptr && timeout > 0);
-  delayed_close_timer_ = dispatcher_.createTimer([this]() -> void { onDelayedCloseTimeout(); });
-  ENVOY_CONN_LOG(debug, "setting delayed close timer with timeout {} ms", *this, timeout);
-  delayed_close_timer_->enableTimer(delayed_close_timeout_);
-}
-
 absl::string_view ConnectionImpl::transportFailureReason() const {
   return transport_socket_->failureReason();
 }
@@ -666,7 +634,7 @@ ClientConnectionImpl::ClientConnectionImpl(
     const Network::Address::InstanceConstSharedPtr& source_address,
     Network::TransportSocketPtr&& transport_socket,
     const Network::ConnectionSocket::OptionsSharedPtr& options)
-    : ConnectionImpl(dispatcher, std::make_unique<ClientSocketImpl>(remote_address),
+    : ConnectionImpl(dispatcher, std::make_unique<ClientSocketImpl>(remote_address, options),
                      std::move(transport_socket), false) {
   // There are no meaningful socket options or source address semantics for
   // non-IP sockets, so skip.
diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h
index d62f5e898e4b..90a114b3185f 100644
--- a/source/common/network/connection_impl.h
+++ b/source/common/network/connection_impl.h
@@ -6,14 +6,11 @@
 #include <memory>
 #include <string>
 
-#include "envoy/event/dispatcher.h"
-#include "envoy/network/connection.h"
 #include "envoy/network/transport_socket.h"
 
 #include "common/buffer/watermark_buffer.h"
-#include "common/common/logger.h"
 #include "common/event/libevent.h"
-#include "common/network/filter_manager_impl.h"
+#include "common/network/connection_impl_base.h"
 #include "common/stream_info/stream_info_impl.h"
 
 #include "absl/types/optional.h"
@@ -46,9 +43,7 @@ class ConnectionImplUtility {
 /**
  * Implementation of Network::Connection and Network::FilterManagerConnection.
  */
-class ConnectionImpl : public FilterManagerConnection,
-                       public TransportSocketCallbacks,
-                       protected Logger::Loggable<Logger::Id::connection> {
+class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallbacks {
 public:
   ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket,
                  TransportSocketPtr&& transport_socket, bool connected);
@@ -62,12 +57,9 @@ class ConnectionImpl : public FilterManagerConnection,
   bool initializeReadFilters() override;
 
   // Network::Connection
-  void addConnectionCallbacks(ConnectionCallbacks& cb) override;
   void addBytesSentCallback(BytesSentCb cb) override;
   void enableHalfClose(bool enabled) override;
   void close(ConnectionCloseType type) override;
-  Event::Dispatcher& dispatcher() override;
-  uint64_t id() const override;
   std::string nextProtocol() const override { return transport_socket_->protocol(); }
   void noDelay(bool enable) override;
   void readDisable(bool disable) override;
@@ -80,7 +72,6 @@ class ConnectionImpl : public FilterManagerConnection,
     return socket_->localAddress();
   }
   absl::optional<UnixDomainSocketPeerCredentials> unixSocketPeerCredentials() const override;
-  void setConnectionStats(const ConnectionStats& stats) override;
   Ssl::ConnectionInfoConstSharedPtr ssl() const override { return transport_socket_->ssl(); }
   State state() const override;
   void write(Buffer::Instance& data, bool end_stream) override;
@@ -125,13 +116,10 @@ class ConnectionImpl : public FilterManagerConnection,
   // Obtain global next connection ID. This should only be used in tests.
   static uint64_t nextGlobalIdForTest() { return next_global_id_; }
 
-  void setDelayedCloseTimeout(std::chrono::milliseconds timeout) override {
-    // Validate that this is only called prior to issuing a close() or closeSocket().
-    ASSERT(delayed_close_timer_ == nullptr && ioHandle().isOpen());
-    delayed_close_timeout_ = timeout;
-  }
-
 protected:
+  // Network::ConnectionImplBase
+  void closeConnectionImmediately() override;
+
   void closeSocket(ConnectionEvent close_type);
 
   void onLowWatermark();
@@ -149,9 +137,6 @@ class ConnectionImpl : public FilterManagerConnection,
   // write_buffer_ invokes during its clean up.
   Buffer::InstancePtr write_buffer_;
   uint32_t read_buffer_limit_ = 0;
-  std::chrono::milliseconds delayed_close_timeout_{0};
-
-protected:
   bool connecting_{false};
   ConnectionEvent immediate_error_event_{ConnectionEvent::Connected};
   bool bind_error_{false};
@@ -174,40 +159,14 @@ class ConnectionImpl : public FilterManagerConnection,
   // Returns true iff end of stream has been both written and read.
   bool bothSidesHalfClosed();
 
-  // Callback issued when a delayed close timeout triggers.
-  void onDelayedCloseTimeout();
-
-  void initializeDelayedCloseTimer();
-  bool inDelayedClose() const { return delayed_close_state_ != DelayedCloseState::None; }
-
   static std::atomic<uint64_t> next_global_id_;
 
-  // States associated with delayed closing of the connection (i.e., when the underlying socket is
-  // not immediately close()d as a result of a ConnectionImpl::close()).
-  enum class DelayedCloseState {
-    None,
-    // The socket will be closed immediately after the buffer is flushed _or_ if a period of
-    // inactivity after the last write event greater than or equal to delayed_close_timeout_ has
-    // elapsed.
-    CloseAfterFlush,
-    // The socket will be closed after a grace period of delayed_close_timeout_ has elapsed after
-    // the socket is flushed _or_ if a period of inactivity after the last write event greater than
-    // or equal to delayed_close_timeout_ has elapsed.
-    CloseAfterFlushAndWait
-  };
-  DelayedCloseState delayed_close_state_{DelayedCloseState::None};
-
-  Event::Dispatcher& dispatcher_;
-  const uint64_t id_;
-  Event::TimerPtr delayed_close_timer_;
-  std::list<ConnectionCallbacks*> callbacks_;
   std::list<BytesSentCb> bytes_sent_callbacks_;
   // Tracks the number of times reads have been disabled. If N different components call
   // readDisabled(true) this allows the connection to only resume reads when readDisabled(false)
   // has been called N times.
   uint64_t last_read_buffer_size_{};
   uint64_t last_write_buffer_size_{};
-  std::unique_ptr<ConnectionStats> connection_stats_;
   Buffer::Instance* current_write_buffer_{};
   uint32_t read_disable_count_{0};
   bool read_enabled_ : 1;
diff --git a/source/common/network/connection_impl_base.cc b/source/common/network/connection_impl_base.cc
new file mode 100644
index 000000000000..708038bf9d97
--- /dev/null
+++ b/source/common/network/connection_impl_base.cc
@@ -0,0 +1,51 @@
+#include "common/network/connection_impl_base.h"
+
+namespace Envoy {
+namespace Network {
+
+ConnectionImplBase::ConnectionImplBase(Event::Dispatcher& dispatcher, uint64_t id)
+    : dispatcher_(dispatcher), id_(id) {}
+
+void ConnectionImplBase::addConnectionCallbacks(ConnectionCallbacks& cb) {
+  callbacks_.push_back(&cb);
+}
+
+void ConnectionImplBase::setConnectionStats(const ConnectionStats& stats) {
+  ASSERT(!connection_stats_,
+         "Two network filters are attempting to set connection stats. This indicates an issue "
+         "with the configured filter chain.");
+  connection_stats_ = std::make_unique<ConnectionStats>(stats);
+}
+void ConnectionImplBase::setDelayedCloseTimeout(std::chrono::milliseconds timeout) {
+  // Validate that this is only called prior to issuing a close() or closeSocket().
+  ASSERT(delayed_close_timer_ == nullptr && state() == State::Open);
+  delayed_close_timeout_ = timeout;
+}
+
+void ConnectionImplBase::initializeDelayedCloseTimer() {
+  const auto timeout = delayed_close_timeout_.count();
+  ASSERT(delayed_close_timer_ == nullptr && timeout > 0);
+  delayed_close_timer_ = dispatcher_.createTimer([this]() -> void { onDelayedCloseTimeout(); });
+  ENVOY_CONN_LOG(debug, "setting delayed close timer with timeout {} ms", *this, timeout);
+  delayed_close_timer_->enableTimer(delayed_close_timeout_);
+}
+
+void ConnectionImplBase::raiseConnectionEvent(ConnectionEvent event) {
+  for (ConnectionCallbacks* callback : callbacks_) {
+    // TODO(mattklein123): If we close while raising a connected event we should not raise further
+    // connected events.
+    callback->onEvent(event);
+  }
+}
+
+void ConnectionImplBase::onDelayedCloseTimeout() {
+  delayed_close_timer_.reset();
+  ENVOY_CONN_LOG(debug, "triggered delayed close", *this);
+  if (connection_stats_ != nullptr && connection_stats_->delayed_close_timeouts_ != nullptr) {
+    connection_stats_->delayed_close_timeouts_->inc();
+  }
+  closeConnectionImmediately();
+}
+
+} // namespace Network
+} // namespace Envoy
diff --git a/source/common/network/connection_impl_base.h b/source/common/network/connection_impl_base.h
new file mode 100644
index 000000000000..e174de4981d2
--- /dev/null
+++ b/source/common/network/connection_impl_base.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#include "envoy/event/dispatcher.h"
+
+#include "common/common/logger.h"
+#include "common/network/filter_manager_impl.h"
+
+namespace Envoy {
+namespace Network {
+
+class ConnectionImplBase : public FilterManagerConnection,
+                           protected Logger::Loggable<Logger::Id::connection> {
+public:
+  ConnectionImplBase(Event::Dispatcher& dispatcher, uint64_t id);
+  ~ConnectionImplBase() override = default;
+
+  // Network::Connection
+  void addConnectionCallbacks(ConnectionCallbacks& cb) override;
+  Event::Dispatcher& dispatcher() override { return dispatcher_; }
+  uint64_t id() const override { return id_; }
+  void setConnectionStats(const ConnectionStats& stats) override;
+  void setDelayedCloseTimeout(std::chrono::milliseconds timeout) override;
+
+protected:
+  void initializeDelayedCloseTimer();
+
+  bool inDelayedClose() const { return delayed_close_state_ != DelayedCloseState::None; }
+
+  void raiseConnectionEvent(ConnectionEvent event);
+
+  virtual void closeConnectionImmediately() PURE;
+
+  // States associated with delayed closing of the connection (i.e., when the underlying socket is
+  // not immediately close()d as a result of a ConnectionImpl::close()).
+  enum class DelayedCloseState {
+    None,
+    // The socket will be closed immediately after the buffer is flushed _or_ if a period of
+    // inactivity after the last write event greater than or equal to delayed_close_timeout_ has
+    // elapsed.
+    CloseAfterFlush,
+    // The socket will be closed after a grace period of delayed_close_timeout_ has elapsed after
+    // the socket is flushed _or_ if a period of inactivity after the last write event greater than
+    // or equal to delayed_close_timeout_ has elapsed.
+    CloseAfterFlushAndWait
+  };
+  DelayedCloseState delayed_close_state_{DelayedCloseState::None};
+
+  Event::TimerPtr delayed_close_timer_;
+  std::chrono::milliseconds delayed_close_timeout_{0};
+  Event::Dispatcher& dispatcher_;
+  const uint64_t id_;
+  std::list<ConnectionCallbacks*> callbacks_;
+  std::unique_ptr<ConnectionStats> connection_stats_;
+
+private:
+  // Callback issued when a delayed close timeout triggers.
+  void onDelayedCloseTimeout();
+};
+
+} // namespace Network
+} // namespace Envoy
diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc
index 17be12924d9f..e40416dcda13 100644
--- a/source/common/network/dns_impl.cc
+++ b/source/common/network/dns_impl.cc
@@ -21,12 +21,20 @@ namespace Network {
 
 DnsResolverImpl::DnsResolverImpl(
     Event::Dispatcher& dispatcher,
-    const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers)
+    const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,
+    const bool use_tcp_for_dns_lookups)
     : dispatcher_(dispatcher),
       timer_(dispatcher.createTimer([this] { onEventCallback(ARES_SOCKET_BAD, 0); })) {
   ares_options options;
+  memset(&options, 0, sizeof(options));
+  int optmask = 0;
 
-  initializeChannel(&options, 0);
+  if (use_tcp_for_dns_lookups) {
+    optmask |= ARES_OPT_FLAGS;
+    options.flags |= ARES_FLAG_USEVC;
+  }
+
+  initializeChannel(&options, optmask);
 
   if (!resolvers.empty()) {
     std::vector<std::string> resolver_addrs;
diff --git a/source/common/network/dns_impl.h b/source/common/network/dns_impl.h
index 8f73c8922504..cecad8a2e728 100644
--- a/source/common/network/dns_impl.h
+++ b/source/common/network/dns_impl.h
@@ -27,7 +27,8 @@ class DnsResolverImplPeer;
 class DnsResolverImpl : public DnsResolver, protected Logger::Loggable<Logger::Id::upstream> {
 public:
   DnsResolverImpl(Event::Dispatcher& dispatcher,
-                  const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers);
+                  const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,
+                  const bool use_tcp_for_dns_lookups);
   ~DnsResolverImpl() override;
 
   // Network::DnsResolver
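
A minimal sketch of the new constructor parameter in use (the helper name and the empty resolver list are illustrative; an empty list falls back to the system DNS configuration as before):

#include "common/network/dns_impl.h"

// Sketch: force c-ares to use TCP ("virtual circuit") lookups via the new constructor flag.
std::shared_ptr<Envoy::Network::DnsResolverImpl>
makeTcpDnsResolver(Envoy::Event::Dispatcher& dispatcher) {
  return std::make_shared<Envoy::Network::DnsResolverImpl>(
      dispatcher, std::vector<Envoy::Network::Address::InstanceConstSharedPtr>{},
      /*use_tcp_for_dns_lookups=*/true);
}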
diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h
index 2cd7c6b32d67..519960522c82 100644
--- a/source/common/network/listen_socket_impl.h
+++ b/source/common/network/listen_socket_impl.h
@@ -175,9 +175,14 @@ class AcceptedSocketImpl : public ConnectionSocketImpl {
 // ConnectionSocket used with client connections.
 class ClientSocketImpl : public ConnectionSocketImpl {
 public:
-  ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address)
+  ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address,
+                   const OptionsSharedPtr& options)
       : ConnectionSocketImpl(remote_address->socket(Address::SocketType::Stream), nullptr,
-                             remote_address) {}
+                             remote_address) {
+    if (options) {
+      addOptions(options);
+    }
+  }
 };
 
 } // namespace Network
diff --git a/source/common/network/socket_option_factory.cc b/source/common/network/socket_option_factory.cc
index c89c2a6bbce9..8cac03b61b21 100644
--- a/source/common/network/socket_option_factory.cc
+++ b/source/common/network/socket_option_factory.cc
@@ -115,5 +115,12 @@ std::unique_ptr<Socket::Options> SocketOptionFactory::buildRxQueueOverFlowOption
   return options;
 }
 
+std::unique_ptr<Socket::Options> SocketOptionFactory::buildReusePortOptions() {
+  std::unique_ptr<Socket::Options> options = std::make_unique<Socket::Options>();
+  options->push_back(std::make_shared<Network::SocketOptionImpl>(
+      envoy::api::v2::core::SocketOption::STATE_PREBIND, ENVOY_SOCKET_SO_REUSEPORT, 1));
+  return options;
+}
+
 } // namespace Network
 } // namespace Envoy
diff --git a/source/common/network/socket_option_factory.h b/source/common/network/socket_option_factory.h
index f9b02f04864e..bcaabda83121 100644
--- a/source/common/network/socket_option_factory.h
+++ b/source/common/network/socket_option_factory.h
@@ -31,6 +31,7 @@ class SocketOptionFactory : Logger::Loggable<Logger::Id::connection> {
       const Protobuf::RepeatedPtrField<envoy::api::v2::core::SocketOption>& socket_options);
   static std::unique_ptr<Socket::Options> buildIpPacketInfoOptions();
   static std::unique_ptr<Socket::Options> buildRxQueueOverFlowOptions();
+  static std::unique_ptr<Socket::Options> buildReusePortOptions();
 };
 } // namespace Network
 } // namespace Envoy
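
A short sketch of the new helper in use, attaching SO_REUSEPORT to a socket before it is bound (the caller-provided socket reference and the helper name are illustrative):

#include "envoy/network/listen_socket.h"

#include "common/network/socket_option_factory.h"

// Sketch only: add the reuse-port option so it is applied during the STATE_PREBIND phase. On
// platforms without SO_REUSEPORT the option name below resolves to an empty SocketOptionName.
void addReusePort(Envoy::Network::Socket& socket) {
  socket.addOptions(Envoy::Network::SocketOptionFactory::buildReusePortOptions());
}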
diff --git a/source/common/network/socket_option_impl.h b/source/common/network/socket_option_impl.h
index 577608f1a778..688e42dbd1dc 100644
--- a/source/common/network/socket_option_impl.h
+++ b/source/common/network/socket_option_impl.h
@@ -46,6 +46,12 @@ namespace Network {
 #define ENVOY_SOCKET_SO_MARK Network::SocketOptionName()
 #endif
 
+#ifdef SO_REUSEPORT
+#define ENVOY_SOCKET_SO_REUSEPORT ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_REUSEPORT)
+#else
+#define ENVOY_SOCKET_SO_REUSEPORT Network::SocketOptionName()
+#endif
+
 #ifdef TCP_KEEPCNT
 #define ENVOY_SOCKET_TCP_KEEPCNT ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_KEEPCNT)
 #else
diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc
index 0fa1396433cd..9bc3a6ccc7ff 100644
--- a/source/common/protobuf/utility.cc
+++ b/source/common/protobuf/utility.cc
@@ -429,6 +429,14 @@ std::string MessageUtil::getJsonStringFromMessage(const Protobuf::Message& messa
   return json;
 }
 
+void MessageUtil::unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Message& message) {
+  if (!any_message.UnpackTo(&message)) {
+    throw EnvoyException(fmt::format("Unable to unpack as {}: {}",
+                                     message.GetDescriptor()->full_name(),
+                                     any_message.DebugString()));
+  }
+}
+
 void MessageUtil::jsonConvert(const Protobuf::Message& source, ProtobufWkt::Struct& dest) {
   // Any proto3 message can be transformed to Struct, so there is no need to check for unknown
   // fields. There is one catch; Duration/Timestamp etc. which have non-object canonical JSON
@@ -442,6 +450,10 @@ void MessageUtil::jsonConvert(const ProtobufWkt::Struct& source,
   jsonConvertInternal(source, validation_visitor, dest);
 }
 
+void MessageUtil::jsonConvertValue(const Protobuf::Message& source, ProtobufWkt::Value& dest) {
+  jsonConvertInternal(source, ProtobufMessage::getNullValidationVisitor(), dest);
+}
+
 ProtobufWkt::Struct MessageUtil::keyValueStruct(const std::string& key, const std::string& value) {
   ProtobufWkt::Struct struct_obj;
   ProtobufWkt::Value val;
@@ -605,4 +617,12 @@ void TimestampUtil::systemClockToTimestamp(const SystemTime system_clock_time,
           .count()));
 }
 
+absl::string_view TypeUtil::typeUrlToDescriptorFullName(absl::string_view type_url) {
+  const size_t pos = type_url.rfind('/');
+  if (pos != absl::string_view::npos) {
+    type_url = type_url.substr(pos + 1);
+  }
+  return type_url;
+}
+
 } // namespace Envoy
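
A brief sketch of the two new helpers above in use (the Cluster message type is just an example; any generated proto works):

#include "envoy/api/v2/cds.pb.h"

#include "common/protobuf/utility.h"

// Sketch only: unpackTo() throws EnvoyException on a type mismatch instead of returning false, and
// typeUrlToDescriptorFullName() strips the "type.googleapis.com/" style prefix.
void protoUtilityExample(const ProtobufWkt::Any& any_message) {
  // e.g. "type.googleapis.com/envoy.api.v2.Cluster" -> "envoy.api.v2.Cluster".
  const absl::string_view full_name =
      Envoy::TypeUtil::typeUrlToDescriptorFullName(any_message.type_url());

  envoy::api::v2::Cluster cluster;
  Envoy::MessageUtil::unpackTo(any_message, cluster); // throws if any_message is not a Cluster.
  (void)full_name;
}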
diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h
index e9e1c237adb4..a5dee1c58bf1 100644
--- a/source/common/protobuf/utility.h
+++ b/source/common/protobuf/utility.h
@@ -116,6 +116,11 @@ class MissingFieldException : public EnvoyException {
   MissingFieldException(const std::string& field_name, const Protobuf::Message& message);
 };
 
+class TypeUtil {
+public:
+  static absl::string_view typeUrlToDescriptorFullName(absl::string_view type_url);
+};
+
 class RepeatedPtrUtil {
 public:
   static std::string join(const Protobuf::RepeatedPtrField<std::string>& source,
@@ -280,19 +285,27 @@ class MessageUtil {
     return typed_config;
   }
 
+  /**
+   * Convert from google.protobuf.Any to a typed message. This should be used instead of the
+   * built-in UnpackTo() as it throws a descriptive EnvoyException when unpacking fails.
+   *
+   * @param any_message source google.protobuf.Any message.
+   * @param message destination to unpack to.
+   *
+   * @throw EnvoyException if the message does not unpack.
+   */
+  static void unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Message& message);
+
   /**
    * Convert from google.protobuf.Any to a typed message.
    * @param message source google.protobuf.Any message.
-   * @param validation_visitor message validation visitor instance.
    *
    * @return MessageType the typed message inside the Any.
    */
   template <class MessageType>
   static inline MessageType anyConvert(const ProtobufWkt::Any& message) {
     MessageType typed_message;
-    if (!message.UnpackTo(&typed_message)) {
-      throw EnvoyException("Unable to unpack " + message.DebugString());
-    }
+    unpackTo(message, typed_message);
     return typed_message;
   };
 
@@ -308,6 +321,7 @@ class MessageUtil {
   static void jsonConvert(const ProtobufWkt::Struct& source,
                           ProtobufMessage::ValidationVisitor& validation_visitor,
                           Protobuf::Message& dest);
+  static void jsonConvertValue(const Protobuf::Message& source, ProtobufWkt::Value& dest);
 
   /**
    * Extract YAML as string from a google.protobuf.Message.
diff --git a/source/common/router/BUILD b/source/common/router/BUILD
index 693814d17982..abb15b4895ee 100644
--- a/source/common/router/BUILD
+++ b/source/common/router/BUILD
@@ -59,6 +59,7 @@ envoy_cc_library(
         "//source/common/http:headers_lib",
         "//source/common/http:utility_lib",
         "//source/common/protobuf:utility_lib",
+        "//source/common/tracing:http_tracer_lib",
         "//source/extensions/filters/http:well_known_names",
         "@envoy_api//envoy/api/v2:pkg_cc_proto",
         "@envoy_api//envoy/api/v2/route:pkg_cc_proto",
@@ -152,10 +153,13 @@ envoy_cc_library(
         "//include/envoy/thread_local:thread_local_interface",
         "//source/common/common:assert_lib",
         "//source/common/common:callback_impl_lib",
+        "//source/common/common:cleanup_lib",
         "//source/common/common:minimal_logger_lib",
         "//source/common/config:subscription_factory_lib",
         "//source/common/config:utility_lib",
+        "//source/common/init:manager_lib",
         "//source/common/init:target_lib",
+        "//source/common/init:watcher_lib",
         "//source/common/protobuf:utility_lib",
         "//source/common/router:route_config_update_impl_lib",
         "//source/common/router:vhds_lib",
diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc
index fb084e31cb57..3e83cfe9dba8 100644
--- a/source/common/router/config_impl.cc
+++ b/source/common/router/config_impl.cc
@@ -29,6 +29,7 @@
 #include "common/protobuf/protobuf.h"
 #include "common/protobuf/utility.h"
 #include "common/router/retry_state_impl.h"
+#include "common/tracing/http_tracer_impl.h"
 
 #include "extensions/filters/http/well_known_names.h"
 
@@ -218,6 +219,9 @@ RouteTracingImpl::RouteTracingImpl(const envoy::api::v2::route::Tracing& tracing
   } else {
     overall_sampling_ = tracing.overall_sampling();
   }
+  for (const auto& tag : tracing.custom_tags()) {
+    custom_tags_.emplace(tag.tag(), Tracing::HttpTracerUtility::createCustomTag(tag));
+  }
 }
 
 const envoy::type::FractionalPercent& RouteTracingImpl::getClientSampling() const {
@@ -231,6 +235,7 @@ const envoy::type::FractionalPercent& RouteTracingImpl::getRandomSampling() cons
 const envoy::type::FractionalPercent& RouteTracingImpl::getOverallSampling() const {
   return overall_sampling_;
 }
+const Tracing::CustomTagMap& RouteTracingImpl::getCustomTags() const { return custom_tags_; }
 
 RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost,
                                        const envoy::api::v2::route::Route& route,
@@ -424,13 +429,13 @@ void RouteEntryImplBase::finalizeRequestHeaders(Http::HeaderMap& headers,
   }
 
   if (!host_rewrite_.empty()) {
-    headers.Host()->value(host_rewrite_);
+    headers.setHost(host_rewrite_);
   } else if (auto_host_rewrite_header_) {
-    Http::HeaderEntry* header = headers.get(*auto_host_rewrite_header_);
+    const Http::HeaderEntry* header = headers.get(*auto_host_rewrite_header_);
     if (header != nullptr) {
       absl::string_view header_value = header->value().getStringView();
       if (!header_value.empty()) {
-        headers.Host()->value(header_value);
+        headers.setHost(header_value);
       }
     }
   }
@@ -1153,7 +1158,8 @@ createRouteSpecificFilterConfig(const std::string& name, const ProtobufWkt::Any&
   auto& factory = Envoy::Config::Utility::getAndCheckFactory<
       Server::Configuration::NamedHttpFilterConfigFactory>(name);
   ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto();
-  Envoy::Config::Utility::translateOpaqueConfig(typed_config, config, validator, *proto_config);
+  Envoy::Config::Utility::translateOpaqueConfig(name, typed_config, config, validator,
+                                                *proto_config);
   return factory.createRouteSpecificFilterConfig(*proto_config, factory_context, validator);
 }
 
diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h
index 04685587fb96..21258c90eec7 100644
--- a/source/common/router/config_impl.h
+++ b/source/common/router/config_impl.h
@@ -349,10 +349,13 @@ class RouteTracingImpl : public RouteTracing {
   // Tracing::getOverallSampling
   const envoy::type::FractionalPercent& getOverallSampling() const override;
 
+  const Tracing::CustomTagMap& getCustomTags() const override;
+
 private:
   envoy::type::FractionalPercent client_sampling_;
   envoy::type::FractionalPercent random_sampling_;
   envoy::type::FractionalPercent overall_sampling_;
+  Tracing::CustomTagMap custom_tags_;
 };
 
 /**
diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc
index db474d5f617f..6164b39a7d17 100644
--- a/source/common/router/rds_impl.cc
+++ b/source/common/router/rds_impl.cc
@@ -106,17 +106,21 @@ void RdsRouteConfigSubscription::onConfigUpdate(
     provider->validateConfig(route_config);
   }
 
+  std::unique_ptr<Init::ManagerImpl> noop_init_manager;
+  std::unique_ptr<Cleanup> resume_rds;
   if (config_update_info_->onRdsUpdate(route_config, version_info)) {
     stats_.config_reload_.inc();
 
     if (config_update_info_->routeConfiguration().has_vhds()) {
       ENVOY_LOG(debug, "rds: vhds configuration present, starting vhds: config_name={} hash={}",
                 route_config_name_, config_update_info_->configHash());
+      maybeCreateInitManager(version_info, noop_init_manager, resume_rds);
       // TODO(dmitri-d): It's unsafe to depend directly on factory context here,
       // the listener might have been torn down, need to remove this.
       vhds_subscription_ = std::make_unique<VhdsSubscription>(
           config_update_info_, factory_context_, stat_prefix_, route_config_providers_);
-      vhds_subscription_->registerInitTargetWithInitManager(getRdsConfigInitManager());
+      vhds_subscription_->registerInitTargetWithInitManager(
+          noop_init_manager == nullptr ? getRdsConfigInitManager() : *noop_init_manager);
     } else {
       ENVOY_LOG(debug, "rds: loading new configuration: config_name={} hash={}", route_config_name_,
                 config_update_info_->configHash());
@@ -132,6 +136,27 @@ void RdsRouteConfigSubscription::onConfigUpdate(
   init_target_.ready();
 }
 
+// Initialize a no-op InitManager in case the one in the factory_context has completed
+// initialization. This can happen if an RDS config update for an already established RDS
+// subscription contains VHDS configuration.
+void RdsRouteConfigSubscription::maybeCreateInitManager(
+    const std::string& version_info, std::unique_ptr<Init::ManagerImpl>& init_manager,
+    std::unique_ptr<Cleanup>& init_vhds) {
+  if (getRdsConfigInitManager().state() == Init::Manager::State::Initialized) {
+    init_manager = std::make_unique<Init::ManagerImpl>(
+        fmt::format("VHDS {}:{}", route_config_name_, version_info));
+    init_vhds = std::make_unique<Cleanup>([this, &init_manager, version_info] {
+      // For new RDS subscriptions created after the listener has warmed up, we don't wait for them
+      // to warm up.
+      Init::WatcherImpl noop_watcher(
+          // Note: we just throw it away.
+          fmt::format("VHDS ConfigUpdate watcher {}:{}", route_config_name_, version_info),
+          []() { /*Do nothing.*/ });
+      init_manager->initialize(noop_watcher);
+    });
+  }
+}
+
 void RdsRouteConfigSubscription::onConfigUpdate(
     const Protobuf::RepeatedPtrField<envoy::api::v2::Resource>& added_resources,
     const Protobuf::RepeatedPtrField<std::string>& removed_resources, const std::string&) {
diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h
index 44e1cb208656..7b9812e2bb61 100644
--- a/source/common/router/rds_impl.h
+++ b/source/common/router/rds_impl.h
@@ -23,8 +23,12 @@
 #include "envoy/thread_local/thread_local.h"
 
 #include "common/common/callback_impl.h"
+#include "common/common/cleanup.h"
 #include "common/common/logger.h"
+#include "common/config/resources.h"
+#include "common/init/manager_impl.h"
 #include "common/init/target_impl.h"
+#include "common/init/watcher_impl.h"
 #include "common/protobuf/utility.h"
 #include "common/router/route_config_update_receiver_impl.h"
 #include "common/router/vhds.h"
@@ -82,13 +86,10 @@ class StaticRouteConfigProviderImpl : public RouteConfigProvider {
 /**
  * All RDS stats. @see stats_macros.h
  */
-// clang-format off
 #define ALL_RDS_STATS(COUNTER)                                                                     \
   COUNTER(config_reload)                                                                           \
   COUNTER(update_empty)
 
-// clang-format on
-
 /**
  * Struct definition for all RDS stats. @see stats_macros.h
  */
@@ -111,6 +112,9 @@ class RdsRouteConfigSubscription : Envoy::Config::SubscriptionCallbacks,
     return route_config_providers_;
   }
   RouteConfigUpdatePtr& routeConfigUpdate() { return config_update_info_; }
+  void maybeCreateInitManager(const std::string& version_info,
+                              std::unique_ptr<Init::ManagerImpl>& init_manager,
+                              std::unique_ptr<Cleanup>& resume_rds);
 
 private:
   // Config::SubscriptionCallbacks
diff --git a/source/common/router/router.cc b/source/common/router/router.cc
index 648a5d491135..eb48dbff40a6 100644
--- a/source/common/router/router.cc
+++ b/source/common/router/router.cc
@@ -160,26 +160,28 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he
     // If present, use that value as route timeout and don't override
     // *x-envoy-expected-rq-timeout-ms* header. At this point *x-envoy-upstream-rq-timeout-ms*
     // header should have been sanitized by egress Envoy.
-    Http::HeaderEntry* header_expected_timeout_entry =
+    const Http::HeaderEntry* header_expected_timeout_entry =
         request_headers.EnvoyExpectedRequestTimeoutMs();
     if (header_expected_timeout_entry) {
       trySetGlobalTimeout(header_expected_timeout_entry, timeout);
     } else {
-      Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs();
+      const Http::HeaderEntry* header_timeout_entry =
+          request_headers.EnvoyUpstreamRequestTimeoutMs();
 
       if (trySetGlobalTimeout(header_timeout_entry, timeout)) {
         request_headers.removeEnvoyUpstreamRequestTimeoutMs();
       }
     }
   } else {
-    Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs();
+    const Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs();
     if (trySetGlobalTimeout(header_timeout_entry, timeout)) {
       request_headers.removeEnvoyUpstreamRequestTimeoutMs();
     }
   }
 
   // See if there is a per try/retry timeout. If it's >= global we just ignore it.
-  Http::HeaderEntry* per_try_timeout_entry = request_headers.EnvoyUpstreamRequestPerTryTimeoutMs();
+  const Http::HeaderEntry* per_try_timeout_entry =
+      request_headers.EnvoyUpstreamRequestPerTryTimeoutMs();
   if (per_try_timeout_entry) {
     if (absl::SimpleAtoi(per_try_timeout_entry->value().getStringView(), &header_timeout)) {
       timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout);
@@ -209,8 +211,7 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he
   // in grpc-timeout, ensuring that the upstream gRPC server is aware of the actual timeout.
   // If the expected timeout is 0 set no timeout, as Envoy treats 0 as infinite timeout.
   if (grpc_request && route.maxGrpcTimeout() && expected_timeout != 0) {
-    Grpc::Common::toGrpcTimeout(std::chrono::milliseconds(expected_timeout),
-                                request_headers.insertGrpcTimeout().value());
+    Grpc::Common::toGrpcTimeout(std::chrono::milliseconds(expected_timeout), request_headers);
   }
 
   return timeout;
@@ -233,7 +234,8 @@ FilterUtility::HedgingParams FilterUtility::finalHedgingParams(const RouteEntry&
   HedgingParams hedging_params;
   hedging_params.hedge_on_per_try_timeout_ = route.hedgePolicy().hedgeOnPerTryTimeout();
 
-  Http::HeaderEntry* hedge_on_per_try_timeout_entry = request_headers.EnvoyHedgeOnPerTryTimeout();
+  const Http::HeaderEntry* hedge_on_per_try_timeout_entry =
+      request_headers.EnvoyHedgeOnPerTryTimeout();
   if (hedge_on_per_try_timeout_entry) {
     if (hedge_on_per_try_timeout_entry->value() == "true") {
       hedging_params.hedge_on_per_try_timeout_ = true;
@@ -1389,15 +1391,6 @@ Filter::UpstreamRequest::~UpstreamRequest() {
 
   stream_info_.setUpstreamTiming(upstream_timing_);
   stream_info_.onRequestComplete();
-  // Prior to logging, refresh the byte size of the HeaderMaps.
-  // TODO(asraa): Remove this when entries in HeaderMap can no longer be modified by reference and
-  // HeaderMap holds an accurate internal byte size count.
-  if (upstream_headers_ != nullptr) {
-    upstream_headers_->refreshByteSize();
-  }
-  if (upstream_trailers_ != nullptr) {
-    upstream_trailers_->refreshByteSize();
-  }
   for (const auto& upstream_log : parent_.config_.upstream_logs_) {
     upstream_log->log(parent_.downstream_headers_, upstream_headers_.get(),
                       upstream_trailers_.get(), stream_info_);
@@ -1639,7 +1632,7 @@ void Filter::UpstreamRequest::onPoolReady(Http::StreamEncoder& request_encoder,
   setRequestEncoder(request_encoder);
   calling_encode_headers_ = true;
   if (parent_.route_entry_->autoHostRewrite() && !host->hostname().empty()) {
-    parent_.downstream_headers_->Host()->value(host->hostname());
+    parent_.downstream_headers_->setHost(host->hostname());
   }
 
   if (span_ != nullptr) {
diff --git a/source/common/router/router.h b/source/common/router/router.h
index 29c4ed8175a4..c16624a2741a 100644
--- a/source/common/router/router.h
+++ b/source/common/router/router.h
@@ -91,7 +91,7 @@ class FilterUtility {
     using ParseRetryFlagsFunc = std::function<std::pair<uint32_t, bool>(absl::string_view)>;
 
   private:
-    static HeaderCheckResult hasValidRetryFields(Http::HeaderEntry* header_entry,
+    static HeaderCheckResult hasValidRetryFields(const Http::HeaderEntry* header_entry,
                                                  const ParseRetryFlagsFunc& parse_fn) {
       HeaderCheckResult r;
       if (header_entry) {
@@ -102,7 +102,7 @@ class FilterUtility {
       return r;
     }
 
-    static HeaderCheckResult isInteger(Http::HeaderEntry* header_entry) {
+    static HeaderCheckResult isInteger(const Http::HeaderEntry* header_entry) {
       HeaderCheckResult r;
       if (header_entry) {
         uint64_t out;
diff --git a/source/common/router/shadow_writer_impl.cc b/source/common/router/shadow_writer_impl.cc
index 1597fbb8486b..73906beb91a2 100644
--- a/source/common/router/shadow_writer_impl.cc
+++ b/source/common/router/shadow_writer_impl.cc
@@ -25,7 +25,7 @@ void ShadowWriterImpl::shadow(const std::string& cluster, Http::MessagePtr&& req
   // Switch authority to add a shadow postfix. This allows upstream logging to make more sense.
   auto parts = StringUtil::splitToken(request->headers().Host()->value().getStringView(), ":");
   ASSERT(!parts.empty() && parts.size() <= 2);
-  request->headers().Host()->value(
+  request->headers().setHost(
       parts.size() == 2
           ? absl::StrJoin(parts, "-shadow:")
           : absl::StrCat(request->headers().Host()->value().getStringView(), "-shadow"));
diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h
index 0a3cee2fa802..62c305f12982 100644
--- a/source/common/router/vhds.h
+++ b/source/common/router/vhds.h
@@ -26,13 +26,10 @@
 namespace Envoy {
 namespace Router {
 
-// clang-format off
-#define ALL_VHDS_STATS(COUNTER)                                                                     \
+#define ALL_VHDS_STATS(COUNTER)                                                                    \
   COUNTER(config_reload)                                                                           \
   COUNTER(update_empty)
 
-// clang-format on
-
 struct VhdsStats {
   ALL_VHDS_STATS(GENERATE_COUNTER_STRUCT)
 };
diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc
index 5b5e410d865a..d8bb32412732 100644
--- a/source/common/runtime/runtime_features.cc
+++ b/source/common/runtime/runtime_features.cc
@@ -27,7 +27,6 @@ constexpr const char* runtime_features[] = {
     "envoy.reloadable_features.test_feature_true",
     "envoy.reloadable_features.strict_header_validation",
     "envoy.reloadable_features.buffer_filter_populate_content_length",
-    "envoy.reloadable_features.trusted_forwarded_proto",
     "envoy.reloadable_features.outlier_detection_support_for_grpc_status",
     "envoy.reloadable_features.connection_header_sanitization",
 };
diff --git a/source/common/tracing/BUILD b/source/common/tracing/BUILD
index 15061270fbe9..f3e31d3e3c2e 100644
--- a/source/common/tracing/BUILD
+++ b/source/common/tracing/BUILD
@@ -27,6 +27,7 @@ envoy_cc_library(
         "//source/common/common:base64_lib",
         "//source/common/common:macros",
         "//source/common/common:utility_lib",
+        "//source/common/config:metadata_lib",
         "//source/common/grpc:common_lib",
         "//source/common/http:codes_lib",
         "//source/common/http:header_map_lib",
@@ -34,7 +35,10 @@ envoy_cc_library(
         "//source/common/http:message_lib",
         "//source/common/http:utility_lib",
         "//source/common/json:json_loader_lib",
+        "//source/common/protobuf:utility_lib",
         "//source/common/runtime:uuid_util_lib",
         "//source/common/stream_info:utility_lib",
+        "@envoy_api//envoy/api/v2/core:pkg_cc_proto",
+        "@envoy_api//envoy/type/tracing/v2:pkg_cc_proto",
     ],
 )
diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc
index 8d23ecf6459e..a8a90107afa8 100644
--- a/source/common/tracing/http_tracer_impl.cc
+++ b/source/common/tracing/http_tracer_impl.cc
@@ -12,6 +12,7 @@
 #include "common/http/header_map_impl.h"
 #include "common/http/headers.h"
 #include "common/http/utility.h"
+#include "common/protobuf/utility.h"
 #include "common/runtime/uuid_util.h"
 #include "common/stream_info/utility.h"
 
@@ -163,13 +164,13 @@ void HttpTracerUtility::finalizeDownstreamSpan(Span& span, const Http::HeaderMap
       span.setTag(Tracing::Tags::get().GuidXClientTraceId,
                   std::string(request_headers->ClientTraceId()->value().getStringView()));
     }
+  }
+  CustomTagContext ctx{request_headers, stream_info};
 
-    // Build tags based on the custom headers.
-    for (const Http::LowerCaseString& header : tracing_config.requestHeadersForTags()) {
-      const Http::HeaderEntry* entry = request_headers->get(header);
-      if (entry) {
-        span.setTag(header.get(), entry->value().getStringView());
-      }
+  const CustomTagMap* custom_tag_map = tracing_config.customTags();
+  if (custom_tag_map) {
+    for (const auto& it : *custom_tag_map) {
+      it.second->apply(span, ctx);
     }
   }
   span.setTag(Tracing::Tags::get().RequestSize, std::to_string(stream_info.bytesReceived()));
@@ -187,6 +188,11 @@ void HttpTracerUtility::finalizeUpstreamSpan(Span& span, const Http::HeaderMap*
   span.setTag(Tracing::Tags::get().HttpProtocol,
               AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol()));
 
+  if (stream_info.upstreamHost()) {
+    span.setTag(Tracing::Tags::get().UpstreamAddress,
+                stream_info.upstreamHost()->address()->asStringView());
+  }
+
   setCommonTags(span, response_headers, response_trailers, stream_info, tracing_config);
 
   span.finishSpan();
@@ -224,6 +230,22 @@ void HttpTracerUtility::setCommonTags(Span& span, const Http::HeaderMap* respons
   }
 }
 
+CustomTagConstSharedPtr
+HttpTracerUtility::createCustomTag(const envoy::type::tracing::v2::CustomTag& tag) {
+  switch (tag.type_case()) {
+  case envoy::type::tracing::v2::CustomTag::kLiteral:
+    return std::make_shared<const Tracing::LiteralCustomTag>(tag.tag(), tag.literal());
+  case envoy::type::tracing::v2::CustomTag::kEnvironment:
+    return std::make_shared<const Tracing::EnvironmentCustomTag>(tag.tag(), tag.environment());
+  case envoy::type::tracing::v2::CustomTag::kRequestHeader:
+    return std::make_shared<const Tracing::RequestHeaderCustomTag>(tag.tag(), tag.request_header());
+  case envoy::type::tracing::v2::CustomTag::kMetadata:
+    return std::make_shared<const Tracing::MetadataCustomTag>(tag.tag(), tag.metadata());
+  default:
+    NOT_REACHED_GCOVR_EXCL_LINE;
+  }
+}
+
 HttpTracerImpl::HttpTracerImpl(DriverPtr&& driver, const LocalInfo::LocalInfo& local_info)
     : driver_(std::move(driver)), local_info_(local_info) {}
 
@@ -249,5 +271,93 @@ SpanPtr HttpTracerImpl::startSpan(const Config& config, Http::HeaderMap& request
   return active_span;
 }
 
+void CustomTagBase::apply(Span& span, const CustomTagContext& ctx) const {
+  absl::string_view tag_value = value(ctx);
+  if (!tag_value.empty()) {
+    span.setTag(tag(), tag_value);
+  }
+}
+
+EnvironmentCustomTag::EnvironmentCustomTag(
+    const std::string& tag, const envoy::type::tracing::v2::CustomTag::Environment& environment)
+    : CustomTagBase(tag), name_(environment.name()), default_value_(environment.default_value()) {
+  const char* env = std::getenv(name_.data());
+  final_value_ = env ? env : default_value_;
+}
+
+RequestHeaderCustomTag::RequestHeaderCustomTag(
+    const std::string& tag, const envoy::type::tracing::v2::CustomTag::Header& request_header)
+    : CustomTagBase(tag), name_(Http::LowerCaseString(request_header.name())),
+      default_value_(request_header.default_value()) {}
+
+absl::string_view RequestHeaderCustomTag::value(const CustomTagContext& ctx) const {
+  if (!ctx.request_headers) {
+    return default_value_;
+  }
+  const Http::HeaderEntry* entry = ctx.request_headers->get(name_);
+  return entry ? entry->value().getStringView() : default_value_;
+}
+
+MetadataCustomTag::MetadataCustomTag(const std::string& tag,
+                                     const envoy::type::tracing::v2::CustomTag::Metadata& metadata)
+    : CustomTagBase(tag), kind_(metadata.kind().kind_case()),
+      metadata_key_(metadata.metadata_key()), default_value_(metadata.default_value()) {}
+
+void MetadataCustomTag::apply(Span& span, const CustomTagContext& ctx) const {
+  const envoy::api::v2::core::Metadata* meta = metadata(ctx);
+  if (!meta) {
+    if (!default_value_.empty()) {
+      span.setTag(tag(), default_value_);
+    }
+    return;
+  }
+  const ProtobufWkt::Value& value = Envoy::Config::Metadata::metadataValue(*meta, metadata_key_);
+  switch (value.kind_case()) {
+  case ProtobufWkt::Value::kBoolValue:
+    span.setTag(tag(), value.bool_value() ? "true" : "false");
+    return;
+  case ProtobufWkt::Value::kNumberValue:
+    span.setTag(tag(), fmt::format("{}", value.number_value()));
+    return;
+  case ProtobufWkt::Value::kStringValue:
+    span.setTag(tag(), value.string_value());
+    return;
+  case ProtobufWkt::Value::kListValue:
+    span.setTag(tag(), MessageUtil::getJsonStringFromMessage(value.list_value()));
+    return;
+  case ProtobufWkt::Value::kStructValue:
+    span.setTag(tag(), MessageUtil::getJsonStringFromMessage(value.struct_value()));
+    return;
+  default:
+    break;
+  }
+  if (!default_value_.empty()) {
+    span.setTag(tag(), default_value_);
+  }
+}
+
+const envoy::api::v2::core::Metadata*
+MetadataCustomTag::metadata(const CustomTagContext& ctx) const {
+  const StreamInfo::StreamInfo& info = ctx.stream_info;
+  switch (kind_) {
+  case envoy::type::metadata::v2::MetadataKind::kRequest:
+    return &info.dynamicMetadata();
+  case envoy::type::metadata::v2::MetadataKind::kRoute: {
+    const Router::RouteEntry* route_entry = info.routeEntry();
+    return route_entry ? &route_entry->metadata() : nullptr;
+  }
+  case envoy::type::metadata::v2::MetadataKind::kCluster: {
+    const auto& hostPtr = info.upstreamHost();
+    return hostPtr ? &hostPtr->cluster().metadata() : nullptr;
+  }
+  case envoy::type::metadata::v2::MetadataKind::kHost: {
+    const auto& hostPtr = info.upstreamHost();
+    return hostPtr ? hostPtr->metadata().get() : nullptr;
+  }
+  default:
+    NOT_REACHED_GCOVR_EXCL_LINE;
+  }
+}
+
 } // namespace Tracing
 } // namespace Envoy
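The custom tag plumbing added above follows one pattern: each variant of the CustomTag configuration maps to a concrete tag class, and apply() only writes the span tag when the resolved value is non-empty. Below is a minimal standalone sketch of that pattern in plain C++; SimpleSpan, SketchTag, and the other names are illustrative stand-ins, not the Envoy types used in this change.

    #include <cstdlib>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    // Illustrative stand-in for a tracing span; not an Envoy type.
    struct SimpleSpan {
      void setTag(const std::string& key, const std::string& value) { tags_[key] = value; }
      std::map<std::string, std::string> tags_;
    };

    // Mirrors the CustomTagBase contract: resolve a value, set the tag only if it is non-empty.
    class SketchTag {
    public:
      explicit SketchTag(std::string tag) : tag_(std::move(tag)) {}
      virtual ~SketchTag() = default;
      virtual std::string value() const = 0;
      void apply(SimpleSpan& span) const {
        const std::string v = value();
        if (!v.empty()) {
          span.setTag(tag_, v);
        }
      }

    protected:
      const std::string tag_;
    };

    // Literal variant: the value comes straight from configuration.
    class LiteralSketchTag : public SketchTag {
    public:
      LiteralSketchTag(std::string tag, std::string value)
          : SketchTag(std::move(tag)), value_(std::move(value)) {}
      std::string value() const override { return value_; }

    private:
      const std::string value_;
    };

    // Environment variant: the value is captured from the environment once, at construction.
    class EnvironmentSketchTag : public SketchTag {
    public:
      EnvironmentSketchTag(std::string tag, const std::string& name, std::string default_value)
          : SketchTag(std::move(tag)) {
        const char* env = std::getenv(name.c_str());
        final_value_ = env != nullptr ? std::string(env) : std::move(default_value);
      }
      std::string value() const override { return final_value_; }

    private:
      std::string final_value_;
    };

    int main() {
      SimpleSpan span;
      LiteralSketchTag("team", "payments").apply(span);
      EnvironmentSketchTag("node", "HOSTNAME", "unknown").apply(span);
      for (const auto& kv : span.tags_) {
        std::cout << kv.first << "=" << kv.second << "\n";
      }
      return 0;
    }

The request-header and metadata variants in the change resolve their values per request rather than at construction, which is why MetadataCustomTag overrides apply() directly.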
diff --git a/source/common/tracing/http_tracer_impl.h b/source/common/tracing/http_tracer_impl.h
index 4d0cc4ec05c2..c9cfb856194a 100644
--- a/source/common/tracing/http_tracer_impl.h
+++ b/source/common/tracing/http_tracer_impl.h
@@ -2,13 +2,16 @@
 
 #include <string>
 
+#include "envoy/api/v2/core/base.pb.h"
 #include "envoy/common/platform.h"
 #include "envoy/local_info/local_info.h"
 #include "envoy/runtime/runtime.h"
 #include "envoy/thread_local/thread_local.h"
 #include "envoy/tracing/http_tracer.h"
+#include "envoy/type/tracing/v2/custom_tag.pb.h"
 #include "envoy/upstream/cluster_manager.h"
 
+#include "common/config/metadata.h"
 #include "common/http/header_map_impl.h"
 #include "common/json/json_loader.h"
 
@@ -53,6 +56,7 @@ class TracingTagValues {
   const std::string ResponseSize = "response_size";
   const std::string RetryCount = "retry.count";
   const std::string Status = "status";
+  const std::string UpstreamAddress = "upstream_address";
   const std::string UpstreamCluster = "upstream_cluster";
   const std::string UserAgent = "user_agent";
   const std::string Zone = "zone";
@@ -119,6 +123,12 @@ class HttpTracerUtility {
                                    const StreamInfo::StreamInfo& stream_info,
                                    const Config& tracing_config);
 
+  /**
+   * Create a custom tag according to the configuration.
+   * @param tag a tracing custom tag configuration.
+   */
+  static CustomTagConstSharedPtr createCustomTag(const envoy::type::tracing::v2::CustomTag& tag);
+
 private:
   static void setCommonTags(Span& span, const Http::HeaderMap* response_headers,
                             const Http::HeaderMap* response_trailers,
@@ -133,14 +143,9 @@ class EgressConfigImpl : public Config {
 public:
   // Tracing::Config
   Tracing::OperationName operationName() const override { return Tracing::OperationName::Egress; }
-  const std::vector<Http::LowerCaseString>& requestHeadersForTags() const override {
-    return request_headers_for_tags_;
-  }
+  const CustomTagMap* customTags() const override { return nullptr; }
   bool verbose() const override { return false; }
   uint32_t maxPathTagLength() const override { return Tracing::DefaultMaxPathTagLength; }
-
-private:
-  const std::vector<Http::LowerCaseString> request_headers_for_tags_{};
 };
 
 using EgressConfig = ConstSingleton<EgressConfigImpl>;
@@ -187,5 +192,65 @@ class HttpTracerImpl : public HttpTracer {
   const LocalInfo::LocalInfo& local_info_;
 };
 
+class CustomTagBase : public CustomTag {
+public:
+  explicit CustomTagBase(const std::string& tag) : tag_(tag) {}
+  absl::string_view tag() const override { return tag_; }
+  void apply(Span& span, const CustomTagContext& ctx) const override;
+
+  virtual absl::string_view value(const CustomTagContext& ctx) const PURE;
+
+protected:
+  const std::string tag_;
+};
+
+class LiteralCustomTag : public CustomTagBase {
+public:
+  LiteralCustomTag(const std::string& tag,
+                   const envoy::type::tracing::v2::CustomTag::Literal& literal)
+      : CustomTagBase(tag), value_(literal.value()) {}
+  absl::string_view value(const CustomTagContext&) const override { return value_; }
+
+private:
+  const std::string value_;
+};
+
+class EnvironmentCustomTag : public CustomTagBase {
+public:
+  EnvironmentCustomTag(const std::string& tag,
+                       const envoy::type::tracing::v2::CustomTag::Environment& environment);
+  absl::string_view value(const CustomTagContext&) const override { return final_value_; }
+
+private:
+  const std::string name_;
+  const std::string default_value_;
+  std::string final_value_;
+};
+
+class RequestHeaderCustomTag : public CustomTagBase {
+public:
+  RequestHeaderCustomTag(const std::string& tag,
+                         const envoy::type::tracing::v2::CustomTag::Header& request_header);
+  absl::string_view value(const CustomTagContext& ctx) const override;
+
+private:
+  const Http::LowerCaseString name_;
+  const std::string default_value_;
+};
+
+class MetadataCustomTag : public CustomTagBase {
+public:
+  MetadataCustomTag(const std::string& tag,
+                    const envoy::type::tracing::v2::CustomTag::Metadata& metadata);
+  void apply(Span& span, const CustomTagContext& ctx) const override;
+  absl::string_view value(const CustomTagContext&) const override { return default_value_; }
+  const envoy::api::v2::core::Metadata* metadata(const CustomTagContext& ctx) const;
+
+protected:
+  const envoy::type::metadata::v2::MetadataKind::KindCase kind_;
+  const Envoy::Config::MetadataKey metadata_key_;
+  const std::string default_value_;
+};
+
 } // namespace Tracing
 } // namespace Envoy
diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc
index bebf10b93326..ad557eab6d0b 100644
--- a/source/common/upstream/cluster_factory_impl.cc
+++ b/source/common/upstream/cluster_factory_impl.cc
@@ -84,7 +84,8 @@ ClusterFactoryImplBase::selectDnsResolver(const envoy::api::v2::Cluster& cluster
     for (const auto& resolver_addr : resolver_addrs) {
       resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr));
     }
-    return context.dispatcher().createDnsResolver(resolvers);
+    const bool use_tcp_for_dns_lookups = cluster.use_tcp_for_dns_lookups();
+    return context.dispatcher().createDnsResolver(resolvers, use_tcp_for_dns_lookups);
   }
 
   return context.dnsResolver();
diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h
index 4b3f536dd3c5..41759e66d390 100644
--- a/source/common/upstream/cluster_factory_impl.h
+++ b/source/common/upstream/cluster_factory_impl.h
@@ -175,8 +175,9 @@ template <class ConfigProto> class ConfigurableClusterFactoryBase : public Clust
                     Stats::ScopePtr&& stats_scope) override {
     ProtobufTypes::MessagePtr config = createEmptyConfigProto();
     Config::Utility::translateOpaqueConfig(
-        cluster.cluster_type().typed_config(), ProtobufWkt::Struct::default_instance(),
-        socket_factory_context.messageValidationVisitor(), *config);
+        cluster.cluster_type().name(), cluster.cluster_type().typed_config(),
+        ProtobufWkt::Struct::default_instance(), socket_factory_context.messageValidationVisitor(),
+        *config);
     return createClusterWithConfig(cluster,
                                    MessageUtil::downcastAndValidate<const ConfigProto&>(
                                        *config, context.messageValidationVisitor()),
diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc
index f3241d7dacab..e8688c9edcf0 100644
--- a/source/common/upstream/cluster_manager_impl.cc
+++ b/source/common/upstream/cluster_manager_impl.cc
@@ -119,18 +119,23 @@ void ClusterManagerInitHelper::initializeSecondaryClusters() {
 void ClusterManagerInitHelper::maybeFinishInitialize() {
   // Do not do anything if we are still doing the initial static load or if we are waiting for
   // CDS initialize.
+  ENVOY_LOG(debug, "maybe finish initialize state: {}", enumToInt(state_));
   if (state_ == State::Loading || state_ == State::WaitingForCdsInitialize) {
     return;
   }
 
   // If we are still waiting for primary clusters to initialize, do nothing.
   ASSERT(state_ == State::WaitingForStaticInitialize || state_ == State::CdsInitialized);
+  ENVOY_LOG(debug, "maybe finish initialize primary init clusters empty: {}",
+            primary_init_clusters_.empty());
   if (!primary_init_clusters_.empty()) {
     return;
   }
 
   // If we are still waiting for secondary clusters to initialize, see if we need to first call
   // initialize on them. This is only done once.
+  ENVOY_LOG(debug, "maybe finish initialize secondary init clusters empty: {}",
+            secondary_init_clusters_.empty());
   if (!secondary_init_clusters_.empty()) {
     if (!started_secondary_initialize_) {
       ENVOY_LOG(info, "cm init: initializing secondary clusters");
@@ -153,6 +158,7 @@ void ClusterManagerInitHelper::maybeFinishInitialize() {
   // At this point, if we are doing static init, and we have CDS, start CDS init. Otherwise, move
   // directly to initialized.
   started_secondary_initialize_ = false;
+  ENVOY_LOG(debug, "maybe finish initialize cds api ready: {}", cds_ != nullptr);
   if (state_ == State::WaitingForStaticInitialize && cds_) {
     ENVOY_LOG(info, "cm init: initializing cds");
     state_ = State::WaitingForCdsInitialize;
diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc
index bb35f32277b7..c1aad5a96ca2 100644
--- a/source/common/upstream/health_checker_impl.cc
+++ b/source/common/upstream/health_checker_impl.cc
@@ -638,8 +638,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() {
   headers_message->headers().setReferenceUserAgent(
       Http::Headers::get().UserAgentValues.EnvoyHealthChecker);
 
-  Grpc::Common::toGrpcTimeout(parent_.timeout_,
-                              headers_message->headers().insertGrpcTimeout().value());
+  Grpc::Common::toGrpcTimeout(parent_.timeout_, headers_message->headers());
 
   Router::FilterUtility::setUpstreamScheme(
       headers_message->headers(), host_->transportSocketFactory().implementsSecureTransport());
diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h
index 801671785a28..b393190f3b47 100644
--- a/source/common/upstream/health_discovery_service.h
+++ b/source/common/upstream/health_discovery_service.h
@@ -90,12 +90,10 @@ using HdsClusterPtr = std::shared_ptr<HdsCluster>;
 /**
  * All hds stats. @see stats_macros.h
  */
-// clang-format off
-#define ALL_HDS_STATS(COUNTER)                                                           \
+#define ALL_HDS_STATS(COUNTER)                                                                     \
   COUNTER(requests)                                                                                \
   COUNTER(responses)                                                                               \
   COUNTER(errors)
-// clang-format on
 
 /**
  * Struct definition for all hds stats. @see stats_macros.h
diff --git a/source/common/upstream/load_stats_reporter.h b/source/common/upstream/load_stats_reporter.h
index 348d46aff1a0..f938fd366b95 100644
--- a/source/common/upstream/load_stats_reporter.h
+++ b/source/common/upstream/load_stats_reporter.h
@@ -13,12 +13,10 @@ namespace Upstream {
 /**
  * All load reporter stats. @see stats_macros.h
  */
-// clang-format off
 #define ALL_LOAD_REPORTER_STATS(COUNTER)                                                           \
   COUNTER(requests)                                                                                \
   COUNTER(responses)                                                                               \
   COUNTER(errors)
-// clang-format on
 
 /**
  * Struct definition for all load reporter stats. @see stats_macros.h
diff --git a/source/common/upstream/maglev_lb.h b/source/common/upstream/maglev_lb.h
index 34773cf99298..d48849ac3efc 100644
--- a/source/common/upstream/maglev_lb.h
+++ b/source/common/upstream/maglev_lb.h
@@ -26,8 +26,8 @@ struct MaglevLoadBalancerStats {
 /**
  * This is an implementation of Maglev consistent hashing as described in:
  * https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/44824.pdf
- * section 3.4. Specifically, the algorithm shown in pseudocode listening 1 is implemented
- * with a fixed table size of 65537. This is the recommended table size in section 5.3.
+ * section 3.4. Specifically, the algorithm shown in pseudocode listing 1 is implemented with a
+ * fixed table size of 65537. This is the recommended table size in section 5.3.
  */
 class MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer,
                     Logger::Loggable<Logger::Id::upstream> {
diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc
index 2698d06cf24d..db5f5d5eb99e 100644
--- a/source/common/upstream/subset_lb.cc
+++ b/source/common/upstream/subset_lb.cc
@@ -707,8 +707,6 @@ void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added,
 
   // If we only have one locality we can avoid the first call to filter() by
   // just creating a new HostsPerLocality from the list of all hosts.
-  //
-  // TODO(rgs1): merge these two filter() calls in one loop.
   HostsPerLocalityConstSharedPtr hosts_per_locality;
 
   if (original_host_set_.hostsPerLocality().get().size() == 1) {
@@ -718,9 +716,9 @@ void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added,
     hosts_per_locality = original_host_set_.hostsPerLocality().filter({cached_predicate})[0];
   }
 
-  HostsPerLocalityConstSharedPtr healthy_hosts_per_locality =
+  auto healthy_hosts_per_locality =
       original_host_set_.healthyHostsPerLocality().filter({cached_predicate})[0];
-  HostsPerLocalityConstSharedPtr degraded_hosts_per_locality =
+  auto degraded_hosts_per_locality =
       original_host_set_.degradedHostsPerLocality().filter({cached_predicate})[0];
   auto excluded_hosts_per_locality =
       original_host_set_.excludedHostsPerLocality().filter({cached_predicate})[0];
diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc
index cac0fb841af3..26f51bd8b7ae 100644
--- a/source/common/upstream/upstream_impl.cc
+++ b/source/common/upstream/upstream_impl.cc
@@ -137,7 +137,7 @@ createProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typ
     throw EnvoyException(fmt::format("filter {} does not support protocol options", name));
   }
 
-  Envoy::Config::Utility::translateOpaqueConfig(typed_config, config, validation_visitor,
+  Envoy::Config::Utility::translateOpaqueConfig(name, typed_config, config, validation_visitor,
                                                 *proto_config);
 
   return factory->createProtocolOptionsConfig(*proto_config, validation_visitor);
@@ -753,7 +753,7 @@ ClusterInfoImpl::ClusterInfoImpl(
         Server::Configuration::NamedUpstreamNetworkFilterConfigFactory>(string_name);
     auto message = factory.createEmptyConfigProto();
     if (!proto_config.typed_config().value().empty()) {
-      proto_config.typed_config().UnpackTo(message.get());
+      MessageUtil::unpackTo(proto_config.typed_config(), *message);
     }
     Network::FilterFactoryCb callback =
         factory.createFilterFactoryFromProto(*message, *factory_context_);
diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h
index c5f50b3540df..642e916184eb 100644
--- a/source/common/upstream/upstream_impl.h
+++ b/source/common/upstream/upstream_impl.h
@@ -86,9 +86,6 @@ class HostDescriptionImpl : virtual public HostDescription,
   // endpoints churning during a deploy of a large cluster). A possible improvement
   // would be to use TLS and post metadata updates from the main thread. This model would
   // possibly benefit other related and expensive computations too (e.g.: updating subsets).
-  //
-  // TODO(rgs1): we should move to absl locks, once there's support for R/W locks. We should
-  // also add lock annotations, once they work correctly with R/W locks.
   const std::shared_ptr<envoy::api::v2::core::Metadata> metadata() const override {
     absl::ReaderMutexLock lock(&metadata_mutex_);
     return metadata_;
diff --git a/source/extensions/access_loggers/common/access_log_base.h b/source/extensions/access_loggers/common/access_log_base.h
index abe7f009d66f..9a6d6caa6668 100644
--- a/source/extensions/access_loggers/common/access_log_base.h
+++ b/source/extensions/access_loggers/common/access_log_base.h
@@ -27,11 +27,6 @@ class ImplBase : public AccessLog::Instance {
 
   /**
    * Log a completed request if the underlying AccessLog `filter_` allows it.
-   *
-   * Prior to logging, call refreshByteSize() on HeaderMaps to ensure that an accurate byte size
-   * count is logged.
-   * TODO(asraa): Remove refreshByteSize() requirement when entries in HeaderMap can no longer be
-   * modified by reference and HeaderMap holds an accurate internal byte size count.
    */
   void log(const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers,
            const Http::HeaderMap* response_trailers,
diff --git a/source/extensions/access_loggers/file/config.cc b/source/extensions/access_loggers/file/config.cc
index fcf432a861bc..714a5fb4eb6d 100644
--- a/source/extensions/access_loggers/file/config.cc
+++ b/source/extensions/access_loggers/file/config.cc
@@ -39,10 +39,15 @@ FileAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config,
   } else if (fal_config.access_log_format_case() ==
              envoy::config::accesslog::v2::FileAccessLog::kJsonFormat) {
     auto json_format_map = this->convertJsonFormatToMap(fal_config.json_format());
-    formatter = std::make_unique<AccessLog::JsonFormatterImpl>(json_format_map);
+    formatter = std::make_unique<AccessLog::JsonFormatterImpl>(json_format_map, false);
+  } else if (fal_config.access_log_format_case() ==
+             envoy::config::accesslog::v2::FileAccessLog::kTypedJsonFormat) {
+    auto json_format_map = this->convertJsonFormatToMap(fal_config.typed_json_format());
+    formatter = std::make_unique<AccessLog::JsonFormatterImpl>(json_format_map, true);
   } else {
     throw EnvoyException(
-        "Invalid access_log format provided. Only 'format' and 'json_format' are supported.");
+        "Invalid access_log format provided. Only 'format', 'json_format', or 'typed_json_format' "
+        "are supported.");
   }
 
   return std::make_shared<FileAccessLog>(fal_config.path(), std::move(filter), std::move(formatter),
diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc
index 8c3bb139d069..1eda50f09274 100644
--- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc
+++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc
@@ -98,9 +98,7 @@ void HttpGrpcAccessLog::emitLog(const Http::HeaderMap& request_headers,
     request_properties->set_original_path(
         std::string(request_headers.EnvoyOriginalPath()->value().getStringView()));
   }
-  // TODO(asraa): This causes a manual iteration over the request_headers. Instead, refresh the byte
-  // size of this HeaderMap outside the loggers and use byteSize().
-  request_properties->set_request_headers_bytes(request_headers.byteSizeInternal());
+  request_properties->set_request_headers_bytes(request_headers.byteSize());
   request_properties->set_request_body_bytes(stream_info.bytesReceived());
   if (request_headers.Method() != nullptr) {
     envoy::api::v2::core::RequestMethod method =
@@ -128,8 +126,7 @@ void HttpGrpcAccessLog::emitLog(const Http::HeaderMap& request_headers,
   if (stream_info.responseCodeDetails()) {
     response_properties->set_response_code_details(stream_info.responseCodeDetails().value());
   }
-  ASSERT(response_headers.byteSize().has_value());
-  response_properties->set_response_headers_bytes(response_headers.byteSize().value());
+  response_properties->set_response_headers_bytes(response_headers.byteSize());
   response_properties->set_response_body_bytes(stream_info.bytesSent());
   if (!response_headers_to_log_.empty()) {
     auto* logged_headers = response_properties->mutable_response_headers();
diff --git a/source/extensions/clusters/aggregate/BUILD b/source/extensions/clusters/aggregate/BUILD
new file mode 100644
index 000000000000..3f6007f3b686
--- /dev/null
+++ b/source/extensions/clusters/aggregate/BUILD
@@ -0,0 +1,25 @@
+licenses(["notice"])  # Apache 2
+
+load(
+    "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
+    "envoy_package",
+)
+
+envoy_package()
+
+envoy_cc_extension(
+    name = "cluster",
+    srcs = ["cluster.cc"],
+    hdrs = [
+        "cluster.h",
+        "lb_context.h",
+    ],
+    security_posture = "requires_trusted_downstream_and_upstream",
+    deps = [
+        "//source/common/upstream:cluster_factory_lib",
+        "//source/common/upstream:upstream_includes",
+        "//source/extensions/clusters:well_known_names",
+        "@envoy_api//envoy/config/cluster/aggregate/v2alpha:pkg_cc_proto",
+    ],
+)
diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc
new file mode 100644
index 000000000000..809aac1e28dc
--- /dev/null
+++ b/source/extensions/clusters/aggregate/cluster.cc
@@ -0,0 +1,150 @@
+#include "extensions/clusters/aggregate/cluster.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace Clusters {
+namespace Aggregate {
+
+Cluster::Cluster(const envoy::api::v2::Cluster& cluster,
+                 const envoy::config::cluster::aggregate::v2alpha::ClusterConfig& config,
+                 Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime,
+                 Runtime::RandomGenerator& random,
+                 Server::Configuration::TransportSocketFactoryContext& factory_context,
+                 Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api)
+    : Upstream::ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope),
+                                added_via_api),
+      cluster_manager_(cluster_manager), runtime_(runtime), random_(random),
+      tls_(tls.allocateSlot()), clusters_(config.clusters().begin(), config.clusters().end()) {}
+
+PriorityContext
+Cluster::linearizePrioritySet(const std::function<bool(const std::string&)>& skip_predicate) {
+  Upstream::PrioritySetImpl priority_set;
+  std::vector<std::pair<uint32_t, Upstream::ThreadLocalCluster*>> priority_to_cluster;
+  uint32_t next_priority_after_linearizing = 0;
+
+  // Linearize the priority set, e.g., for clusters [C_0, C_1, C_2] referenced in the aggregate
+  // cluster:
+  //    C_0 [P_0, P_1, P_2]
+  //    C_1 [P_0, P_1]
+  //    C_2 [P_0, P_1, P_2, P_3]
+  // The linearization result is:
+  //    [C_0.P_0, C_0.P_1, C_0.P_2, C_1.P_0, C_1.P_1, C_2.P_0, C_2.P_1, C_2.P_2, C_2.P_3]
+  // and the traffic will be distributed among these priorities.
+  for (const auto& cluster : clusters_) {
+    if (skip_predicate(cluster)) {
+      continue;
+    }
+    auto tlc = cluster_manager_.get(cluster);
+    // It is possible that the cluster doesn't exist, e.g., the cluster could be deleted or the
+    // cluster hasn't been added by xDS.
+    if (tlc == nullptr) {
+      continue;
+    }
+
+    uint32_t priority_in_current_cluster = 0;
+    for (const auto& host_set : tlc->prioritySet().hostSetsPerPriority()) {
+      if (!host_set->hosts().empty()) {
+        priority_set.updateHosts(
+            next_priority_after_linearizing++, Upstream::HostSetImpl::updateHostsParams(*host_set),
+            host_set->localityWeights(), host_set->hosts(), {}, host_set->overprovisioningFactor());
+        priority_to_cluster.emplace_back(std::make_pair(priority_in_current_cluster, tlc));
+      }
+      priority_in_current_cluster++;
+    }
+  }
+
+  return std::make_pair(std::move(priority_set), std::move(priority_to_cluster));
+}
+
+void Cluster::startPreInit() {
+  for (const auto& cluster : clusters_) {
+    auto tlc = cluster_manager_.get(cluster);
+    // It is possible that an included cluster doesn't exist yet when the aggregate cluster is
+    // initialized, e.g., because it will be added dynamically by xDS later.
+    if (tlc == nullptr) {
+      continue;
+    }
+
+    // Add a callback for clusters initialized before the aggregate cluster.
+    tlc->prioritySet().addMemberUpdateCb(
+        [this, cluster](const Upstream::HostVector&, const Upstream::HostVector&) {
+          ENVOY_LOG(debug, "member update for cluster '{}' in aggregate cluster '{}'", cluster,
+                    this->info()->name());
+          refresh();
+        });
+  }
+  refresh();
+  handle_ = cluster_manager_.addThreadLocalClusterUpdateCallbacks(*this);
+
+  onPreInitComplete();
+}
+
+void Cluster::refresh(const std::function<bool(const std::string&)>& skip_predicate) {
+  // Post the priority set to worker threads.
+  tls_->runOnAllThreads([this, skip_predicate, cluster_name = this->info()->name()]() {
+    PriorityContext priority_set = linearizePrioritySet(skip_predicate);
+    Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name);
+    ASSERT(cluster != nullptr);
+    dynamic_cast<AggregateClusterLoadBalancer&>(cluster->loadBalancer()).refresh(priority_set);
+  });
+}
+
+void Cluster::onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) {
+  if (std::find(clusters_.begin(), clusters_.end(), cluster.info()->name()) != clusters_.end()) {
+    ENVOY_LOG(debug, "adding or updating cluster '{}' for aggregate cluster '{}'",
+              cluster.info()->name(), info()->name());
+    refresh();
+    cluster.prioritySet().addMemberUpdateCb(
+        [this](const Upstream::HostVector&, const Upstream::HostVector&) { refresh(); });
+  }
+}
+
+void Cluster::onClusterRemoval(const std::string& cluster_name) {
+  // The onClusterRemoval callback is called before the thread local cluster is removed. There
+  // will be a dangling pointer to the thread local cluster if the deleted cluster is not skipped
+  // when we refresh the load balancer.
+  if (std::find(clusters_.begin(), clusters_.end(), cluster_name) != clusters_.end()) {
+    ENVOY_LOG(debug, "removing cluster '{}' from aggreagte cluster '{}'", cluster_name,
+              info()->name());
+    refresh([cluster_name](const std::string& c) { return cluster_name == c; });
+  }
+}
+
+Upstream::HostConstSharedPtr
+AggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost(Upstream::LoadBalancerContext* context) {
+  const auto priority_pair =
+      choosePriority(random_.random(), per_priority_load_.healthy_priority_load_,
+                     per_priority_load_.degraded_priority_load_);
+  AggregateLoadBalancerContext aggregate_context(context, priority_pair.second,
+                                                 priority_to_cluster_[priority_pair.first].first);
+  return priority_to_cluster_[priority_pair.first].second->loadBalancer().chooseHost(
+      &aggregate_context);
+}
+
+Upstream::HostConstSharedPtr
+AggregateClusterLoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) {
+  if (load_balancer_) {
+    return load_balancer_->chooseHost(context);
+  }
+  return nullptr;
+}
+
+std::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>
+ClusterFactory::createClusterWithConfig(
+    const envoy::api::v2::Cluster& cluster,
+    const envoy::config::cluster::aggregate::v2alpha::ClusterConfig& proto_config,
+    Upstream::ClusterFactoryContext& context,
+    Server::Configuration::TransportSocketFactoryContext& socket_factory_context,
+    Stats::ScopePtr&& stats_scope) {
+  auto new_cluster = std::make_shared<Cluster>(
+      cluster, proto_config, context.clusterManager(), context.runtime(), context.random(),
+      socket_factory_context, std::move(stats_scope), context.tls(), context.addedViaApi());
+  auto lb = std::make_unique<AggregateThreadAwareLoadBalancer>(*new_cluster);
+  return std::make_pair(new_cluster, std::move(lb));
+}
+
+REGISTER_FACTORY(ClusterFactory, Upstream::ClusterFactory);
+
+} // namespace Aggregate
+} // namespace Clusters
+} // namespace Extensions
+} // namespace Envoy
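The priority linearization performed by Cluster::linearizePrioritySet() above can be illustrated with a small standalone sketch: every non-empty priority level of every referenced cluster gets the next flattened priority, and a side table records which (cluster, local priority) each flattened slot came from so chooseHost() can delegate back to the owning cluster's load balancer. The sketch uses plain C++ with illustrative names (SketchCluster, host counts instead of host sets), not the Envoy types used in this change.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustrative model: a cluster is a name plus the host count of each of its priority levels.
    using SketchCluster = std::pair<std::string, std::vector<uint32_t>>;

    int main() {
      const std::vector<SketchCluster> clusters = {
          {"C_0", {3, 2, 1}}, {"C_1", {4, 2}}, {"C_2", {1, 1, 1, 1}}};

      // Flattened priority levels and the reverse map back to (cluster index, local priority).
      std::vector<uint32_t> linearized_host_counts;
      std::vector<std::pair<uint32_t, uint32_t>> priority_to_cluster;

      for (uint32_t cluster_index = 0; cluster_index < clusters.size(); ++cluster_index) {
        uint32_t local_priority = 0;
        for (uint32_t hosts : clusters[cluster_index].second) {
          // Empty levels are skipped, as in Cluster::linearizePrioritySet().
          if (hosts > 0) {
            linearized_host_counts.push_back(hosts);
            priority_to_cluster.emplace_back(cluster_index, local_priority);
          }
          ++local_priority;
        }
      }

      for (uint32_t p = 0; p < priority_to_cluster.size(); ++p) {
        std::cout << "linearized P_" << p << " -> "
                  << clusters[priority_to_cluster[p].first].first << ".P_"
                  << priority_to_cluster[p].second << " (" << linearized_host_counts[p]
                  << " hosts)\n";
      }
      return 0;
    }

With the counts above this yields the flat list [C_0.P_0, C_0.P_1, C_0.P_2, C_1.P_0, C_1.P_1, C_2.P_0, C_2.P_1, C_2.P_2, C_2.P_3], matching the example in the comment.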
diff --git a/source/extensions/clusters/aggregate/cluster.h b/source/extensions/clusters/aggregate/cluster.h
new file mode 100644
index 000000000000..23ea5b24d9bc
--- /dev/null
+++ b/source/extensions/clusters/aggregate/cluster.h
@@ -0,0 +1,158 @@
+#pragma once
+
+#include "envoy/config/cluster/aggregate/v2alpha/cluster.pb.h"
+#include "envoy/config/cluster/aggregate/v2alpha/cluster.pb.validate.h"
+
+#include "common/upstream/cluster_factory_impl.h"
+#include "common/upstream/upstream_impl.h"
+
+#include "extensions/clusters/aggregate/lb_context.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace Clusters {
+namespace Aggregate {
+
+using PriorityContext = std::pair<Upstream::PrioritySetImpl,
+                                  std::vector<std::pair<uint32_t, Upstream::ThreadLocalCluster*>>>;
+
+class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbacks {
+public:
+  Cluster(const envoy::api::v2::Cluster& cluster,
+          const envoy::config::cluster::aggregate::v2alpha::ClusterConfig& config,
+          Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime,
+          Runtime::RandomGenerator& random,
+          Server::Configuration::TransportSocketFactoryContext& factory_context,
+          Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api);
+
+  // Upstream::Cluster
+  Upstream::Cluster::InitializePhase initializePhase() const override {
+    return Upstream::Cluster::InitializePhase::Secondary;
+  }
+
+  // Upstream::ClusterUpdateCallbacks
+  void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) override;
+  void onClusterRemoval(const std::string& cluster_name) override;
+
+  void refresh() {
+    refresh([](const std::string&) { return false; });
+  }
+
+  Upstream::ClusterUpdateCallbacksHandlePtr handle_;
+  Upstream::ClusterManager& cluster_manager_;
+  Runtime::Loader& runtime_;
+  Runtime::RandomGenerator& random_;
+  ThreadLocal::SlotPtr tls_;
+  const std::vector<std::string> clusters_;
+
+private:
+  // Upstream::ClusterImplBase
+  void startPreInit() override;
+
+  void refresh(const std::function<bool(const std::string&)>& skip_predicate);
+  PriorityContext
+  linearizePrioritySet(const std::function<bool(const std::string&)>& skip_predicate);
+};
+
+// Load balancer used by each worker thread. It will be refreshed when clusters, hosts or priorities
+// are updated.
+class AggregateClusterLoadBalancer : public Upstream::LoadBalancer {
+public:
+  AggregateClusterLoadBalancer(Upstream::ClusterStats& stats, Runtime::Loader& runtime,
+                               Runtime::RandomGenerator& random,
+                               const envoy::api::v2::Cluster::CommonLbConfig& common_config)
+      : stats_(stats), runtime_(runtime), random_(random), common_config_(common_config) {}
+
+  // Upstream::LoadBalancer
+  Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override;
+
+private:
+  // Use an inner class to extend LoadBalancerBase: the priority set may be empty when this load
+  // balancer is created, and LoadBalancerBase cannot be initialized with an empty priority set.
+  class LoadBalancerImpl : public Upstream::LoadBalancerBase {
+  public:
+    LoadBalancerImpl(const PriorityContext& priority_context, Upstream::ClusterStats& stats,
+                     Runtime::Loader& runtime, Runtime::RandomGenerator& random,
+                     const envoy::api::v2::Cluster::CommonLbConfig& common_config)
+        : Upstream::LoadBalancerBase(priority_context.first, stats, runtime, random, common_config),
+          priority_to_cluster_(priority_context.second) {}
+
+    // Upstream::LoadBalancer
+    Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override;
+
+    // Upstream::LoadBalancerBase
+    Upstream::HostConstSharedPtr chooseHostOnce(Upstream::LoadBalancerContext*) override {
+      NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
+    }
+
+  private:
+    std::vector<std::pair<uint32_t, Upstream::ThreadLocalCluster*>> priority_to_cluster_;
+  };
+
+  using LoadBalancerImplPtr = std::unique_ptr<LoadBalancerImpl>;
+
+  LoadBalancerImplPtr load_balancer_;
+  Upstream::ClusterStats& stats_;
+  Runtime::Loader& runtime_;
+  Runtime::RandomGenerator& random_;
+  const envoy::api::v2::Cluster::CommonLbConfig& common_config_;
+
+public:
+  void refresh(const PriorityContext& priority_context) {
+    if (!priority_context.first.hostSetsPerPriority().empty()) {
+      load_balancer_ = std::make_unique<LoadBalancerImpl>(priority_context, stats_, runtime_,
+                                                          random_, common_config_);
+    } else {
+      load_balancer_ = nullptr;
+    }
+  }
+};
+
+// Load balancer factory created on the main thread and invoked on each worker thread to create
+// the thread local load balancer.
+struct AggregateLoadBalancerFactory : public Upstream::LoadBalancerFactory {
+  AggregateLoadBalancerFactory(const Cluster& cluster) : cluster_(cluster) {}
+  // Upstream::LoadBalancerFactory
+  Upstream::LoadBalancerPtr create() override {
+    return std::make_unique<AggregateClusterLoadBalancer>(
+        cluster_.info()->stats(), cluster_.runtime_, cluster_.random_, cluster_.info()->lbConfig());
+  }
+
+  const Cluster& cluster_;
+};
+
+// Thread aware load balancer created by the main thread.
+struct AggregateThreadAwareLoadBalancer : public Upstream::ThreadAwareLoadBalancer {
+  AggregateThreadAwareLoadBalancer(const Cluster& cluster) : cluster_(cluster) {}
+
+  // Upstream::ThreadAwareLoadBalancer
+  Upstream::LoadBalancerFactorySharedPtr factory() override {
+    return std::make_shared<AggregateLoadBalancerFactory>(cluster_);
+  }
+  void initialize() override {}
+
+  const Cluster& cluster_;
+};
+
+class ClusterFactory : public Upstream::ConfigurableClusterFactoryBase<
+                           envoy::config::cluster::aggregate::v2alpha::ClusterConfig> {
+public:
+  ClusterFactory()
+      : ConfigurableClusterFactoryBase(Extensions::Clusters::ClusterTypes::get().Aggregate) {}
+
+private:
+  std::pair<Upstream::ClusterImplBaseSharedPtr, Upstream::ThreadAwareLoadBalancerPtr>
+  createClusterWithConfig(
+      const envoy::api::v2::Cluster& cluster,
+      const envoy::config::cluster::aggregate::v2alpha::ClusterConfig& proto_config,
+      Upstream::ClusterFactoryContext& context,
+      Server::Configuration::TransportSocketFactoryContext& socket_factory_context,
+      Stats::ScopePtr&& stats_scope) override;
+};
+
+DECLARE_FACTORY(ClusterFactory);
+
+} // namespace Aggregate
+} // namespace Clusters
+} // namespace Extensions
+} // namespace Envoy
diff --git a/source/extensions/clusters/aggregate/lb_context.h b/source/extensions/clusters/aggregate/lb_context.h
new file mode 100644
index 000000000000..1e6e6a6cca48
--- /dev/null
+++ b/source/extensions/clusters/aggregate/lb_context.h
@@ -0,0 +1,78 @@
+#pragma once
+
+#include "common/upstream/load_balancer_impl.h"
+#include "common/upstream/upstream_impl.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace Clusters {
+namespace Aggregate {
+
+// AggregateLoadBalancerContext wraps the load balancer context to re-assign priority load
+// according to the host priority selected by the aggregate load balancer.
+class AggregateLoadBalancerContext : public Upstream::LoadBalancerContext {
+public:
+  AggregateLoadBalancerContext(Upstream::LoadBalancerContext* context,
+                               Upstream::LoadBalancerBase::HostAvailability host_availability,
+                               uint32_t host_priority)
+      : host_availability_(host_availability), host_priority_(host_priority) {
+    if (context == nullptr) {
+      owned_context_ = std::make_unique<Upstream::LoadBalancerContextBase>();
+      context_ = owned_context_.get();
+    } else {
+      context_ = context;
+    }
+  }
+
+  // Upstream::LoadBalancerContext
+  absl::optional<uint64_t> computeHashKey() override { return context_->computeHashKey(); }
+  const Network::Connection* downstreamConnection() const override {
+    return context_->downstreamConnection();
+  }
+  const Router::MetadataMatchCriteria* metadataMatchCriteria() override {
+    return context_->metadataMatchCriteria();
+  }
+  const Http::HeaderMap* downstreamHeaders() const override {
+    return context_->downstreamHeaders();
+  }
+  const Upstream::HealthyAndDegradedLoad&
+  determinePriorityLoad(const Upstream::PrioritySet&,
+                        const Upstream::HealthyAndDegradedLoad& original_priority_load) override {
+    // Re-assign the load: send all traffic to the priority and availability selected by the
+    // aggregate cluster.
+    // TODO(yxue): allow determinePriorityLoad to affect the load of the top level cluster and
+    // verify it works with the current retry plugin.
+    const size_t priorities = original_priority_load.healthy_priority_load_.get().size();
+    priority_load_.healthy_priority_load_.get().assign(priorities, 0);
+    priority_load_.degraded_priority_load_.get().assign(priorities, 0);
+
+    if (host_availability_ == Upstream::LoadBalancerBase::HostAvailability::Healthy) {
+      priority_load_.healthy_priority_load_.get()[host_priority_] = 100;
+    } else {
+      priority_load_.degraded_priority_load_.get()[host_priority_] = 100;
+    }
+    return priority_load_;
+  }
+  bool shouldSelectAnotherHost(const Upstream::Host& host) override {
+    return context_->shouldSelectAnotherHost(host);
+  }
+  uint32_t hostSelectionRetryCount() const override { return context_->hostSelectionRetryCount(); }
+  Network::Socket::OptionsSharedPtr upstreamSocketOptions() const override {
+    return context_->upstreamSocketOptions();
+  }
+  Network::TransportSocketOptionsSharedPtr upstreamTransportSocketOptions() const override {
+    return context_->upstreamTransportSocketOptions();
+  }
+
+private:
+  Upstream::HealthyAndDegradedLoad priority_load_;
+  std::unique_ptr<Upstream::LoadBalancerContext> owned_context_;
+  Upstream::LoadBalancerContext* context_{nullptr};
+  const Upstream::LoadBalancerBase::HostAvailability host_availability_;
+  const uint32_t host_priority_;
+};
+
+} // namespace Aggregate
+} // namespace Clusters
+} // namespace Extensions
+} // namespace Envoy
\ No newline at end of file
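The determinePriorityLoad() override above is the key trick of AggregateLoadBalancerContext: whatever load distribution the wrapped cluster would normally compute, the context zeroes it out and places 100% on the single priority and availability already chosen by the aggregate load balancer. A minimal standalone sketch of that re-assignment, in plain C++ with illustrative names (SketchPriorityLoad stands in for Upstream::HealthyAndDegradedLoad):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct SketchPriorityLoad {
      std::vector<uint32_t> healthy;
      std::vector<uint32_t> degraded;
    };

    // Zero out every priority, then place 100% on the priority/availability chosen upstream,
    // mirroring what AggregateLoadBalancerContext::determinePriorityLoad() does.
    SketchPriorityLoad reassign(uint32_t num_priorities, uint32_t chosen_priority, bool healthy) {
      SketchPriorityLoad load;
      load.healthy.assign(num_priorities, 0);
      load.degraded.assign(num_priorities, 0);
      (healthy ? load.healthy : load.degraded)[chosen_priority] = 100;
      return load;
    }

    int main() {
      // Three priorities in the selected cluster; the aggregate LB picked priority 1, healthy hosts.
      const SketchPriorityLoad load = reassign(3, 1, /*healthy=*/true);
      for (uint32_t p = 0; p < load.healthy.size(); ++p) {
        std::cout << "P_" << p << ": healthy=" << load.healthy[p]
                  << " degraded=" << load.degraded[p] << "\n";
      }
      return 0;
    }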
diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h
index 4fc46ac2a837..51077c3b652d 100644
--- a/source/extensions/clusters/redis/redis_cluster.h
+++ b/source/extensions/clusters/redis/redis_cluster.h
@@ -194,7 +194,7 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl {
 
   struct RedisDiscoverySession
       : public Extensions::NetworkFilters::Common::Redis::Client::Config,
-        public Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks {
+        public Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks {
     RedisDiscoverySession(RedisCluster& parent,
                           NetworkFilters::Common::Redis::Client::ClientFactory& client_factory);
 
@@ -225,11 +225,14 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl {
       return Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy::Master;
     }
 
-    // Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks
+    // Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks
     void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override;
     void onFailure() override;
     // Note: Below callback isn't used in topology updates
-    bool onRedirection(const NetworkFilters::Common::Redis::RespValue&) override { return true; }
+    bool onRedirection(NetworkFilters::Common::Redis::RespValuePtr&&, const std::string&,
+                       bool) override {
+      return true;
+    }
     void onUnexpectedResponse(const NetworkFilters::Common::Redis::RespValuePtr&);
 
     Network::Address::InstanceConstSharedPtr
diff --git a/source/extensions/clusters/well_known_names.h b/source/extensions/clusters/well_known_names.h
index 48654e4f2c71..a387cf32c735 100644
--- a/source/extensions/clusters/well_known_names.h
+++ b/source/extensions/clusters/well_known_names.h
@@ -35,6 +35,10 @@ class ClusterTypeValues {
   // Dynamic forward proxy cluster. This cluster is designed to work directly with the
   // dynamic forward proxy HTTP filter.
   const std::string DynamicForwardProxy = "envoy.clusters.dynamic_forward_proxy";
+
+  // Aggregate cluster which may contain different types of clusters. It allows load balancing
+  // between clusters of different types.
+  const std::string Aggregate = "envoy.clusters.aggregate";
 };
 
 using ClusterTypes = ConstSingleton<ClusterTypeValues>;
diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc
index b1a682c98650..a5ff26147a38 100644
--- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc
+++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc
@@ -1,5 +1,6 @@
 #include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h"
 
+#include "common/http/utility.h"
 #include "common/network/utility.h"
 
 // TODO(mattklein123): Move DNS family helpers to a smaller include.
@@ -16,7 +17,7 @@ DnsCacheImpl::DnsCacheImpl(
     const envoy::config::common::dynamic_forward_proxy::v2alpha::DnsCacheConfig& config)
     : main_thread_dispatcher_(main_thread_dispatcher),
       dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())),
-      resolver_(main_thread_dispatcher.createDnsResolver({})), tls_slot_(tls.allocateSlot()),
+      resolver_(main_thread_dispatcher.createDnsResolver({}, false)), tls_slot_(tls.allocateSlot()),
       scope_(root_scope.createScope(fmt::format("dns_cache.{}.", config.name()))),
       stats_{ALL_DNS_CACHE_STATS(POOL_COUNTER(*scope_), POOL_GAUGE(*scope_))},
       refresh_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_refresh_rate, 60000)),
@@ -90,55 +91,19 @@ void DnsCacheImpl::startCacheLoad(const std::string& host, uint16_t default_port
     return;
   }
 
-  // First try to see if there is a port included. This also checks to see that there is not a ']'
-  // as the last character which is indicative of an IPv6 address without a port. This is a best
-  // effort attempt.
-  const auto colon_pos = host.rfind(':');
-  absl::string_view host_to_resolve = host;
-  if (colon_pos != absl::string_view::npos && host_to_resolve.back() != ']') {
-    const absl::string_view string_view_host = host;
-    host_to_resolve = string_view_host.substr(0, colon_pos);
-    const auto port_str = string_view_host.substr(colon_pos + 1);
-    uint64_t port64;
-    if (port_str.empty() || !absl::SimpleAtoi(port_str, &port64) || port64 > 65535) {
-      // Just attempt to resolve whatever we were given. This will very likely fail.
-      host_to_resolve = host;
-    } else {
-      default_port = port64;
-    }
-  }
-
-  // Now see if this is an IP address. We need to know this because some things (such as setting
-  // SNI) are special cased if this is an IP address. Either way, we still go through the normal
-  // resolver flow. We could short-circuit the DNS resolver in this case, but the extra code to do
-  // so is not worth it since the DNS resolver should handle it for us.
-  bool is_ip_address = false;
-  try {
-    absl::string_view potential_ip_address = host_to_resolve;
-    // TODO(mattklein123): Optimally we would support bracket parsing in parseInternetAddress(),
-    // but we still need to trim the brackets to send the IPv6 address into the DNS resolver. For
-    // now, just do all the trimming here, but in the future we should consider whether we can
-    // have unified [] handling as low as possible in the stack.
-    if (potential_ip_address.front() == '[' && potential_ip_address.back() == ']') {
-      potential_ip_address.remove_prefix(1);
-      potential_ip_address.remove_suffix(1);
-    }
-    Network::Utility::parseInternetAddress(std::string(potential_ip_address));
-    is_ip_address = true;
-    host_to_resolve = potential_ip_address;
-  } catch (const EnvoyException&) {
-  }
+  const auto host_attributes = Http::Utility::parseAuthority(host);
 
   // TODO(mattklein123): Right now, the same host with different ports will become two
   // independent primary hosts with independent DNS resolutions. I'm not sure how much this will
   // matter, but we could consider collapsing these down and sharing the underlying DNS resolution.
-  auto& primary_host =
-      *primary_hosts_
-           // try_emplace() is used here for direct argument forwarding.
-           .try_emplace(host, std::make_unique<PrimaryHostInfo>(
-                                  *this, host_to_resolve, default_port, is_ip_address,
-                                  [this, host]() { onReResolve(host); }))
-           .first->second;
+  auto& primary_host = *primary_hosts_
+                            // try_emplace() is used here for direct argument forwarding.
+                            .try_emplace(host, std::make_unique<PrimaryHostInfo>(
+                                                   *this, std::string(host_attributes.host_),
+                                                   host_attributes.port_.value_or(default_port),
+                                                   host_attributes.is_ip_address_,
+                                                   [this, host]() { onReResolve(host); }))
+                            .first->second;
   startResolve(host, primary_host);
 }
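
The inline best-effort parsing removed above (host:port splitting plus bracketed-IPv6 and IP-literal detection) is now delegated to Http::Utility::parseAuthority, whose result carries host_, port_, and is_ip_address_ as consumed by the try_emplace() call. A rough, self-contained sketch of that kind of authority splitting, with simplified error handling and hypothetical names (this is not the Envoy helper itself):

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <string_view>

// Result of splitting an authority string such as "example.com:8080" or "[::1]:443".
struct AuthoritySketch {
  std::string host;
  std::optional<uint16_t> port; // empty => caller falls back to its default port
  bool bracketed_ipv6{false};
};

AuthoritySketch parseAuthoritySketch(std::string_view authority) {
  AuthoritySketch out;
  // Bracketed IPv6 literal, optionally followed by ":port".
  if (!authority.empty() && authority.front() == '[') {
    const auto end = authority.find(']');
    if (end != std::string_view::npos) {
      out.host = std::string(authority.substr(1, end - 1));
      out.bracketed_ipv6 = true;
      if (end + 1 < authority.size() && authority[end + 1] == ':') {
        out.port = static_cast<uint16_t>(std::stoul(std::string(authority.substr(end + 2))));
      }
      return out;
    }
  }
  // Plain host with an optional trailing ":port". A bare IPv6 literal without brackets is
  // not handled in this sketch.
  const auto colon = authority.rfind(':');
  if (colon != std::string_view::npos) {
    out.host = std::string(authority.substr(0, colon));
    out.port = static_cast<uint16_t>(std::stoul(std::string(authority.substr(colon + 1))));
  } else {
    out.host = std::string(authority);
  }
  return out;
}

int main() {
  for (const char* authority : {"example.com:8080", "example.com", "[2001:db8::1]:443"}) {
    const auto parsed = parseAuthoritySketch(authority);
    std::cout << authority << " -> host=" << parsed.host << " port="
              << (parsed.port ? std::to_string(*parsed.port) : std::string("default")) << "\n";
  }
  return 0;
}

The real helper additionally classifies whether the host is an IP literal, which is what feeds is_ip_address_ above.
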
 
diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl
index 9d7c18c07f8b..2503e4643416 100644
--- a/source/extensions/extensions_build_config.bzl
+++ b/source/extensions/extensions_build_config.bzl
@@ -12,6 +12,7 @@ EXTENSIONS = {
     # Clusters
     #
 
+    "envoy.clusters.aggregate":                         "//source/extensions/clusters/aggregate:cluster",
     "envoy.clusters.dynamic_forward_proxy":             "//source/extensions/clusters/dynamic_forward_proxy:cluster",
     "envoy.clusters.redis":                             "//source/extensions/clusters/redis:redis_cluster",
 
@@ -97,7 +98,6 @@ EXTENSIONS = {
     # UDP filters
     #
 
-    # WiP
     "envoy.filters.udp_listener.udp_proxy":             "//source/extensions/filters/udp/udp_proxy:config",
 
     #
diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc
index 7b2912e2b4d1..75c91dc30fa0 100644
--- a/source/extensions/filters/common/expr/context.cc
+++ b/source/extensions/filters/common/expr/context.cc
@@ -1,5 +1,7 @@
 #include "extensions/filters/common/expr/context.h"
 
+#include "common/http/utility.h"
+
 #include "absl/strings/numbers.h"
 #include "absl/time/time.h"
 
@@ -82,6 +84,12 @@ absl::optional<CelValue> RequestWrapper::operator[](CelValue key) const {
     if (duration.has_value()) {
       return CelValue::CreateDuration(absl::FromChrono(duration.value()));
     }
+  } else if (value == Protocol) {
+    if (info_.protocol().has_value()) {
+      return CelValue::CreateString(&Http::Utility::getProtocolString(info_.protocol().value()));
+    } else {
+      return {};
+    }
   }
 
   if (headers_.value_ != nullptr) {
@@ -107,7 +115,7 @@ absl::optional<CelValue> RequestWrapper::operator[](CelValue key) const {
     } else if (value == UserAgent) {
       return convertHeaderEntry(headers_.value_->UserAgent());
     } else if (value == TotalSize) {
-      return CelValue::CreateInt64(info_.bytesReceived() + headers_.value_->byteSize().value());
+      return CelValue::CreateInt64(info_.bytesReceived() + headers_.value_->byteSize());
     }
   }
   return {};
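
With the new Protocol key, the request wrapper can report the negotiated HTTP protocol string and, per the change above, simply yields no value while the protocol is still unknown. Assuming the usual "request." attribute prefix for this wrapper, an expression along the lines of request.protocol == 'HTTP/2' (hypothetical usage) can now gate on the protocol without tripping an error when it is absent.
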
diff --git a/source/extensions/filters/common/expr/context.h b/source/extensions/filters/common/expr/context.h
index 3dd108ff3109..8fd34742d6ac 100644
--- a/source/extensions/filters/common/expr/context.h
+++ b/source/extensions/filters/common/expr/context.h
@@ -30,6 +30,7 @@ constexpr absl::string_view UserAgent = "useragent";
 constexpr absl::string_view Size = "size";
 constexpr absl::string_view TotalSize = "total_size";
 constexpr absl::string_view Duration = "duration";
+constexpr absl::string_view Protocol = "protocol";
 
 // Symbols for traversing the response properties
 constexpr absl::string_view Response = "response";
diff --git a/source/extensions/filters/common/expr/evaluator.cc b/source/extensions/filters/common/expr/evaluator.cc
index 4d04bf534c0e..dff584d3ee9e 100644
--- a/source/extensions/filters/common/expr/evaluator.cc
+++ b/source/extensions/filters/common/expr/evaluator.cc
@@ -49,7 +49,7 @@ BuilderPtr createBuilder(Protobuf::Arena* arena) {
   auto register_status =
       google::api::expr::runtime::RegisterBuiltinFunctions(builder->GetRegistry(), options);
   if (!register_status.ok()) {
-    throw EnvoyException(
+    throw CelException(
         absl::StrCat("failed to register built-in functions: ", register_status.message()));
   }
   return builder;
@@ -59,7 +59,7 @@ ExpressionPtr createExpression(Builder& builder, const google::api::expr::v1alph
   google::api::expr::v1alpha1::SourceInfo source_info;
   auto cel_expression_status = builder.CreateExpression(&expr, &source_info);
   if (!cel_expression_status.ok()) {
-    throw EnvoyException(
+    throw CelException(
         absl::StrCat("failed to create an expression: ", cel_expression_status.status().message()));
   }
   return std::move(cel_expression_status.ValueOrDie());
diff --git a/source/extensions/filters/common/expr/evaluator.h b/source/extensions/filters/common/expr/evaluator.h
index 39812c60a343..16cdada5953d 100644
--- a/source/extensions/filters/common/expr/evaluator.h
+++ b/source/extensions/filters/common/expr/evaluator.h
@@ -52,6 +52,12 @@ absl::optional<CelValue> evaluate(const Expression& expr, Protobuf::Arena* arena
 bool matches(const Expression& expr, const StreamInfo::StreamInfo& info,
              const Http::HeaderMap& headers);
 
+// Thrown when there is a CEL library error.
+class CelException : public EnvoyException {
+public:
+  CelException(const std::string& what) : EnvoyException(what) {}
+};
+
 } // namespace Expr
 } // namespace Common
 } // namespace Filters
diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc
index 271d54c29523..6a34606da2b2 100644
--- a/source/extensions/filters/common/rbac/matchers.cc
+++ b/source/extensions/filters/common/rbac/matchers.cc
@@ -140,22 +140,23 @@ bool AuthenticatedMatcher::matches(const Network::Connection& connection,
     return true;
   }
 
-  const auto uriSans = ssl->uriSanPeerCertificate();
-  std::string principal;
   // If set, each URI SAN, then each DNS SAN, and finally the subject field is checked
   // against the matcher, in that order; the first match wins.
-  if (!uriSans.empty()) {
-    principal = uriSans[0];
-  } else {
-    const auto dnsSans = ssl->dnsSansPeerCertificate();
-    if (!dnsSans.empty()) {
-      principal = dnsSans[0];
-    } else {
-      principal = ssl->subjectPeerCertificate();
+  if (!ssl->uriSanPeerCertificate().empty()) {
+    for (const std::string& uri : ssl->uriSanPeerCertificate()) {
+      if (matcher_.value().match(uri)) {
+        return true;
+      }
     }
   }
-
-  return matcher_.value().match(principal);
+  if (!ssl->dnsSansPeerCertificate().empty()) {
+    for (const std::string& dns : ssl->dnsSansPeerCertificate()) {
+      if (matcher_.value().match(dns)) {
+        return true;
+      }
+    }
+  }
+  return matcher_.value().match(ssl->subjectPeerCertificate());
 }
 
 bool MetadataMatcher::matches(const Network::Connection&, const Envoy::Http::HeaderMap&,
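
Restated outside the Envoy types, the rewritten matcher now checks every peer URI SAN, then every DNS SAN, and finally the certificate subject, returning on the first match. A minimal standalone sketch of that ordering, with std::function standing in for the configured matcher (illustration only, not the filter code):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Returns true as soon as any URI SAN, any DNS SAN, or (lastly) the subject matches.
bool principalMatches(const std::vector<std::string>& uri_sans,
                      const std::vector<std::string>& dns_sans, const std::string& subject,
                      const std::function<bool(const std::string&)>& matcher) {
  for (const std::string& uri : uri_sans) {
    if (matcher(uri)) {
      return true;
    }
  }
  for (const std::string& dns : dns_sans) {
    if (matcher(dns)) {
      return true;
    }
  }
  return matcher(subject);
}

int main() {
  const auto equals = [](std::string want) {
    return [want = std::move(want)](const std::string& got) { return got == want; };
  };
  // Matches via the DNS SAN even though the URI SAN does not match.
  std::cout << principalMatches({"spiffe://cluster/frontend"}, {"frontend.example.com"},
                                "CN=frontend", equals("frontend.example.com"))
            << "\n";
  return 0;
}
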
diff --git a/source/extensions/filters/common/rbac/utility.h b/source/extensions/filters/common/rbac/utility.h
index 684c4204ecb6..4809e5fcb225 100644
--- a/source/extensions/filters/common/rbac/utility.h
+++ b/source/extensions/filters/common/rbac/utility.h
@@ -27,13 +27,11 @@ using DynamicMetadataKeysSingleton = ConstSingleton<DynamicMetadataKeys>;
 /**
  * All stats for the RBAC filter. @see stats_macros.h
  */
-// clang-format off
 #define ALL_RBAC_FILTER_STATS(COUNTER)                                                             \
   COUNTER(allowed)                                                                                 \
   COUNTER(denied)                                                                                  \
   COUNTER(shadow_allowed)                                                                          \
   COUNTER(shadow_denied)
-// clang-format on
 
 /**
  * Wrapper struct for RBAC filter stats. @see stats_macros.h
diff --git a/source/extensions/filters/http/cors/cors_filter.h b/source/extensions/filters/http/cors/cors_filter.h
index 51b13efeeb28..042c1fb10ced 100644
--- a/source/extensions/filters/http/cors/cors_filter.h
+++ b/source/extensions/filters/http/cors/cors_filter.h
@@ -14,11 +14,9 @@ namespace Cors {
 /**
  * All CORS filter stats. @see stats_macros.h
  */
-// clang-format off
-#define ALL_CORS_STATS(COUNTER)\
-  COUNTER(origin_valid)        \
-  COUNTER(origin_invalid)      \
-// clang-format on
+#define ALL_CORS_STATS(COUNTER)                                                                    \
+  COUNTER(origin_valid)                                                                            \
+  COUNTER(origin_invalid)
 
 /**
  * Struct definition for CORS stats. @see stats_macros.h
diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc
index bbc9526df086..214f11b87ce7 100644
--- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc
+++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc
@@ -74,7 +74,7 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::HeaderMap& headers, b
   if (config != nullptr) {
     const auto& host_rewrite = config->hostRewrite();
     if (!host_rewrite.empty()) {
-      headers.Host()->value(host_rewrite);
+      headers.setHost(host_rewrite);
     }
 
     const auto& host_rewrite_header = config->hostRewriteHeader();
@@ -82,7 +82,7 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::HeaderMap& headers, b
       const auto* header = headers.get(host_rewrite_header);
       if (header != nullptr) {
         const auto& header_value = header->value().getStringView();
-        headers.Host()->value(header_value);
+        headers.setHost(header_value);
       }
     }
   }
diff --git a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc
index d9dfaa8a6ecc..1ca8b604d160 100644
--- a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc
+++ b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc
@@ -21,7 +21,6 @@ namespace Dynamo {
 const Http::LowerCaseString RequestParser::X_AMZ_TARGET("X-AMZ-TARGET");
 
 // clang-format off
-
 const std::vector<std::string> RequestParser::SINGLE_TABLE_OPERATIONS{
     "CreateTable",
     "DeleteItem",
@@ -51,7 +50,6 @@ const std::vector<std::string> RequestParser::SUPPORTED_ERROR_TYPES{
     "TransactionInProgressException",
     "UnrecognizedClientException",
     "ValidationException"};
-
 // clang-format on
 
 const std::vector<std::string> RequestParser::BATCH_OPERATIONS{"BatchGetItem", "BatchWriteItem"};
diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc
index ffb336c12f2c..61414ae8d2e1 100644
--- a/source/extensions/filters/http/ext_authz/ext_authz.cc
+++ b/source/extensions/filters/http/ext_authz/ext_authz.cc
@@ -169,19 +169,14 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) {
       callbacks_->clearRouteCache();
     }
     for (const auto& header : response->headers_to_add) {
-      ENVOY_STREAM_LOG(trace, " '{}':'{}'", *callbacks_, header.first.get(), header.second);
-      Http::HeaderEntry* header_to_modify = request_headers_->get(header.first);
-      if (header_to_modify) {
-        header_to_modify->value(header.second.c_str(), header.second.size());
-      } else {
-        request_headers_->addCopy(header.first, header.second);
-      }
+      ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second);
+      request_headers_->setCopy(header.first, header.second);
     }
     for (const auto& header : response->headers_to_append) {
-      Http::HeaderEntry* header_to_modify = request_headers_->get(header.first);
+      const Http::HeaderEntry* header_to_modify = request_headers_->get(header.first);
       if (header_to_modify) {
-        ENVOY_STREAM_LOG(trace, " '{}':'{}'", *callbacks_, header.first.get(), header.second);
-        Http::HeaderMapImpl::appendToHeader(header_to_modify->value(), header.second);
+        ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second);
+        request_headers_->appendCopy(header.first, header.second);
       }
     }
     if (cluster_) {
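
The rewritten loops rely on two distinct HeaderMap mutation semantics: headers_to_add now goes through setCopy(), which overwrites an existing value or inserts the header, while headers_to_append only appends when the header is already present. A standalone illustration of that difference, with a plain std::map standing in for Http::HeaderMap and a comma delimiter chosen for the example:

#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> headers{{"x-custom", "a"}};

  // headers_to_add path: overwrite the value, or insert the header if absent.
  headers["x-custom"] = "b"; // setCopy()-like behavior

  // headers_to_append path: append only when the header already exists.
  const auto it = headers.find("x-custom");
  if (it != headers.end()) {
    it->second += ",c"; // appendCopy()-like behavior
  }

  std::cout << headers["x-custom"] << "\n"; // prints "b,c"
  return 0;
}
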
diff --git a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc
index 1bf8bc4a2c2f..ab947cb9940e 100644
--- a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc
+++ b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc
@@ -75,7 +75,7 @@ Http::FilterTrailersStatus Http1BridgeFilter::encodeTrailers(Http::HeaderMap& tr
       uint64_t grpc_status_code;
       if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) ||
           grpc_status_code != 0) {
-        response_headers_->Status()->value(enumToInt(Http::Code::ServiceUnavailable));
+        response_headers_->setStatus(enumToInt(Http::Code::ServiceUnavailable));
       }
       response_headers_->setGrpcStatus(grpc_status_header->value().getStringView());
     }
diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc
index ffec82888466..b669d6741ba7 100644
--- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc
+++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc
@@ -57,7 +57,7 @@ void adjustContentLength(Http::HeaderMap& headers,
   if (length_header != nullptr) {
     uint64_t length;
     if (absl::SimpleAtoi(length_header->value().getStringView(), &length)) {
-      length_header->value(adjustment(length));
+      headers.setContentLength(adjustment(length));
     }
   }
 }
@@ -137,7 +137,7 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::HeaderMap& headers, bool)
       headers.setStatus(enumToInt(Http::Code::OK));
 
       if (content_type != nullptr) {
-        content_type->value(content_type_);
+        headers.setContentType(content_type_);
       }
 
       decoder_callbacks_->streamInfo().setResponseCodeDetails(
@@ -146,7 +146,7 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::HeaderMap& headers, bool)
     }
 
     // Restore the content-type to match what the downstream sent.
-    content_type->value(content_type_);
+    headers.setContentType(content_type_);
 
     if (withhold_grpc_frames_) {
       // Adjust content-length to account for the frame header that's added.
diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc
index 345b0bf4e4ee..a89960942d60 100644
--- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc
+++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc
@@ -487,9 +487,9 @@ Http::FilterTrailersStatus JsonTranscoderFilter::encodeTrailers(Http::HeaderMap&
   bool is_trailers_only_response = response_headers_ == &trailers;
 
   if (!grpc_status || grpc_status.value() == Grpc::Status::WellKnownGrpcStatus::InvalidCode) {
-    response_headers_->Status()->value(enumToInt(Http::Code::ServiceUnavailable));
+    response_headers_->setStatus(enumToInt(Http::Code::ServiceUnavailable));
   } else {
-    response_headers_->Status()->value(Grpc::Utility::grpcToHttpStatus(grpc_status.value()));
+    response_headers_->setStatus(Grpc::Utility::grpcToHttpStatus(grpc_status.value()));
     if (!is_trailers_only_response) {
       response_headers_->setGrpcStatus(grpc_status.value());
     }
@@ -594,7 +594,7 @@ bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_
     return false;
   }
 
-  response_headers_->Status()->value(Grpc::Utility::grpcToHttpStatus(grpc_status));
+  response_headers_->setStatus(Grpc::Utility::grpcToHttpStatus(grpc_status));
 
   bool is_trailers_only_response = response_headers_ == &trailers;
   if (is_trailers_only_response) {
diff --git a/source/extensions/filters/http/gzip/gzip_filter.h b/source/extensions/filters/http/gzip/gzip_filter.h
index 99b3eaf3336e..8f59ac0a6493 100644
--- a/source/extensions/filters/http/gzip/gzip_filter.h
+++ b/source/extensions/filters/http/gzip/gzip_filter.h
@@ -28,20 +28,18 @@ namespace Gzip {
  * the user can measure the memory performance of the
  * compression.
  */
-// clang-format off
-#define ALL_GZIP_STATS(COUNTER)    \
-  COUNTER(compressed)              \
-  COUNTER(not_compressed)          \
-  COUNTER(no_accept_header)        \
-  COUNTER(header_identity)         \
-  COUNTER(header_gzip)             \
-  COUNTER(header_wildcard)         \
-  COUNTER(header_not_valid)        \
-  COUNTER(total_uncompressed_bytes)\
-  COUNTER(total_compressed_bytes)  \
-  COUNTER(content_length_too_small)\
-  COUNTER(not_compressed_etag)     \
-// clang-format on
+#define ALL_GZIP_STATS(COUNTER)                                                                    \
+  COUNTER(compressed)                                                                              \
+  COUNTER(not_compressed)                                                                          \
+  COUNTER(no_accept_header)                                                                        \
+  COUNTER(header_identity)                                                                         \
+  COUNTER(header_gzip)                                                                             \
+  COUNTER(header_wildcard)                                                                         \
+  COUNTER(header_not_valid)                                                                        \
+  COUNTER(total_uncompressed_bytes)                                                                \
+  COUNTER(total_compressed_bytes)                                                                  \
+  COUNTER(content_length_too_small)                                                                \
+  COUNTER(not_compressed_etag)
 
 /**
  * Struct definition for gzip stats. @see stats_macros.h
@@ -57,8 +55,7 @@ class GzipFilterConfig {
 
 public:
   GzipFilterConfig(const envoy::config::filter::http::gzip::v2::Gzip& gzip,
-                   const std::string& stats_prefix,
-                   Stats::Scope& scope, Runtime::Loader& runtime);
+                   const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime);
 
   Compressor::ZlibCompressorImpl::CompressionLevel compressionLevel() const {
     return compression_level_;
diff --git a/source/extensions/filters/http/health_check/health_check.cc b/source/extensions/filters/http/health_check/health_check.cc
index f22f39d1c8b4..e66f82f8fe7b 100644
--- a/source/extensions/filters/http/health_check/health_check.cc
+++ b/source/extensions/filters/http/health_check/health_check.cc
@@ -178,7 +178,7 @@ void HealthCheckFilter::onComplete() {
       final_status, "",
       [degraded](auto& headers) {
         if (degraded) {
-          headers.insertEnvoyDegraded();
+          headers.setEnvoyDegraded("");
         }
       },
       absl::nullopt, *details);
diff --git a/source/extensions/filters/http/ip_tagging/BUILD b/source/extensions/filters/http/ip_tagging/BUILD
index aab10c1124bd..d6baea069e27 100644
--- a/source/extensions/filters/http/ip_tagging/BUILD
+++ b/source/extensions/filters/http/ip_tagging/BUILD
@@ -24,7 +24,7 @@ envoy_cc_library(
         "//source/common/http:headers_lib",
         "//source/common/network:lc_trie_lib",
         "//source/common/stats:symbol_table_lib",
-        "@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto",
+        "@envoy_api//envoy/config/filter/http/ip_tagging/v3alpha:pkg_cc_proto",
     ],
 )
 
@@ -39,6 +39,6 @@ envoy_cc_extension(
         "//source/extensions/filters/http:well_known_names",
         "//source/extensions/filters/http/common:factory_base_lib",
         "//source/extensions/filters/http/ip_tagging:ip_tagging_filter_lib",
-        "@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto",
+        "@envoy_api//envoy/config/filter/http/ip_tagging/v3alpha:pkg_cc_proto",
     ],
 )
diff --git a/source/extensions/filters/http/ip_tagging/config.cc b/source/extensions/filters/http/ip_tagging/config.cc
index b8f4b51f2025..97830b2a5f64 100644
--- a/source/extensions/filters/http/ip_tagging/config.cc
+++ b/source/extensions/filters/http/ip_tagging/config.cc
@@ -1,6 +1,6 @@
 #include "extensions/filters/http/ip_tagging/config.h"
 
-#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.validate.h"
+#include "envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.pb.validate.h"
 #include "envoy/registry/registry.h"
 
 #include "common/protobuf/utility.h"
@@ -13,7 +13,7 @@ namespace HttpFilters {
 namespace IpTagging {
 
 Http::FilterFactoryCb IpTaggingFilterFactory::createFilterFactoryFromProtoTyped(
-    const envoy::config::filter::http::ip_tagging::v2::IPTagging& proto_config,
+    const envoy::config::filter::http::ip_tagging::v3alpha::IPTagging& proto_config,
     const std::string& stat_prefix, Server::Configuration::FactoryContext& context) {
 
   IpTaggingFilterConfigSharedPtr config(
diff --git a/source/extensions/filters/http/ip_tagging/config.h b/source/extensions/filters/http/ip_tagging/config.h
index 361d4acfe6a7..24cbe630a827 100644
--- a/source/extensions/filters/http/ip_tagging/config.h
+++ b/source/extensions/filters/http/ip_tagging/config.h
@@ -1,7 +1,7 @@
 #pragma once
 
-#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.h"
-#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.validate.h"
+#include "envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.pb.h"
+#include "envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.pb.validate.h"
 
 #include "extensions/filters/http/common/factory_base.h"
 #include "extensions/filters/http/well_known_names.h"
@@ -15,13 +15,13 @@ namespace IpTagging {
  * Config registration for the router filter. @see NamedHttpFilterConfigFactory.
  */
 class IpTaggingFilterFactory
-    : public Common::FactoryBase<envoy::config::filter::http::ip_tagging::v2::IPTagging> {
+    : public Common::FactoryBase<envoy::config::filter::http::ip_tagging::v3alpha::IPTagging> {
 public:
   IpTaggingFilterFactory() : FactoryBase(HttpFilterNames::get().IpTagging) {}
 
 private:
   Http::FilterFactoryCb createFilterFactoryFromProtoTyped(
-      const envoy::config::filter::http::ip_tagging::v2::IPTagging& proto_config,
+      const envoy::config::filter::http::ip_tagging::v3alpha::IPTagging& proto_config,
       const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override;
 };
 
diff --git a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc
index fe3648c082de..5c593083ac13 100644
--- a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc
+++ b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc
@@ -11,7 +11,7 @@ namespace HttpFilters {
 namespace IpTagging {
 
 IpTaggingFilterConfig::IpTaggingFilterConfig(
-    const envoy::config::filter::http::ip_tagging::v2::IPTagging& config,
+    const envoy::config::filter::http::ip_tagging::v3alpha::IPTagging& config,
     const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime)
     : request_type_(requestTypeEnum(config.request_type())), scope_(scope), runtime_(runtime),
       stat_name_set_(scope.symbolTable().makeSet("IpTagging")),
@@ -33,7 +33,7 @@ IpTaggingFilterConfig::IpTaggingFilterConfig(
   for (const auto& ip_tag : config.ip_tags()) {
     std::vector<Network::Address::CidrRange> cidr_set;
     cidr_set.reserve(ip_tag.ip_list().size());
-    for (const envoy::api::v2::core::CidrRange& entry : ip_tag.ip_list()) {
+    for (const envoy::api::v3alpha::core::CidrRange& entry : ip_tag.ip_list()) {
 
       // Currently, CidrRange::create doesn't guarantee that the CidrRanges are valid.
       Network::Address::CidrRange cidr_entry = Network::Address::CidrRange::create(entry);
@@ -78,10 +78,9 @@ Http::FilterHeadersStatus IpTaggingFilter::decodeHeaders(Http::HeaderMap& header
 
   if (!tags.empty()) {
     const std::string tags_join = absl::StrJoin(tags, ",");
-    Http::HeaderMapImpl::appendToHeader(headers.insertEnvoyIpTags().value(), tags_join);
+    headers.appendEnvoyIpTags(tags_join, ",");
 
     // We must clear the route cache or else we can't match on x-envoy-ip-tags.
-    // TODO(rgs): this should either be configurable, because it's expensive, or optimized.
     callbacks_->clearRouteCache();
 
     // For a large number(ex > 1000) of tags, stats cardinality will be an issue.
diff --git a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h
index 95ab46909682..5c000665e8a4 100644
--- a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h
+++ b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h
@@ -7,7 +7,7 @@
 #include <vector>
 
 #include "envoy/common/exception.h"
-#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.h"
+#include "envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.pb.h"
 #include "envoy/http/filter.h"
 #include "envoy/runtime/runtime.h"
 #include "envoy/stats/scope.h"
@@ -31,7 +31,7 @@ enum class FilterRequestType { INTERNAL, EXTERNAL, BOTH };
  */
 class IpTaggingFilterConfig {
 public:
-  IpTaggingFilterConfig(const envoy::config::filter::http::ip_tagging::v2::IPTagging& config,
+  IpTaggingFilterConfig(const envoy::config::filter::http::ip_tagging::v3alpha::IPTagging& config,
                         const std::string& stat_prefix, Stats::Scope& scope,
                         Runtime::Loader& runtime);
 
@@ -46,13 +46,13 @@ class IpTaggingFilterConfig {
 
 private:
   static FilterRequestType requestTypeEnum(
-      envoy::config::filter::http::ip_tagging::v2::IPTagging::RequestType request_type) {
+      envoy::config::filter::http::ip_tagging::v3alpha::IPTagging::RequestType request_type) {
     switch (request_type) {
-    case envoy::config::filter::http::ip_tagging::v2::IPTagging_RequestType_BOTH:
+    case envoy::config::filter::http::ip_tagging::v3alpha::IPTagging_RequestType_BOTH:
       return FilterRequestType::BOTH;
-    case envoy::config::filter::http::ip_tagging::v2::IPTagging_RequestType_INTERNAL:
+    case envoy::config::filter::http::ip_tagging::v3alpha::IPTagging_RequestType_INTERNAL:
       return FilterRequestType::INTERNAL;
-    case envoy::config::filter::http::ip_tagging::v2::IPTagging_RequestType_EXTERNAL:
+    case envoy::config::filter::http::ip_tagging::v3alpha::IPTagging_RequestType_EXTERNAL:
       return FilterRequestType::EXTERNAL;
     default:
       NOT_REACHED_GCOVR_EXCL_LINE;
diff --git a/source/extensions/filters/http/jwt_authn/filter_config.h b/source/extensions/filters/http/jwt_authn/filter_config.h
index 86a52f8d4e6c..aad41235f77b 100644
--- a/source/extensions/filters/http/jwt_authn/filter_config.h
+++ b/source/extensions/filters/http/jwt_authn/filter_config.h
@@ -42,13 +42,10 @@ class ThreadLocalCache : public ThreadLocal::ThreadLocalObject {
 /**
  * All stats for the Jwt Authn filter. @see stats_macros.h
  */
-
-// clang-format off
 #define ALL_JWT_AUTHN_FILTER_STATS(COUNTER)                                                        \
   COUNTER(allowed)                                                                                 \
   COUNTER(cors_preflight_bypassed)                                                                 \
   COUNTER(denied)
-// clang-format on
 
 /**
  * Wrapper struct for jwt_authn filter stats. @see stats_macros.h
diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc
index 4874a716cec9..a772f3c1edfe 100644
--- a/source/extensions/filters/http/lua/wrappers.cc
+++ b/source/extensions/filters/http/lua/wrappers.cc
@@ -79,12 +79,7 @@ int HeaderMapWrapper::luaReplace(lua_State* state) {
   const char* value = luaL_checkstring(state, 3);
   const Http::LowerCaseString lower_key(key);
 
-  Http::HeaderEntry* entry = headers_.get(lower_key);
-  if (entry != nullptr) {
-    entry->value(value, strlen(value));
-  } else {
-    headers_.addCopy(lower_key, value);
-  }
+  headers_.setCopy(lower_key, value);
 
   return 0;
 }
diff --git a/source/extensions/filters/http/rbac/rbac_filter.cc b/source/extensions/filters/http/rbac/rbac_filter.cc
index f184234f4d4c..a750dd47cc47 100644
--- a/source/extensions/filters/http/rbac/rbac_filter.cc
+++ b/source/extensions/filters/http/rbac/rbac_filter.cc
@@ -73,10 +73,6 @@ Http::FilterHeadersStatus RoleBasedAccessControlFilter::decodeHeaders(Http::Head
   if (shadow_engine != nullptr) {
     std::string shadow_resp_code =
         Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultAllowed;
-    // Refresh headers byte size before checking if allowed.
-    // TODO(asraa): Remove this when entries in HeaderMap can no longer be modified by reference and
-    // HeaderMap holds an accurate internal byte size count.
-    headers.refreshByteSize();
     if (shadow_engine->allowed(*callbacks_->connection(), headers, callbacks_->streamInfo(),
                                &effective_policy_id)) {
       ENVOY_LOG(debug, "shadow allowed");
@@ -106,10 +102,6 @@ Http::FilterHeadersStatus RoleBasedAccessControlFilter::decodeHeaders(Http::Head
   const auto engine =
       config_->engine(callbacks_->route(), Filters::Common::RBAC::EnforcementMode::Enforced);
   if (engine != nullptr) {
-    // Refresh headers byte size before checking if allowed.
-    // TODO(asraa): Remove this when entries in HeaderMap can no longer be modified by reference and
-    // HeaderMap holds an accurate internal byte size count.
-    headers.refreshByteSize();
     if (engine->allowed(*callbacks_->connection(), headers, callbacks_->streamInfo(), nullptr)) {
       ENVOY_LOG(debug, "enforced allowed");
       config_->stats().allowed_.inc();
diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h
index fc76c61ac44e..b420438ac55f 100644
--- a/source/extensions/filters/network/common/redis/client.h
+++ b/source/extensions/filters/network/common/redis/client.h
@@ -30,9 +30,9 @@ class PoolRequest {
 /**
  * Outbound request callbacks.
  */
-class PoolCallbacks {
+class ClientCallbacks {
 public:
-  virtual ~PoolCallbacks() = default;
+  virtual ~ClientCallbacks() = default;
 
   /**
    * Called when a pipelined response is received.
@@ -48,21 +48,26 @@ class PoolCallbacks {
   /**
    * Called when a MOVED or ASK redirection error is received, and the request must be retried.
    * @param value supplies the MOVED error response
+   * @param host_address supplies the redirection host address and port
+   * @param ask_redirection indicates if this is an ASK redirection
    * @return bool true if the request is successfully redirected, false otherwise
    */
-  virtual bool onRedirection(const Common::Redis::RespValue& value) PURE;
+  virtual bool onRedirection(RespValuePtr&& value, const std::string& host_address,
+                             bool ask_redirection) PURE;
 };
 
 /**
  * DoNothingPoolCallbacks is used for internally generated commands whose response is
  * transparently filtered, and redirection never occurs (e.g., "asking", "auth", etc.).
  */
-class DoNothingPoolCallbacks : public PoolCallbacks {
+class DoNothingPoolCallbacks : public ClientCallbacks {
 public:
-  // PoolCallbacks
+  // ClientCallbacks
   void onResponse(Common::Redis::RespValuePtr&&) override {}
   void onFailure() override {}
-  bool onRedirection(const Common::Redis::RespValue&) override { return false; }
+  bool onRedirection(Common::Redis::RespValuePtr&&, const std::string&, bool) override {
+    return false;
+  }
 };
 
 /**
@@ -95,7 +100,7 @@ class Client : public Event::DeferredDeletable {
    * @return PoolRequest* a handle to the active request or nullptr if the request could not be made
    *         for some reason.
    */
-  virtual PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) PURE;
+  virtual PoolRequest* makeRequest(const RespValue& request, ClientCallbacks& callbacks) PURE;
 
   /**
    * Initialize the connection. Issue the auth command and readonly command as needed.
diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc
index 6e5936476d02..cf1d8b6c98b5 100644
--- a/source/extensions/filters/network/common/redis/client_impl.cc
+++ b/source/extensions/filters/network/common/redis/client_impl.cc
@@ -7,6 +7,8 @@ namespace Common {
 namespace Redis {
 namespace Client {
 namespace {
+// null_pool_callbacks is used for requests that must be filtered and not redirected such as
+// "asking".
 Common::Redis::Client::DoNothingPoolCallbacks null_pool_callbacks;
 } // namespace
 
@@ -97,7 +99,7 @@ void ClientImpl::flushBufferAndResetTimer() {
   connection_->write(encoder_buffer_, false);
 }
 
-PoolRequest* ClientImpl::makeRequest(const RespValue& request, PoolCallbacks& callbacks) {
+PoolRequest* ClientImpl::makeRequest(const RespValue& request, ClientCallbacks& callbacks) {
   ASSERT(connection_->state() == Network::Connection::State::Open);
 
   const bool empty_buffer = encoder_buffer_.length() == 0;
@@ -212,7 +214,7 @@ void ClientImpl::onRespValue(RespValuePtr&& value) {
   }
   request.aggregate_request_timer_->complete();
 
-  PoolCallbacks& callbacks = request.callbacks_;
+  ClientCallbacks& callbacks = request.callbacks_;
 
   // We need to ensure the request is popped before calling the callback, since the callback might
   // result in closing the connection.
@@ -223,9 +225,13 @@ void ClientImpl::onRespValue(RespValuePtr&& value) {
     std::vector<absl::string_view> err = StringUtil::splitToken(value->asString(), " ", false);
     bool redirected = false;
     if (err.size() == 3) {
+      // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash
+      // key slot (err[1]), and the IP address and TCP port separated by a colon (err[2]).
       if (err[0] == RedirectionResponse::get().MOVED || err[0] == RedirectionResponse::get().ASK) {
-        redirected = callbacks.onRedirection(*value);
-        if (redirected) {
+        redirected = true;
+        bool redirect_succeeded = callbacks.onRedirection(std::move(value), std::string(err[2]),
+                                                          err[0] == RedirectionResponse::get().ASK);
+        if (redirect_succeeded) {
           host_->cluster().stats().upstream_internal_redirect_succeeded_total_.inc();
         } else {
           host_->cluster().stats().upstream_internal_redirect_failed_total_.inc();
@@ -251,7 +257,7 @@ void ClientImpl::onRespValue(RespValuePtr&& value) {
   putOutlierEvent(Upstream::Outlier::Result::ExtOriginRequestSuccess);
 }
 
-ClientImpl::PendingRequest::PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks,
+ClientImpl::PendingRequest::PendingRequest(ClientImpl& parent, ClientCallbacks& callbacks,
                                            Stats::StatName command)
     : parent_(parent), callbacks_(callbacks), command_{command},
       aggregate_request_timer_(parent_.redis_command_stats_->createAggregateTimer(
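
For context on the err[0]/err[1]/err[2] comment above: a Redis redirection error such as "MOVED 3999 127.0.0.1:6381" decomposes into the error type, the hash slot, and the host:port that onRedirection() is handed. A standalone sketch of that split, using absl::StrSplit in place of StringUtil::splitToken:

#include <iostream>
#include <string>
#include <vector>

#include "absl/strings/str_split.h"

int main() {
  const std::string error = "MOVED 3999 127.0.0.1:6381";
  const std::vector<std::string> err = absl::StrSplit(error, ' ', absl::SkipEmpty());
  if (err.size() == 3 && (err[0] == "MOVED" || err[0] == "ASK")) {
    std::cout << "type=" << err[0]             // MOVED or ASK
              << " slot=" << err[1]            // hash key slot
              << " target=" << err[2] << "\n"; // host:port passed to onRedirection()
  }
  return 0;
}
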
diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h
index 1b6a4a978c28..a0609b3e8b04 100644
--- a/source/extensions/filters/network/common/redis/client_impl.h
+++ b/source/extensions/filters/network/common/redis/client_impl.h
@@ -83,7 +83,7 @@ class ClientImpl : public Client, public DecoderCallbacks, public Network::Conne
     connection_->addConnectionCallbacks(callbacks);
   }
   void close() override;
-  PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) override;
+  PoolRequest* makeRequest(const RespValue& request, ClientCallbacks& callbacks) override;
   bool active() override { return !pending_requests_.empty(); }
   void flushBufferAndResetTimer();
   void initialize(const std::string& auth_password) override;
@@ -104,14 +104,14 @@ class ClientImpl : public Client, public DecoderCallbacks, public Network::Conne
   };
 
   struct PendingRequest : public PoolRequest {
-    PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks, Stats::StatName stat_name);
+    PendingRequest(ClientImpl& parent, ClientCallbacks& callbacks, Stats::StatName stat_name);
     ~PendingRequest() override;
 
     // PoolRequest
     void cancel() override;
 
     ClientImpl& parent_;
-    PoolCallbacks& callbacks_;
+    ClientCallbacks& callbacks_;
     Stats::StatName command_;
     bool canceled_{};
     Stats::TimespanPtr aggregate_request_timer_;
diff --git a/source/extensions/filters/network/common/redis/utility.cc b/source/extensions/filters/network/common/redis/utility.cc
index d3dbd24e1bc8..c652addb3e12 100644
--- a/source/extensions/filters/network/common/redis/utility.cc
+++ b/source/extensions/filters/network/common/redis/utility.cc
@@ -19,6 +19,13 @@ AuthRequest::AuthRequest(const std::string& password) {
   asArray().swap(values);
 }
 
+RespValuePtr makeError(const std::string& error) {
+  Common::Redis::RespValuePtr response(new RespValue());
+  response->type(Common::Redis::RespType::Error);
+  response->asString() = error;
+  return response;
+}
+
 ReadOnlyRequest::ReadOnlyRequest() {
   std::vector<RespValue> values(1);
   values[0].type(RespType::BulkString);
diff --git a/source/extensions/filters/network/common/redis/utility.h b/source/extensions/filters/network/common/redis/utility.h
index 925fe2ef01ae..b2e77b8e94ab 100644
--- a/source/extensions/filters/network/common/redis/utility.h
+++ b/source/extensions/filters/network/common/redis/utility.h
@@ -16,6 +16,8 @@ class AuthRequest : public Redis::RespValue {
   AuthRequest(const std::string& password);
 };
 
+RespValuePtr makeError(const std::string& error);
+
 class ReadOnlyRequest : public Redis::RespValue {
 public:
   ReadOnlyRequest();
diff --git a/source/extensions/filters/network/dubbo_proxy/config.cc b/source/extensions/filters/network/dubbo_proxy/config.cc
index bc686f42dc1b..249eaa8933f1 100644
--- a/source/extensions/filters/network/dubbo_proxy/config.cc
+++ b/source/extensions/filters/network/dubbo_proxy/config.cc
@@ -144,7 +144,7 @@ void ConfigImpl::registerFilter(const DubboFilterConfig& proto_config) {
       Envoy::Config::Utility::getAndCheckFactory<DubboFilters::NamedDubboFilterConfigFactory>(
           string_name);
   ProtobufTypes::MessagePtr message = factory.createEmptyConfigProto();
-  Envoy::Config::Utility::translateOpaqueConfig(proto_config.config(),
+  Envoy::Config::Utility::translateOpaqueConfig(string_name, proto_config.config(),
                                                 ProtobufWkt::Struct::default_instance(),
                                                 context_.messageValidationVisitor(), *message);
   DubboFilters::FilterFactoryCb callback =
diff --git a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc
index 35850cfee78b..0fcbabd4765e 100644
--- a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc
+++ b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc
@@ -167,7 +167,7 @@ RouteConstSharedPtr MethodRouteEntryImpl::matches(const MessageMetadata& metadat
   return clusterEntry(random_value);
 }
 
-SignleRouteMatcherImpl::SignleRouteMatcherImpl(const RouteConfig& config,
+SingleRouteMatcherImpl::SingleRouteMatcherImpl(const RouteConfig& config,
                                                Server::Configuration::FactoryContext&)
     : service_name_(config.interface()), group_(config.group()), version_(config.version()) {
   using envoy::config::filter::network::dubbo_proxy::v2alpha1::RouteMatch;
@@ -178,7 +178,7 @@ SignleRouteMatcherImpl::SignleRouteMatcherImpl(const RouteConfig& config,
   ENVOY_LOG(debug, "dubbo route matcher: routes list size {}", routes_.size());
 }
 
-RouteConstSharedPtr SignleRouteMatcherImpl::route(const MessageMetadata& metadata,
+RouteConstSharedPtr SingleRouteMatcherImpl::route(const MessageMetadata& metadata,
                                                   uint64_t random_value) const {
   ASSERT(metadata.hasInvocationInfo());
   const auto& invocation = metadata.invocation_info();
@@ -205,7 +205,7 @@ MultiRouteMatcher::MultiRouteMatcher(const RouteConfigList& route_config_list,
                                      Server::Configuration::FactoryContext& context) {
   for (const auto& route_config : route_config_list) {
     route_matcher_list_.emplace_back(
-        std::make_unique<SignleRouteMatcherImpl>(route_config, context));
+        std::make_unique<SingleRouteMatcherImpl>(route_config, context));
   }
   ENVOY_LOG(debug, "route matcher list size {}", route_matcher_list_.size());
 }
diff --git a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h
index 9ce491f0aec5..5414f511f378 100644
--- a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h
+++ b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h
@@ -127,10 +127,10 @@ class MethodRouteEntryImpl : public RouteEntryImplBase {
   std::shared_ptr<ParameterRouteEntryImpl> parameter_route_;
 };
 
-class SignleRouteMatcherImpl : public RouteMatcher, public Logger::Loggable<Logger::Id::dubbo> {
+class SingleRouteMatcherImpl : public RouteMatcher, public Logger::Loggable<Logger::Id::dubbo> {
 public:
   using RouteConfig = envoy::config::filter::network::dubbo_proxy::v2alpha1::RouteConfiguration;
-  SignleRouteMatcherImpl(const RouteConfig& config, Server::Configuration::FactoryContext& context);
+  SingleRouteMatcherImpl(const RouteConfig& config, Server::Configuration::FactoryContext& context);
 
   RouteConstSharedPtr route(const MessageMetadata& metadata, uint64_t random_value) const override;
 
diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD
index 3e0cf154ee1a..7707a7ef46a9 100644
--- a/source/extensions/filters/network/http_connection_manager/BUILD
+++ b/source/extensions/filters/network/http_connection_manager/BUILD
@@ -39,6 +39,7 @@ envoy_cc_extension(
         "//source/common/router:rds_lib",
         "//source/common/router:scoped_rds_lib",
         "//source/common/runtime:runtime_lib",
+        "//source/common/tracing:http_tracer_lib",
         "//source/extensions/filters/network:well_known_names",
         "//source/extensions/filters/network/common:factory_base_lib",
         "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto",
diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc
index e491d36be307..6ac9f27a5e39 100644
--- a/source/extensions/filters/network/http_connection_manager/config.cc
+++ b/source/extensions/filters/network/http_connection_manager/config.cc
@@ -18,11 +18,14 @@
 #include "common/http/default_server_string.h"
 #include "common/http/http1/codec_impl.h"
 #include "common/http/http2/codec_impl.h"
+#include "common/http/http3/quic_codec_factory.h"
+#include "common/http/http3/well_known_names.h"
 #include "common/http/utility.h"
 #include "common/protobuf/utility.h"
 #include "common/router/rds_impl.h"
 #include "common/router/scoped_rds.h"
 #include "common/runtime/runtime_impl.h"
+#include "common/tracing/http_tracer_impl.h"
 
 namespace Envoy {
 namespace Extensions {
@@ -256,7 +259,6 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig(
     const auto& tracing_config = config.tracing();
 
     Tracing::OperationName tracing_operation_name;
-    std::vector<Http::LowerCaseString> request_headers_for_tags;
 
     // Listener level traffic direction overrides the operation name
     switch (context.direction()) {
@@ -285,8 +287,15 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig(
       NOT_REACHED_GCOVR_EXCL_LINE;
     }
 
+    Tracing::CustomTagMap custom_tags;
     for (const std::string& header : tracing_config.request_headers_for_tags()) {
-      request_headers_for_tags.push_back(Http::LowerCaseString(header));
+      envoy::type::tracing::v2::CustomTag::Header headerTag;
+      headerTag.set_name(header);
+      custom_tags.emplace(
+          header, std::make_shared<const Tracing::RequestHeaderCustomTag>(header, headerTag));
+    }
+    for (const auto& tag : tracing_config.custom_tags()) {
+      custom_tags.emplace(tag.tag(), Tracing::HttpTracerUtility::createCustomTag(tag));
     }
 
     envoy::type::FractionalPercent client_sampling;
@@ -308,8 +317,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig(
 
     tracing_config_ =
         std::make_unique<Http::TracingConnectionManagerConfig>(Http::TracingConnectionManagerConfig{
-            tracing_operation_name, request_headers_for_tags, client_sampling, random_sampling,
-            overall_sampling, tracing_config.verbose(), max_path_tag_length});
+            tracing_operation_name, custom_tags, client_sampling, random_sampling, overall_sampling,
+            tracing_config.verbose(), max_path_tag_length});
   }
 
   for (const auto& access_log : config.access_log()) {
@@ -415,8 +424,14 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection,
         connection, callbacks, context_.scope(), http2_settings_, maxRequestHeadersKb(),
         maxRequestHeadersCount());
   case CodecType::HTTP3:
-    // TODO(danzh) create QUIC specific codec.
-    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
+    // Hard code the Quiche factory name here to instantiate a QUIC codec implementation.
+    // TODO(danzh) Add support for getting the factory name from config, possibly
+    // from the HttpConnectionManager protobuf. This is not essential until there are
+    // multiple QUIC implementations.
+    return std::unique_ptr<Http::ServerConnection>(
+        Config::Utility::getAndCheckFactory<Http::QuicHttpServerConnectionFactory>(
+            Http::QuicCodecNames::get().Quiche)
+            .createQuicServerConnection(connection, callbacks));
   case CodecType::AUTO:
     return Http::ConnectionManagerUtility::autoCreateCodec(
         connection, data, callbacks, context_.scope(), http1_settings_, http2_settings_,
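
The HTTP/3 branch now resolves its codec through a named factory lookup rather than constructing one directly, which keeps the Quiche implementation behind a registry entry. A standalone sketch of that lookup pattern (it mirrors the shape of the call above, not Envoy's actual Registry or Config::Utility internals):

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Minimal stand-ins for a server codec and its concrete QUIC implementation.
struct ServerConnection {
  virtual ~ServerConnection() = default;
};
struct QuicheServerConnection : ServerConnection {};

using CodecFactory = std::function<std::unique_ptr<ServerConnection>()>;

// Factories self-register under a name; lookup happens at codec-creation time.
std::map<std::string, CodecFactory>& codecRegistry() {
  static std::map<std::string, CodecFactory> factories;
  return factories;
}

int main() {
  codecRegistry()["quiche"] = [] { return std::make_unique<QuicheServerConnection>(); };

  // Rough analogue of getAndCheckFactory(...).createQuicServerConnection(...).
  const auto it = codecRegistry().find("quiche");
  if (it == codecRegistry().end()) {
    std::cerr << "no QUIC codec factory registered under 'quiche'\n";
    return 1;
  }
  std::unique_ptr<ServerConnection> codec = it->second();
  std::cout << "created QUIC server connection\n";
  return 0;
}
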
diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.h b/source/extensions/filters/network/mysql_proxy/mysql_filter.h
index 8ca5bb690785..2d73ef9b5846 100644
--- a/source/extensions/filters/network/mysql_proxy/mysql_filter.h
+++ b/source/extensions/filters/network/mysql_proxy/mysql_filter.h
@@ -26,18 +26,16 @@ namespace MySQLProxy {
 /**
  * All MySQL proxy stats. @see stats_macros.h
  */
-// clang-format off
-#define ALL_MYSQL_PROXY_STATS(COUNTER)                                           \
-  COUNTER(sessions)                                                              \
-  COUNTER(login_attempts)                                                        \
-  COUNTER(login_failures)                                                        \
-  COUNTER(decoder_errors)                                                        \
-  COUNTER(protocol_errors)                                                       \
-  COUNTER(upgraded_to_ssl)                                                       \
-  COUNTER(auth_switch_request)                                                   \
-  COUNTER(queries_parsed)                                                        \
-  COUNTER(queries_parse_error)                                                   \
-// clang-format on
+#define ALL_MYSQL_PROXY_STATS(COUNTER)                                                             \
+  COUNTER(sessions)                                                                                \
+  COUNTER(login_attempts)                                                                          \
+  COUNTER(login_failures)                                                                          \
+  COUNTER(decoder_errors)                                                                          \
+  COUNTER(protocol_errors)                                                                         \
+  COUNTER(upgraded_to_ssl)                                                                         \
+  COUNTER(auth_switch_request)                                                                     \
+  COUNTER(queries_parsed)                                                                          \
+  COUNTER(queries_parse_error)
 
 /**
  * Struct definition for all MySQL proxy stats. @see stats_macros.h
@@ -51,7 +49,7 @@ struct MySQLProxyStats {
  */
 class MySQLFilterConfig {
 public:
-  MySQLFilterConfig(const std::string &stat_prefix, Stats::Scope& scope);
+  MySQLFilterConfig(const std::string& stat_prefix, Stats::Scope& scope);
 
   const MySQLProxyStats& stats() { return stats_; }
 
@@ -60,10 +58,8 @@ class MySQLFilterConfig {
   MySQLProxyStats stats_;
 
 private:
-  MySQLProxyStats generateStats(const std::string& prefix,
-                                Stats::Scope& scope) {
-    return MySQLProxyStats{
-        ALL_MYSQL_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))};
+  MySQLProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) {
+    return MySQLProxyStats{ALL_MYSQL_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))};
   }
 };
 
@@ -88,13 +84,13 @@ class MySQLFilter : public Network::Filter, DecoderCallbacks, Logger::Loggable<L
   // MySQLProxy::DecoderCallback
   void onProtocolError() override;
   void onNewMessage(MySQLSession::State state) override;
-  void onServerGreeting(ServerGreeting&) override {};
+  void onServerGreeting(ServerGreeting&) override{};
   void onClientLogin(ClientLogin& message) override;
   void onClientLoginResponse(ClientLoginResponse& message) override;
-  void onClientSwitchResponse(ClientSwitchResponse&) override {};
+  void onClientSwitchResponse(ClientSwitchResponse&) override{};
   void onMoreClientLoginResponse(ClientLoginResponse& message) override;
   void onCommand(Command& message) override;
-  void onCommandResponse(CommandResponse&) override {};
+  void onCommandResponse(CommandResponse&) override{};
 
   void doDecode(Buffer::Instance& buffer);
   DecoderPtr createDecoder(DecoderCallbacks& callbacks);
@@ -109,7 +105,7 @@ class MySQLFilter : public Network::Filter, DecoderCallbacks, Logger::Loggable<L
   bool sniffing_{true};
 };
 
-}  // namespace MySQLProxy
-}  // namespace NetworkFilters
-}  // namespace Extensions
-}  // namespace Envoy
+} // namespace MySQLProxy
+} // namespace NetworkFilters
+} // namespace Extensions
+} // namespace Envoy
diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc
index 0591b85f973d..69c5753ec74a 100644
--- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc
+++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc
@@ -7,51 +7,36 @@ namespace Extensions {
 namespace NetworkFilters {
 namespace RedisProxy {
 namespace CommandSplitter {
-
-Common::Redis::RespValuePtr Utility::makeError(const std::string& error) {
-  Common::Redis::RespValuePtr response(new Common::Redis::RespValue());
-  response->type(Common::Redis::RespType::Error);
-  response->asString() = error;
-  return response;
-}
-
 namespace {
 
 // null_pool_callbacks is used for requests that must be filtered and not redirected such as
 // "asking".
-Common::Redis::Client::DoNothingPoolCallbacks null_pool_callbacks;
+ConnPool::DoNothingPoolCallbacks null_pool_callbacks;
 
 /**
- * Validate the received moved/ask redirection error and the original redis request.
- * @param[in] original_request supplies the incoming request associated with the command splitter
- * request.
- * @param[in] error_response supplies the moved/ask redirection response from the upstream Redis
- * server.
- * @param[out] error_substrings the non-whitespace substrings of error_response.
- * @param[out] ask_redirection true if error_response is an ASK redirection error, false otherwise.
- * @return bool true if the original_request or error_response are not valid, false otherwise.
+ * Make a request and optionally mirror it according to the route's mirror policies.
+ * @param route supplies the route matched with the request.
+ * @param command supplies the command of the request.
+ * @param key supplies the key of the request.
+ * @param incoming_request supplies the request.
+ * @param callbacks supplies the request completion callbacks.
+ * @return PoolRequest* a handle to the active request or nullptr if the request could not be made
+ *         for some reason.
  */
-bool redirectionArgsInvalid(const Common::Redis::RespValue* original_request,
-                            const Common::Redis::RespValue& error_response,
-                            std::vector<absl::string_view>& error_substrings,
-                            bool& ask_redirection) {
-  if ((original_request == nullptr) || (error_response.type() != Common::Redis::RespType::Error)) {
-    return true;
-  }
-  error_substrings = StringUtil::splitToken(error_response.asString(), " ", false);
-  if (error_substrings.size() != 3) {
-    return true;
-  }
-  if (error_substrings[0] == "ASK") {
-    ask_redirection = true;
-  } else if (error_substrings[0] == "MOVED") {
-    ask_redirection = false;
-  } else {
-    // The first substring must be MOVED or ASK.
-    return true;
+Common::Redis::Client::PoolRequest* makeSingleServerRequest(
+    const RouteSharedPtr& route, const std::string& command, const std::string& key,
+    Common::Redis::RespValueConstSharedPtr incoming_request, ConnPool::PoolCallbacks& callbacks) {
+  auto handler =
+      route->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request), callbacks);
+  if (handler) {
+    for (auto& mirror_policy : route->mirrorPolicies()) {
+      if (mirror_policy->shouldMirror(command)) {
+        mirror_policy->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request),
+                                               null_pool_callbacks);
+      }
+    }
   }
-  // Other validation done later to avoid duplicate processing.
-  return false;
+  return handler;
 }
 
 /**
@@ -64,15 +49,17 @@ bool redirectionArgsInvalid(const Common::Redis::RespValue* original_request,
  * @return PoolRequest* a handle to the active request or nullptr if the request could not be made
  *         for some reason.
  */
-Common::Redis::Client::PoolRequest* makeRequest(const RouteSharedPtr& route,
-                                                const std::string& command, const std::string& key,
-                                                const Common::Redis::RespValue& incoming_request,
-                                                Common::Redis::Client::PoolCallbacks& callbacks) {
-  auto handler = route->upstream()->makeRequest(key, incoming_request, callbacks);
+Common::Redis::Client::PoolRequest*
+makeFragmentedRequest(const RouteSharedPtr& route, const std::string& command,
+                      const std::string& key, const Common::Redis::RespValue& incoming_request,
+                      ConnPool::PoolCallbacks& callbacks) {
+  auto handler =
+      route->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request), callbacks);
   if (handler) {
     for (auto& mirror_policy : route->mirrorPolicies()) {
       if (mirror_policy->shouldMirror(command)) {
-        mirror_policy->upstream()->makeRequest(key, incoming_request, null_pool_callbacks);
+        mirror_policy->upstream()->makeRequest(key, ConnPool::RespVariant(incoming_request),
+                                               null_pool_callbacks);
       }
     }
   }
@@ -82,7 +69,7 @@ Common::Redis::Client::PoolRequest* makeRequest(const RouteSharedPtr& route,
 
 void SplitRequestBase::onWrongNumberOfArguments(SplitCallbacks& callbacks,
                                                 const Common::Redis::RespValue& request) {
-  callbacks.onResponse(Utility::makeError(
+  callbacks.onResponse(Common::Redis::Utility::makeError(
       fmt::format("wrong number of arguments for '{}' command", request.asArray()[0].asString())));
 }
 
@@ -106,37 +93,7 @@ void SingleServerRequest::onResponse(Common::Redis::RespValuePtr&& response) {
 void SingleServerRequest::onFailure() {
   handle_ = nullptr;
   updateStats(false);
-  callbacks_.onResponse(Utility::makeError(Response::get().UpstreamFailure));
-}
-
-bool SingleServerRequest::onRedirection(const Common::Redis::RespValue& value) {
-  std::vector<absl::string_view> err;
-  bool ask_redirection = false;
-  if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool_) {
-    return false;
-  }
-
-  // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash key
-  // slot (err[1]), and IP address and TCP port separated by a colon (err[2]).
-  const std::string host_address = std::string(err[2]);
-
-  // Prepend request with an asking command if redirected via an ASK error. The returned handle is
-  // not important since there is no point in being able to cancel the request. The use of
-  // null_pool_callbacks ensures the transparent filtering of the Redis server's response to the
-  // "asking" command; this is fine since the server either responds with an OK or an error message
-  // if cluster support is not enabled (in which case we should not get an ASK redirection error).
-  if (ask_redirection &&
-      !conn_pool_->makeRequestToHost(
-          host_address, Common::Redis::Utility::AskingRequest::instance(), null_pool_callbacks)) {
-    return false;
-  }
-  handle_ = conn_pool_->makeRequestToHost(host_address, *incoming_request_, *this);
-
-  if (handle_ != nullptr) {
-    conn_pool_->onRedirection();
-    return true;
-  }
-  return false;
+  callbacks_.onResponse(Common::Redis::Utility::makeError(Response::get().UpstreamFailure));
 }
 
 void SingleServerRequest::cancel() {
@@ -153,18 +110,17 @@ SplitRequestPtr SimpleRequest::create(Router& router,
 
   const auto route = router.upstreamPool(incoming_request->asArray()[1].asString());
   if (route) {
-    request_ptr->conn_pool_ = route->upstream();
+    Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);
     request_ptr->handle_ =
-        makeRequest(route, incoming_request->asArray()[0].asString(),
-                    incoming_request->asArray()[1].asString(), *incoming_request, *request_ptr);
+        makeSingleServerRequest(route, base_request->asArray()[0].asString(),
+                                base_request->asArray()[1].asString(), base_request, *request_ptr);
   }
 
   if (!request_ptr->handle_) {
-    callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
+    callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));
     return nullptr;
   }
 
-  request_ptr->incoming_request_ = std::move(incoming_request);
   return request_ptr;
 }
 
@@ -183,19 +139,18 @@ SplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr&
 
   const auto route = router.upstreamPool(incoming_request->asArray()[3].asString());
   if (route) {
-    request_ptr->conn_pool_ = route->upstream();
+    Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);
     request_ptr->handle_ =
-        makeRequest(route, incoming_request->asArray()[0].asString(),
-                    incoming_request->asArray()[3].asString(), *incoming_request, *request_ptr);
+        makeSingleServerRequest(route, base_request->asArray()[0].asString(),
+                                base_request->asArray()[3].asString(), base_request, *request_ptr);
   }
 
   if (!request_ptr->handle_) {
     command_stats.error_.inc();
-    callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
+    callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));
     return nullptr;
   }
 
-  request_ptr->incoming_request_ = std::move(incoming_request);
   return request_ptr;
 }
 
@@ -217,7 +172,7 @@ void FragmentedRequest::cancel() {
 }
 
 void FragmentedRequest::onChildFailure(uint32_t index) {
-  onChildResponse(Utility::makeError(Response::get().UpstreamFailure), index);
+  onChildResponse(Common::Redis::Utility::makeError(Response::get().UpstreamFailure), index);
 }
 
 SplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request,
@@ -234,7 +189,7 @@ SplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&
   request_ptr->pending_response_->asArray().swap(responses);
 
   Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request);
-  for (unsigned int i = 1; i < base_request->asArray().size(); i++) {
+  for (uint64_t i = 1; i < base_request->asArray().size(); i++) {
     request_ptr->pending_requests_.emplace_back(*request_ptr, i - 1);
     PendingRequest& pending_request = request_ptr->pending_requests_.back();
 
@@ -243,59 +198,22 @@ SplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&
       // Create composite array for a single get.
       const Common::Redis::RespValue single_mget(
           base_request, Common::Redis::Utility::GetRequest::instance(), i, i);
-      pending_request.conn_pool_ = route->upstream();
-      pending_request.handle_ = makeRequest(route, "get", base_request->asArray()[i].asString(),
-                                            single_mget, pending_request);
+      pending_request.handle_ = makeFragmentedRequest(
+          route, "get", base_request->asArray()[i].asString(), single_mget, pending_request);
     }
 
     if (!pending_request.handle_) {
-      pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
+      pending_request.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));
     }
   }
 
   if (request_ptr->num_pending_responses_ > 0) {
-    request_ptr->incoming_request_ = std::move(base_request);
     return request_ptr;
   }
 
   return nullptr;
 }
 
-bool FragmentedRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index,
-                                           const ConnPool::InstanceSharedPtr& conn_pool) {
-  std::vector<absl::string_view> err;
-  bool ask_redirection = false;
-  if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) {
-    return false;
-  }
-
-  // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash key
-  // slot (err[1]), and IP address and TCP port separated by a colon (err[2]).
-  std::string host_address = std::string(err[2]);
-  Common::Redis::RespValue request;
-  recreate(request, index);
-
-  // Prepend request with an asking command if redirected via an ASK error. The returned handle is
-  // not important since there is no point in being able to cancel the request. The use of
-  // null_pool_callbacks ensures the transparent filtering of the Redis server's response to the
-  // "asking" command; this is fine since the server either responds with an OK or an error message
-  // if cluster support is not enabled (in which case we should not get an ASK redirection error).
-  if (ask_redirection &&
-      !conn_pool->makeRequestToHost(host_address, Common::Redis::Utility::AskingRequest::instance(),
-                                    null_pool_callbacks)) {
-    return false;
-  }
-
-  pending_requests_[index].handle_ =
-      conn_pool->makeRequestToHost(host_address, request, pending_requests_[index]);
-
-  if (pending_requests_[index].handle_ != nullptr) {
-    conn_pool->onRedirection();
-    return true;
-  }
-  return false;
-}
-
 void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) {
   pending_requests_[index].handle_ = nullptr;
 
@@ -330,15 +248,6 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t
   }
 }
 
-void MGETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) {
-  // 1st resp value in incoming_request_ is the command.
-  // index + 1 is the key for the request.
-  Common::Redis::RespValue::CompositeArray single_get(
-      incoming_request_, Common::Redis::Utility::GetRequest::instance(), index + 1, index + 1);
-  request.type(Common::Redis::RespType::CompositeArray);
-  std::swap(request.asCompositeArray(), single_get);
-}
-
 SplitRequestPtr MSETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request,
                                     SplitCallbacks& callbacks, CommandStats& command_stats,
                                     TimeSource& time_source) {
@@ -363,22 +272,20 @@ SplitRequestPtr MSETRequest::create(Router& router, Common::Redis::RespValuePtr&
 
     const auto route = router.upstreamPool(base_request->asArray()[i].asString());
     if (route) {
-      pending_request.conn_pool_ = route->upstream();
       // Create composite array for a single set command.
       const Common::Redis::RespValue single_set(
           base_request, Common::Redis::Utility::SetRequest::instance(), i, i + 1);
       ENVOY_LOG(debug, "redis: parallel set: '{}'", single_set.toString());
-      pending_request.handle_ = makeRequest(route, "set", base_request->asArray()[i].asString(),
-                                            single_set, pending_request);
+      pending_request.handle_ = makeFragmentedRequest(
+          route, "set", base_request->asArray()[i].asString(), single_set, pending_request);
     }
 
     if (!pending_request.handle_) {
-      pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
+      pending_request.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));
     }
   }
 
   if (request_ptr->num_pending_responses_ > 0) {
-    request_ptr->incoming_request_ = std::move(base_request);
     return request_ptr;
   }
 
@@ -408,23 +315,12 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t
       pending_response_->asString() = Response::get().OK;
       callbacks_.onResponse(std::move(pending_response_));
     } else {
-      callbacks_.onResponse(
-          Utility::makeError(fmt::format("finished with {} error(s)", error_count_)));
+      callbacks_.onResponse(Common::Redis::Utility::makeError(
+          fmt::format("finished with {} error(s)", error_count_)));
     }
   }
 }
 
-void MSETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) {
-  // 1st resp value in the incoming_request_ is the mset
-  // index*2 + 1 is the key for the request.
-  // index*2 + 2 is the value for the request.
-  Common::Redis::RespValue::CompositeArray single_set(
-      incoming_request_, Common::Redis::Utility::SetRequest::instance(), index * 2 + 1,
-      index * 2 + 2);
-  request.type(Common::Redis::RespType::CompositeArray);
-  std::swap(request.asCompositeArray(), single_set);
-}
-
 SplitRequestPtr SplitKeysSumResultRequest::create(Router& router,
                                                   Common::Redis::RespValuePtr&& incoming_request,
                                                   SplitCallbacks& callbacks,
@@ -450,19 +346,17 @@ SplitRequestPtr SplitKeysSumResultRequest::create(Router& router,
               single_fragment.toString());
     const auto route = router.upstreamPool(base_request->asArray()[i].asString());
     if (route) {
-      pending_request.conn_pool_ = route->upstream();
-      pending_request.handle_ =
-          makeRequest(route, base_request->asArray()[0].asString(),
-                      base_request->asArray()[i].asString(), single_fragment, pending_request);
+      pending_request.handle_ = makeFragmentedRequest(route, base_request->asArray()[0].asString(),
+                                                      base_request->asArray()[i].asString(),
+                                                      single_fragment, pending_request);
     }
 
     if (!pending_request.handle_) {
-      pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
+      pending_request.onResponse(Common::Redis::Utility::makeError(Response::get().NoUpstreamHost));
     }
   }
 
   if (request_ptr->num_pending_responses_ > 0) {
-    request_ptr->incoming_request_ = std::move(base_request);
     return request_ptr;
   }
 
@@ -491,21 +385,12 @@ void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va
       pending_response_->asInteger() = total_;
       callbacks_.onResponse(std::move(pending_response_));
     } else {
-      callbacks_.onResponse(
-          Utility::makeError(fmt::format("finished with {} error(s)", error_count_)));
+      callbacks_.onResponse(Common::Redis::Utility::makeError(
+          fmt::format("finished with {} error(s)", error_count_)));
     }
   }
 }
 
-void SplitKeysSumResultRequest::recreate(Common::Redis::RespValue& request, uint32_t index) {
-  // 1st resp value in incoming_request_ is the command.
-  // index + 1 is the key for the request.
-  Common::Redis::RespValue::CompositeArray single_fragment(
-      incoming_request_, incoming_request_->asArray()[0], index + 1, index + 1);
-  request.type(Common::Redis::RespType::CompositeArray);
-  std::swap(request.asCompositeArray(), single_fragment);
-}
-
 InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix,
                            TimeSource& time_source, bool latency_in_micros)
     : router_(std::move(router)), simple_command_handler_(*router_),
@@ -560,7 +445,7 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request,
   }
 
   if (!callbacks.connectionAllowed()) {
-    callbacks.onResponse(Utility::makeError(Response::get().AuthRequiredError));
+    callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().AuthRequiredError));
     return nullptr;
   }
 
@@ -582,7 +467,7 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request,
   auto handler = handler_lookup_table_.find(to_lower_string.c_str());
   if (handler == nullptr) {
     stats_.unsupported_command_.inc();
-    callbacks.onResponse(Utility::makeError(
+    callbacks.onResponse(Common::Redis::Utility::makeError(
         fmt::format("unsupported command '{}'", request->asArray()[0].asString())));
     return nullptr;
   }
@@ -595,7 +480,7 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request,
 
 void InstanceImpl::onInvalidRequest(SplitCallbacks& callbacks) {
   stats_.invalid_request_.inc();
-  callbacks.onResponse(Utility::makeError(Response::get().InvalidRequest));
+  callbacks.onResponse(Common::Redis::Utility::makeError(Response::get().InvalidRequest));
 }
 
 void InstanceImpl::addHandler(Stats::Scope& scope, const std::string& stat_prefix,
diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h
index 7c021abdaf67..6e0b0753abb1 100644
--- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h
+++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h
@@ -18,7 +18,7 @@
 #include "extensions/filters/network/common/redis/client_impl.h"
 #include "extensions/filters/network/common/redis/utility.h"
 #include "extensions/filters/network/redis_proxy/command_splitter.h"
-#include "extensions/filters/network/redis_proxy/conn_pool.h"
+#include "extensions/filters/network/redis_proxy/conn_pool_impl.h"
 #include "extensions/filters/network/redis_proxy/router.h"
 
 namespace Envoy {
@@ -38,20 +38,13 @@ struct ResponseValues {
 
 using Response = ConstSingleton<ResponseValues>;
 
-class Utility {
-public:
-  static Common::Redis::RespValuePtr makeError(const std::string& error);
-};
-
 /**
  * All command level stats. @see stats_macros.h
  */
-// clang-format off
 #define ALL_COMMAND_STATS(COUNTER)                                                                 \
   COUNTER(total)                                                                                   \
   COUNTER(success)                                                                                 \
   COUNTER(error)
-// clang-format on
 
 /**
  * Struct definition for all command stats. @see stats_macros.h
@@ -95,14 +88,13 @@ class SplitRequestBase : public SplitRequest {
 /**
  * SingleServerRequest is a base class for commands that hash to a single backend.
  */
-class SingleServerRequest : public SplitRequestBase, public Common::Redis::Client::PoolCallbacks {
+class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallbacks {
 public:
   ~SingleServerRequest() override;
 
-  // Common::Redis::Client::PoolCallbacks
+  // ConnPool::PoolCallbacks
   void onResponse(Common::Redis::RespValuePtr&& response) override;
   void onFailure() override;
-  bool onRedirection(const Common::Redis::RespValue& value) override;
 
   // RedisProxy::CommandSplitter::SplitRequest
   void cancel() override;
@@ -162,34 +154,25 @@ class FragmentedRequest : public SplitRequestBase {
   FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source)
       : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {}
 
-  struct PendingRequest : public Common::Redis::Client::PoolCallbacks {
+  struct PendingRequest : public ConnPool::PoolCallbacks {
     PendingRequest(FragmentedRequest& parent, uint32_t index) : parent_(parent), index_(index) {}
 
-    // Common::Redis::Client::PoolCallbacks
+    // ConnPool::PoolCallbacks
     void onResponse(Common::Redis::RespValuePtr&& value) override {
       parent_.onChildResponse(std::move(value), index_);
     }
     void onFailure() override { parent_.onChildFailure(index_); }
 
-    bool onRedirection(const Common::Redis::RespValue& value) override {
-      return parent_.onChildRedirection(value, index_, conn_pool_);
-    }
-
     FragmentedRequest& parent_;
     const uint32_t index_;
     Common::Redis::Client::PoolRequest* handle_{};
-    ConnPool::InstanceSharedPtr conn_pool_;
   };
 
   virtual void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) PURE;
   void onChildFailure(uint32_t index);
-  bool onChildRedirection(const Common::Redis::RespValue& value, uint32_t index,
-                          const ConnPool::InstanceSharedPtr& conn_pool);
-  virtual void recreate(Common::Redis::RespValue& request, uint32_t index) PURE;
 
   SplitCallbacks& callbacks_;
 
-  Common::Redis::RespValueSharedPtr incoming_request_;
   Common::Redis::RespValuePtr pending_response_;
   std::vector<PendingRequest> pending_requests_;
   uint32_t num_pending_responses_;
@@ -212,7 +195,6 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable<Logger::Id::redis
 
   // RedisProxy::CommandSplitter::FragmentedRequest
   void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override;
-  void recreate(Common::Redis::RespValue& request, uint32_t index) override;
 };
 
 /**
@@ -234,7 +216,6 @@ class SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable<Log
 
   // RedisProxy::CommandSplitter::FragmentedRequest
   void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override;
-  void recreate(Common::Redis::RespValue& request, uint32_t index) override;
 
   int64_t total_{0};
 };
@@ -256,7 +237,6 @@ class MSETRequest : public FragmentedRequest, Logger::Loggable<Logger::Id::redis
 
   // RedisProxy::CommandSplitter::FragmentedRequest
   void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) override;
-  void recreate(Common::Redis::RespValue& request, uint32_t index) override;
 };
 
 /**
@@ -276,11 +256,9 @@ class CommandHandlerFactory : public CommandHandler, CommandHandlerBase {
 /**
  * All splitter stats. @see stats_macros.h
  */
-// clang-format off
 #define ALL_COMMAND_SPLITTER_STATS(COUNTER)                                                        \
   COUNTER(invalid_request)                                                                         \
   COUNTER(unsupported_command)
-// clang-format on
 
 /**
  * Struct definition for all splitter stats. @see stats_macros.h
diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h
index 421a88a42675..0fa1e68bec96 100644
--- a/source/extensions/filters/network/redis_proxy/conn_pool.h
+++ b/source/extensions/filters/network/redis_proxy/conn_pool.h
@@ -9,12 +9,41 @@
 #include "extensions/filters/network/common/redis/client.h"
 #include "extensions/filters/network/common/redis/codec.h"
 
+#include "absl/types/variant.h"
+
 namespace Envoy {
 namespace Extensions {
 namespace NetworkFilters {
 namespace RedisProxy {
 namespace ConnPool {
 
+/**
+ * Outbound request callbacks.
+ */
+class PoolCallbacks {
+public:
+  virtual ~PoolCallbacks() = default;
+
+  /**
+   * Called when a pipelined response is received.
+   * @param value supplies the response which is now owned by the callee.
+   */
+  virtual void onResponse(Common::Redis::RespValuePtr&& value) PURE;
+
+  /**
+   * Called when a network/protocol error occurs and there is no response.
+   */
+  virtual void onFailure() PURE;
+};
+
+/**
+ * A variant that holds either a composite array RespValue or a shared pointer to a single
+ * server request. This exists for performance reasons, to avoid creating a RespValueSharedPtr
+ * for each composite array.
+ */
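+// Single-server requests pass the shared-pointer alternative, while fragmented commands pass a
+// stack-allocated composite array by value (see command_splitter_impl.cc).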
+using RespVariant =
+    absl::variant<const Common::Redis::RespValue, Common::Redis::RespValueConstSharedPtr>;
+
 /**
  * A redis connection pool. Wraps M connections to N upstream hosts, consistent hashing,
  * pipelining, failure handling, etc.
@@ -32,22 +61,7 @@ class Instance {
    *         for some reason.
    */
   virtual Common::Redis::Client::PoolRequest*
-  makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request,
-              Common::Redis::Client::PoolCallbacks& callbacks) PURE;
-
-  /**
-   * Makes a redis request based on IP address and TCP port of the upstream host (e.g., moved/ask
-   * cluster redirection).
-   * @param host_address supplies the IP address and TCP port of the upstream host to receive the
-   * request.
-   * @param request supplies the Redis request to make.
-   * @param callbacks supplies the request completion callbacks.
-   * @return PoolRequest* a handle to the active request or nullptr if the request could not be made
-   *         for some reason.
-   */
-  virtual Common::Redis::Client::PoolRequest*
-  makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request,
-                    Common::Redis::Client::PoolCallbacks& callbacks) PURE;
+  makeRequest(const std::string& hash_key, RespVariant&& request, PoolCallbacks& callbacks) PURE;
 
   /**
    * Notify the redirection manager singleton that a redirection error has been received from an
diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc
index 8bdaa8656894..96b1e5ed1907 100644
--- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc
+++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc
@@ -15,6 +15,19 @@ namespace Extensions {
 namespace NetworkFilters {
 namespace RedisProxy {
 namespace ConnPool {
+namespace {
+// null_client_callbacks is used for requests that must be filtered and not redirected, such as
+// "asking".
+Common::Redis::Client::DoNothingPoolCallbacks null_client_callbacks;
+
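+// Returns a reference to the RespValue held by the variant, regardless of which alternative is
+// active.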
+const Common::Redis::RespValue& getRequest(const RespVariant& request) {
+  if (request.index() == 0) {
+    return absl::get<const Common::Redis::RespValue>(request);
+  } else {
+    return *(absl::get<Common::Redis::RespValueConstSharedPtr>(request));
+  }
+}
+} // namespace
 
 InstanceImpl::InstanceImpl(
     const std::string& cluster_name, Upstream::ClusterManager& cm,
@@ -35,15 +48,14 @@ InstanceImpl::InstanceImpl(
 }
 
 Common::Redis::Client::PoolRequest*
-InstanceImpl::makeRequest(const std::string& key, const Common::Redis::RespValue& request,
-                          Common::Redis::Client::PoolCallbacks& callbacks) {
-  return tls_->getTyped<ThreadLocalPool>().makeRequest(key, request, callbacks);
+InstanceImpl::makeRequest(const std::string& key, RespVariant&& request, PoolCallbacks& callbacks) {
+  return tls_->getTyped<ThreadLocalPool>().makeRequest(key, std::move(request), callbacks);
 }
 
 Common::Redis::Client::PoolRequest*
 InstanceImpl::makeRequestToHost(const std::string& host_address,
                                 const Common::Redis::RespValue& request,
-                                Common::Redis::Client::PoolCallbacks& callbacks) {
+                                Common::Redis::Client::ClientCallbacks& callbacks) {
   return tls_->getTyped<ThreadLocalPool>().makeRequestToHost(host_address, request, callbacks);
 }
 
@@ -64,6 +76,9 @@ InstanceImpl::ThreadLocalPool::~ThreadLocalPool() {
   if (host_set_member_update_cb_handle_ != nullptr) {
     host_set_member_update_cb_handle_->remove();
   }
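+  // Drain pending requests before closing the clients below; a PendingRequest that still has
+  // an active handle is cancelled and its pool callbacks are notified of failure
+  // (see ~PendingRequest()).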
+  while (!pending_requests_.empty()) {
+    pending_requests_.pop_front();
+  }
   while (!client_map_.empty()) {
     client_map_.begin()->second->redis_client_->close();
   }
@@ -201,9 +216,8 @@ InstanceImpl::ThreadLocalPool::threadLocalActiveClient(Upstream::HostConstShared
 }
 
 Common::Redis::Client::PoolRequest*
-InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key,
-                                           const Common::Redis::RespValue& request,
-                                           Common::Redis::Client::PoolCallbacks& callbacks) {
+InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, RespVariant&& request,
+                                           PoolCallbacks& callbacks) {
   if (cluster_ == nullptr) {
     ASSERT(client_map_.empty());
     ASSERT(host_set_member_update_cb_handle_ == nullptr);
@@ -211,22 +225,28 @@ InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key,
   }
 
   Clusters::Redis::RedisLoadBalancerContextImpl lb_context(key, parent_.config_.enableHashtagging(),
-                                                           is_redis_cluster_, request,
+                                                           is_redis_cluster_, getRequest(request),
                                                            parent_.config_.readPolicy());
   Upstream::HostConstSharedPtr host = cluster_->loadBalancer().chooseHost(&lb_context);
   if (!host) {
     return nullptr;
   }
-
-  ThreadLocalActiveClientPtr& client = threadLocalActiveClient(host);
-
-  return client->redis_client_->makeRequest(request, callbacks);
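+  // pending_requests_ is a std::list, so the PendingRequest emplaced below keeps a stable
+  // address while other requests are added or completed entries are erased; that address is
+  // returned to the caller as the pool-level request handle.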
+  pending_requests_.emplace_back(*this, std::move(request), callbacks);
+  PendingRequest& pending_request = pending_requests_.back();
+  ThreadLocalActiveClientPtr& client = this->threadLocalActiveClient(host);
+  pending_request.request_handler_ = client->redis_client_->makeRequest(
+      getRequest(pending_request.incoming_request_), pending_request);
+  if (pending_request.request_handler_) {
+    return &pending_request;
+  } else {
+    onRequestCompleted();
+    return nullptr;
+  }
 }
 
-Common::Redis::Client::PoolRequest*
-InstanceImpl::ThreadLocalPool::makeRequestToHost(const std::string& host_address,
-                                                 const Common::Redis::RespValue& request,
-                                                 Common::Redis::Client::PoolCallbacks& callbacks) {
+Common::Redis::Client::PoolRequest* InstanceImpl::ThreadLocalPool::makeRequestToHost(
+    const std::string& host_address, const Common::Redis::RespValue& request,
+    Common::Redis::Client::ClientCallbacks& callbacks) {
   if (cluster_ == nullptr) {
     ASSERT(client_map_.empty());
     ASSERT(host_set_member_update_cb_handle_ == nullptr);
@@ -295,6 +315,16 @@ InstanceImpl::ThreadLocalPool::makeRequestToHost(const std::string& host_address
   return client->redis_client_->makeRequest(request, callbacks);
 }
 
+void InstanceImpl::ThreadLocalPool::onRequestCompleted() {
+  ASSERT(!pending_requests_.empty());
+
+  // The responses we receive may not arrive in order, so flush out what we can. (A new response
+  // may unlock several out-of-order responses.)
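+  // In-flight entries must stay in the list: each PendingRequest is both the client callbacks
+  // and the PoolRequest handle returned to the caller, so only completed entries at the front
+  // are erased here.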
+  while (!pending_requests_.empty() && !pending_requests_.front().request_handler_) {
+    pending_requests_.pop_front();
+  }
+}
+
 void InstanceImpl::ThreadLocalActiveClient::onEvent(Network::ConnectionEvent event) {
   if (event == Network::ConnectionEvent::RemoteClose ||
       event == Network::ConnectionEvent::LocalClose) {
@@ -318,6 +348,64 @@ void InstanceImpl::ThreadLocalActiveClient::onEvent(Network::ConnectionEvent eve
   }
 }
 
+InstanceImpl::PendingRequest::PendingRequest(InstanceImpl::ThreadLocalPool& parent,
+                                             RespVariant&& incoming_request,
+                                             PoolCallbacks& pool_callbacks)
+    : parent_(parent), incoming_request_(std::move(incoming_request)),
+      pool_callbacks_(pool_callbacks) {}
+
+InstanceImpl::PendingRequest::~PendingRequest() {
+  if (request_handler_) {
+    request_handler_->cancel();
+    request_handler_ = nullptr;
+    // If we have to cancel the request on the client, treat this as a failure for the pool
+    // callbacks.
+    pool_callbacks_.onFailure();
+  }
+}
+
+void InstanceImpl::PendingRequest::onResponse(Common::Redis::RespValuePtr&& response) {
+  request_handler_ = nullptr;
+  pool_callbacks_.onResponse(std::move(response));
+  parent_.onRequestCompleted();
+}
+
+void InstanceImpl::PendingRequest::onFailure() {
+  request_handler_ = nullptr;
+  pool_callbacks_.onFailure();
+  parent_.onRequestCompleted();
+}
+
+bool InstanceImpl::PendingRequest::onRedirection(Common::Redis::RespValuePtr&& value,
+                                                 const std::string& host_address,
+                                                 bool ask_redirection) {
+  // Prepend request with an asking command if redirected via an ASK error. The returned handle is
+  // not important since there is no point in being able to cancel the request. The use of
+  // null_client_callbacks ensures the transparent filtering of the Redis server's response to the
+  // "asking" command; this is fine since the server either responds with an OK or an error message
+  // if cluster support is not enabled (in which case we should not get an ASK redirection error).
+  if (ask_redirection &&
+      !parent_.makeRequestToHost(host_address, Common::Redis::Utility::AskingRequest::instance(),
+                                 null_client_callbacks)) {
+    onResponse(std::move(value));
+    return false;
+  }
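+  // Retry the original request against the redirected host. This PendingRequest remains the
+  // client callbacks, so the eventual response or failure is still delivered to the original
+  // pool callbacks.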
+  request_handler_ = parent_.makeRequestToHost(host_address, getRequest(incoming_request_), *this);
+  if (!request_handler_) {
+    onResponse(std::move(value));
+    return false;
+  } else {
+    parent_.parent_.onRedirection();
+    return true;
+  }
+}
+
+void InstanceImpl::PendingRequest::cancel() {
+  request_handler_->cancel();
+  request_handler_ = nullptr;
+  parent_.onRequestCompleted();
+}
+
 } // namespace ConnPool
 } // namespace RedisProxy
 } // namespace NetworkFilters
diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h
index 6121dd7598e4..bf6f997a01ee 100644
--- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h
+++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h
@@ -46,6 +46,12 @@ struct RedisClusterStats {
   REDIS_CLUSTER_STATS(GENERATE_COUNTER_STRUCT)
 };
 
+class DoNothingPoolCallbacks : public PoolCallbacks {
+public:
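+  // ConnPool::PoolCallbacks - intentionally ignore responses and failures.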
+  void onResponse(Common::Redis::RespValuePtr&&) override {}
+  void onFailure() override {}
+};
+
 class InstanceImpl : public Instance {
 public:
   InstanceImpl(
@@ -56,12 +62,22 @@ class InstanceImpl : public Instance {
       const Common::Redis::RedisCommandStatsSharedPtr& redis_command_stats,
       Extensions::Common::Redis::RedirectionManagerSharedPtr redirection_manager);
   // RedisProxy::ConnPool::Instance
-  Common::Redis::Client::PoolRequest*
-  makeRequest(const std::string& key, const Common::Redis::RespValue& request,
-              Common::Redis::Client::PoolCallbacks& callbacks) override;
+  Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, RespVariant&& request,
+                                                  PoolCallbacks& callbacks) override;
+  /**
+   * Makes a redis request based on IP address and TCP port of the upstream host (e.g.,
+   * moved/ask cluster redirection). This method is now kept mainly for testing.
+   * @param host_address supplies the IP address and TCP port of the upstream host to receive
+   * the request.
+   * @param request supplies the Redis request to make.
+   * @param callbacks supplies the request completion callbacks.
+   * @return PoolRequest* a handle to the active request or nullptr if the request could not be
+   * made for some reason.
+   */
   Common::Redis::Client::PoolRequest*
   makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request,
-                    Common::Redis::Client::PoolCallbacks& callbacks) override;
+                    Common::Redis::Client::ClientCallbacks& callbacks);
+
   bool onRedirection() override { return redirection_manager_->onRedirection(cluster_name_); }
 
   // Allow the unit test to have access to private members.
@@ -85,17 +101,38 @@ class InstanceImpl : public Instance {
 
   using ThreadLocalActiveClientPtr = std::unique_ptr<ThreadLocalActiveClient>;
 
+  struct PendingRequest : public Common::Redis::Client::ClientCallbacks,
+                          public Common::Redis::Client::PoolRequest {
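+    // Adapts the pool-level PoolCallbacks to the client-level ClientCallbacks for one request,
+    // retaining the request payload so it can be retried on another host after a cluster
+    // redirection.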
+    PendingRequest(ThreadLocalPool& parent, RespVariant&& incoming_request,
+                   PoolCallbacks& pool_callbacks);
+    ~PendingRequest() override;
+
+    // Common::Redis::Client::ClientCallbacks
+    void onResponse(Common::Redis::RespValuePtr&& response) override;
+    void onFailure() override;
+    bool onRedirection(Common::Redis::RespValuePtr&& value, const std::string& host_address,
+                       bool ask_redirection) override;
+
+    // PoolRequest
+    void cancel() override;
+
+    ThreadLocalPool& parent_;
+    const RespVariant incoming_request_;
+    Common::Redis::Client::PoolRequest* request_handler_;
+    PoolCallbacks& pool_callbacks_;
+  };
+
   struct ThreadLocalPool : public ThreadLocal::ThreadLocalObject,
                            public Upstream::ClusterUpdateCallbacks {
     ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, std::string cluster_name);
     ~ThreadLocalPool() override;
     ThreadLocalActiveClientPtr& threadLocalActiveClient(Upstream::HostConstSharedPtr host);
-    Common::Redis::Client::PoolRequest*
-    makeRequest(const std::string& key, const Common::Redis::RespValue& request,
-                Common::Redis::Client::PoolCallbacks& callbacks);
+    Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, RespVariant&& request,
+                                                    PoolCallbacks& callbacks);
     Common::Redis::Client::PoolRequest*
     makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request,
-                      Common::Redis::Client::PoolCallbacks& callbacks);
+                      Common::Redis::Client::ClientCallbacks& callbacks);
+
     void onClusterAddOrUpdateNonVirtual(Upstream::ThreadLocalCluster& cluster);
     void onHostsAdded(const std::vector<Upstream::HostSharedPtr>& hosts_added);
     void onHostsRemoved(const std::vector<Upstream::HostSharedPtr>& hosts_removed);
@@ -107,6 +144,8 @@ class InstanceImpl : public Instance {
     }
     void onClusterRemoval(const std::string& cluster_name) override;
 
+    void onRequestCompleted();
+
     InstanceImpl& parent_;
     Event::Dispatcher& dispatcher_;
     const std::string cluster_name_;
@@ -118,6 +157,7 @@ class InstanceImpl : public Instance {
     std::string auth_password_;
     std::list<Upstream::HostSharedPtr> created_via_redirect_hosts_;
     std::list<ThreadLocalActiveClientPtr> clients_to_drain_;
+    std::list<PendingRequest> pending_requests_;
 
     /* This timer is used to poll the active clients in clients_to_drain_ to determine whether they
      * have been drained (have no active requests) or not. It is only enabled after a client has
diff --git a/source/extensions/filters/network/thrift_proxy/router/config.cc b/source/extensions/filters/network/thrift_proxy/router/config.cc
index ffdf57e82bd6..312f8143b445 100644
--- a/source/extensions/filters/network/thrift_proxy/router/config.cc
+++ b/source/extensions/filters/network/thrift_proxy/router/config.cc
@@ -14,10 +14,10 @@ ThriftFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoT
     const envoy::config::filter::thrift::router::v2alpha1::Router& proto_config,
     const std::string& stat_prefix, Server::Configuration::FactoryContext& context) {
   UNREFERENCED_PARAMETER(proto_config);
-  UNREFERENCED_PARAMETER(stat_prefix);
 
-  return [&context](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void {
-    callbacks.addDecoderFilter(std::make_shared<Router>(context.clusterManager()));
+  return [&context, stat_prefix](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void {
+    callbacks.addDecoderFilter(
+        std::make_shared<Router>(context.clusterManager(), stat_prefix, context.scope()));
   };
 }
 
diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc
index 449fddb85f63..d7de50a5d665 100644
--- a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc
+++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc
@@ -209,13 +209,10 @@ FilterStatus Router::transportEnd() {
 }
 
 FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) {
-  // TODO(zuercher): route stats (e.g., no_route, no_cluster, upstream_rq_maintenance_mode, no
-  // healthy upstream)
-
   route_ = callbacks_->route();
   if (!route_) {
-    ENVOY_STREAM_LOG(debug, "no cluster match for method '{}'", *callbacks_,
-                     metadata->methodName());
+    ENVOY_STREAM_LOG(debug, "no route match for method '{}'", *callbacks_, metadata->methodName());
+    stats_.route_missing_.inc();
     callbacks_->sendLocalReply(
         AppException(AppExceptionType::UnknownMethod,
                      fmt::format("no route for method '{}'", metadata->methodName())),
@@ -229,6 +226,7 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) {
   Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name);
   if (!cluster) {
     ENVOY_STREAM_LOG(debug, "unknown cluster '{}'", *callbacks_, cluster_name);
+    stats_.unknown_cluster_.inc();
     callbacks_->sendLocalReply(AppException(AppExceptionType::InternalError,
                                             fmt::format("unknown cluster '{}'", cluster_name)),
                                true);
@@ -240,6 +238,7 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) {
                    metadata->methodName());
 
   if (cluster_->maintenanceMode()) {
+    stats_.upstream_rq_maintenance_mode_.inc();
     callbacks_->sendLocalReply(
         AppException(AppExceptionType::InternalError,
                      fmt::format("maintenance mode for cluster '{}'", cluster_name)),
@@ -263,6 +262,7 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) {
   Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster(
       cluster_name, Upstream::ResourcePriority::Default, this);
   if (!conn_pool) {
+    stats_.no_healthy_upstream_.inc();
     callbacks_->sendLocalReply(
         AppException(AppExceptionType::InternalError,
                      fmt::format("no healthy upstream for '{}'", cluster_name)),
diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h
index d61c7fe7b59a..14f6e310a733 100644
--- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h
+++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h
@@ -6,6 +6,8 @@
 
 #include "envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.pb.h"
 #include "envoy/router/router.h"
+#include "envoy/stats/scope.h"
+#include "envoy/stats/stats_macros.h"
 #include "envoy/tcp/conn_pool.h"
 #include "envoy/upstream/load_balancer.h"
 
@@ -162,13 +164,25 @@ class RouteMatcher {
   std::vector<RouteEntryImplBaseConstSharedPtr> routes_;
 };
 
+#define ALL_THRIFT_ROUTER_STATS(COUNTER, GAUGE, HISTOGRAM)                                         \
+  COUNTER(route_missing)                                                                           \
+  COUNTER(unknown_cluster)                                                                         \
+  COUNTER(upstream_rq_maintenance_mode)                                                            \
+  COUNTER(no_healthy_upstream)
+
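+/**
+ * Struct definition for all Thrift router stats. @see stats_macros.h
+ */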
+struct RouterStats {
+  ALL_THRIFT_ROUTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)
+};
+
 class Router : public Tcp::ConnectionPool::UpstreamCallbacks,
                public Upstream::LoadBalancerContextBase,
                public ProtocolConverter,
                public ThriftFilters::DecoderFilter,
                Logger::Loggable<Logger::Id::thrift> {
 public:
-  Router(Upstream::ClusterManager& cluster_manager) : cluster_manager_(cluster_manager) {}
+  Router(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix,
+         Stats::Scope& scope)
+      : cluster_manager_(cluster_manager), stats_(generateStats(stat_prefix, scope)) {}
 
   ~Router() override = default;
 
@@ -239,8 +253,14 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks,
 
   void convertMessageBegin(MessageMetadataSharedPtr metadata);
   void cleanup();
+  RouterStats generateStats(const std::string& prefix, Stats::Scope& scope) {
+    return RouterStats{ALL_THRIFT_ROUTER_STATS(POOL_COUNTER_PREFIX(scope, prefix),
+                                               POOL_GAUGE_PREFIX(scope, prefix),
+                                               POOL_HISTOGRAM_PREFIX(scope, prefix))};
+  }
 
   Upstream::ClusterManager& cluster_manager_;
+  RouterStats stats_;
 
   ThriftFilters::DecoderFilterCallbacks* callbacks_{};
   RouteConstSharedPtr route_{};
diff --git a/source/extensions/filters/network/thrift_proxy/stats.h b/source/extensions/filters/network/thrift_proxy/stats.h
index 583c02882f86..9166f37be6ca 100644
--- a/source/extensions/filters/network/thrift_proxy/stats.h
+++ b/source/extensions/filters/network/thrift_proxy/stats.h
@@ -32,7 +32,7 @@ namespace ThriftProxy {
   HISTOGRAM(request_time_ms, Milliseconds)
 
 /**
- * Struct definition for all mongo proxy stats. @see stats_macros.h
+ * Struct definition for all thrift proxy stats. @see stats_macros.h
  */
 struct ThriftFilterStats {
   ALL_THRIFT_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT)
diff --git a/source/extensions/filters/network/thrift_proxy/thrift.h b/source/extensions/filters/network/thrift_proxy/thrift.h
index d16e074561a4..e42f1338adc6 100644
--- a/source/extensions/filters/network/thrift_proxy/thrift.h
+++ b/source/extensions/filters/network/thrift_proxy/thrift.h
@@ -156,7 +156,14 @@ enum class AppExceptionType {
   ProtocolError = 7,
   InvalidTransform = 8,
   InvalidProtocol = 9,
+  // FBThrift values.
+  // See https://github.com/facebook/fbthrift/blob/master/thrift/lib/cpp/TApplicationException.h#L52
   UnsupportedClientType = 10,
+  LoadShedding = 11,
+  Timeout = 12,
+  InjectedFailure = 13,
+  ChecksumMismatch = 14,
+  Interruption = 15,
 };
 
 } // namespace ThriftProxy
diff --git a/source/extensions/filters/network/zookeeper_proxy/filter.h b/source/extensions/filters/network/zookeeper_proxy/filter.h
index 56b9d858c35b..dc932764b156 100644
--- a/source/extensions/filters/network/zookeeper_proxy/filter.h
+++ b/source/extensions/filters/network/zookeeper_proxy/filter.h
@@ -24,65 +24,63 @@ namespace ZooKeeperProxy {
 /**
  * All ZooKeeper proxy stats. @see stats_macros.h
  */
-// clang-format off
-#define ALL_ZOOKEEPER_PROXY_STATS(COUNTER)                              \
-  COUNTER(decoder_error)                                                \
-  COUNTER(request_bytes)                                                \
-  COUNTER(connect_rq)                                                   \
-  COUNTER(connect_readonly_rq)                                          \
-  COUNTER(getdata_rq)                                                   \
-  COUNTER(create_rq)                                                    \
-  COUNTER(create2_rq)                                                   \
-  COUNTER(createcontainer_rq)                                           \
-  COUNTER(createttl_rq)                                                 \
-  COUNTER(setdata_rq)                                                   \
-  COUNTER(getchildren_rq)                                               \
-  COUNTER(getchildren2_rq)                                              \
-  COUNTER(getephemerals_rq)                                             \
-  COUNTER(getallchildrennumber_rq)                                      \
-  COUNTER(delete_rq)                                                    \
-  COUNTER(exists_rq)                                                    \
-  COUNTER(getacl_rq)                                                    \
-  COUNTER(setacl_rq)                                                    \
-  COUNTER(sync_rq)                                                      \
-  COUNTER(ping_rq)                                                      \
-  COUNTER(multi_rq)                                                     \
-  COUNTER(reconfig_rq)                                                  \
-  COUNTER(close_rq)                                                     \
-  COUNTER(setauth_rq)                                                   \
-  COUNTER(setwatches_rq)                                                \
-  COUNTER(checkwatches_rq)                                              \
-  COUNTER(removewatches_rq)                                             \
-  COUNTER(check_rq)                                                     \
-  COUNTER(response_bytes)                                               \
-  COUNTER(connect_resp)                                                 \
-  COUNTER(ping_resp)                                                    \
-  COUNTER(auth_resp)                                                    \
-  COUNTER(getdata_resp)                                                 \
-  COUNTER(create_resp)                                                  \
-  COUNTER(create2_resp)                                                 \
-  COUNTER(createcontainer_resp)                                         \
-  COUNTER(createttl_resp)                                               \
-  COUNTER(setdata_resp)                                                 \
-  COUNTER(getchildren_resp)                                             \
-  COUNTER(getchildren2_resp)                                            \
-  COUNTER(getephemerals_resp)                                           \
-  COUNTER(getallchildrennumber_resp)                                    \
-  COUNTER(delete_resp)                                                  \
-  COUNTER(exists_resp)                                                  \
-  COUNTER(getacl_resp)                                                  \
-  COUNTER(setacl_resp)                                                  \
-  COUNTER(sync_resp)                                                    \
-  COUNTER(multi_resp)                                                   \
-  COUNTER(reconfig_resp)                                                \
-  COUNTER(close_resp)                                                   \
-  COUNTER(setauth_resp)                                                 \
-  COUNTER(setwatches_resp)                                              \
-  COUNTER(checkwatches_resp)                                            \
-  COUNTER(removewatches_resp)                                           \
-  COUNTER(check_resp)                                                   \
+#define ALL_ZOOKEEPER_PROXY_STATS(COUNTER)                                                         \
+  COUNTER(decoder_error)                                                                           \
+  COUNTER(request_bytes)                                                                           \
+  COUNTER(connect_rq)                                                                              \
+  COUNTER(connect_readonly_rq)                                                                     \
+  COUNTER(getdata_rq)                                                                              \
+  COUNTER(create_rq)                                                                               \
+  COUNTER(create2_rq)                                                                              \
+  COUNTER(createcontainer_rq)                                                                      \
+  COUNTER(createttl_rq)                                                                            \
+  COUNTER(setdata_rq)                                                                              \
+  COUNTER(getchildren_rq)                                                                          \
+  COUNTER(getchildren2_rq)                                                                         \
+  COUNTER(getephemerals_rq)                                                                        \
+  COUNTER(getallchildrennumber_rq)                                                                 \
+  COUNTER(delete_rq)                                                                               \
+  COUNTER(exists_rq)                                                                               \
+  COUNTER(getacl_rq)                                                                               \
+  COUNTER(setacl_rq)                                                                               \
+  COUNTER(sync_rq)                                                                                 \
+  COUNTER(ping_rq)                                                                                 \
+  COUNTER(multi_rq)                                                                                \
+  COUNTER(reconfig_rq)                                                                             \
+  COUNTER(close_rq)                                                                                \
+  COUNTER(setauth_rq)                                                                              \
+  COUNTER(setwatches_rq)                                                                           \
+  COUNTER(checkwatches_rq)                                                                         \
+  COUNTER(removewatches_rq)                                                                        \
+  COUNTER(check_rq)                                                                                \
+  COUNTER(response_bytes)                                                                          \
+  COUNTER(connect_resp)                                                                            \
+  COUNTER(ping_resp)                                                                               \
+  COUNTER(auth_resp)                                                                               \
+  COUNTER(getdata_resp)                                                                            \
+  COUNTER(create_resp)                                                                             \
+  COUNTER(create2_resp)                                                                            \
+  COUNTER(createcontainer_resp)                                                                    \
+  COUNTER(createttl_resp)                                                                          \
+  COUNTER(setdata_resp)                                                                            \
+  COUNTER(getchildren_resp)                                                                        \
+  COUNTER(getchildren2_resp)                                                                       \
+  COUNTER(getephemerals_resp)                                                                      \
+  COUNTER(getallchildrennumber_resp)                                                               \
+  COUNTER(delete_resp)                                                                             \
+  COUNTER(exists_resp)                                                                             \
+  COUNTER(getacl_resp)                                                                             \
+  COUNTER(setacl_resp)                                                                             \
+  COUNTER(sync_resp)                                                                               \
+  COUNTER(multi_resp)                                                                              \
+  COUNTER(reconfig_resp)                                                                           \
+  COUNTER(close_resp)                                                                              \
+  COUNTER(setauth_resp)                                                                            \
+  COUNTER(setwatches_resp)                                                                         \
+  COUNTER(checkwatches_resp)                                                                       \
+  COUNTER(removewatches_resp)                                                                      \
+  COUNTER(check_resp)                                                                              \
   COUNTER(watch_event)
-// clang-format on
 
 /**
  * Struct definition for all ZooKeeper proxy stats. @see stats_macros.h
diff --git a/source/extensions/filters/udp/udp_proxy/BUILD b/source/extensions/filters/udp/udp_proxy/BUILD
index ca7466ea964b..0704d744ad64 100644
--- a/source/extensions/filters/udp/udp_proxy/BUILD
+++ b/source/extensions/filters/udp/udp_proxy/BUILD
@@ -29,7 +29,7 @@ envoy_cc_extension(
     srcs = ["config.cc"],
     hdrs = ["config.h"],
     security_posture = "robust_to_untrusted_downstream",
-    status = "wip",
+    status = "alpha",
     deps = [
         ":udp_proxy_filter_lib",
         "//include/envoy/registry",
diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc
index ad2eef579090..8afb8035dbf5 100644
--- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc
+++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc
@@ -7,53 +7,169 @@ namespace Extensions {
 namespace UdpFilters {
 namespace UdpProxy {
 
+UdpProxyFilter::UdpProxyFilter(Network::UdpReadFilterCallbacks& callbacks,
+                               const UdpProxyFilterConfigSharedPtr& config)
+    : UdpListenerReadFilter(callbacks), config_(config),
+      cluster_update_callbacks_(
+          config->clusterManager().addThreadLocalClusterUpdateCallbacks(*this)) {
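+  // The target cluster may already exist when the filter is created; if so, attach to it
+  // immediately. Otherwise, attachment happens via onClusterAddOrUpdate() when it is added.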
+  Upstream::ThreadLocalCluster* cluster = config->clusterManager().get(config->cluster());
+  if (cluster != nullptr) {
+    onClusterAddOrUpdate(*cluster);
+  }
+}
+
+void UdpProxyFilter::onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) {
+  if (cluster.info()->name() != config_->cluster()) {
+    return;
+  }
+
+  ENVOY_LOG(debug, "udp proxy: attaching to cluster {}", cluster.info()->name());
+  ASSERT(cluster_info_ == absl::nullopt || &cluster_info_.value().cluster_ != &cluster);
+  cluster_info_.emplace(*this, cluster);
+}
+
+void UdpProxyFilter::onClusterRemoval(const std::string& cluster) {
+  if (cluster != config_->cluster()) {
+    return;
+  }
+
+  ENVOY_LOG(debug, "udp proxy: detaching from cluster {}", cluster);
+  cluster_info_.reset();
+}
+
 void UdpProxyFilter::onData(Network::UdpRecvData& data) {
+  if (!cluster_info_.has_value()) {
+    config_->stats().downstream_sess_no_route_.inc();
+    return;
+  }
+
+  cluster_info_.value().onData(data);
+}
+
+void UdpProxyFilter::onReceiveError(Api::IoError::IoErrorCode) {
+  config_->stats().downstream_sess_rx_errors_.inc();
+}
+
+UdpProxyFilter::ClusterInfo::ClusterInfo(UdpProxyFilter& filter,
+                                         Upstream::ThreadLocalCluster& cluster)
+    : filter_(filter), cluster_(cluster),
+      cluster_stats_(generateStats(cluster.info()->statsScope())),
+      member_update_cb_handle_(cluster.prioritySet().addMemberUpdateCb(
+          [this](const Upstream::HostVector&, const Upstream::HostVector& hosts_removed) {
+            for (const auto& host : hosts_removed) {
+              // This is similar to removeSession(), but differs slightly in that removeSession()
+              // also deletes the host-to-session map entry when no sessions are left. It would be
+              // nice to unify the logic, but that can be cleaned up later.
+              auto host_sessions_it = host_to_sessions_.find(host.get());
+              if (host_sessions_it != host_to_sessions_.end()) {
+                for (const auto& session : host_sessions_it->second) {
+                  ASSERT(sessions_.count(session) == 1);
+                  sessions_.erase(session);
+                }
+                host_to_sessions_.erase(host_sessions_it);
+              }
+            }
+          })) {}
+
+UdpProxyFilter::ClusterInfo::~ClusterInfo() {
+  member_update_cb_handle_->remove();
+  // Sanity check the session accounting. This is not as fast as a straight teardown, but this is
+  // not a performance-critical path.
+  while (!sessions_.empty()) {
+    removeSession(sessions_.begin()->get());
+  }
+  ASSERT(host_to_sessions_.empty());
+}
+
+void UdpProxyFilter::ClusterInfo::onData(Network::UdpRecvData& data) {
   const auto active_session_it = sessions_.find(data.addresses_);
   ActiveSession* active_session;
   if (active_session_it == sessions_.end()) {
-    // TODO(mattklein123): Session circuit breaker.
-    // TODO(mattklein123): Instead of looking up the cluster each time, keep track of it via
-    // cluster manager callbacks.
-    Upstream::ThreadLocalCluster* cluster = config_->getCluster();
-    // TODO(mattklein123): Handle the case where the cluster does not exist and add stat.
-    ASSERT(cluster != nullptr);
+    if (!cluster_.info()
+             ->resourceManager(Upstream::ResourcePriority::Default)
+             .connections()
+             .canCreate()) {
+      cluster_.info()->stats().upstream_cx_overflow_.inc();
+      return;
+    }
 
     // TODO(mattklein123): Pass a context and support hash based routing.
-    Upstream::HostConstSharedPtr host = cluster->loadBalancer().chooseHost(nullptr);
-    // TODO(mattklein123): Handle the case where the host does not exist.
-    ASSERT(host != nullptr);
+    Upstream::HostConstSharedPtr host = cluster_.loadBalancer().chooseHost(nullptr);
+    if (host == nullptr) {
+      cluster_.info()->stats().upstream_cx_none_healthy_.inc();
+      return;
+    }
 
-    auto new_session = std::make_unique<ActiveSession>(*this, std::move(data.addresses_), host);
-    active_session = new_session.get();
-    sessions_.emplace(std::move(new_session));
+    active_session = createSession(std::move(data.addresses_), host);
   } else {
-    // TODO(mattklein123): Handle the host going away going away or failing health checks.
     active_session = active_session_it->get();
+    if (active_session->host().health() == Upstream::Host::Health::Unhealthy) {
+      // If a host becomes unhealthy, we would ideally like to replace it with a new session to a
+      // healthy host. We may eventually want to make this behavior configurable, but for now this
+      // is the universal behavior.
+
+      // TODO(mattklein123): Pass a context and support hash based routing.
+      Upstream::HostConstSharedPtr host = cluster_.loadBalancer().chooseHost(nullptr);
+      if (host != nullptr && host->health() != Upstream::Host::Health::Unhealthy &&
+          host.get() != &active_session->host()) {
+        ENVOY_LOG(debug, "upstream session unhealthy, recreating the session");
+        removeSession(active_session);
+        active_session = createSession(std::move(data.addresses_), host);
+      } else {
+        // In this case we could not get a better host, so just keep using the current session.
+        ENVOY_LOG(trace, "upstream session unhealthy, but unable to get a better host");
+      }
+    }
   }
 
   active_session->write(*data.buffer_);
 }
 
-void UdpProxyFilter::onReceiveError(Api::IoError::IoErrorCode) {
-  config_->stats().downstream_sess_rx_errors_.inc();
+UdpProxyFilter::ActiveSession*
+UdpProxyFilter::ClusterInfo::createSession(Network::UdpRecvData::LocalPeerAddresses&& addresses,
+                                           const Upstream::HostConstSharedPtr& host) {
+  auto new_session = std::make_unique<ActiveSession>(*this, std::move(addresses), host);
+  auto new_session_ptr = new_session.get();
+  sessions_.emplace(std::move(new_session));
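+  // Also index the session by host so that host removal (handled in the member update callback)
+  // can tear down all of that host's sessions.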
+  host_to_sessions_[host.get()].emplace(new_session_ptr);
+  return new_session_ptr;
 }
 
-UdpProxyFilter::ActiveSession::ActiveSession(UdpProxyFilter& parent,
+void UdpProxyFilter::ClusterInfo::removeSession(const ActiveSession* session) {
+  // First remove from the host to sessions map.
+  ASSERT(host_to_sessions_[&session->host()].count(session) == 1);
+  auto host_sessions_it = host_to_sessions_.find(&session->host());
+  host_sessions_it->second.erase(session);
+  if (host_sessions_it->second.empty()) {
+    host_to_sessions_.erase(host_sessions_it);
+  }
+
+  // Now remove it from the primary map.
+  ASSERT(sessions_.count(session) == 1);
+  sessions_.erase(session);
+}
+
+UdpProxyFilter::ActiveSession::ActiveSession(ClusterInfo& cluster,
                                              Network::UdpRecvData::LocalPeerAddresses&& addresses,
                                              const Upstream::HostConstSharedPtr& host)
-    : parent_(parent), addresses_(std::move(addresses)), host_(host),
-      idle_timer_(parent.read_callbacks_->udpListener().dispatcher().createTimer(
+    : cluster_(cluster), addresses_(std::move(addresses)), host_(host),
+      idle_timer_(cluster.filter_.read_callbacks_->udpListener().dispatcher().createTimer(
           [this] { onIdleTimer(); })),
       // NOTE: The socket call can only fail due to memory/fd exhaustion. No local ephemeral port
       //       is bound until the first packet is sent to the upstream host.
-      io_handle_(parent.createIoHandle(host)),
-      socket_event_(parent.read_callbacks_->udpListener().dispatcher().createFileEvent(
+      io_handle_(cluster.filter_.createIoHandle(host)),
+      socket_event_(cluster.filter_.read_callbacks_->udpListener().dispatcher().createFileEvent(
           io_handle_->fd(), [this](uint32_t) { onReadReady(); }, Event::FileTriggerType::Edge,
           Event::FileReadyType::Read)) {
-  ENVOY_LOG(debug, "creating new session: downstream={} local={}", addresses_.peer_->asStringView(),
-            addresses_.local_->asStringView());
-  parent_.config_->stats().downstream_sess_total_.inc();
-  parent_.config_->stats().downstream_sess_active_.inc();
+  ENVOY_LOG(debug, "creating new session: downstream={} local={} upstream={}",
+            addresses_.peer_->asStringView(), addresses_.local_->asStringView(),
+            host->address()->asStringView());
+  cluster_.filter_.config_->stats().downstream_sess_total_.inc();
+  cluster_.filter_.config_->stats().downstream_sess_active_.inc();
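+  // Each UDP session consumes an upstream connection resource; this is what the circuit breaker
+  // check in ClusterInfo::onData() gates on.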
+  cluster_.cluster_.info()
+      ->resourceManager(Upstream::ResourcePriority::Default)
+      .connections()
+      .inc();
 
   // TODO(mattklein123): Enable dropped packets socket option. In general the Socket abstraction
   // does not work well right now for client sockets. It's too heavy weight and is aimed at listener
@@ -63,27 +179,32 @@ UdpProxyFilter::ActiveSession::ActiveSession(UdpProxyFilter& parent,
 }
 
 UdpProxyFilter::ActiveSession::~ActiveSession() {
-  parent_.config_->stats().downstream_sess_active_.dec();
+  cluster_.filter_.config_->stats().downstream_sess_active_.dec();
+  cluster_.cluster_.info()
+      ->resourceManager(Upstream::ResourcePriority::Default)
+      .connections()
+      .dec();
 }
 
 void UdpProxyFilter::ActiveSession::onIdleTimer() {
   ENVOY_LOG(debug, "session idle timeout: downstream={} local={}", addresses_.peer_->asStringView(),
             addresses_.local_->asStringView());
-  parent_.config_->stats().idle_timeout_.inc();
-  parent_.sessions_.erase(addresses_);
+  cluster_.filter_.config_->stats().idle_timeout_.inc();
+  cluster_.removeSession(this);
 }
 
 void UdpProxyFilter::ActiveSession::onReadReady() {
-  idle_timer_->enableTimer(parent_.config_->sessionTimeout());
+  idle_timer_->enableTimer(cluster_.filter_.config_->sessionTimeout());
 
   // TODO(mattklein123): We should not be passing *addresses_.local_ to this function as we are
   //                     not trying to populate the local address for received packets.
   uint32_t packets_dropped = 0;
   const Api::IoErrorPtr result = Network::Utility::readPacketsFromSocket(
-      *io_handle_, *addresses_.local_, *this, parent_.config_->timeSource(), packets_dropped);
+      *io_handle_, *addresses_.local_, *this, cluster_.filter_.config_->timeSource(),
+      packets_dropped);
   // TODO(mattklein123): Handle no error when we limit the number of packets read.
   if (result->getErrorCode() != Api::IoError::IoErrorCode::Again) {
-    // TODO(mattklein123): Upstream cluster RX error stat.
+    cluster_.cluster_stats_.sess_rx_errors_.inc();
   }
 }
 
@@ -91,10 +212,11 @@ void UdpProxyFilter::ActiveSession::write(const Buffer::Instance& buffer) {
   ENVOY_LOG(trace, "writing {} byte datagram upstream: downstream={} local={} upstream={}",
             buffer.length(), addresses_.peer_->asStringView(), addresses_.local_->asStringView(),
             host_->address()->asStringView());
-  parent_.config_->stats().downstream_sess_rx_bytes_.add(buffer.length());
-  parent_.config_->stats().downstream_sess_rx_datagrams_.inc();
+  const uint64_t buffer_length = buffer.length();
+  cluster_.filter_.config_->stats().downstream_sess_rx_bytes_.add(buffer_length);
+  cluster_.filter_.config_->stats().downstream_sess_rx_datagrams_.inc();
 
-  idle_timer_->enableTimer(parent_.config_->sessionTimeout());
+  idle_timer_->enableTimer(cluster_.filter_.config_->sessionTimeout());
 
   // NOTE: On the first write, a local ephemeral port is bound, and thus this write can fail due to
   //       port exhaustion.
@@ -103,9 +225,10 @@ void UdpProxyFilter::ActiveSession::write(const Buffer::Instance& buffer) {
   Api::IoCallUint64Result rc =
       Network::Utility::writeToSocket(*io_handle_, buffer, nullptr, *host_->address());
   if (!rc.ok()) {
-    // TODO(mattklein123): Upstream cluster TX error stat.
+    cluster_.cluster_stats_.sess_tx_errors_.inc();
   } else {
-    // TODO(mattklein123): Upstream cluster TX byte/datagram stats.
+    cluster_.cluster_stats_.sess_tx_datagrams_.inc();
+    cluster_.cluster_.info()->stats().upstream_cx_tx_bytes_total_.add(buffer_length);
   }
 }
 
@@ -117,15 +240,16 @@ void UdpProxyFilter::ActiveSession::processPacket(Network::Address::InstanceCons
             host_->address()->asStringView());
   const uint64_t buffer_length = buffer->length();
 
-  // TODO(mattklein123): Upstream cluster RX byte/datagram stats.
+  cluster_.cluster_stats_.sess_rx_datagrams_.inc();
+  cluster_.cluster_.info()->stats().upstream_cx_rx_bytes_total_.add(buffer_length);
 
   Network::UdpSendData data{addresses_.local_->ip(), *addresses_.peer_, *buffer};
-  const Api::IoCallUint64Result rc = parent_.read_callbacks_->udpListener().send(data);
+  const Api::IoCallUint64Result rc = cluster_.filter_.read_callbacks_->udpListener().send(data);
   if (!rc.ok()) {
-    parent_.config_->stats().downstream_sess_tx_errors_.inc();
+    cluster_.filter_.config_->stats().downstream_sess_tx_errors_.inc();
   } else {
-    parent_.config_->stats().downstream_sess_tx_bytes_.add(buffer_length);
-    parent_.config_->stats().downstream_sess_tx_datagrams_.inc();
+    cluster_.filter_.config_->stats().downstream_sess_tx_bytes_.add(buffer_length);
+    cluster_.filter_.config_->stats().downstream_sess_tx_datagrams_.inc();
   }
 }
 
diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h
index e248dafb75cc..480d339449cb 100644
--- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h
+++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h
@@ -10,15 +10,18 @@
 
 #include "absl/container/flat_hash_set.h"
 
+// TODO(mattklein123): UDP session access logging.
+
 namespace Envoy {
 namespace Extensions {
 namespace UdpFilters {
 namespace UdpProxy {
 
 /**
- * All UDP proxy stats. @see stats_macros.h
+ * All UDP proxy downstream stats. @see stats_macros.h
  */
-#define ALL_UDP_PROXY_STATS(COUNTER, GAUGE)                                                        \
+#define ALL_UDP_PROXY_DOWNSTREAM_STATS(COUNTER, GAUGE)                                             \
+  COUNTER(downstream_sess_no_route)                                                                \
   COUNTER(downstream_sess_rx_bytes)                                                                \
   COUNTER(downstream_sess_rx_datagrams)                                                            \
   COUNTER(downstream_sess_rx_errors)                                                               \
@@ -30,10 +33,26 @@ namespace UdpProxy {
   GAUGE(downstream_sess_active, Accumulate)
 
 /**
- * Struct definition for all UDP proxy stats. @see stats_macros.h
+ * Struct definition for all UDP proxy downstream stats. @see stats_macros.h
+ */
+struct UdpProxyDownstreamStats {
+  ALL_UDP_PROXY_DOWNSTREAM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)
+};
+
+/**
+ * All UDP proxy upstream cluster stats. @see stats_macros.h
+ */
+#define ALL_UDP_PROXY_UPSTREAM_STATS(COUNTER)                                                      \
+  COUNTER(sess_rx_datagrams)                                                                       \
+  COUNTER(sess_rx_errors)                                                                          \
+  COUNTER(sess_tx_datagrams)                                                                       \
+  COUNTER(sess_tx_errors)
+
+/**
+ * Struct definition for all UDP proxy upstream stats. @see stats_macros.h
  */
-struct UdpProxyStats {
-  ALL_UDP_PROXY_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT)
+struct UdpProxyUpstreamStats {
+  ALL_UDP_PROXY_UPSTREAM_STATS(GENERATE_COUNTER_STRUCT)
 };
 
 class UdpProxyFilterConfig {
@@ -45,38 +64,43 @@ class UdpProxyFilterConfig {
         session_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, idle_timeout, 60 * 1000)),
         stats_(generateStats(config.stat_prefix(), root_scope)) {}
 
-  Upstream::ThreadLocalCluster* getCluster() const { return cluster_manager_.get(cluster_); }
+  const std::string& cluster() const { return cluster_; }
+  Upstream::ClusterManager& clusterManager() const { return cluster_manager_; }
   std::chrono::milliseconds sessionTimeout() const { return session_timeout_; }
-  UdpProxyStats& stats() const { return stats_; }
+  UdpProxyDownstreamStats& stats() const { return stats_; }
   TimeSource& timeSource() const { return time_source_; }
 
 private:
-  static UdpProxyStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) {
+  static UdpProxyDownstreamStats generateStats(const std::string& stat_prefix,
+                                               Stats::Scope& scope) {
     const auto final_prefix = fmt::format("udp.{}", stat_prefix);
-    return {ALL_UDP_PROXY_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),
-                                POOL_GAUGE_PREFIX(scope, final_prefix))};
+    return {ALL_UDP_PROXY_DOWNSTREAM_STATS(POOL_COUNTER_PREFIX(scope, final_prefix),
+                                           POOL_GAUGE_PREFIX(scope, final_prefix))};
   }
 
   Upstream::ClusterManager& cluster_manager_;
   TimeSource& time_source_;
   const std::string cluster_;
   const std::chrono::milliseconds session_timeout_;
-  mutable UdpProxyStats stats_;
+  mutable UdpProxyDownstreamStats stats_;
 };
 
 using UdpProxyFilterConfigSharedPtr = std::shared_ptr<const UdpProxyFilterConfig>;
 
-class UdpProxyFilter : public Network::UdpListenerReadFilter, Logger::Loggable<Logger::Id::filter> {
+class UdpProxyFilter : public Network::UdpListenerReadFilter,
+                       public Upstream::ClusterUpdateCallbacks,
+                       Logger::Loggable<Logger::Id::filter> {
 public:
   UdpProxyFilter(Network::UdpReadFilterCallbacks& callbacks,
-                 const UdpProxyFilterConfigSharedPtr& config)
-      : UdpListenerReadFilter(callbacks), config_(config) {}
+                 const UdpProxyFilterConfigSharedPtr& config);
 
   // Network::UdpListenerReadFilter
   void onData(Network::UdpRecvData& data) override;
   void onReceiveError(Api::IoError::IoErrorCode error_code) override;
 
 private:
+  class ClusterInfo;
+
   /**
    * An active session is similar to a TCP connection. It binds a 4-tuple (downstream IP/port, local
    * IP/port) to a selected upstream host for the purpose of packet forwarding. Unlike a TCP
@@ -87,10 +111,11 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, Logger::Loggable<L
    */
   class ActiveSession : public Network::UdpPacketProcessor {
   public:
-    ActiveSession(UdpProxyFilter& parent, Network::UdpRecvData::LocalPeerAddresses&& addresses,
+    ActiveSession(ClusterInfo& parent, Network::UdpRecvData::LocalPeerAddresses&& addresses,
                   const Upstream::HostConstSharedPtr& host);
     ~ActiveSession();
-    const Network::UdpRecvData::LocalPeerAddresses& addresses() { return addresses_; }
+    const Network::UdpRecvData::LocalPeerAddresses& addresses() const { return addresses_; }
+    const Upstream::Host& host() const { return *host_; }
     void write(const Buffer::Instance& buffer);
 
   private:
@@ -108,7 +133,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, Logger::Loggable<L
       return Network::MAX_UDP_PACKET_SIZE;
     }
 
-    UdpProxyFilter& parent_;
+    ClusterInfo& cluster_;
     const Network::UdpRecvData::LocalPeerAddresses addresses_;
     const Upstream::HostConstSharedPtr host_;
     // TODO(mattklein123): Consider replacing an idle timer for each session with a last used
@@ -142,6 +167,9 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, Logger::Loggable<L
     size_t operator()(const ActiveSessionPtr& value) const {
       return absl::Hash<const Network::UdpRecvData::LocalPeerAddresses>()(value->addresses());
     }
+    size_t operator()(const ActiveSession* value) const {
+      return absl::Hash<const Network::UdpRecvData::LocalPeerAddresses>()(value->addresses());
+    }
   };
 
   struct HeterogeneousActiveSessionEqual {
@@ -155,6 +183,40 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, Logger::Loggable<L
     bool operator()(const ActiveSessionPtr& lhs, const ActiveSessionPtr& rhs) const {
       return lhs->addresses() == rhs->addresses();
     }
+    bool operator()(const ActiveSessionPtr& lhs, const ActiveSession* rhs) const {
+      return lhs->addresses() == rhs->addresses();
+    }
+  };
+
+  /**
+   * Wraps all cluster-specific UDP processing, including session tracking, stats, etc. In the
+   * future we will very likely support different types of routing to multiple upstream clusters.
+   */
+  class ClusterInfo {
+  public:
+    ClusterInfo(UdpProxyFilter& filter, Upstream::ThreadLocalCluster& cluster);
+    ~ClusterInfo();
+    void onData(Network::UdpRecvData& data);
+    void removeSession(const ActiveSession* session);
+
+    UdpProxyFilter& filter_;
+    Upstream::ThreadLocalCluster& cluster_;
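+    // Upstream session stats, created in the cluster's stats scope under the "udp." prefix.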
+    UdpProxyUpstreamStats cluster_stats_;
+
+  private:
+    ActiveSession* createSession(Network::UdpRecvData::LocalPeerAddresses&& addresses,
+                                 const Upstream::HostConstSharedPtr& host);
+    static UdpProxyUpstreamStats generateStats(Stats::Scope& scope) {
+      const auto final_prefix = "udp";
+      return {ALL_UDP_PROXY_UPSTREAM_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))};
+    }
+
+    Common::CallbackHandle* member_update_cb_handle_;
+    absl::flat_hash_set<ActiveSessionPtr, HeterogeneousActiveSessionHash,
+                        HeterogeneousActiveSessionEqual>
+        sessions_;
+    absl::flat_hash_map<const Upstream::Host*, absl::flat_hash_set<const ActiveSession*>>
+        host_to_sessions_;
   };
 
   virtual Network::IoHandlePtr createIoHandle(const Upstream::HostConstSharedPtr& host) {
@@ -162,10 +224,16 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, Logger::Loggable<L
     return host->address()->socket(Network::Address::SocketType::Datagram);
   }
 
+  // Upstream::ClusterUpdateCallbacks
+  void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) override;
+  void onClusterRemoval(const std::string& cluster_name) override;
+
   const UdpProxyFilterConfigSharedPtr config_;
-  absl::flat_hash_set<ActiveSessionPtr, HeterogeneousActiveSessionHash,
-                      HeterogeneousActiveSessionEqual>
-      sessions_;
+  const Upstream::ClusterUpdateCallbacksHandlePtr cluster_update_callbacks_;
+  // Right now we support routing to a single cluster. It is highly likely that in the future we
+  // will support additional routing options, such as filter chain matching, weighting, etc.
+  absl::optional<ClusterInfo> cluster_info_;
 };
 
 } // namespace UdpProxy
diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc
index 22a7bd9be07f..0ba52b6dbd4f 100644
--- a/source/extensions/health_checkers/redis/redis.cc
+++ b/source/extensions/health_checkers/redis/redis.cc
@@ -113,7 +113,7 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onFailure() {
 }
 
 bool RedisHealthChecker::RedisActiveHealthCheckSession::onRedirection(
-    const NetworkFilters::Common::Redis::RespValue&) {
+    NetworkFilters::Common::Redis::RespValuePtr&&, const std::string&, bool) {
   // Treat any redirection error response from a Redis server as success.
   current_request_ = nullptr;
   handleSuccess();
diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h
index 5268e6e8e404..25f2b562134a 100644
--- a/source/extensions/health_checkers/redis/redis.h
+++ b/source/extensions/health_checkers/redis/redis.h
@@ -50,7 +50,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase {
   struct RedisActiveHealthCheckSession
       : public ActiveHealthCheckSession,
         public Extensions::NetworkFilters::Common::Redis::Client::Config,
-        public Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks,
+        public Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks,
         public Network::ConnectionCallbacks {
     RedisActiveHealthCheckSession(RedisHealthChecker& parent, const Upstream::HostSharedPtr& host);
     ~RedisActiveHealthCheckSession() override;
@@ -85,10 +85,11 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase {
     uint32_t maxUpstreamUnknownConnections() const override { return 0; }
     bool enableCommandStats() const override { return false; }
 
-    // Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks
+    // Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks
     void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override;
     void onFailure() override;
-    bool onRedirection(const NetworkFilters::Common::Redis::RespValue& value) override;
+    bool onRedirection(NetworkFilters::Common::Redis::RespValuePtr&&, const std::string&,
+                       bool) override;
 
     // Network::ConnectionCallbacks
     void onEvent(Network::ConnectionEvent event) override;
diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD
index 7dc560591820..ae124d4c3cf7 100644
--- a/source/extensions/quic_listeners/quiche/BUILD
+++ b/source/extensions/quic_listeners/quiche/BUILD
@@ -111,8 +111,12 @@ envoy_cc_library(
     hdrs = ["codec_impl.h"],
     tags = ["nofips"],
     deps = [
+        ":envoy_quic_client_session_lib",
         ":envoy_quic_server_session_lib",
         "//include/envoy/http:codec_interface",
+        "//include/envoy/registry",
+        "//source/common/http/http3:quic_codec_factory_lib",
+        "//source/common/http/http3:well_known_names",
         "@com_googlesource_quiche//:quic_core_http_spdy_session_lib",
     ],
 )
@@ -131,7 +135,7 @@ envoy_cc_library(
         "//source/common/common:assert_lib",
         "//source/common/common:empty_string",
         "//source/common/http:header_map_lib",
-        "//source/common/network:filter_manager_lib",
+        "//source/common/network:connection_base_lib",
         "//source/common/stream_info:stream_info_lib",
     ],
 )
@@ -159,6 +163,30 @@ envoy_cc_library(
     ],
 )
 
+envoy_cc_library(
+    name = "envoy_quic_client_session_lib",
+    srcs = [
+        "envoy_quic_client_session.cc",
+        "envoy_quic_client_stream.cc",
+    ],
+    hdrs = [
+        "envoy_quic_client_session.h",
+        "envoy_quic_client_stream.h",
+    ],
+    tags = ["nofips"],
+    deps = [
+        ":envoy_quic_client_connection_lib",
+        ":envoy_quic_stream_lib",
+        ":envoy_quic_utils_lib",
+        ":quic_filter_manager_connection_lib",
+        "//source/common/buffer:buffer_lib",
+        "//source/common/common:assert_lib",
+        "//source/common/http:header_map_lib",
+        "//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_storage_impl_lib",
+        "@com_googlesource_quiche//:quic_core_http_client_lib",
+    ],
+)
+
 envoy_cc_library(
     name = "quic_io_handle_wrapper_lib",
     hdrs = ["quic_io_handle_wrapper.h"],
@@ -194,6 +222,19 @@ envoy_cc_library(
     ],
 )
 
+envoy_cc_library(
+    name = "envoy_quic_client_connection_lib",
+    srcs = ["envoy_quic_client_connection.cc"],
+    hdrs = ["envoy_quic_client_connection.h"],
+    tags = ["nofips"],
+    deps = [
+        ":envoy_quic_connection_lib",
+        ":envoy_quic_packet_writer_lib",
+        "//include/envoy/event:dispatcher_interface",
+        "//source/common/network:socket_option_factory_lib",
+    ],
+)
+
 envoy_cc_library(
     name = "envoy_quic_dispatcher_lib",
     srcs = ["envoy_quic_dispatcher.cc"],
@@ -258,6 +299,8 @@ envoy_cc_library(
         "//include/envoy/http:codec_interface",
         "//source/common/http:header_map_lib",
         "//source/common/network:address_lib",
+        "//source/common/network:listen_socket_lib",
+        "//source/common/network:socket_option_factory_lib",
         "@com_googlesource_quiche//:quic_core_http_header_list_lib",
     ],
 )
diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc
index 8a3c5a3803d1..68c519841a76 100644
--- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc
+++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc
@@ -34,6 +34,8 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher,
       quic::QuicRandom::GetInstance(), std::make_unique<EnvoyQuicFakeProofSource>(),
       quic::KeyExchangeSource::Default());
   auto connection_helper = std::make_unique<EnvoyQuicConnectionHelper>(dispatcher_);
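+  // Add a default server config so that the crypto server config can complete handshakes.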
+  crypto_config_->AddDefaultConfig(random, connection_helper->GetClock(),
+                                   quic::QuicCryptoServerConfig::ConfigOptions());
   auto alarm_factory =
       std::make_unique<EnvoyQuicAlarmFactory>(dispatcher_, *connection_helper->GetClock());
   quic_dispatcher_ = std::make_unique<EnvoyQuicDispatcher>(
@@ -43,6 +45,8 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher,
   quic_dispatcher_->InitializeWithWriter(new EnvoyQuicPacketWriter(listen_socket_));
 }
 
+ActiveQuicListener::~ActiveQuicListener() { onListenerShutdown(); }
+
 void ActiveQuicListener::onListenerShutdown() {
   ENVOY_LOG(info, "Quic listener {} shutdown.", config_.name());
   quic_dispatcher_->Shutdown();
@@ -55,7 +59,7 @@ void ActiveQuicListener::onData(Network::UdpRecvData& data) {
       envoyAddressInstanceToQuicSocketAddress(data.addresses_.local_));
   quic::QuicTime timestamp =
       quic::QuicTime::Zero() +
-      quic::QuicTime::Delta::FromMilliseconds(std::chrono::duration_cast<std::chrono::milliseconds>(
+      quic::QuicTime::Delta::FromMicroseconds(std::chrono::duration_cast<std::chrono::microseconds>(
                                                   data.receive_time_.time_since_epoch())
                                                   .count());
   uint64_t num_slice = data.buffer_->getRawSlices(nullptr, 0);
@@ -64,7 +68,7 @@ void ActiveQuicListener::onData(Network::UdpRecvData& data) {
   data.buffer_->getRawSlices(&slice, 1);
   // TODO(danzh): pass in TTL and UDP header.
   quic::QuicReceivedPacket packet(reinterpret_cast<char*>(slice.mem_), slice.len_, timestamp,
-                                  /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/true,
+                                  /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false,
                                   /*packet_headers=*/nullptr, /*headers_length=*/0,
                                   /*owns_header_buffer*/ false);
   quic_dispatcher_->ProcessPacket(self_address, peer_address, packet);
diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h
index c4e52ae7d319..9c5c390712fb 100644
--- a/source/extensions/quic_listeners/quiche/active_quic_listener.h
+++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h
@@ -26,6 +26,8 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks,
                      Network::SocketSharedPtr listen_socket,
                      Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config);
 
+  ~ActiveQuicListener() override;
+
   // TODO(#7465): Make this a callback.
   void onListenerShutdown();
 
diff --git a/source/extensions/quic_listeners/quiche/codec_impl.cc b/source/extensions/quic_listeners/quiche/codec_impl.cc
index cd082427b0ff..4af18bd949b3 100644
--- a/source/extensions/quic_listeners/quiche/codec_impl.cc
+++ b/source/extensions/quic_listeners/quiche/codec_impl.cc
@@ -1,12 +1,39 @@
 #include "extensions/quic_listeners/quiche/codec_impl.h"
 
+#include "extensions/quic_listeners/quiche/envoy_quic_client_stream.h"
 #include "extensions/quic_listeners/quiche/envoy_quic_server_stream.h"
 
 namespace Envoy {
 namespace Quic {
 
+// Converts a QuicStream instance to an EnvoyQuicStream instance. The current stream
+// implementation inherits from both interfaces, with the former providing the QUIC interface and
+// the latter providing the Envoy interface.
+EnvoyQuicStream* quicStreamToEnvoyStream(quic::QuicStream* stream) {
+  return dynamic_cast<EnvoyQuicStream*>(stream);
+}
+
 bool QuicHttpConnectionImplBase::wantsToWrite() { return quic_session_.HasDataToWrite(); }
 
+void QuicHttpConnectionImplBase::runWatermarkCallbacksForEachStream(
+    quic::QuicSmallMap<quic::QuicStreamId, std::unique_ptr<quic::QuicStream>, 10>& stream_map,
+    bool high_watermark) {
+  for (auto& it : stream_map) {
+    if (!it.second->is_static()) {
+      // Only call watermark callbacks on non-static streams; the QUIC static streams (the crypto
+      // stream and the Google QUIC headers stream) are skipped.
+      auto stream = quicStreamToEnvoyStream(it.second.get());
+      if (high_watermark) {
+        ENVOY_LOG(debug, "runHighWatermarkCallbacks on stream {}", it.first);
+        stream->runHighWatermarkCallbacks();
+      } else {
+        ENVOY_LOG(debug, "runLowWatermarkCallbacks on stream {}", it.first);
+        stream->runLowWatermarkCallbacks();
+      }
+    }
+  }
+}
+
 QuicHttpServerConnectionImpl::QuicHttpServerConnectionImpl(
     EnvoyQuicServerSession& quic_session, Http::ServerConnectionCallbacks& callbacks)
     : QuicHttpConnectionImplBase(quic_session), quic_server_session_(quic_session) {
@@ -14,28 +41,63 @@ QuicHttpServerConnectionImpl::QuicHttpServerConnectionImpl(
 }
 
 void QuicHttpServerConnectionImpl::onUnderlyingConnectionAboveWriteBufferHighWatermark() {
-  for (auto& it : quic_server_session_.stream_map()) {
-    if (!it.second->is_static()) {
-      // Only call watermark callbacks on non QUIC static streams which are
-      // crypto stream and Google QUIC headers stream.
-      ENVOY_LOG(debug, "runHighWatermarkCallbacks on stream {}", it.first);
-      dynamic_cast<EnvoyQuicServerStream*>(it.second.get())->runHighWatermarkCallbacks();
-    }
-  }
+  runWatermarkCallbacksForEachStream(quic_server_session_.stream_map(), true);
 }
 
 void QuicHttpServerConnectionImpl::onUnderlyingConnectionBelowWriteBufferLowWatermark() {
-  for (const auto& it : quic_server_session_.stream_map()) {
-    if (!it.second->is_static()) {
-      ENVOY_LOG(debug, "runLowWatermarkCallbacks on stream {}", it.first);
-      dynamic_cast<EnvoyQuicServerStream*>(it.second.get())->runLowWatermarkCallbacks();
-    }
-  }
+  runWatermarkCallbacksForEachStream(quic_server_session_.stream_map(), false);
 }
 
 void QuicHttpServerConnectionImpl::goAway() {
   quic_server_session_.SendGoAway(quic::QUIC_PEER_GOING_AWAY, "server shutdown imminent");
 }
 
+QuicHttpClientConnectionImpl::QuicHttpClientConnectionImpl(EnvoyQuicClientSession& session,
+                                                           Http::ConnectionCallbacks& callbacks)
+    : QuicHttpConnectionImplBase(session), quic_client_session_(session) {
+  session.setHttpConnectionCallbacks(callbacks);
+}
+
+Http::StreamEncoder&
+QuicHttpClientConnectionImpl::newStream(Http::StreamDecoder& response_decoder) {
+  EnvoyQuicStream* stream =
+      quicStreamToEnvoyStream(quic_client_session_.CreateOutgoingBidirectionalStream());
+  // TODO(danzh) handle stream creation failure gracefully. This can happen when there are already
+  // 100 open streams. In such a case, the caller should hold back stream creation until an
+  // existing stream is closed.
+  ASSERT(stream != nullptr, "Failed to create QUIC stream.");
+  stream->setDecoder(response_decoder);
+  if (quic_client_session_.aboveHighWatermark()) {
+    stream->runHighWatermarkCallbacks();
+  }
+  return *stream;
+}
+
+void QuicHttpClientConnectionImpl::onUnderlyingConnectionAboveWriteBufferHighWatermark() {
+  runWatermarkCallbacksForEachStream(quic_client_session_.stream_map(), true);
+}
+
+void QuicHttpClientConnectionImpl::onUnderlyingConnectionBelowWriteBufferLowWatermark() {
+  runWatermarkCallbacksForEachStream(quic_client_session_.stream_map(), false);
+}
+
+std::unique_ptr<Http::ClientConnection>
+QuicHttpClientConnectionFactoryImpl::createQuicClientConnection(
+    Network::Connection& connection, Http::ConnectionCallbacks& callbacks) {
+  return std::make_unique<Quic::QuicHttpClientConnectionImpl>(
+      dynamic_cast<Quic::EnvoyQuicClientSession&>(connection), callbacks);
+}
+
+std::unique_ptr<Http::ServerConnection>
+QuicHttpServerConnectionFactoryImpl::createQuicServerConnection(
+    Network::Connection& connection, Http::ConnectionCallbacks& callbacks) {
+  return std::make_unique<Quic::QuicHttpServerConnectionImpl>(
+      dynamic_cast<Quic::EnvoyQuicServerSession&>(connection),
+      dynamic_cast<Http::ServerConnectionCallbacks&>(callbacks));
+}
+
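+// Register the codec factories under the QUICHE codec name (see name() above) so they can be
+// looked up from the registry when creating HTTP/3 codecs.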
+REGISTER_FACTORY(QuicHttpClientConnectionFactoryImpl, Http::QuicHttpClientConnectionFactory);
+REGISTER_FACTORY(QuicHttpServerConnectionFactoryImpl, Http::QuicHttpServerConnectionFactory);
+
 } // namespace Quic
 } // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/codec_impl.h b/source/extensions/quic_listeners/quiche/codec_impl.h
index 07b2f2042ac1..9394ec11e198 100644
--- a/source/extensions/quic_listeners/quiche/codec_impl.h
+++ b/source/extensions/quic_listeners/quiche/codec_impl.h
@@ -1,8 +1,12 @@
 #include "envoy/http/codec.h"
+#include "envoy/registry/registry.h"
 
 #include "common/common/assert.h"
 #include "common/common/logger.h"
+#include "common/http/http3/quic_codec_factory.h"
+#include "common/http/http3/well_known_names.h"
 
+#include "extensions/quic_listeners/quiche/envoy_quic_client_session.h"
 #include "extensions/quic_listeners/quiche/envoy_quic_server_session.h"
 
 namespace Envoy {
@@ -21,17 +25,15 @@ class QuicHttpConnectionImplBase : public virtual Http::Connection,
     // Bypassed. QUIC connection already hands all data to streams.
     NOT_REACHED_GCOVR_EXCL_LINE;
   }
-  Http::Protocol protocol() override {
-    // From HCM's view, QUIC should behave the same as Http2, only the stats
-    // should be different.
-    // TODO(danzh) add Http3 enum value for QUIC.
-    return Http::Protocol::Http2;
-  }
-
+  Http::Protocol protocol() override { return Http::Protocol::Http3; }
   // Returns true if the session has data to send but queued in connection or
   // stream send buffer.
   bool wantsToWrite() override;
 
+  void runWatermarkCallbacksForEachStream(
+      quic::QuicSmallMap<quic::QuicStreamId, std::unique_ptr<quic::QuicStream>, 10>& stream_map,
+      bool high_watermark);
+
 protected:
   quic::QuicSpdySession& quic_session_;
 };
@@ -55,5 +57,47 @@ class QuicHttpServerConnectionImpl : public QuicHttpConnectionImplBase,
   EnvoyQuicServerSession& quic_server_session_;
 };
 
+class QuicHttpClientConnectionImpl : public QuicHttpConnectionImplBase,
+                                     public Http::ClientConnection {
+public:
+  QuicHttpClientConnectionImpl(EnvoyQuicClientSession& session,
+                               Http::ConnectionCallbacks& callbacks);
+
+  // Http::ClientConnection
+  Http::StreamEncoder& newStream(Http::StreamDecoder& response_decoder) override;
+
+  // Http::Connection
+  void goAway() override { NOT_REACHED_GCOVR_EXCL_LINE; }
+  void shutdownNotice() override { NOT_REACHED_GCOVR_EXCL_LINE; }
+  void onUnderlyingConnectionAboveWriteBufferHighWatermark() override;
+  void onUnderlyingConnectionBelowWriteBufferLowWatermark() override;
+
+private:
+  EnvoyQuicClientSession& quic_client_session_;
+};
+
+// A factory to create QuicHttpClientConnection.
+class QuicHttpClientConnectionFactoryImpl : public Http::QuicHttpClientConnectionFactory {
+public:
+  std::unique_ptr<Http::ClientConnection>
+  createQuicClientConnection(Network::Connection& connection,
+                             Http::ConnectionCallbacks& callbacks) override;
+
+  std::string name() const override { return Http::QuicCodecNames::get().Quiche; }
+};
+
+// A factory to create QuicHttpServerConnection.
+class QuicHttpServerConnectionFactoryImpl : public Http::QuicHttpServerConnectionFactory {
+public:
+  std::unique_ptr<Http::ServerConnection>
+  createQuicServerConnection(Network::Connection& connection,
+                             Http::ConnectionCallbacks& callbacks) override;
+
+  std::string name() const override { return Http::QuicCodecNames::get().Quiche; }
+};
+
+DECLARE_FACTORY(QuicHttpClientConnectionFactoryImpl);
+DECLARE_FACTORY(QuicHttpServerConnectionFactoryImpl);
+
 } // namespace Quic
 } // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h
index 4152f4c101c3..115e68d1f882 100644
--- a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h
@@ -20,7 +20,8 @@ class EnvoyQuicAlarm : public quic::QuicAlarm {
   EnvoyQuicAlarm(Event::Dispatcher& dispatcher, const quic::QuicClock& clock,
                  quic::QuicArenaScopedPtr<quic::QuicAlarm::Delegate> delegate);
 
-  ~EnvoyQuicAlarm() override { ASSERT(!IsSet()); };
+  // TimerImpl destruction deletes any in-flight alarm firing event.
+  ~EnvoyQuicAlarm() override {}
 
   // quic::QuicAlarm
   void CancelImpl() override;
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc
new file mode 100644
index 000000000000..a3bd4a92d394
--- /dev/null
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc
@@ -0,0 +1,116 @@
+#include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h"
+
+#include "common/network/listen_socket_impl.h"
+#include "common/network/socket_option_factory.h"
+
+#include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_utils.h"
+#include "extensions/transport_sockets/well_known_names.h"
+
+namespace Envoy {
+namespace Quic {
+
+EnvoyQuicClientConnection::EnvoyQuicClientConnection(
+    const quic::QuicConnectionId& server_connection_id,
+    Network::Address::InstanceConstSharedPtr& initial_peer_address,
+    quic::QuicConnectionHelperInterface& helper, quic::QuicAlarmFactory& alarm_factory,
+    const quic::ParsedQuicVersionVector& supported_versions,
+    Network::Address::InstanceConstSharedPtr local_addr, Event::Dispatcher& dispatcher,
+    const Network::ConnectionSocket::OptionsSharedPtr& options)
+    : EnvoyQuicClientConnection(server_connection_id, helper, alarm_factory, supported_versions,
+                                dispatcher,
+                                createConnectionSocket(initial_peer_address, local_addr, options)) {
+}
+
+EnvoyQuicClientConnection::EnvoyQuicClientConnection(
+    const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper,
+    quic::QuicAlarmFactory& alarm_factory, const quic::ParsedQuicVersionVector& supported_versions,
+    Event::Dispatcher& dispatcher, Network::ConnectionSocketPtr&& connection_socket)
+    : EnvoyQuicClientConnection(server_connection_id, helper, alarm_factory,
+                                new EnvoyQuicPacketWriter(*connection_socket), true,
+                                supported_versions, dispatcher, std::move(connection_socket)) {}
+
+EnvoyQuicClientConnection::EnvoyQuicClientConnection(
+    const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper,
+    quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer, bool owns_writer,
+    const quic::ParsedQuicVersionVector& supported_versions, Event::Dispatcher& dispatcher,
+    Network::ConnectionSocketPtr&& connection_socket)
+    : EnvoyQuicConnection(
+          server_connection_id,
+          envoyAddressInstanceToQuicSocketAddress(connection_socket->remoteAddress()), helper,
+          alarm_factory, writer, owns_writer, quic::Perspective::IS_CLIENT, supported_versions,
+          std::move(connection_socket)),
+      dispatcher_(dispatcher) {}
+
+void EnvoyQuicClientConnection::processPacket(
+    Network::Address::InstanceConstSharedPtr local_address,
+    Network::Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer,
+    MonotonicTime receive_time) {
+  if (!connected()) {
+    return;
+  }
+  quic::QuicTime timestamp =
+      quic::QuicTime::Zero() +
+      quic::QuicTime::Delta::FromMicroseconds(
+          std::chrono::duration_cast<std::chrono::microseconds>(receive_time.time_since_epoch())
+              .count());
+  uint64_t num_slice = buffer->getRawSlices(nullptr, 0);
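+  // Each received datagram is expected to be delivered in a single contiguous slice.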
+  ASSERT(num_slice == 1);
+  Buffer::RawSlice slice;
+  buffer->getRawSlices(&slice, 1);
+  quic::QuicReceivedPacket packet(reinterpret_cast<char*>(slice.mem_), slice.len_, timestamp,
+                                  /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false,
+                                  /*packet_headers=*/nullptr, /*headers_length=*/0,
+                                  /*owns_header_buffer*/ false);
+  ProcessUdpPacket(envoyAddressInstanceToQuicSocketAddress(local_address),
+                   envoyAddressInstanceToQuicSocketAddress(peer_address), packet);
+}
+
+uint64_t EnvoyQuicClientConnection::maxPacketSize() const {
+  // TODO(danzh) make this variable configurable to support jumbo frames.
+  return Network::MAX_UDP_PACKET_SIZE;
+}
+
+void EnvoyQuicClientConnection::setUpConnectionSocket() {
+  if (connectionSocket()->ioHandle().isOpen()) {
+    file_event_ = dispatcher_.createFileEvent(
+        connectionSocket()->ioHandle().fd(),
+        [this](uint32_t events) -> void { onFileEvent(events); }, Event::FileTriggerType::Edge,
+        Event::FileReadyType::Read | Event::FileReadyType::Write);
+
+    if (!Network::Socket::applyOptions(connectionSocket()->options(), *connectionSocket(),
+                                       envoy::api::v2::core::SocketOption::STATE_LISTENING)) {
+      ENVOY_CONN_LOG(error, "Failed to apply listening options", *this);
+      connectionSocket()->close();
+    }
+  }
+  if (!connectionSocket()->ioHandle().isOpen()) {
+    CloseConnection(quic::QUIC_CONNECTION_CANCELLED, "Failed to set up connection socket.",
+                    quic::ConnectionCloseBehavior::SILENT_CLOSE);
+  }
+}
+
+void EnvoyQuicClientConnection::onFileEvent(uint32_t events) {
+  ENVOY_CONN_LOG(trace, "socket event: {}", *this, events);
+  ASSERT(events & (Event::FileReadyType::Read | Event::FileReadyType::Write));
+
+  if (events & Event::FileReadyType::Write) {
+    OnCanWrite();
+  }
+
+  // It's possible for a write event callback to close the connection; in that case, skip read
+  // event processing.
+  if (connected() && (events & Event::FileReadyType::Read)) {
+    Api::IoErrorPtr err = Network::Utility::readPacketsFromSocket(
+        connectionSocket()->ioHandle(), *connectionSocket()->localAddress(), *this,
+        dispatcher_.timeSource(), packets_dropped_);
+    // TODO(danzh): Handle no error when we limit the number of packets read.
+    if (err->getErrorCode() != Api::IoError::IoErrorCode::Again) {
+      ENVOY_CONN_LOG(error, "recvmsg result {}: {}", *this, static_cast<int>(err->getErrorCode()),
+                     err->getErrorDetails());
+    }
+  }
+}
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.h
new file mode 100644
index 000000000000..8ee2f22dad2e
--- /dev/null
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.h
@@ -0,0 +1,59 @@
+#pragma once
+
+#include "envoy/event/dispatcher.h"
+
+#include "common/network/utility.h"
+
+#include "extensions/quic_listeners/quiche/envoy_quic_connection.h"
+
+namespace Envoy {
+namespace Quic {
+
+// A client QuicConnection instance managing its own file events.
+class EnvoyQuicClientConnection : public EnvoyQuicConnection, public Network::UdpPacketProcessor {
+public:
+  // A connection socket will be created with the given |local_addr|. If no binding port is
+  // provided in |local_addr|, a random port is picked.
+  EnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,
+                            Network::Address::InstanceConstSharedPtr& initial_peer_address,
+                            quic::QuicConnectionHelperInterface& helper,
+                            quic::QuicAlarmFactory& alarm_factory,
+                            const quic::ParsedQuicVersionVector& supported_versions,
+                            Network::Address::InstanceConstSharedPtr local_addr,
+                            Event::Dispatcher& dispatcher,
+                            const Network::ConnectionSocket::OptionsSharedPtr& options);
+
+  EnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,
+                            quic::QuicConnectionHelperInterface& helper,
+                            quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer,
+                            bool owns_writer,
+                            const quic::ParsedQuicVersionVector& supported_versions,
+                            Event::Dispatcher& dispatcher,
+                            Network::ConnectionSocketPtr&& connection_socket);
+
+  // Network::UdpPacketProcessor
+  void processPacket(Network::Address::InstanceConstSharedPtr local_address,
+                     Network::Address::InstanceConstSharedPtr peer_address,
+                     Buffer::InstancePtr buffer, MonotonicTime receive_time) override;
+  uint64_t maxPacketSize() const override;
+
+  // Register file event and apply socket options.
+  void setUpConnectionSocket();
+
+private:
+  EnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,
+                            quic::QuicConnectionHelperInterface& helper,
+                            quic::QuicAlarmFactory& alarm_factory,
+                            const quic::ParsedQuicVersionVector& supported_versions,
+                            Event::Dispatcher& dispatcher,
+                            Network::ConnectionSocketPtr&& connection_socket);
+
+  void onFileEvent(uint32_t events);
+
+  uint32_t packets_dropped_{0};
+  Event::Dispatcher& dispatcher_;
+  Event::FileEventPtr file_event_;
+};
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc
new file mode 100644
index 000000000000..d600055c5c8b
--- /dev/null
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc
@@ -0,0 +1,77 @@
+#include "extensions/quic_listeners/quiche/envoy_quic_client_session.h"
+
+namespace Envoy {
+namespace Quic {
+
+EnvoyQuicClientSession::EnvoyQuicClientSession(
+    const quic::QuicConfig& config, const quic::ParsedQuicVersionVector& supported_versions,
+    std::unique_ptr<EnvoyQuicClientConnection> connection, const quic::QuicServerId& server_id,
+    quic::QuicCryptoClientConfig* crypto_config,
+    quic::QuicClientPushPromiseIndex* push_promise_index, Event::Dispatcher& dispatcher,
+    uint32_t send_buffer_limit)
+    : QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit),
+      quic::QuicSpdyClientSession(config, supported_versions, connection.release(), server_id,
+                                  crypto_config, push_promise_index) {}
+
+EnvoyQuicClientSession::~EnvoyQuicClientSession() {
+  ASSERT(!connection()->connected());
+  quic_connection_ = nullptr;
+}
+
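+// Returns the SNI sent to the server during the crypto handshake.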
+absl::string_view EnvoyQuicClientSession::requestedServerName() const {
+  return {GetCryptoStream()->crypto_negotiated_params().sni};
+}
+
+void EnvoyQuicClientSession::connect() {
+  dynamic_cast<EnvoyQuicClientConnection*>(quic_connection_)->setUpConnectionSocket();
+  // Start version negotiation and the crypto handshake, during which the connection may fail if
+  // the server doesn't support the single supported version.
+  CryptoConnect();
+  SetMaxAllowedPushId(0u);
+}
+
+void EnvoyQuicClientSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame,
+                                                quic::ConnectionCloseSource source) {
+  quic::QuicSpdyClientSession::OnConnectionClosed(frame, source);
+  onConnectionCloseEvent(frame, source);
+}
+
+void EnvoyQuicClientSession::Initialize() {
+  quic::QuicSpdyClientSession::Initialize();
+  quic_connection_->setEnvoyConnection(*this);
+}
+
+void EnvoyQuicClientSession::OnGoAway(const quic::QuicGoAwayFrame& frame) {
+  ENVOY_CONN_LOG(debug, "GOAWAY received with error {}: {}", *this,
+                 quic::QuicErrorCodeToString(frame.error_code), frame.reason_phrase);
+  quic::QuicSpdyClientSession::OnGoAway(frame);
+  if (http_connection_callbacks_ != nullptr) {
+    http_connection_callbacks_->onGoAway();
+  }
+}
+
+void EnvoyQuicClientSession::OnCryptoHandshakeEvent(CryptoHandshakeEvent event) {
+  quic::QuicSpdyClientSession::OnCryptoHandshakeEvent(event);
+  if (event == HANDSHAKE_CONFIRMED) {
+    raiseConnectionEvent(Network::ConnectionEvent::Connected);
+  }
+}
+
+std::unique_ptr<quic::QuicSpdyClientStream> EnvoyQuicClientSession::CreateClientStream() {
+  return std::make_unique<EnvoyQuicClientStream>(GetNextOutgoingBidirectionalStreamId(), this,
+                                                 quic::BIDIRECTIONAL);
+}
+
+quic::QuicSpdyStream* EnvoyQuicClientSession::CreateIncomingStream(quic::QuicStreamId /*id*/) {
+  // Disallow server-initiated streams.
+  NOT_REACHED_GCOVR_EXCL_LINE;
+}
+
+quic::QuicSpdyStream*
+EnvoyQuicClientSession::CreateIncomingStream(quic::PendingStream* /*pending*/) {
+  // Disallow server-initiated streams.
+  NOT_REACHED_GCOVR_EXCL_LINE;
+}
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h
new file mode 100644
index 000000000000..1bebce79ed2b
--- /dev/null
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h
@@ -0,0 +1,76 @@
+#pragma once
+
+#pragma GCC diagnostic push
+// QUICHE allows unused parameters.
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+// QUICHE uses offsetof().
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#pragma GCC diagnostic ignored "-Wtype-limits"
+
+#include "quiche/quic/core/http/quic_spdy_client_session.h"
+
+#pragma GCC diagnostic pop
+
+#include "extensions/quic_listeners/quiche/envoy_quic_client_stream.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h"
+#include "extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h"
+
+namespace Envoy {
+namespace Quic {
+
+// Acts as a Network::ClientConnection to the ClientCodec.
+// TODO(danzh) This class doesn't need to inherit the Network::FilterManager interface, but it
+// does need all of the other Network::Connection implementation in
+// QuicFilterManagerConnectionImpl. Refactor QuicFilterManagerConnectionImpl to move the
+// FilterManager interface to EnvoyQuicServerSession.
+class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl,
+                               public quic::QuicSpdyClientSession,
+                               public Network::ClientConnection {
+public:
+  EnvoyQuicClientSession(const quic::QuicConfig& config,
+                         const quic::ParsedQuicVersionVector& supported_versions,
+                         std::unique_ptr<EnvoyQuicClientConnection> connection,
+                         const quic::QuicServerId& server_id,
+                         quic::QuicCryptoClientConfig* crypto_config,
+                         quic::QuicClientPushPromiseIndex* push_promise_index,
+                         Event::Dispatcher& dispatcher, uint32_t send_buffer_limit);
+
+  ~EnvoyQuicClientSession() override;
+
+  // Called by QuicHttpClientConnectionImpl before creating data streams.
+  void setHttpConnectionCallbacks(Http::ConnectionCallbacks& callbacks) {
+    http_connection_callbacks_ = &callbacks;
+  }
+
+  // Network::Connection
+  absl::string_view requestedServerName() const override;
+
+  // Network::ClientConnection
+  // Set up socket and start handshake.
+  void connect() override;
+
+  // quic::QuicSession
+  void OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame,
+                          quic::ConnectionCloseSource source) override;
+  void Initialize() override;
+  void OnGoAway(const quic::QuicGoAwayFrame& frame) override;
+  // quic::QuicSpdyClientSessionBase
+  void OnCryptoHandshakeEvent(CryptoHandshakeEvent event) override;
+
+  using quic::QuicSpdyClientSession::stream_map;
+
+protected:
+  // quic::QuicSpdyClientSession
+  std::unique_ptr<quic::QuicSpdyClientStream> CreateClientStream() override;
+  // quic::QuicSpdySession
+  quic::QuicSpdyStream* CreateIncomingStream(quic::QuicStreamId id) override;
+  quic::QuicSpdyStream* CreateIncomingStream(quic::PendingStream* pending) override;
+
+private:
+  // These callbacks are owned by network filters; the QUIC session should outlive them.
+  Http::ConnectionCallbacks* http_connection_callbacks_{nullptr};
+};
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc
new file mode 100644
index 000000000000..b6fed6f10833
--- /dev/null
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc
@@ -0,0 +1,234 @@
+#include "extensions/quic_listeners/quiche/envoy_quic_client_stream.h"
+
+#pragma GCC diagnostic push
+// QUICHE allows unused parameters.
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+// QUICHE uses offsetof().
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+
+#include "quiche/quic/core/quic_session.h"
+#include "quiche/quic/core/http/quic_header_list.h"
+#include "quiche/spdy/core/spdy_header_block.h"
+#include "extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h"
+
+#pragma GCC diagnostic pop
+
+#include "extensions/quic_listeners/quiche/envoy_quic_utils.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_client_session.h"
+
+#include "common/buffer/buffer_impl.h"
+#include "common/http/header_map_impl.h"
+#include "common/common/assert.h"
+
+namespace Envoy {
+namespace Quic {
+
+EnvoyQuicClientStream::EnvoyQuicClientStream(quic::QuicStreamId id,
+                                             quic::QuicSpdyClientSession* client_session,
+                                             quic::StreamType type)
+    : quic::QuicSpdyClientStream(id, client_session, type),
+      EnvoyQuicStream(
+          // This should be larger than 8k to fully utilize the congestion
+          // control window, and no larger than the max stream flow control
+          // window, so that the stream can buffer all the data.
+          // Ideally this limit should also correlate with the peer's receive
+          // window, but it does not fully depend on that.
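+          // The value below is handed to EnvoyQuicStream as its send buffer
+          // limit, which drives the simulated watermark buffer
+          // (send_buffer_simulation_) and the low/high watermark callbacks.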
+          16 * 1024, [this]() { runLowWatermarkCallbacks(); },
+          [this]() { runHighWatermarkCallbacks(); }) {}
+
+EnvoyQuicClientStream::EnvoyQuicClientStream(quic::PendingStream* pending,
+                                             quic::QuicSpdyClientSession* client_session,
+                                             quic::StreamType type)
+    : quic::QuicSpdyClientStream(pending, client_session, type),
+      EnvoyQuicStream(
+          16 * 1024, [this]() { runLowWatermarkCallbacks(); },
+          [this]() { runHighWatermarkCallbacks(); }) {}
+
+void EnvoyQuicClientStream::encode100ContinueHeaders(const Http::HeaderMap& headers) {
+  ASSERT(headers.Status()->value() == "100");
+  encodeHeaders(headers, false);
+}
+
+void EnvoyQuicClientStream::encodeHeaders(const Http::HeaderMap& headers, bool end_stream) {
+  ENVOY_STREAM_LOG(debug, "encodeHeaders: (end_stream={}) {}.", *this, end_stream, headers);
+  WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr);
+  local_end_stream_ = end_stream;
+}
+
+void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) {
+  ENVOY_STREAM_LOG(debug, "encodeData (end_stream={}) of {} bytes.", *this, end_stream,
+                   data.length());
+  local_end_stream_ = end_stream;
+  // This counts the bytes in the send buffer that have not yet been serialized.
+  uint64_t bytes_to_send_old = BufferedDataBytes();
+  // The QUIC stream must take all the data.
+  WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream);
+  if (data.length() > 0) {
+    // Send buffer didn't take all the data, threshold needs to be adjusted.
+    Reset(quic::QUIC_BAD_APPLICATION_PAYLOAD);
+    return;
+  }
+
+  uint64_t bytes_to_send_new = BufferedDataBytes();
+  ASSERT(bytes_to_send_old <= bytes_to_send_new);
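+  // maybeCheckWatermark() compares the buffered bytes before and after the
+  // write against the simulated watermark limit and, if a threshold was
+  // crossed, runs the watermark callbacks wired up in the constructor.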
+  maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection());
+}
+
+void EnvoyQuicClientStream::encodeTrailers(const Http::HeaderMap& trailers) {
+  ASSERT(!local_end_stream_);
+  local_end_stream_ = true;
+  ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers);
+  WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr);
+}
+
+void EnvoyQuicClientStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) {
+  // Metadata Frame is not supported in QUIC.
+  // TODO(danzh): add stats for metadata not supported error.
+}
+
+void EnvoyQuicClientStream::resetStream(Http::StreamResetReason reason) {
+  // Higher layers expect calling resetStream() to immediately raise reset callbacks.
+  runResetCallbacks(reason);
+  Reset(envoyResetReasonToQuicRstError(reason));
+}
+
+void EnvoyQuicClientStream::switchStreamBlockState(bool should_block) {
+  ASSERT(FinishedReadingHeaders(),
+         "Upper stream buffer limit is reached before response body is delivered.");
+  if (should_block) {
+    sequencer()->SetBlockedUntilFlush();
+  } else {
+    ASSERT(read_disable_counter_ == 0, "readDisable called in between.");
+    sequencer()->SetUnblocked();
+  }
+}
+
+void EnvoyQuicClientStream::OnInitialHeadersComplete(bool fin, size_t frame_len,
+                                                     const quic::QuicHeaderList& header_list) {
+  quic::QuicSpdyStream::OnInitialHeadersComplete(fin, frame_len, header_list);
+  if (rst_sent()) {
+    return;
+  }
+  ASSERT(decoder() != nullptr);
+  ASSERT(headers_decompressed());
+  decoder()->decodeHeaders(quicHeadersToEnvoyHeaders(header_list), /*end_stream=*/fin);
+  if (fin) {
+    end_stream_decoded_ = true;
+  }
+  ConsumeHeaderList();
+}
+
+void EnvoyQuicClientStream::OnBodyAvailable() {
+  ASSERT(FinishedReadingHeaders());
+  ASSERT(read_disable_counter_ == 0);
+  ASSERT(!in_decode_data_callstack_);
+  in_decode_data_callstack_ = true;
+
+  Buffer::InstancePtr buffer = std::make_unique<Buffer::OwnedImpl>();
+  // TODO(danzh): check the Envoy per-stream buffer limit.
+  // For now, read out all the data.
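+  // Each iteration pulls one contiguous readable region from the sequencer,
+  // copies it into an Envoy-owned buffer slice and marks it consumed.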
+  while (HasBytesToRead()) {
+    struct iovec iov;
+    int num_regions = GetReadableRegions(&iov, 1);
+    ASSERT(num_regions > 0);
+    size_t bytes_read = iov.iov_len;
+    Buffer::RawSlice slice;
+    buffer->reserve(bytes_read, &slice, 1);
+    ASSERT(slice.len_ >= bytes_read);
+    slice.len_ = bytes_read;
+    memcpy(slice.mem_, iov.iov_base, iov.iov_len);
+    buffer->commit(&slice, 1);
+    MarkConsumed(bytes_read);
+  }
+
+  // True if there are no trailers and the FIN has been read.
+  bool finished_reading = IsDoneReading();
+  bool empty_payload_with_fin = buffer->length() == 0 && fin_received();
+  // If this call is triggered by an empty frame with FIN that did not come from
+  // the peer but was synthesized by the stream itself upon receiving HEADERS
+  // with FIN or TRAILERS, do not deliver end of stream here, because either
+  // decodeHeaders() already delivered it or decodeTrailers() will be called.
+  bool skip_decoding = empty_payload_with_fin && (end_stream_decoded_ || !finished_reading);
+  if (!skip_decoding) {
+    decoder()->decodeData(*buffer, finished_reading);
+    if (finished_reading) {
+      end_stream_decoded_ = true;
+    }
+  }
+
+  if (!sequencer()->IsClosed()) {
+    in_decode_data_callstack_ = false;
+    if (read_disable_counter_ > 0) {
+      // If readDisable() was ever called during decodeData() and it was meant to
+      // disable reading from downstream, the call must have been deferred. Call it now.
+      switchStreamBlockState(true);
+    }
+    return;
+  }
+
+  if (!quic::VersionUsesHttp3(transport_version()) && !FinishedReadingTrailers()) {
+    // In the Google QUIC implementation, trailers may arrive earlier and wait
+    // to be consumed after the whole body has been read. Consume them here.
+    // IETF QUIC shouldn't reach here because trailers are sent on the same
+    // stream.
+    decoder()->decodeTrailers(spdyHeaderBlockToEnvoyHeaders(received_trailers()));
+    MarkTrailersConsumed();
+  }
+  OnFinRead();
+  in_decode_data_callstack_ = false;
+}
+
+void EnvoyQuicClientStream::OnTrailingHeadersComplete(bool fin, size_t frame_len,
+                                                      const quic::QuicHeaderList& header_list) {
+  quic::QuicSpdyStream::OnTrailingHeadersComplete(fin, frame_len, header_list);
+  ASSERT(trailers_decompressed());
+  if (session()->connection()->connected() &&
+      (quic::VersionUsesHttp3(transport_version()) || sequencer()->IsClosed()) &&
+      !FinishedReadingTrailers()) {
+    // Before QPACK, trailers can arrive before the body. Only decode trailers
+    // after the body has been fully decoded.
+    ASSERT(decoder() != nullptr);
+    decoder()->decodeTrailers(spdyHeaderBlockToEnvoyHeaders(received_trailers()));
+    MarkTrailersConsumed();
+  }
+}
+
+void EnvoyQuicClientStream::OnStreamReset(const quic::QuicRstStreamFrame& frame) {
+  quic::QuicSpdyClientStream::OnStreamReset(frame);
+  runResetCallbacks(quicRstErrorToEnvoyResetReason(frame.error_code));
+}
+
+void EnvoyQuicClientStream::OnConnectionClosed(quic::QuicErrorCode error,
+                                               quic::ConnectionCloseSource source) {
+  quic::QuicSpdyClientStream::OnConnectionClosed(error, source);
+  runResetCallbacks(quicErrorCodeToEnvoyResetReason(error));
+}
+
+void EnvoyQuicClientStream::OnClose() {
+  quic::QuicSpdyClientStream::OnClose();
+  if (BufferedDataBytes() > 0) {
+    // If the stream is closed without sending out all buffered data, regard it
+    // as sent now and adjust the connection buffer bookkeeping accordingly.
+    filterManagerConnection()->adjustBytesToSend(0 - BufferedDataBytes());
+  }
+}
+
+void EnvoyQuicClientStream::OnCanWrite() {
+  uint64_t buffered_data_old = BufferedDataBytes();
+  quic::QuicSpdyClientStream::OnCanWrite();
+  uint64_t buffered_data_new = BufferedDataBytes();
+  // As long as OnCanWriteNewData() is a no-op, the amount of buffered data to
+  // send shouldn't increase.
+  ASSERT(buffered_data_new <= buffered_data_old);
+  maybeCheckWatermark(buffered_data_old, buffered_data_new, *filterManagerConnection());
+}
+
+uint32_t EnvoyQuicClientStream::streamId() { return id(); }
+
+Network::Connection* EnvoyQuicClientStream::connection() { return filterManagerConnection(); }
+
+QuicFilterManagerConnectionImpl* EnvoyQuicClientStream::filterManagerConnection() {
+  return dynamic_cast<QuicFilterManagerConnectionImpl*>(session());
+}
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h
new file mode 100644
index 000000000000..6f99a8098041
--- /dev/null
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h
@@ -0,0 +1,60 @@
+#pragma once
+
+#pragma GCC diagnostic push
+// QUICHE allows unused parameters.
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+// QUICHE uses offsetof().
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#include "quiche/quic/core/http/quic_spdy_client_stream.h"
+
+#pragma GCC diagnostic pop
+
+#include "extensions/quic_listeners/quiche/envoy_quic_stream.h"
+
+namespace Envoy {
+namespace Quic {
+
+// This class is a QUIC stream and also a request encoder.
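+// It receives request headers, body and trailers from the codec via the
+// Http::StreamEncoder interface, writes them onto the QUIC stream, and passes
+// decoded response data back to Envoy through the stream decoder.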
+class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, public EnvoyQuicStream {
+public:
+  EnvoyQuicClientStream(quic::QuicStreamId id, quic::QuicSpdyClientSession* client_session,
+                        quic::StreamType type);
+  EnvoyQuicClientStream(quic::PendingStream* pending, quic::QuicSpdyClientSession* client_session,
+                        quic::StreamType type);
+
+  // Http::StreamEncoder
+  void encode100ContinueHeaders(const Http::HeaderMap& headers) override;
+  void encodeHeaders(const Http::HeaderMap& headers, bool end_stream) override;
+  void encodeData(Buffer::Instance& data, bool end_stream) override;
+  void encodeTrailers(const Http::HeaderMap& trailers) override;
+  void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) override;
+
+  // Http::Stream
+  void resetStream(Http::StreamResetReason reason) override;
+  // quic::QuicSpdyStream
+  void OnBodyAvailable() override;
+  void OnStreamReset(const quic::QuicRstStreamFrame& frame) override;
+  void OnClose() override;
+  void OnCanWrite() override;
+  // quic::Stream
+  void OnConnectionClosed(quic::QuicErrorCode error, quic::ConnectionCloseSource source) override;
+
+protected:
+  // EnvoyQuicStream
+  void switchStreamBlockState(bool should_block) override;
+  uint32_t streamId() override;
+  Network::Connection* connection() override;
+
+  // quic::QuicSpdyStream
+  // Overridden to pass headers to decoder.
+  void OnInitialHeadersComplete(bool fin, size_t frame_len,
+                                const quic::QuicHeaderList& header_list) override;
+  void OnTrailingHeadersComplete(bool fin, size_t frame_len,
+                                 const quic::QuicHeaderList& header_list) override;
+
+private:
+  QuicFilterManagerConnectionImpl* filterManagerConnection();
+};
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h
index 0861e09fb4d9..c8355717bccb 100644
--- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h
@@ -48,7 +48,8 @@ class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier {
                   const quic::ProofVerifyContext* /*context*/, std::string* /*error_details*/,
                   std::unique_ptr<quic::ProofVerifyDetails>* /*details*/,
                   std::unique_ptr<quic::ProofVerifierCallback> /*callback*/) override {
-    if (cert_sct == "Fake timestamp" && certs.size() == 1 && certs[0] == "Fake cert") {
+    // Cert SCT support is not enabled for fake ProofSource.
+    if (cert_sct == "" && certs.size() == 1 && certs[0] == "Fake cert") {
       return quic::QUIC_SUCCESS;
     }
     return quic::QUIC_FAILURE;
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc
index a1a47634ad81..c12a32690231 100644
--- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc
@@ -23,10 +23,11 @@ EnvoyQuicServerSession::EnvoyQuicServerSession(
     uint32_t send_buffer_limit)
     : quic::QuicServerSessionBase(config, supported_versions, connection.get(), visitor, helper,
                                   crypto_config, compressed_certs_cache),
-      QuicFilterManagerConnectionImpl(connection.get(), dispatcher, send_buffer_limit),
+      QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit),
       quic_connection_(std::move(connection)) {}
 
 EnvoyQuicServerSession::~EnvoyQuicServerSession() {
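+  // The underlying QUIC connection is expected to have been closed before the
+  // session is destroyed.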
+  ASSERT(!quic_connection_->connected());
   QuicFilterManagerConnectionImpl::quic_connection_ = nullptr;
 }
 
@@ -95,7 +96,7 @@ void EnvoyQuicServerSession::SendGoAway(quic::QuicErrorCode error_code, const st
 void EnvoyQuicServerSession::OnCryptoHandshakeEvent(CryptoHandshakeEvent event) {
   quic::QuicServerSessionBase::OnCryptoHandshakeEvent(event);
   if (event == HANDSHAKE_CONFIRMED) {
-    raiseEvent(Network::ConnectionEvent::Connected);
+    raiseConnectionEvent(Network::ConnectionEvent::Connected);
   }
 }
 
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc
index a815c2fbc4e8..ca3ff13ca47e 100644
--- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc
@@ -96,7 +96,7 @@ void EnvoyQuicServerStream::encodeTrailers(const Http::HeaderMap& trailers) {
 
 void EnvoyQuicServerStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) {
   // Metadata Frame is not supported in QUIC.
-  NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
+  // TODO(danzh): add stats for metadata not supported error.
 }
 
 void EnvoyQuicServerStream::resetStream(Http::StreamResetReason reason) {
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_stream.h
index 228d0cf64592..59a7341b5934 100644
--- a/source/extensions/quic_listeners/quiche/envoy_quic_stream.h
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_stream.h
@@ -47,7 +47,26 @@ class EnvoyQuicStream : public Http::StreamEncoder,
       // Avoid calling this while decoding data because transient disabling and
       // enabling reading may trigger another decoding data inside the
       // callstack which messes up stream state.
-      switchStreamBlockState(disable);
+      if (disable) {
+        // Block the QUIC stream right away. If there is a queued state-switching
+        // callback, update the desired state as well.
+        switchStreamBlockState(true);
+        if (unblock_posted_) {
+          should_block_ = true;
+        }
+      } else {
+        should_block_ = false;
+        if (!unblock_posted_) {
+          // If this is the first time unblocking the stream is desired, post a
+          // callback to do it in the next event loop iteration. This is because
+          // unblocking the QUIC stream can lead to immediate upstream encoding.
+          unblock_posted_ = true;
+          connection()->dispatcher().post([this] {
+            unblock_posted_ = false;
+            switchStreamBlockState(should_block_);
+          });
+        }
+      }
     }
   }
 
@@ -109,6 +128,15 @@ class EnvoyQuicStream : public Http::StreamEncoder,
   // OnBodyDataAvailable() hands all the ready-to-use request data from stream sequencer to HCM
   // directly and buffers them in filters if needed. Itself doesn't buffer request data.
   EnvoyQuicSimulatedWatermarkBuffer send_buffer_simulation_;
+
+  // True if there is a posted callback to unblock the QUIC stream. There should
+  // be only one such callback no matter how many times readDisable() is called.
+  bool unblock_posted_{false};
+  // The latest state an unblocking QUIC stream callback should look at. Since
+  // more readDisable() calls may happen between the time the callback is posted
+  // and the time it is executed, the stream might be unblocked and blocked
+  // several times. Only the latest desired state should be considered by the
+  // callback.
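+  // For example (illustrative sequence): readDisable(true), readDisable(false),
+  // readDisable(true) within one event loop iteration leaves this flag true, so
+  // the single posted callback keeps the stream blocked rather than unblocking
+  // it.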
+  bool should_block_{false};
 };
 
 } // namespace Quic
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc
index 6eaf4616ecb4..b02f76c7b1ad 100644
--- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc
@@ -1,5 +1,9 @@
 #include "extensions/quic_listeners/quiche/envoy_quic_utils.h"
 
+#include <sys/socket.h>
+
+#include "common/network/socket_option_factory.h"
+
 namespace Envoy {
 namespace Quic {
 
@@ -105,5 +109,37 @@ Http::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode erro
   }
 }
 
+Network::ConnectionSocketPtr
+createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr,
+                       Network::Address::InstanceConstSharedPtr& local_addr,
+                       const Network::ConnectionSocket::OptionsSharedPtr& options) {
+  Network::IoHandlePtr io_handle = peer_addr->socket(Network::Address::SocketType::Datagram);
+  auto connection_socket =
+      std::make_unique<Network::ConnectionSocketImpl>(std::move(io_handle), local_addr, peer_addr);
+  connection_socket->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions());
+  connection_socket->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions());
+  if (options != nullptr) {
+    connection_socket->addOptions(options);
+  }
+  if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket,
+                                     envoy::api::v2::core::SocketOption::STATE_PREBIND)) {
+    connection_socket->close();
+    ENVOY_LOG_MISC(error, "Fail to apply pre-bind options");
+    return connection_socket;
+  }
+  local_addr->bind(connection_socket->ioHandle().fd());
+  ASSERT(local_addr->ip());
+  if (local_addr->ip()->port() == 0) {
+    // Get ephemeral port number.
+    local_addr = Network::Address::addressFromFd(connection_socket->ioHandle().fd());
+  }
+  if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket,
+                                     envoy::api::v2::core::SocketOption::STATE_BOUND)) {
+    ENVOY_LOG_MISC(error, "Fail to apply post-bind options");
+    connection_socket->close();
+  }
+  return connection_socket;
+}
+
 } // namespace Quic
 } // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h
index 816c5c99d1b3..4236c1deeea7 100644
--- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h
+++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h
@@ -4,6 +4,7 @@
 #include "common/common/assert.h"
 #include "common/http/header_map_impl.h"
 #include "common/network/address_impl.h"
+#include "common/network/listen_socket_impl.h"
 
 #pragma GCC diagnostic push
 
@@ -48,5 +49,12 @@ Http::StreamResetReason quicRstErrorToEnvoyResetReason(quic::QuicRstStreamErrorC
 // Called when underlying QUIC connection is closed either locally or by peer.
 Http::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode error);
 
+// Create a connection socket instance and apply the given socket options to
+// the socket. IP_PKTINFO and SO_RXQ_OVFL are always set if supported.
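+//
+// Illustrative usage (names are hypothetical, not from this patch):
+//   auto socket = createConnectionSocket(peer_addr, local_addr, /*options=*/nullptr);
+// If local_addr carries port 0, it is updated in place with the ephemeral port
+// chosen by bind().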
+Network::ConnectionSocketPtr
+createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr,
+                       Network::Address::InstanceConstSharedPtr& local_addr,
+                       const Network::ConnectionSocket::OptionsSharedPtr& options);
+
 } // namespace Quic
 } // namespace Envoy
diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD
index 4681f968ccc9..0ff2da72b201 100644
--- a/source/extensions/quic_listeners/quiche/platform/BUILD
+++ b/source/extensions/quic_listeners/quiche/platform/BUILD
@@ -120,7 +120,6 @@ envoy_cc_library(
         "quic_arraysize_impl.h",
         "quic_client_stats_impl.h",
         "quic_containers_impl.h",
-        "quic_endian_impl.h",
         "quic_error_code_wrappers_impl.h",
         "quic_estimate_memory_usage_impl.h",
         "quic_fallthrough_impl.h",
@@ -160,7 +159,6 @@ envoy_cc_library(
         "//include/envoy/api:io_error_interface",
         "//source/common/buffer:buffer_lib",
         "//source/common/common:assert_lib",
-        "//source/common/common:byte_order_lib",
         "//source/server:backtrace_lib",
         "@com_googlesource_quiche//:quic_core_buffer_allocator_lib",
         "@com_googlesource_quiche//:quic_platform_export",
@@ -263,7 +261,6 @@ envoy_cc_library(
     name = "quiche_common_platform_impl_lib",
     hdrs = [
         "quiche_logging_impl.h",
-        "quiche_ptr_util_impl.h",
         "quiche_unordered_containers_impl.h",
     ],
     external_deps = [
@@ -319,3 +316,21 @@ envoy_cc_library(
     visibility = ["//visibility:public"],
     deps = ["@com_googlesource_quiche//:spdy_simple_arena_lib"],
 )
+
+envoy_cc_library(
+    name = "quiche_common_platform_export_impl_lib",
+    hdrs = ["quiche_export_impl.h"],
+    tags = ["nofips"],
+    visibility = ["//visibility:public"],
+)
+
+envoy_cc_library(
+    name = "quiche_common_platform_endian_impl_lib",
+    hdrs = ["quiche_endian_impl.h"],
+    tags = ["nofips"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "quiche_common_platform_export_impl_lib",
+        "//source/common/common:byte_order_lib",
+    ],
+)
diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h
index b5cc4ecfc0b0..d51d17b1f030 100644
--- a/source/extensions/quic_listeners/quiche/platform/flags_list.h
+++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h
@@ -1,5 +1,6 @@
-// This file intentionally does not have header guards, it's intended to be
-// included multiple times, each time with a different definition of QUICHE_FLAG.
+// This file intentionally does not have header guards. It is intended to be
+// included multiple times, each time with a different definition of
+// QUICHE_FLAG.
 
 // NOLINT(namespace-envoy)
 
@@ -7,10 +8,8 @@
 // consumed or referenced directly by other Envoy code. It serves purely as a
 // porting layer for QUICHE.
 
-// The contents of this file are based off of
-// //third_party/quic/core:quic_flags_list in google3, with the addition of
-// test-only flags for testing http2 and spdy flags APIs.
-// TODO(mpwarres): include generated flags_list.h as part of QUICHE.
+// This file is generated by //third_party/quic/tools:quic_flags_list in
+// Google3.
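+//
+// A consumer is expected to define QUICHE_FLAG before including this file,
+// e.g. (hypothetical sketch, not the actual Envoy flag registration macro):
+//   #define QUICHE_FLAG(type, flag, value, doc) type FLAGS_##flag = value;
+//   #include "extensions/quic_listeners/quiche/platform/flags_list.h"
+//   #undef QUICHE_FLAG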
 
 #if defined(QUICHE_FLAG)
 
@@ -18,77 +17,70 @@ QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, fa
 
 QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, "")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_upper_limit_of_buffered_control_frames3, true,
-            "If true, close connection if there are too many (> 1000) buffered "
-            "control frames.")
-
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false,
-            "If true, check backend response header for X-Response-Ttl. If it is "
-            "provided, the stream TTL is set. A QUIC stream will be immediately "
-            "canceled when tries to write data if this TTL expired.")
+            "If true, check backend response header for X-Response-Ttl. If it is provided, the "
+            "stream TTL is set. A QUIC stream will be immediately canceled when tries to write "
+            "data if this TTL expired.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, true,
-            "If true, allow client to enable BBRv2 on server via connection "
-            "option 'B2ON'.")
+            "If true, allow client to enable BBRv2 on server via connection option 'B2ON'.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false,
             "Support different QUIC sessions, as indicated by ALPN. Used for QBONE.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_fix_inflight_bounds, false,
-            "If true, for QUIC BBRv2: 1) don't grow inflight_hi unless it's "
-            "fully used, and 2) cap inflight_lo in PROBE_CRUISE.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_exit_probe_bw_down_after_one_rtt, false,
+            "If true, for QUIC BBRv2 flows, exit PROBE_BW_DOWN phase after one round trip time.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_log_bbr2info_in_tracegraf, false,
-            "If true, for QUIC BBRv2 flows, log BBRv2-specific information to "
-            "tracegraf.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, false,
+            "If true, do not inject bandwidth in BbrSender::AdjustNetworkParameters.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_fix_pacing_rate, false,
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_fix_pacing_rate, true,
             "If true, re-calculate pacing rate when cwnd gets bootstrapped.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false,
-            "When true and the BBR9 connection option is present, BBR only considers "
-            "bandwidth samples app-limited if they're not filling the pipe.")
+            "When true and the BBR9 connection option is present, BBR only considers bandwidth "
+            "samples app-limited if they're not filling the pipe.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_less_probe_rtt, false,
             "Enables 3 new connection options to make PROBE_RTT more aggressive.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recovery, false,
-            "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's "
-            "CWND in CalculateCongestionWindow()")
+            "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's CWND in "
+            "CalculateCongestionWindow()")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_one_mss_conservation, false,
-            "When true, ensure BBR allows at least one MSS to be sent in "
-            "response to an ACK in packet conservation.")
+            "When true, ensure BBR allows at least one MSS to be sent in response to an ACK in "
+            "packet conservation.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_slower_startup4, false,
-            "Enables the BBQ5 connection option, which forces saved aggregation values "
-            "to expire when the bandwidth increases more than 25% in QUIC BBR STARTUP.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_slower_startup4, true,
+            "Enables the BBQ5 connection option, which forces saved aggregation values to expire "
+            "when the bandwidth increases more than 25% in QUIC BBR STARTUP.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_startup_rate_reduction, false,
-            "When true, enables the BBS4 and BBS5 connection options, which reduce "
-            "BBR's pacing rate in STARTUP as more losses occur as a fraction of CWND.")
+            "When true, enables the BBS4 and BBS5 connection options, which reduce BBR's pacing "
+            "rate in STARTUP as more losses occur as a fraction of CWND.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_close_all_encryptions_levels, false,
-            "If true, QUIC connection close packet will be sent at all available "
-            "encryption levels.")
+QUICHE_FLAG(
+    bool, quic_reloadable_flag_quic_close_all_encryptions_levels2, false,
+    "If true, QUIC connection close packet will be sent at all available encryption levels.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_on_wrong_offset, false,
-            "If true, connection will be closed if a stream receives stream "
-            "frame or RESET_STREAM frame with bad close offset.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_and_discard_data_on_wrong_offset,
+            false,
+            "If true, when a stream receives data with wrong close offset, it closes the "
+            "connection. And the stream frame data will be discarded.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_coalesce_stream_frames, false,
-            "If true, Adjacent stream frames will be combined into one stream "
-            "frame before the packet is serialized.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_on_failed_consume_data_fast_path,
+            false, "If true, close connection if CreateAndSerializeStreamFrame fails.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_combine_generator_and_creator, true,
-            "If true, combine QuicPacketGenerator and QuicPacketCreator.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_on_wrong_offset, true,
+            "If true, connection will be closed if a stream receives stream frame or RESET_STREAM "
+            "frame with bad close offset.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false,
             "If true, set burst token to 2 in cwnd bootstrapping experiment.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false,
-            "If true, uses conservative cwnd gain and pacing gain when cwnd gets "
-            "bootstrapped.")
+            "If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_debug_wrong_qos, false,
             "If true, consider getting QoS after stream has been detached as GFE bug.")
@@ -97,38 +89,36 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr, true,
             "When true, defaults to BBR congestion control instead of Cubic.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false,
-            "If true, use BBRv2 as the default congestion controller. Takes "
-            "precedence over --quic_default_to_bbr.")
+            "If true, use BBRv2 as the default congestion controller. Takes precedence over "
+            "--quic_default_to_bbr.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_connection_migration_for_udp_proxying, true,
-            "If true, GFE disables connection migration in connection option for "
-            "proxied sessions.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_delete_send_rst_stream_inner, false,
+            "If true, QuicSession::SendRstStreamInner will be factored out and deleted.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false,
-            "In v44 and above, where STOP_WAITING is never sent, close the "
-            "connection if it's received.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_connection_migration_for_udp_proxying, true,
+            "If true, GFE disables connection migration in connection option for proxied sessions.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_process_small_initial_packets, true,
-            "If true, server drops client initial packets in datagrams < 1200 bytes.")
+QUICHE_FLAG(
+    bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false,
+    "In v44 and above, where STOP_WAITING is never sent, close the connection if it's received.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false,
             "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ack_decimation, false,
-            "Default enables QUIC ack decimation and adds a connection option to "
-            "disable it.")
+            "Default enables QUIC ack decimation and adds a connection option to disable it.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_fifo_write_scheduler, true,
-            "If true and FIFO connection option is received, write_blocked_streams "
-            "uses FIFO(stream with smallest ID has highest priority) write scheduler.")
+            "If true and FIFO connection option is received, write_blocked_streams uses "
+            "FIFO(stream with smallest ID has highest priority) write scheduler.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ietf_loss_detection, true,
             "If true, enable IETF loss detection as described in "
             "https://tools.ietf.org/html/draft-ietf-quic-recovery-22#section-6.1.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_lifo_write_scheduler, true,
-            "If true and LIFO connection option is received, write_blocked_streams "
-            "uses LIFO(stream with largest ID has highest priority) write scheduler.")
+            "If true and LIFO connection option is received, write_blocked_streams uses "
+            "LIFO(stream with largest ID has highest priority) write scheduler.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_pcc3, false,
             "If true, enable experiment for testing PCC congestion-control.")
@@ -138,12 +128,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_pto, true, "If true, enable p
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_rr_write_scheduler, true,
             "If true, enable HTTP/2 default scheduling(round robin).")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_49, false,
-            "If true, enable QUIC version 49.")
-
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_50, false,
-            "If true, enable QUIC version 50.")
-
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_99, false, "If true, enable version 99.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "")
@@ -151,89 +135,67 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "")
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true,
             "If true, adjust congestion window when doing bandwidth resumption in BBR.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_framer_doesnt_create_initial_encrypter, true,
-            "If true, QuicFramer does not create an encrypter/decrypter for the "
-            "ENCRYPTION_INITIAL level.")
-
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false,
-            "If true, QuicListener::OnSocketIsWritable will always return false, "
-            "which means there will never be a fake EPOLLOUT event in the next "
-            "epoll iteration.")
+            "If true, QuicListener::OnSocketIsWritable will always return false, which means there "
+            "will never be a fake EPOLLOUT event in the next epoll iteration.")
+
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_log_ack_aggregation_stats, false,
+            "If true, log number of ack aggregation epochs in QUIC transport connection stats.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_monotonic_epoll_clock, false,
             "If true, QuicEpollClock::Now() will monotonically increase.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_mtu_discovery_v2, false,
-            "If true, enable QUIC MTU discovery version 2.")
-
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, false,
             "If true, will negotiate the ACK delay time.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_neuter_handshake_packets_once, false,
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_neuter_handshake_packets_once2, false,
             "Call NeuterHandshakePackets() at most once per connection.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_decrease_in_final_offset, false,
-            "If true, a stream will be reset if it receives fin that has offset "
-            "less than its highest offset.")
-
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false,
-            "If true, transport connection stats doesn't report duplicated "
-            "experiments for same connection.")
-
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_stream_data_after_reset, false,
-            "If true, QuicStreamSequencer will not take in new data if the "
-            "stream is reset.")
+            "If true, transport connection stats doesn't report duplicated experiments for same "
+            "connection.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_v2_scaling_factor, false,
-            "When true, don't use an extra scaling factor when reading packets "
-            "from QUIC's RX_RING with TPACKET_V2.")
-
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_parse_prox_source_connection_id, true,
-            "When true, QuicFramer allows parsing failures of source connection "
-            "ID for the PROX version.")
+            "When true, don't use an extra scaling factor when reading packets from QUIC's RX_RING "
+            "with TPACKET_V2.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_populate_nonretransmittable_frames, false,
-            "If true, populate nonretransmittable frames in SerializedPacket.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_on_packet_numbers_skipped, false,
+            "If true, add a up call when N packet numbers get skipped.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false,
-            "If true, QuicProxyDispatcher will write packed_client_address and "
-            "packed_server_vip in TcpProxyHeaderProto.")
+            "If true, QuicProxyDispatcher will write packed_client_address and packed_server_vip "
+            "in TcpProxyHeaderProto.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, false,
-            "If true, for L1 GFE, as requests come in, record frontend service to VIP "
-            "mapping which is used to announce VIP in SHLO for proxied sessions. ")
+            "If true, for L1 GFE, as requests come in, record frontend service to VIP mapping "
+            "which is used to announce VIP in SHLO for proxied sessions. ")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_unprocessable_packets_statelessly, false,
-            "If true, do not add connection ID of packets with unknown connection ID "
-            "and no version to time wait list, instead, send appropriate responses "
-            "depending on the packets' sizes and drop them.")
-
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false,
-            "If true, require handshake confirmation for QUIC connections, "
-            "functionally disabling 0-rtt handshakes.")
+            "If true, require handshake confirmation for QUIC connections, functionally disabling "
+            "0-rtt handshakes.")
+
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_max_push_id_with_settings, false,
+            "If true, then a MAX_PUSH_ID frame will be send when the initial SETTINGS frame is "
+            "sent in HTTP/3.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false,
-            "When the STMP connection option is sent by the client, timestamps "
-            "in the QUIC ACK frame are sent and processed.")
+            "When the STMP connection option is sent by the client, timestamps in the QUIC ACK "
+            "frame are sent and processed.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true,
             "If true, enable server push feature on QUIC.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_simplify_stop_waiting, true,
-            "If true, do not send STOP_WAITING if no_stop_waiting_frame_.")
-
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_skip_packet_number_for_pto, true,
             "If true, skip packet number before sending the last PTO retransmission.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_reading_when_level_triggered, false,
-            "When true, calling StopReading() on a level-triggered QUIC stream "
-            "sequencer will cause the sequencer to discard future data.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_reading_when_level_triggered, true,
+            "When true, calling StopReading() on a level-triggered QUIC stream sequencer will "
+            "cause the sequencer to discard future data.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_supports_tls_handshake, false,
-            "If true, QUIC supports both QUIC Crypto and TLS 1.3 for the "
-            "handshake protocol.")
+            "If true, QUIC supports both QUIC Crypto and TLS 1.3 for the handshake protocol.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false,
             "A testonly reloadable flag that will always default to false.")
@@ -244,63 +206,71 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true,
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_tracegraf_populate_ack_packet_number, true,
             "If true, populate packet_number of received ACK in tracegraf.")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_treat_queued_packets_as_sent, false,
-            "If true, treat queued QUIC packets as sent.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_tracegraf_populate_rtt_variation, false,
+            "If true, QUIC tracegraf populates RTT variation.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false,
-            "When true, set the initial congestion control window from connection "
-            "options in QuicSentPacketManager rather than TcpCubicSenderBytes.")
+            "When true, set the initial congestion control window from connection options in "
+            "QuicSentPacketManager rather than TcpCubicSenderBytes.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_common_stream_check, false,
-            "If true, use common code for checking whether a new stream ID may "
-            "be allocated.")
+            "If true, use common code for checking whether a new stream ID may be allocated.")
+
+QUICHE_FLAG(
+    bool, quic_reloadable_flag_quic_use_connection_encryption_level, false,
+    "If true, QuicCryptoStream::OnCryptoFrame() will never use the frame's encryption level.")
+
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_handshaker_delegate, false,
+            "If true, QUIC crypto handshaker uses handshaker delegate to notify session about "
+            "handshake events.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false,
             "If true, use header stage idle list for QUIC connections in GFE.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_http2_priority_write_scheduler, true,
-            "If true and H2PR connection option is received, write_blocked_streams_ "
-            "uses HTTP2 (tree-style) priority write scheduler.")
+            "If true and H2PR connection option is received, write_blocked_streams_ uses HTTP2 "
+            "(tree-style) priority write scheduler.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_ip_bandwidth_module, true,
-            "If true, use IpBandwidthModule for cwnd bootstrapping if it is "
-            "registered.")
+            "If true, use IpBandwidthModule for cwnd bootstrapping if it is registered.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_leto_key_exchange, false,
-            "If true, QUIC will attempt to use the Leto key exchange service and "
-            "only fall back to local key exchange if that fails.")
+            "If true, QUIC will attempt to use the Leto key exchange service and only fall back to "
+            "local key exchange if that fails.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_pigeon_sockets, false,
             "Use USPS Direct Path for QUIC egress.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_quic_time_for_received_timestamp, false,
-            "If true, use QuicClock::Now() for the fallback source of packet "
-            "received time instead of WallNow().")
+            "If true, use QuicClock::Now() for the fallback source of packet received time instead "
+            "of WallNow().")
 
-QUICHE_FLAG(bool, quic_reloadable_flag_quic_version_negotiation_grease, false,
-            "When true, QUIC Version Negotiation packets will randomly include "
-            "fake versions.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_version_negotiated_by_default_at_server, false,
+            "If true, for server QUIC connections, set version_negotiated_ to true by default.")
 
 QUICHE_FLAG(bool, quic_reloadable_flag_send_quic_fallback_server_config_on_leto_error, false,
-            "If true and using Leto for QUIC shared-key calculations, GFE will react "
-            "to a failure to contact Leto by sending a REJ containing a fallback "
-            "ServerConfig, allowing the client to continue the handshake.")
+            "If true and using Leto for QUIC shared-key calculations, GFE will react to a failure "
+            "to contact Leto by sending a REJ containing a fallback ServerConfig, allowing the "
+            "client to continue the handshake.")
 
-QUICHE_FLAG(bool, quic_restart_flag_do_not_create_raw_socket_selector_if_quic_enabled, false,
-            "If true, do not create the RawSocketSelector in "
-            "QuicListener::Initialize() if QUIC is disabled by flag.")
+QUICHE_FLAG(bool, quic_restart_flag_do_not_create_raw_socket_selector_if_quic_enabled, true,
+            "If true, do not create the RawSocketSelector in QuicListener::Initialize() if QUIC is "
+            "disabled by flag.")
 
-QUICHE_FLAG(bool, quic_restart_flag_dont_fetch_quic_private_keys_from_leto, false,
-            "If true, GFE will not request private keys when fetching QUIC "
-            "ServerConfigs from Leto.")
+QUICHE_FLAG(
+    bool, quic_restart_flag_dont_fetch_quic_private_keys_from_leto, false,
+    "If true, GFE will not request private keys when fetching QUIC ServerConfigs from Leto.")
 
-QUICHE_FLAG(bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false,
-            "If true, inspects QUIC CHLOs for kLOAS and early creates sessions "
-            "to allow multi-packet CHLOs")
+QUICHE_FLAG(
+    bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false,
+    "If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs")
 
-QUICHE_FLAG(bool, quic_restart_flag_quic_no_cap_net_raw_for_usps_egress, false,
-            "If true, gfe2::RawSocket::CapabilityNeeded will return false if "
-            "QUIC egress method is USPS.")
+QUICHE_FLAG(bool, quic_restart_flag_quic_coalesce_stream_frames_2, false,
+            "If true, adjacent stream frames will be coalesced into one.")
+
+QUICHE_FLAG(
+    bool, quic_restart_flag_quic_no_cap_net_raw_for_usps_egress, false,
+    "If true, gfe2::RawSocket::CapabilityNeeded will return false if QUIC egress method is USPS.")
 
 QUICHE_FLAG(bool, quic_restart_flag_quic_no_fallback_for_pigeon_socket, false,
             "If true, GFEs using USPS egress will not fallback to raw ip socket.")
@@ -317,13 +287,16 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_false, false,
 QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_true, true,
             "A testonly restart flag that will always default to true.")
 
-QUICHE_FLAG(bool, quic_restart_flag_quic_use_leto_for_quic_configs, false,
-            "If true, use Leto to fetch QUIC server configs instead of using the "
-            "seeds from Memento.")
+QUICHE_FLAG(bool, quic_restart_flag_quic_use_circular_deque, false,
+            "If true, replace the backing type of QuicDeque from std::deque to QuicCircularDeque.")
+
+QUICHE_FLAG(
+    bool, quic_restart_flag_quic_use_leto_for_quic_configs, false,
+    "If true, use Leto to fetch QUIC server configs instead of using the seeds from Memento.")
 
 QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false,
-            "If true, create a shared pigeon socket for all quic to backend "
-            "connections and switch to use it after successful handshake.")
+            "If true, create a shared pigeon socket for all quic to backend connections and switch "
+            "to use it after successful handshake.")
 
 QUICHE_FLAG(bool, quic_allow_chlo_buffering, true,
             "If true, allows packets to be buffered in anticipation of a "
@@ -335,35 +308,25 @@ QUICHE_FLAG(bool, quic_disable_pacing_for_perf_tests, false, "If true, disable p
 QUICHE_FLAG(bool, quic_enforce_single_packet_chlo, true,
             "If true, enforce that QUIC CHLOs fit in one packet")
 
-// Currently, this number is quite conservative. At a hypothetical 1000 qps,
-// this means that the longest time-wait list we should see is:
-//   200 seconds * 1000 qps = 200000.
-// Of course, there are usually many queries per QUIC connection, so we allow a
-// factor of 3 leeway.
-QUICHE_FLAG(int64_t, // allow-non-std-int
-            quic_time_wait_list_max_connections, 600000,
+QUICHE_FLAG(int64_t, quic_time_wait_list_max_connections, 600000,
             "Maximum number of connections on the time-wait list. "
             "A negative value implies no configured limit.")
 
-QUICHE_FLAG(int64_t, // allow-non-std-int
-            quic_time_wait_list_seconds, 200,
+QUICHE_FLAG(int64_t, quic_time_wait_list_seconds, 200,
             "Time period for which a given connection_id should live in "
             "the time-wait state.")
 
 QUICHE_FLAG(double, quic_bbr_cwnd_gain, 2.0f,
             "Congestion window gain for QUIC BBR during PROBE_BW phase.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_buffered_data_threshold, 8 * 1024,
+QUICHE_FLAG(int32_t, quic_buffered_data_threshold, 8 * 1024,
             "If buffered data in QUIC stream is less than this "
             "threshold, buffers all provided data or asks upper layer for more data")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_send_buffer_max_data_slice_size, 4 * 1024,
+QUICHE_FLAG(int32_t, quic_send_buffer_max_data_slice_size, 4 * 1024,
             "Max size of data slice in bytes for QUIC stream send buffer.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_lumpy_pacing_size, 2,
+QUICHE_FLAG(int32_t, quic_lumpy_pacing_size, 2,
             "Number of packets that the pacing sender allows in bursts during "
             "pacing. This flag is ignored if a flow's estimated bandwidth is "
             "lower than 1200 kbps.")
@@ -372,12 +335,10 @@ QUICHE_FLAG(double, quic_lumpy_pacing_cwnd_fraction, 0.25f,
             "Congestion window fraction that the pacing sender allows in bursts "
             "during pacing.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_max_pace_time_into_future_ms, 10,
+QUICHE_FLAG(int32_t, quic_max_pace_time_into_future_ms, 10,
             "Max time that QUIC can pace packets into the future in ms.")
 
-QUICHE_FLAG(double, quic_pace_time_into_future_srtt_fraction,
-            0.125f, // One-eighth smoothed RTT
+QUICHE_FLAG(double, quic_pace_time_into_future_srtt_fraction, 0.125f,
             "Smoothed RTT fraction that a connection can pace packets into the future.")
 
 QUICHE_FLAG(bool, quic_export_server_num_packets_per_write_histogram, false,
@@ -386,8 +347,7 @@ QUICHE_FLAG(bool, quic_export_server_num_packets_per_write_histogram, false,
 QUICHE_FLAG(bool, quic_disable_version_negotiation_grease_randomness, false,
             "If true, use predictable version negotiation versions.")
 
-QUICHE_FLAG(int64_t, // allow-non-std-int
-            quic_max_tracked_packet_count, 10000, "Maximum number of tracked packets.")
+QUICHE_FLAG(int64_t, quic_max_tracked_packet_count, 10000, "Maximum number of tracked packets.")
 
 QUICHE_FLAG(bool, quic_prober_uses_length_prefixed_connection_ids, false,
             "If true, QuicFramer::WriteClientVersionNegotiationProbePacket uses "
@@ -397,30 +357,24 @@ QUICHE_FLAG(bool, quic_client_convert_http_header_name_to_lowercase, true,
             "If true, HTTP request header names sent from QuicSpdyClientBase(and "
             "descendents) will be automatically converted to lower case.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_bbr2_default_probe_bw_base_duration_ms, 2000,
+QUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_base_duration_ms, 2000,
             "The default minimum duration for BBRv2-native probes, in milliseconds.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_bbr2_default_probe_bw_max_rand_duration_ms, 1000,
+QUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_max_rand_duration_ms, 1000,
             "The default upper bound of the random amount of BBRv2-native "
             "probes, in milliseconds.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_bbr2_default_probe_rtt_period_ms, 10000,
+QUICHE_FLAG(int32_t, quic_bbr2_default_probe_rtt_period_ms, 10000,
             "The default period for entering PROBE_RTT, in milliseconds.")
 
-QUICHE_FLAG(double, quic_bbr2_default_loss_threshold,
-            0.3, // Changed from 0.02 for YouTube and Search experiments.
+QUICHE_FLAG(double, quic_bbr2_default_loss_threshold, 0.3,
             "The default loss threshold for QUIC BBRv2, should be a value "
             "between 0 and 1.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_bbr2_default_startup_full_loss_count, 8,
+QUICHE_FLAG(int32_t, quic_bbr2_default_startup_full_loss_count, 8,
             "The default minimum number of loss marking events to exit STARTUP.")
 
-QUICHE_FLAG(double, quic_bbr2_default_inflight_hi_headroom,
-            0.01, // Changed from 0.15 for YouTube and Search experiments.
+QUICHE_FLAG(double, quic_bbr2_default_inflight_hi_headroom, 0.01,
             "The default fraction of unutilized headroom to try to leave in path "
             "upon high loss.")
 
@@ -429,27 +383,22 @@ QUICHE_FLAG(double, quic_ack_aggregation_bandwidth_threshold, 1.0,
             "bandwidth * this flag), consider the current aggregation completed "
             "and starts a new one.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_anti_amplification_factor, 3,
+QUICHE_FLAG(int32_t, quic_anti_amplification_factor, 3,
             "Anti-amplification factor. Before address validation, server will "
             "send no more than factor times bytes received.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_max_buffered_crypto_bytes,
-            16 * 1024, // 16 KB
+QUICHE_FLAG(int32_t, quic_max_buffered_crypto_bytes, 16 * 1024,
             "The maximum amount of CRYPTO frame data that can be buffered.")
 
 QUICHE_FLAG(bool, quic_allow_http3_priority, false, "If true, support HTTP/3 priority")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_max_aggressive_retransmittable_on_wire_ping_count, 0,
+QUICHE_FLAG(int32_t, quic_max_aggressive_retransmittable_on_wire_ping_count, 0,
             "If set to non-zero, the maximum number of consecutive pings that "
             "can be sent with aggressive initial retransmittable on wire timeout "
             "if there is no new data received. After which, the timeout will be "
             "exponentially back off until exceeds the default ping timeout.")
 
-QUICHE_FLAG(int32_t, // allow-non-std-int
-            quic_max_congestion_window, 2000, "The maximum congestion window in packets.")
+QUICHE_FLAG(int32_t, quic_max_congestion_window, 2000, "The maximum congestion window in packets.")
 
 QUICHE_FLAG(bool, http2_reloadable_flag_http2_testonly_default_false, false,
             "A testonly reloadable flag that will always default to false.")
diff --git a/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h
index e2638f8e21bf..6a8f03be6eab 100644
--- a/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h
+++ b/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h
@@ -2,7 +2,9 @@
 
 #include <deque>
 #include <memory>
+#include <ostream>
 #include <queue>
+#include <sstream>
 
 #include "absl/container/flat_hash_map.h"
 #include "absl/container/inlined_vector.h"
@@ -38,4 +40,20 @@ template <typename T> using QuicDequeImpl = std::deque<T>;
 template <typename T, size_t N, typename A = std::allocator<T>>
 using QuicInlinedVectorImpl = absl::InlinedVector<T, N, A>;
 
+template <typename T, size_t N, typename A>
+inline std::ostream& operator<<(std::ostream& os,
+                                const QuicInlinedVectorImpl<T, N, A>& inlined_vector) {
+  std::stringstream debug_string;
+  debug_string << "{";
+  for (typename QuicInlinedVectorImpl<T, N, A>::const_iterator it = inlined_vector.cbegin();
+       it != inlined_vector.cend(); ++it) {
+    if (it != inlined_vector.cbegin()) {
+      debug_string << ", ";
+    }
+    debug_string << *it;
+  }
+  debug_string << "}";
+  return os << debug_string.str();
+}
+
 } // namespace quic
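
A minimal usage sketch (not part of the patch) of the streaming operator added above. Since QuicInlinedVectorImpl is only an alias for absl::InlinedVector, argument-dependent lookup will not find the operator from outside namespace quic, so the illustrative caller below (a hypothetical helper, assuming this platform header is included) lives inside that namespace:

    #include <iostream>

    namespace quic {
    inline void printInlinedVectorExample() {
      QuicInlinedVectorImpl<int, 4> v = {1, 2, 3};
      std::cout << v << "\n"; // prints "{1, 2, 3}"
    }
    } // namespace quic
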
diff --git a/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_endian_impl.h
similarity index 93%
rename from source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h
rename to source/extensions/quic_listeners/quiche/platform/quiche_endian_impl.h
index c456da321fa7..c70e45d3dbd7 100644
--- a/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h
+++ b/source/extensions/quic_listeners/quiche/platform/quiche_endian_impl.h
@@ -10,9 +10,9 @@
 
 #include "common/common/byte_order.h"
 
-namespace quic {
+namespace quiche {
 
-class QuicEndianImpl {
+class QuicheEndianImpl {
 public:
   static uint16_t HostToNet16(uint16_t x) { return toEndianness<ByteOrder::BigEndian>(x); }
   static uint32_t HostToNet32(uint32_t x) { return toEndianness<ByteOrder::BigEndian>(x); }
@@ -25,4 +25,4 @@ class QuicEndianImpl {
   static bool HostIsLittleEndian() { return NetToHost16(0x1234) != 0x1234; }
 };
 
-} // namespace quic
+} // namespace quiche
diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_ptr_util_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_export_impl.h
similarity index 50%
rename from source/extensions/quic_listeners/quiche/platform/quiche_ptr_util_impl.h
rename to source/extensions/quic_listeners/quiche/platform/quiche_export_impl.h
index e5ed8a2714d0..d532f01b2992 100644
--- a/source/extensions/quic_listeners/quiche/platform/quiche_ptr_util_impl.h
+++ b/source/extensions/quic_listeners/quiche/platform/quiche_export_impl.h
@@ -6,12 +6,6 @@
 // consumed or referenced directly by other Envoy code. It serves purely as a
 // porting layer for QUICHE.
 
-#include <memory>
-
-namespace quiche {
-
-template <typename T, typename... Args> std::unique_ptr<T> QuicheMakeUniqueImpl(Args&&... args) {
-  return std::make_unique<T>(std::forward<Args>(args)...);
-}
-
-} // namespace quiche
+#define QUICHE_EXPORT
+#define QUICHE_EXPORT_PRIVATE
+#define QUICHE_NO_EXPORT
diff --git a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc
index e48374fe4984..13cb3829f004 100644
--- a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc
+++ b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc
@@ -5,17 +5,17 @@
 namespace Envoy {
 namespace Quic {
 
-QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl(EnvoyQuicConnection* connection,
+QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl(EnvoyQuicConnection& connection,
                                                                  Event::Dispatcher& dispatcher,
                                                                  uint32_t send_buffer_limit)
-    : quic_connection_(connection), dispatcher_(dispatcher), filter_manager_(*this),
-      // QUIC connection id can be 18 bytes. It's easier to use hash value instead
-      // of trying to map it into a 64-bit space.
-      stream_info_(dispatcher.timeSource()), id_(quic_connection_->connection_id().Hash()),
+    // Using the hashed connection ID for anything other than logging is not safe: a QUIC
+    // connection ID can be up to 18 bytes, so hashing it down to 8 bytes may collide.
+    : Network::ConnectionImplBase(dispatcher, /*id=*/connection.connection_id().Hash()),
+      quic_connection_(&connection), filter_manager_(*this), stream_info_(dispatcher.timeSource()),
       write_buffer_watermark_simulation_(
           send_buffer_limit / 2, send_buffer_limit, [this]() { onSendBufferLowWatermark(); },
           [this]() { onSendBufferHighWatermark(); }, ENVOY_LOGGER()) {
-  stream_info_.protocol(Http::Protocol::Http2);
+  stream_info_.protocol(Http::Protocol::Http3);
 }
 
 void QuicFilterManagerConnectionImpl::addWriteFilter(Network::WriteFilterSharedPtr filter) {
@@ -34,10 +34,6 @@ bool QuicFilterManagerConnectionImpl::initializeReadFilters() {
   return filter_manager_.initializeReadFilters();
 }
 
-void QuicFilterManagerConnectionImpl::addConnectionCallbacks(Network::ConnectionCallbacks& cb) {
-  network_connection_callbacks_.push_back(&cb);
-}
-
 void QuicFilterManagerConnectionImpl::enableHalfClose(bool enabled) {
   RELEASE_ASSERT(!enabled, "Quic connection doesn't support half close.");
 }
@@ -61,9 +57,7 @@ void QuicFilterManagerConnectionImpl::close(Network::ConnectionCloseType type) {
     // Already detached from quic connection.
     return;
   }
-  quic_connection_->CloseConnection(quic::QUIC_NO_ERROR, "Closed by application",
-                                    quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
-  quic_connection_ = nullptr;
+  closeConnectionImmediately();
 }
 
 void QuicFilterManagerConnectionImpl::setDelayedCloseTimeout(std::chrono::milliseconds timeout) {
@@ -114,28 +108,28 @@ void QuicFilterManagerConnectionImpl::onConnectionCloseEvent(
                                            " with details: ", frame.error_details);
   if (quic_connection_ != nullptr) {
     // Tell network callbacks about connection close if not detached yet.
-    raiseEvent(source == quic::ConnectionCloseSource::FROM_PEER
-                   ? Network::ConnectionEvent::RemoteClose
-                   : Network::ConnectionEvent::LocalClose);
+    raiseConnectionEvent(source == quic::ConnectionCloseSource::FROM_PEER
+                             ? Network::ConnectionEvent::RemoteClose
+                             : Network::ConnectionEvent::LocalClose);
   }
 }
 
-void QuicFilterManagerConnectionImpl::raiseEvent(Network::ConnectionEvent event) {
-  for (auto callback : network_connection_callbacks_) {
-    callback->onEvent(event);
-  }
+void QuicFilterManagerConnectionImpl::closeConnectionImmediately() {
+  quic_connection_->CloseConnection(quic::QUIC_NO_ERROR, "Closed by application",
+                                    quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
+  quic_connection_ = nullptr;
 }
 
 void QuicFilterManagerConnectionImpl::onSendBufferHighWatermark() {
   ENVOY_CONN_LOG(trace, "onSendBufferHighWatermark", *this);
-  for (auto callback : network_connection_callbacks_) {
+  for (auto callback : callbacks_) {
     callback->onAboveWriteBufferHighWatermark();
   }
 }
 
 void QuicFilterManagerConnectionImpl::onSendBufferLowWatermark() {
   ENVOY_CONN_LOG(trace, "onSendBufferLowWatermark", *this);
-  for (auto callback : network_connection_callbacks_) {
+  for (auto callback : callbacks_) {
     callback->onBelowWriteBufferLowWatermark();
   }
 }
diff --git a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h
index 0db77afce6e5..a6b34cda9e79 100644
--- a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h
+++ b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h
@@ -5,7 +5,7 @@
 
 #include "common/common/empty_string.h"
 #include "common/common/logger.h"
-#include "common/network/filter_manager_impl.h"
+#include "common/network/connection_impl_base.h"
 #include "common/stream_info/stream_info_impl.h"
 
 #include "extensions/quic_listeners/quiche/envoy_quic_connection.h"
@@ -15,10 +15,9 @@ namespace Envoy {
 namespace Quic {
 
 // Act as a Network::Connection to HCM and a FilterManager to FilterFactoryCb.
-class QuicFilterManagerConnectionImpl : public Network::FilterManagerConnection,
-                                        protected Logger::Loggable<Logger::Id::connection> {
+class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase {
 public:
-  QuicFilterManagerConnectionImpl(EnvoyQuicConnection* connection, Event::Dispatcher& dispatcher,
+  QuicFilterManagerConnectionImpl(EnvoyQuicConnection& connection, Event::Dispatcher& dispatcher,
                                   uint32_t send_buffer_limit);
 
   // Network::FilterManager
@@ -29,7 +28,6 @@ class QuicFilterManagerConnectionImpl : public Network::FilterManagerConnection,
   bool initializeReadFilters() override;
 
   // Network::Connection
-  void addConnectionCallbacks(Network::ConnectionCallbacks& cb) override;
   void addBytesSentCallback(Network::Connection::BytesSentCb /*cb*/) override {
     // TODO(danzh): implement to support proxy. This interface is only called from
     // TCP proxy code.
@@ -38,9 +36,6 @@ class QuicFilterManagerConnectionImpl : public Network::FilterManagerConnection,
   void enableHalfClose(bool enabled) override;
   void close(Network::ConnectionCloseType type) override;
   Event::Dispatcher& dispatcher() override { return dispatcher_; }
-  // Using this for purpose other than logging is not safe. Because QUIC connection id can be
-  // 18 bytes, so there might be collision when it's hashed to 8 bytes.
-  uint64_t id() const override { return id_; }
   std::string nextProtocol() const override { return EMPTY_STRING; }
   void noDelay(bool /*enable*/) override {
     // No-op. TCP_NODELAY doesn't apply to UDP.
@@ -57,7 +52,8 @@ class QuicFilterManagerConnectionImpl : public Network::FilterManagerConnection,
     NOT_REACHED_GCOVR_EXCL_LINE;
   }
   void setConnectionStats(const Network::Connection::ConnectionStats& stats) override {
-    stats_ = std::make_unique<Network::Connection::ConnectionStats>(stats);
+    // TODO(danzh): populate stats.
+    Network::ConnectionImplBase::setConnectionStats(stats);
     quic_connection_->setConnectionStats(stats);
   }
   Ssl::ConnectionInfoConstSharedPtr ssl() const override;
@@ -107,12 +103,9 @@ class QuicFilterManagerConnectionImpl : public Network::FilterManagerConnection,
   void onConnectionCloseEvent(const quic::QuicConnectionCloseFrame& frame,
                               quic::ConnectionCloseSource source);
 
-  void raiseEvent(Network::ConnectionEvent event);
+  void closeConnectionImmediately() override;
 
-  EnvoyQuicConnection* quic_connection_;
-  // TODO(danzh): populate stats.
-  std::unique_ptr<Network::Connection::ConnectionStats> stats_;
-  Event::Dispatcher& dispatcher_;
+  EnvoyQuicConnection* quic_connection_{nullptr};
 
 private:
   // Called when aggregated buffered bytes across all the streams exceeds high watermark.
@@ -127,11 +120,7 @@ class QuicFilterManagerConnectionImpl : public Network::FilterManagerConnection,
   Network::FilterManagerImpl filter_manager_;
 
   StreamInfo::StreamInfoImpl stream_info_;
-  // These callbacks are owned by network filters and quic session should out live
-  // them.
-  std::list<Network::ConnectionCallbacks*> network_connection_callbacks_;
   std::string transport_failure_reason_;
-  const uint64_t id_;
   uint32_t bytes_to_send_{0};
   // Keeps the buffer state of the connection, and react upon the changes of how many bytes are
   // buffered cross all streams' send buffer. The state is evaluated and may be changed upon each
diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc
index 38c798551bd6..e5445320a4c9 100644
--- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc
+++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc
@@ -26,7 +26,7 @@ class OpenTracingHTTPHeadersWriter : public opentracing::HTTPHeadersWriter {
                                   opentracing::string_view value) const override {
     Http::LowerCaseString lowercase_key{key};
     request_headers_.remove(lowercase_key);
-    request_headers_.addCopy(std::move(lowercase_key), value);
+    request_headers_.addCopy(std::move(lowercase_key), {value.data(), value.size()});
     return {};
   }
 
diff --git a/source/extensions/transport_sockets/tls/context_impl.h b/source/extensions/transport_sockets/tls/context_impl.h
index f91ad1d0bc8f..6ee964cb09dd 100644
--- a/source/extensions/transport_sockets/tls/context_impl.h
+++ b/source/extensions/transport_sockets/tls/context_impl.h
@@ -29,7 +29,6 @@ namespace Extensions {
 namespace TransportSockets {
 namespace Tls {
 
-// clang-format off
 #define ALL_SSL_STATS(COUNTER, GAUGE, HISTOGRAM)                                                   \
   COUNTER(connection_error)                                                                        \
   COUNTER(handshake)                                                                               \
@@ -39,7 +38,6 @@ namespace Tls {
   COUNTER(fail_verify_error)                                                                       \
   COUNTER(fail_verify_san)                                                                         \
   COUNTER(fail_verify_cert_hash)
-// clang-format on
 
 /**
  * Wrapper struct for SSL stats. @see stats_macros.h
diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h
index fd6cc57c154c..2ce9f6b08dc3 100644
--- a/source/extensions/transport_sockets/tls/ssl_socket.h
+++ b/source/extensions/transport_sockets/tls/ssl_socket.h
@@ -24,12 +24,10 @@ namespace Extensions {
 namespace TransportSockets {
 namespace Tls {
 
-// clang-format off
-#define ALL_SSL_SOCKET_FACTORY_STATS(COUNTER)                                 \
-  COUNTER(ssl_context_update_by_sds)                                          \
-  COUNTER(upstream_context_secrets_not_ready)                                 \
+#define ALL_SSL_SOCKET_FACTORY_STATS(COUNTER)                                                      \
+  COUNTER(ssl_context_update_by_sds)                                                               \
+  COUNTER(upstream_context_secrets_not_ready)                                                      \
   COUNTER(downstream_context_secrets_not_ready)
-// clang-format on
 
 /**
  * Wrapper struct for SSL socket factory stats. @see stats_macros.h
diff --git a/source/server/config_validation/dispatcher.cc b/source/server/config_validation/dispatcher.cc
index 485f5ae2a1f6..1a75abe41935 100644
--- a/source/server/config_validation/dispatcher.cc
+++ b/source/server/config_validation/dispatcher.cc
@@ -17,7 +17,7 @@ Network::ClientConnectionPtr ValidationDispatcher::createClientConnection(
 }
 
 Network::DnsResolverSharedPtr ValidationDispatcher::createDnsResolver(
-    const std::vector<Network::Address::InstanceConstSharedPtr>&) {
+    const std::vector<Network::Address::InstanceConstSharedPtr>&, const bool) {
   return dns_resolver_;
 }
 
diff --git a/source/server/config_validation/dispatcher.h b/source/server/config_validation/dispatcher.h
index 4b3bd6d637a6..bdd989fc171e 100644
--- a/source/server/config_validation/dispatcher.h
+++ b/source/server/config_validation/dispatcher.h
@@ -23,8 +23,9 @@ class ValidationDispatcher : public DispatcherImpl {
   createClientConnection(Network::Address::InstanceConstSharedPtr,
                          Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&&,
                          const Network::ConnectionSocket::OptionsSharedPtr& options) override;
-  Network::DnsResolverSharedPtr createDnsResolver(
-      const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers) override;
+  Network::DnsResolverSharedPtr
+  createDnsResolver(const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,
+                    const bool use_tcp_for_dns_lookups) override;
   Network::ListenerPtr createListener(Network::SocketSharedPtr&&, Network::ListenerCallbacks&,
                                       bool bind_to_port) override;
 
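
For context, a minimal caller sketch of the widened interface (not part of the patch); `dispatcher`, the empty resolver list, and the boolean value are illustrative, with the flag mirroring the bootstrap field use_tcp_for_dns_lookups:

    // An empty resolver list falls back to the system DNS configuration.
    Network::DnsResolverSharedPtr resolver =
        dispatcher.createDnsResolver({}, /*use_tcp_for_dns_lookups=*/true);
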
diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h
index bc22ffc87645..97a6e89334dc 100644
--- a/source/server/config_validation/server.h
+++ b/source/server/config_validation/server.h
@@ -70,7 +70,7 @@ class ValidationInstance final : Logger::Loggable<Logger::Id::main>,
   Ssl::ContextManager& sslContextManager() override { return *ssl_context_manager_; }
   Event::Dispatcher& dispatcher() override { return *dispatcher_; }
   Network::DnsResolverSharedPtr dnsResolver() override {
-    return dispatcher().createDnsResolver({});
+    return dispatcher().createDnsResolver({}, false);
   }
   void drainListeners() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }
   DrainManager& drainManager() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }
@@ -132,7 +132,7 @@ class ValidationInstance final : Logger::Loggable<Logger::Id::main>,
   Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr,
                                               Network::Address::SocketType,
                                               const Network::Socket::OptionsSharedPtr&,
-                                              bool) override {
+                                              const ListenSocketCreationParams&) override {
     // Returned sockets are not currently used so we can return nothing here safely vs. a
     // validation mock.
     return nullptr;
diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc
index a390b3ae0e1e..21c5d6c1503c 100644
--- a/source/server/listener_impl.cc
+++ b/source/server/listener_impl.cc
@@ -26,19 +26,54 @@
 namespace Envoy {
 namespace Server {
 
-ListenSocketFactoryImplBase::ListenSocketFactoryImplBase(
-    ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr local_address,
-    Network::Address::SocketType socket_type, const Network::Socket::OptionsSharedPtr& options,
-    bool bind_to_port, const std::string& listener_name)
-    : factory_(factory), local_address_(local_address), socket_type_(socket_type),
-      options_(options), bind_to_port_(bind_to_port), listener_name_(listener_name) {}
-
-Network::SocketSharedPtr ListenSocketFactoryImplBase::createListenSocketAndApplyOptions() {
+ListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& factory,
+                                                 Network::Address::InstanceConstSharedPtr address,
+                                                 Network::Address::SocketType socket_type,
+                                                 const Network::Socket::OptionsSharedPtr& options,
+                                                 bool bind_to_port,
+                                                 const std::string& listener_name, bool reuse_port)
+    : factory_(factory), local_address_(address), socket_type_(socket_type), options_(options),
+      bind_to_port_(bind_to_port), listener_name_(listener_name), reuse_port_(reuse_port) {
+
+  bool create_socket = false;
+  if (local_address_->type() == Network::Address::Type::Ip) {
+    if (socket_type_ == Network::Address::SocketType::Datagram) {
+      ASSERT(reuse_port_);
+    }
+
+    if (!reuse_port_) {
+      // Create a socket that will be shared by all worker threads.
+      create_socket = true;
+    } else if (local_address_->ip()->port() == 0) {
+      // The port is 0, so create a socket here to reserve a real port number; all worker
+      // threads will then use the same port.
+      create_socket = true;
+    }
+  } else {
+    ASSERT(local_address_->type() == Network::Address::Type::Pipe);
+    // Listeners on a Unix domain socket always use a shared socket.
+    create_socket = true;
+  }
+
+  if (create_socket) {
+    socket_ = createListenSocketAndApplyOptions();
+  }
+
+  if (socket_ && local_address_->ip() && local_address_->ip()->port() == 0) {
+    local_address_ = socket_->localAddress();
+  }
+
+  ENVOY_LOG(debug, "Set listener {} socket factory local address to {}", listener_name_,
+            local_address_->asString());
+}
+
+Network::SocketSharedPtr ListenSocketFactoryImpl::createListenSocketAndApplyOptions() {
   // socket might be nullptr depending on factory_ implementation.
-  Network::SocketSharedPtr socket =
-      factory_.createListenSocket(local_address_, socket_type_, options_, bind_to_port_);
+  Network::SocketSharedPtr socket = factory_.createListenSocket(
+      local_address_, socket_type_, options_, {bind_to_port_, !reuse_port_});
+
   // Binding is done by now.
-  ENVOY_LOG(info, "Create listen socket for listener {} on address {}", listener_name_,
+  ENVOY_LOG(debug, "Create listen socket for listener {} on address {}", listener_name_,
             local_address_->asString());
   if (socket != nullptr && options_ != nullptr) {
     const bool ok = Network::Socket::applyOptions(options_, *socket,
@@ -59,42 +94,30 @@ Network::SocketSharedPtr ListenSocketFactoryImplBase::createListenSocketAndApply
   return socket;
 }
 
-void ListenSocketFactoryImplBase::setLocalAddress(
-    Network::Address::InstanceConstSharedPtr local_address) {
-  ENVOY_LOG(debug, "Set listener {} socket factory local address to {}", listener_name_,
-            local_address->asString());
-  local_address_ = local_address;
-}
-
-TcpListenSocketFactory::TcpListenSocketFactory(
-    ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr local_address,
-    const Network::Socket::OptionsSharedPtr& options, bool bind_to_port,
-    const std::string& listener_name)
-    : ListenSocketFactoryImplBase(factory, local_address, Network::Address::SocketType::Stream,
-                                  options, bind_to_port, listener_name) {
-  socket_ = createListenSocketAndApplyOptions();
-  if (socket_ != nullptr && localAddress()->ip() != nullptr && localAddress()->ip()->port() == 0) {
-    setLocalAddress(socket_->localAddress());
+Network::SocketSharedPtr ListenSocketFactoryImpl::getListenSocket() {
+  if (!reuse_port_) {
+    return socket_;
   }
-}
 
-Network::SocketSharedPtr TcpListenSocketFactory::getListenSocket() { return socket_; }
-
-UdpListenSocketFactory::UdpListenSocketFactory(
-    ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr local_address,
-    const Network::Socket::OptionsSharedPtr& options, bool bind_to_port,
-    const std::string& listener_name)
-    : ListenSocketFactoryImplBase(factory, local_address, Network::Address::SocketType::Datagram,
-                                  options, bind_to_port, listener_name) {}
-
-Network::SocketSharedPtr UdpListenSocketFactory::getListenSocket() {
-  // TODO(danzh) add support of SO_REUSEPORT. Currently calling this method twice will fail because
-  // the port is already in use.
-  Network::SocketSharedPtr socket = createListenSocketAndApplyOptions();
-  if (socket != nullptr && localAddress()->ip() != nullptr && localAddress()->ip()->port() == 0) {
-    setLocalAddress(socket->localAddress());
+  Network::SocketSharedPtr socket;
+  absl::call_once(steal_once_, [this, &socket]() {
+    if (socket_) {
+      // If a listener's port is set to 0, socket_ is created to reserve a port
+      // number and is handed over to the first worker thread that gets here.
+      // There are several reasons for doing this:
+      // - For UDP, once a socket is bound it begins to receive packets; it can't be
+      //   left unused, and closing it would lose the packets it has received.
+      // - The port number should be reserved before adding the listener to the
+      //   active_listeners_ list, otherwise the admin API /listeners might return 0 as the port.
+      socket = std::move(socket_);
+    }
+  });
+
+  if (socket) {
+    return socket;
   }
-  return socket;
+
+  return createListenSocketAndApplyOptions();
 }
 
 ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::string& version_info,
@@ -120,18 +143,21 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st
       listener_filters_timeout_(
           PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)),
       continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()) {
+  Network::Address::SocketType socket_type =
+      Network::Utility::protobufAddressSocketType(config.address());
   if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, transparent, false)) {
     addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions());
   }
   if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, freebind, false)) {
     addListenSocketOptions(Network::SocketOptionFactory::buildIpFreebindOptions());
   }
+  if ((socket_type == Network::Address::SocketType::Datagram) || config.reuse_port()) {
+    addListenSocketOptions(Network::SocketOptionFactory::buildReusePortOptions());
+  }
   if (!config.socket_options().empty()) {
     addListenSocketOptions(
         Network::SocketOptionFactory::buildLiteralOptions(config.socket_options()));
   }
-  Network::Address::SocketType socket_type =
-      Network::Utility::protobufAddressSocketType(config.address());
   if (socket_type == Network::Address::SocketType::Datagram) {
     // Needed for recvmsg to return destination address in IP header.
     addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions());
diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h
index 3b0498dc36a8..013d423ed50e 100644
--- a/source/server/listener_impl.h
+++ b/source/server/listener_impl.h
@@ -14,19 +14,21 @@
 
 #include "server/filter_chain_manager_impl.h"
 
+#include "absl/base/call_once.h"
+
 namespace Envoy {
 namespace Server {
 
 class ListenerManagerImpl;
 
-class ListenSocketFactoryImplBase : public Network::ListenSocketFactory,
-                                    protected Logger::Loggable<Logger::Id::config> {
+class ListenSocketFactoryImpl : public Network::ListenSocketFactory,
+                                protected Logger::Loggable<Logger::Id::config> {
 public:
-  ListenSocketFactoryImplBase(ListenerComponentFactory& factory,
-                              Network::Address::InstanceConstSharedPtr local_address,
-                              Network::Address::SocketType socket_type,
-                              const Network::Socket::OptionsSharedPtr& options, bool bind_to_port,
-                              const std::string& listener_name);
+  ListenSocketFactoryImpl(ListenerComponentFactory& factory,
+                          Network::Address::InstanceConstSharedPtr address,
+                          Network::Address::SocketType socket_type,
+                          const Network::Socket::OptionsSharedPtr& options, bool bind_to_port,
+                          const std::string& listener_name, bool reuse_port);
 
   // Network::ListenSocketFactory
   Network::Address::SocketType socketType() const override { return socket_type_; }
@@ -34,9 +36,23 @@ class ListenSocketFactoryImplBase : public Network::ListenSocketFactory,
     return local_address_;
   }
 
+  Network::SocketSharedPtr getListenSocket() override;
+
+  /**
+   * @return the socket shared by worker threads if reuse_port is disabled, absl::nullopt otherwise.
+   */
+  absl::optional<std::reference_wrapper<Network::Socket>> sharedSocket() const override {
+    if (!reuse_port_) {
+      ASSERT(socket_ != nullptr);
+      return *socket_;
+    }
+    // If reuse_port is true, always return absl::nullopt, even when socket_ was created to
+    // reserve a port number.
+    return absl::nullopt;
+  }
+
 protected:
   Network::SocketSharedPtr createListenSocketAndApplyOptions();
-  void setLocalAddress(Network::Address::InstanceConstSharedPtr local_address);
 
 private:
   ListenerComponentFactory& factory_;
@@ -47,41 +63,9 @@ class ListenSocketFactoryImplBase : public Network::ListenSocketFactory,
   const Network::Socket::OptionsSharedPtr options_;
   bool bind_to_port_;
   const std::string& listener_name_;
-};
-
-class TcpListenSocketFactory : public ListenSocketFactoryImplBase {
-public:
-  TcpListenSocketFactory(ListenerComponentFactory& factory,
-                         Network::Address::InstanceConstSharedPtr local_address,
-                         const Network::Socket::OptionsSharedPtr& options, bool bind_to_port,
-                         const std::string& listener_name);
-
-  // Network::ListenSocketFactory
-  // If |socket_| is nullptr, create a new socket for it. Otherwise, always return |socket_|.
-  Network::SocketSharedPtr getListenSocket() override;
-  absl::optional<std::reference_wrapper<Network::Socket>> sharedSocket() const override {
-    ASSERT(socket_ != nullptr);
-    return *socket_;
-  }
-
-private:
-  // This is currently always shared across all workers. In the future SO_REUSEPORT support will be
-  // added.
+  const bool reuse_port_;
   Network::SocketSharedPtr socket_;
-};
-
-class UdpListenSocketFactory : public ListenSocketFactoryImplBase {
-public:
-  UdpListenSocketFactory(ListenerComponentFactory& factory,
-                         Network::Address::InstanceConstSharedPtr local_address,
-                         const Network::Socket::OptionsSharedPtr& options, bool bind_to_port,
-                         const std::string& listener_name);
-
-  // Network::ListenSocketFactory
-  Network::SocketSharedPtr getListenSocket() override;
-  absl::optional<std::reference_wrapper<Network::Socket>> sharedSocket() const override {
-    return absl::nullopt;
-  }
+  absl::once_flag steal_once_;
 };
 
 // TODO(mattklein123): Consider getting rid of pre-worker start and post-worker start code by
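
A rough sketch (illustrative only; `factory`, `reuse_port` and the worker labels are hypothetical) of the per-worker behavior the unified ListenSocketFactoryImpl above is expected to provide:

    // With reuse_port == false every worker receives the same shared socket.
    // With reuse_port == true the first caller steals the socket that was pre-bound to reserve
    // a port (when the configured port is 0); later callers get a fresh SO_REUSEPORT socket
    // from createListenSocketAndApplyOptions().
    Network::SocketSharedPtr s1 = factory.getListenSocket(); // e.g. worker 1
    Network::SocketSharedPtr s2 = factory.getListenSocket(); // e.g. worker 2
    ASSERT(reuse_port || s1 == s2);
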
diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc
index 3d2ce8bc7546..1b1029161962 100644
--- a/source/server/listener_manager_impl.cc
+++ b/source/server/listener_manager_impl.cc
@@ -68,6 +68,15 @@ void fillState(envoy::admin::v2alpha::ListenersConfigDump_DynamicListenerState&
 
 } // namespace
 
+bool ListenSocketCreationParams::operator==(const ListenSocketCreationParams& rhs) const {
+  return (bind_to_port == rhs.bind_to_port) &&
+         (duplicate_parent_socket == rhs.duplicate_parent_socket);
+}
+
+bool ListenSocketCreationParams::operator!=(const ListenSocketCreationParams& rhs) const {
+  return !operator==(rhs);
+}
+
 std::vector<Network::FilterFactoryCb> ProdListenerComponentFactory::createNetworkFilterFactoryList_(
     const Protobuf::RepeatedPtrField<envoy::api::v2::listener::Filter>& filters,
     Configuration::FactoryContext& context) {
@@ -147,7 +156,7 @@ ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(
 
 Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket(
     Network::Address::InstanceConstSharedPtr address, Network::Address::SocketType socket_type,
-    const Network::Socket::OptionsSharedPtr& options, bool bind_to_port) {
+    const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) {
   ASSERT(address->type() == Network::Address::Type::Ip ||
          address->type() == Network::Address::Type::Pipe);
   ASSERT(socket_type == Network::Address::SocketType::Stream ||
@@ -178,7 +187,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket(
                                  : Network::Utility::UDP_SCHEME;
   const std::string addr = absl::StrCat(scheme, address->asString());
 
-  if (bind_to_port) {
+  if (params.bind_to_port && params.duplicate_parent_socket) {
     const int fd = server_.hotRestart().duplicateParentListenSocket(addr);
     if (fd != -1) {
       ENVOY_LOG(debug, "obtained socket for address {} from parent", addr);
@@ -192,9 +201,9 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket(
   }
 
   if (socket_type == Network::Address::SocketType::Stream) {
-    return std::make_shared<Network::TcpListenSocket>(address, options, bind_to_port);
+    return std::make_shared<Network::TcpListenSocket>(address, options, params.bind_to_port);
   } else {
-    return std::make_shared<Network::UdpListenSocket>(address, options, bind_to_port);
+    return std::make_shared<Network::UdpListenSocket>(address, options, params.bind_to_port);
   }
 }
 
@@ -414,10 +423,14 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal(const envoy::api::v2::List
       draining_listen_socket_factory = existing_draining_listener->listener_->getSocketFactory();
     }
 
+    Network::Address::SocketType socket_type =
+        Network::Utility::protobufAddressSocketType(config.address());
     new_listener->setSocketFactory(
         draining_listen_socket_factory
             ? draining_listen_socket_factory
-            : createListenSocketFactory(config.address(), *new_listener));
+            : createListenSocketFactory(config.address(), *new_listener,
+                                        (socket_type == Network::Address::SocketType::Datagram) ||
+                                            config.reuse_port()));
     if (workers_started_) {
       new_listener->debugLog("add warming listener");
       warming_listeners_.emplace_back(std::move(new_listener));
@@ -757,20 +770,12 @@ std::unique_ptr<Network::FilterChain> ListenerFilterChainFactoryBuilder::buildFi
 
 Network::ListenSocketFactorySharedPtr
 ListenerManagerImpl::createListenSocketFactory(const envoy::api::v2::core::Address& proto_address,
-                                               ListenerImpl& listener) {
+                                               ListenerImpl& listener, bool reuse_port) {
   Network::Address::SocketType socket_type =
       Network::Utility::protobufAddressSocketType(proto_address);
-  switch (socket_type) {
-  case Network::Address::SocketType::Stream:
-    return std::make_shared<TcpListenSocketFactory>(factory_, listener.address(),
-                                                    listener.listenSocketOptions(),
-                                                    listener.bindToPort(), listener.name());
-  case Network::Address::SocketType::Datagram:
-    return std::make_shared<UdpListenSocketFactory>(factory_, listener.address(),
-                                                    listener.listenSocketOptions(),
-                                                    listener.bindToPort(), listener.name());
-  }
-  NOT_REACHED_GCOVR_EXCL_LINE;
+  return std::make_shared<ListenSocketFactoryImpl>(
+      factory_, listener.address(), socket_type, listener.listenSocketOptions(),
+      listener.bindToPort(), listener.name(), reuse_port);
 }
 
 } // namespace Server
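
A brief sketch of how the new ListenSocketCreationParams aggregate is populated (illustrative; `factory`, `address`, `socket_type`, `options` and the field values are hypothetical):

    // Field order matches the brace-init at the call site above: {bind_to_port, duplicate_parent_socket}.
    // With reuse_port enabled each worker binds its own socket, so the parent listen socket is
    // not duplicated across a hot restart.
    ListenSocketCreationParams params{/*bind_to_port=*/true, /*duplicate_parent_socket=*/false};
    Network::SocketSharedPtr socket =
        factory.createListenSocket(address, socket_type, options, params);
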
diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h
index 2582920365e8..40170057da8b 100644
--- a/source/server/listener_manager_impl.h
+++ b/source/server/listener_manager_impl.h
@@ -81,7 +81,7 @@ class ProdListenerComponentFactory : public ListenerComponentFactory,
   Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address,
                                               Network::Address::SocketType socket_type,
                                               const Network::Socket::OptionsSharedPtr& options,
-                                              bool bind_to_port) override;
+                                              const ListenSocketCreationParams& params) override;
 
   DrainManagerPtr createDrainManager(envoy::api::v2::Listener::DrainType drain_type) override;
   uint64_t nextListenerTag() override { return next_listener_tag_++; }
@@ -208,7 +208,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable<Logger::Id:
 
   Network::ListenSocketFactorySharedPtr
   createListenSocketFactory(const envoy::api::v2::core::Address& proto_address,
-                            ListenerImpl& listener);
+                            ListenerImpl& listener, bool reuse_port);
 
   // Active listeners are listeners that are currently accepting new connections on the workers.
   ListenerList active_listeners_;
diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc
index 59aceafddd61..b12863b10112 100644
--- a/source/server/options_impl.cc
+++ b/source/server/options_impl.cc
@@ -18,9 +18,24 @@
 #include "tclap/CmdLine.h"
 
 namespace Envoy {
+namespace {
+std::vector<std::string> toArgsVector(int argc, const char* const* argv) {
+  std::vector<std::string> args;
+  for (int i = 0; i < argc; ++i) {
+    args.emplace_back(argv[i]);
+  }
+  return args;
+}
+} // namespace
+
 OptionsImpl::OptionsImpl(int argc, const char* const* argv,
                          const HotRestartVersionCb& hot_restart_version_cb,
                          spdlog::level::level_enum default_log_level)
+    : OptionsImpl(toArgsVector(argc, argv), hot_restart_version_cb, default_log_level) {}
+
+OptionsImpl::OptionsImpl(std::vector<std::string> args,
+                         const HotRestartVersionCb& hot_restart_version_cb,
+                         spdlog::level::level_enum default_log_level)
     : signal_handling_enabled_(true) {
   std::string log_levels_string = "Log levels: ";
   for (auto level_string_view : spdlog::level::level_string_views) {
@@ -118,7 +133,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv,
                                               "bool", cmd);
   cmd.setExceptionHandling(false);
   try {
-    cmd.parse(argc, argv);
+    cmd.parse(args);
     count_ = cmd.getArgList().size();
   } catch (TCLAP::ArgException& e) {
     try {
diff --git a/source/server/options_impl.h b/source/server/options_impl.h
index 2d635fd91b9c..b5a209329d21 100644
--- a/source/server/options_impl.h
+++ b/source/server/options_impl.h
@@ -24,7 +24,7 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable<Logger::I
   using HotRestartVersionCb = std::function<std::string(bool)>;
 
   /**
-   * @throw NoServingException if Envoy has already done everything specified by the argv (e.g.
+   * @throw NoServingException if Envoy has already done everything specified by the args (e.g.
    *        print the hot restart version) and it's time to exit without serving HTTP traffic. The
    *        caller should exit(0) after any necessary cleanup.
    * @throw MalformedArgvException if something is wrong with the arguments (invalid flag or flag
@@ -33,6 +33,16 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable<Logger::I
   OptionsImpl(int argc, const char* const* argv, const HotRestartVersionCb& hot_restart_version_cb,
               spdlog::level::level_enum default_log_level);
 
+  /**
+   * @throw NoServingException if Envoy has already done everything specified by the args (e.g.
+   *        print the hot restart version) and it's time to exit without serving HTTP traffic. The
+   *        caller should exit(0) after any necessary cleanup.
+   * @throw MalformedArgvException if something is wrong with the arguments (invalid flag or flag
+   *        value). The caller should call exit(1) after any necessary cleanup.
+   */
+  OptionsImpl(std::vector<std::string> args, const HotRestartVersionCb& hot_restart_version_cb,
+              spdlog::level::level_enum default_log_level);
+
   // Test constructor; creates "reasonable" defaults, but desired values should be set explicitly.
   OptionsImpl(const std::string& service_cluster, const std::string& service_node,
               const std::string& service_zone, spdlog::level::level_enum log_level);
diff --git a/source/server/server.cc b/source/server/server.cc
index 23ae0f8561c1..7a3e604d6c2c 100644
--- a/source/server/server.cc
+++ b/source/server/server.cc
@@ -67,7 +67,6 @@ InstanceImpl::InstanceImpl(
       handler_(new ConnectionHandlerImpl(*dispatcher_, "main_thread")),
       random_generator_(std::move(random_generator)), listener_component_factory_(*this),
       worker_factory_(thread_local_, *api_, hooks),
-      dns_resolver_(dispatcher_->createDnsResolver({})),
       access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock,
                           store),
       terminated_(false),
@@ -371,6 +370,9 @@ void InstanceImpl::initialize(const Options& options,
   // Once we have runtime we can initialize the SSL context manager.
   ssl_context_manager_ = createContextManager(Ssl::ContextManagerFactory::name(), time_source_);
 
+  const bool use_tcp_for_dns_lookups = bootstrap_.use_tcp_for_dns_lookups();
+  dns_resolver_ = dispatcher_->createDnsResolver({}, use_tcp_for_dns_lookups);
+
   cluster_manager_factory_ = std::make_unique<Upstream::ProdClusterManagerFactory>(
       *admin_, Runtime::LoaderSingleton::get(), stats_store_, thread_local_, *random_generator_,
       dns_resolver_, *ssl_context_manager_, *dispatcher_, *local_info_, *secret_manager_,
diff --git a/test/common/access_log/access_log_formatter_speed_test.cc b/test/common/access_log/access_log_formatter_speed_test.cc
index 595f6e1f5c87..a0c3611be5a7 100644
--- a/test/common/access_log/access_log_formatter_speed_test.cc
+++ b/test/common/access_log/access_log_formatter_speed_test.cc
@@ -9,6 +9,8 @@
 namespace {
 
 static std::unique_ptr<Envoy::AccessLog::FormatterImpl> formatter;
+static std::unique_ptr<Envoy::AccessLog::JsonFormatterImpl> json_formatter;
+static std::unique_ptr<Envoy::AccessLog::JsonFormatterImpl> typed_json_formatter;
 static std::unique_ptr<Envoy::TestStreamInfo> stream_info;
 
 } // namespace
@@ -29,6 +31,34 @@ static void BM_AccessLogFormatter(benchmark::State& state) {
 }
 BENCHMARK(BM_AccessLogFormatter);
 
+static void BM_JsonAccessLogFormatter(benchmark::State& state) {
+  size_t output_bytes = 0;
+  Http::TestHeaderMapImpl request_headers;
+  Http::TestHeaderMapImpl response_headers;
+  Http::TestHeaderMapImpl response_trailers;
+  for (auto _ : state) {
+    output_bytes +=
+        json_formatter->format(request_headers, response_headers, response_trailers, *stream_info)
+            .length();
+  }
+  benchmark::DoNotOptimize(output_bytes);
+}
+BENCHMARK(BM_JsonAccessLogFormatter);
+
+static void BM_TypedJsonAccessLogFormatter(benchmark::State& state) {
+  size_t output_bytes = 0;
+  Http::TestHeaderMapImpl request_headers;
+  Http::TestHeaderMapImpl response_headers;
+  Http::TestHeaderMapImpl response_trailers;
+  for (auto _ : state) {
+    output_bytes += typed_json_formatter
+                        ->format(request_headers, response_headers, response_trailers, *stream_info)
+                        .length();
+  }
+  benchmark::DoNotOptimize(output_bytes);
+}
+BENCHMARK(BM_TypedJsonAccessLogFormatter);
+
 } // namespace Envoy
 
 // Boilerplate main(), which discovers benchmarks in the same file and runs them.
@@ -40,6 +70,22 @@ int main(int argc, char** argv) {
       "s%RESPONSE_CODE% %BYTES_SENT% %DURATION% %REQ(REFERER)% \"%REQ(USER-AGENT)%\" - - -\n";
 
   formatter = std::make_unique<Envoy::AccessLog::FormatterImpl>(LogFormat);
+
+  std::unordered_map<std::string, std::string> JsonLogFormat = {
+      {"remote_address", "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%"},
+      {"start_time", "%START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%"},
+      {"method", "%REQ(:METHOD)%"},
+      {"url", "%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%"},
+      {"protocol", "%PROTOCOL%"},
+      {"response_code", "%RESPONSE_CODE%"},
+      {"bytes_sent", "%BYTES_SENT%"},
+      {"duration", "%DURATION%"},
+      {"referer", "%REQ(REFERER)%"},
+      {"user-agent", "%REQ(USER-AGENT)%"}};
+
+  json_formatter = std::make_unique<Envoy::AccessLog::JsonFormatterImpl>(JsonLogFormat, false);
+  typed_json_formatter = std::make_unique<Envoy::AccessLog::JsonFormatterImpl>(JsonLogFormat, true);
+
   stream_info = std::make_unique<Envoy::TestStreamInfo>();
   stream_info->setDownstreamRemoteAddress(
       std::make_shared<Envoy::Network::Address::Ipv4Instance>("203.0.113.1"));
diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc
index c5ff7b41c30a..dbcfbcde89ba 100644
--- a/test/common/access_log/access_log_formatter_test.cc
+++ b/test/common/access_log/access_log_formatter_test.cc
@@ -27,6 +27,27 @@ namespace Envoy {
 namespace AccessLog {
 namespace {
 
+const ProtobufWkt::Value& nullValue() {
+  static const auto* v = []() -> ProtobufWkt::Value* {
+    auto* vv = new ProtobufWkt::Value();
+    vv->set_null_value(ProtobufWkt::NULL_VALUE);
+    return vv;
+  }();
+  return *v;
+}
+
+ProtobufWkt::Value stringValue(const std::string& str) {
+  ProtobufWkt::Value val;
+  val.set_string_value(str);
+  return val;
+}
+
+ProtobufWkt::Value numberValue(double num) {
+  ProtobufWkt::Value val;
+  val.set_number_value(num);
+  return val;
+}
+
 class TestSerializedUnknownFilterState : public StreamInfo::FilterState::Object {
 public:
   ProtobufTypes::MessagePtr serializeAsProto() const override {
@@ -37,6 +58,38 @@ class TestSerializedUnknownFilterState : public StreamInfo::FilterState::Object
   }
 };
 
+class TestSerializedStructFilterState : public StreamInfo::FilterState::Object {
+public:
+  TestSerializedStructFilterState() : use_struct_(true) {
+    (*struct_.mutable_fields())["inner_key"] = stringValue("inner_value");
+  }
+
+  explicit TestSerializedStructFilterState(const ProtobufWkt::Struct& s) : use_struct_(true) {
+    struct_.CopyFrom(s);
+  }
+
+  explicit TestSerializedStructFilterState(std::chrono::seconds seconds) : use_struct_(false) {
+    duration_.set_seconds(seconds.count());
+  }
+
+  ProtobufTypes::MessagePtr serializeAsProto() const override {
+    if (use_struct_) {
+      auto s = std::make_unique<ProtobufWkt::Struct>();
+      s->CopyFrom(struct_);
+      return s;
+    }
+
+    auto d = std::make_unique<ProtobufWkt::Duration>();
+    d->CopyFrom(duration_);
+    return d;
+  }
+
+private:
+  const bool use_struct_;
+  ProtobufWkt::Struct struct_;
+  ProtobufWkt::Duration duration_;
+};
+
 TEST(AccessLogFormatUtilsTest, protocolToString) {
   EXPECT_EQ("HTTP/1.0", AccessLogFormatUtils::protocolToString(Http::Protocol::Http10));
   EXPECT_EQ("HTTP/1.1", AccessLogFormatUtils::protocolToString(Http::Protocol::Http11));
@@ -50,6 +103,8 @@ TEST(AccessLogFormatterTest, plainStringFormatter) {
   StreamInfo::MockStreamInfo stream_info;
 
   EXPECT_EQ("plain", formatter.format(header, header, header, stream_info));
+  EXPECT_THAT(formatter.formatValue(header, header, header, stream_info),
+              ProtoEq(stringValue("plain")));
 }
 
 TEST(AccessLogFormatterTest, streamInfoFormatter) {
@@ -61,15 +116,19 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
   {
     StreamInfoFormatter request_duration_format("REQUEST_DURATION");
     absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(5000000);
-    EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillOnce(Return(dur));
+    EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur));
     EXPECT_EQ("5", request_duration_format.format(header, header, header, stream_info));
+    EXPECT_THAT(request_duration_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(5.0)));
   }
 
   {
     StreamInfoFormatter request_duration_format("REQUEST_DURATION");
     absl::optional<std::chrono::nanoseconds> dur;
-    EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillOnce(Return(dur));
+    EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur));
     EXPECT_EQ("-", request_duration_format.format(header, header, header, stream_info));
+    EXPECT_THAT(request_duration_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
 
   {
@@ -77,6 +136,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(10000000);
     EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur));
     EXPECT_EQ("10", response_duration_format.format(header, header, header, stream_info));
+    EXPECT_THAT(response_duration_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(10.0)));
   }
 
   {
@@ -84,6 +145,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     absl::optional<std::chrono::nanoseconds> dur;
     EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur));
     EXPECT_EQ("-", response_duration_format.format(header, header, header, stream_info));
+    EXPECT_THAT(response_duration_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
 
   {
@@ -95,6 +158,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream));
 
     EXPECT_EQ("15", ttlb_duration_format.format(header, header, header, stream_info));
+    EXPECT_THAT(ttlb_duration_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(15.0)));
   }
 
   {
@@ -106,19 +171,25 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream));
 
     EXPECT_EQ("-", ttlb_duration_format.format(header, header, header, stream_info));
+    EXPECT_THAT(ttlb_duration_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
 
   {
     StreamInfoFormatter bytes_received_format("BYTES_RECEIVED");
-    EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(1));
+    EXPECT_CALL(stream_info, bytesReceived()).WillRepeatedly(Return(1));
     EXPECT_EQ("1", bytes_received_format.format(header, header, header, stream_info));
+    EXPECT_THAT(bytes_received_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(1.0)));
   }
 
   {
     StreamInfoFormatter protocol_format("PROTOCOL");
     absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;
-    EXPECT_CALL(stream_info, protocol()).WillOnce(Return(protocol));
+    EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));
     EXPECT_EQ("HTTP/1.1", protocol_format.format(header, header, header, stream_info));
+    EXPECT_THAT(protocol_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("HTTP/1.1")));
   }
 
   {
@@ -126,6 +197,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     absl::optional<uint32_t> response_code{200};
     EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code));
     EXPECT_EQ("200", response_format.format(header, header, header, stream_info));
+    EXPECT_THAT(response_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(200.0)));
   }
 
   {
@@ -133,6 +206,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     absl::optional<uint32_t> response_code;
     EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code));
     EXPECT_EQ("0", response_code_format.format(header, header, header, stream_info));
+    EXPECT_THAT(response_code_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(0.0)));
   }
 
   {
@@ -140,6 +215,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     absl::optional<std::string> rc_details;
     EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details));
     EXPECT_EQ("-", response_format.format(header, header, header, stream_info));
+    EXPECT_THAT(response_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
 
   {
@@ -147,12 +224,16 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     absl::optional<std::string> rc_details{"via_upstream"};
     EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details));
     EXPECT_EQ("via_upstream", response_code_format.format(header, header, header, stream_info));
+    EXPECT_THAT(response_code_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("via_upstream")));
   }
 
   {
     StreamInfoFormatter bytes_sent_format("BYTES_SENT");
-    EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(1));
+    EXPECT_CALL(stream_info, bytesSent()).WillRepeatedly(Return(1));
     EXPECT_EQ("1", bytes_sent_format.format(header, header, header, stream_info));
+    EXPECT_THAT(bytes_sent_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(1.0)));
   }
 
   {
@@ -160,6 +241,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(15000000);
     EXPECT_CALL(stream_info, requestComplete()).WillRepeatedly(Return(dur));
     EXPECT_EQ("15", duration_format.format(header, header, header, stream_info));
+    EXPECT_THAT(duration_format.formatValue(header, header, header, stream_info),
+                ProtoEq(numberValue(15.0)));
   }
 
   {
@@ -167,60 +250,83 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::LocalReset))
         .WillByDefault(Return(true));
     EXPECT_EQ("LR", response_flags_format.format(header, header, header, stream_info));
+    EXPECT_THAT(response_flags_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("LR")));
   }
 
   {
     StreamInfoFormatter upstream_format("UPSTREAM_HOST");
     EXPECT_EQ("10.0.0.1:443", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("10.0.0.1:443")));
   }
 
   {
     StreamInfoFormatter upstream_format("UPSTREAM_CLUSTER");
     const std::string upstream_cluster_name = "cluster_name";
-    EXPECT_CALL(stream_info.host_->cluster_, name()).WillOnce(ReturnRef(upstream_cluster_name));
+    EXPECT_CALL(stream_info.host_->cluster_, name())
+        .WillRepeatedly(ReturnRef(upstream_cluster_name));
     EXPECT_EQ("cluster_name", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("cluster_name")));
   }
 
   {
     StreamInfoFormatter upstream_format("UPSTREAM_HOST");
-    EXPECT_CALL(stream_info, upstreamHost()).WillOnce(Return(nullptr));
+    EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
 
   {
     StreamInfoFormatter upstream_format("UPSTREAM_CLUSTER");
-    EXPECT_CALL(stream_info, upstreamHost()).WillOnce(Return(nullptr));
+    EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
 
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_ADDRESS");
     EXPECT_EQ("127.0.0.2:0", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("127.0.0.2:0")));
   }
 
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT");
     EXPECT_EQ("127.0.0.2", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("127.0.0.2")));
   }
 
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT");
     EXPECT_EQ("127.0.0.1", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("127.0.0.1")));
   }
 
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_REMOTE_ADDRESS");
     EXPECT_EQ("127.0.0.1:0", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("127.0.0.1:0")));
   }
 
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT");
     EXPECT_EQ("127.0.0.1", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("127.0.0.1")));
   }
 
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS");
     EXPECT_EQ("127.0.0.1:0", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("127.0.0.1:0")));
   }
 
   {
@@ -229,6 +335,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(stream_info, requestedServerName())
         .WillRepeatedly(ReturnRef(requested_server_name));
     EXPECT_EQ("stub_server", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("stub_server")));
   }
 
   {
@@ -237,6 +345,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(stream_info, requestedServerName())
         .WillRepeatedly(ReturnRef(requested_server_name));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN");
@@ -245,6 +355,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("san", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("san")));
   }
 
   {
@@ -262,11 +374,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(Return(std::vector<std::string>()));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN");
@@ -275,6 +391,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("san", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("san")));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN");
@@ -291,11 +409,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(Return(std::vector<std::string>()));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT");
@@ -305,6 +427,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(subject_local));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("subject", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("subject")));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT");
@@ -313,11 +437,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(EMPTY_STRING));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT");
@@ -326,6 +454,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("subject", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("subject")));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT");
@@ -333,11 +463,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_SESSION_ID");
@@ -346,6 +480,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(session_id));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("deadbeef", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("deadbeef")));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_SESSION_ID");
@@ -353,11 +489,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(EMPTY_STRING));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_SESSION_ID");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER");
@@ -374,11 +514,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, ciphersuiteString()).WillRepeatedly(Return(""));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_VERSION");
@@ -387,6 +531,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(tlsVersion));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("TLSv1.2", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("TLSv1.2")));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_VERSION");
@@ -394,11 +540,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(EMPTY_STRING));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_VERSION");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_256");
@@ -408,6 +558,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(expected_sha));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ(expected_sha, upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue(expected_sha)));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_256");
@@ -417,11 +569,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(expected_sha));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_256");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SERIAL");
@@ -431,6 +587,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(serial_number));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("b8b5ecc898f2124a", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("b8b5ecc898f2124a")));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SERIAL");
@@ -439,11 +597,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(EMPTY_STRING));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SERIAL");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER");
@@ -461,11 +623,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT");
@@ -483,11 +649,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT");
@@ -497,6 +667,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(expected_cert));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ(expected_cert, upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue(expected_cert)));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT");
@@ -506,11 +678,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(ReturnRef(expected_cert));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_START");
@@ -529,11 +705,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(absl::nullopt));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_START");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_END");
@@ -553,11 +733,15 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
         .WillRepeatedly(Return(absl::nullopt));
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr));
     StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_END");
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
   {
     StreamInfoFormatter upstream_format("UPSTREAM_TRANSPORT_FAILURE_REASON");
@@ -565,6 +749,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(stream_info, upstreamTransportFailureReason())
         .WillRepeatedly(ReturnRef(upstream_transport_failure_reason));
     EXPECT_EQ("SSL error", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("SSL error")));
   }
 
   {
@@ -573,6 +759,8 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) {
     EXPECT_CALL(stream_info, upstreamTransportFailureReason())
         .WillRepeatedly(ReturnRef(upstream_transport_failure_reason));
     EXPECT_EQ("-", upstream_format.format(header, header, header, stream_info));
+    EXPECT_THAT(upstream_format.formatValue(header, header, header, stream_info),
+                ProtoEq(nullValue()));
   }
 }
 
@@ -586,24 +774,45 @@ TEST(AccessLogFormatterTest, requestHeaderFormatter) {
     RequestHeaderFormatter formatter(":Method", "", absl::optional<size_t>());
     EXPECT_EQ("GET",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("GET")));
   }
 
   {
     RequestHeaderFormatter formatter(":path", ":method", absl::optional<size_t>());
     EXPECT_EQ("/",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("/")));
   }
 
   {
     RequestHeaderFormatter formatter(":TEST", ":METHOD", absl::optional<size_t>());
     EXPECT_EQ("GET",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("GET")));
   }
 
   {
     RequestHeaderFormatter formatter("does_not_exist", "", absl::optional<size_t>());
     EXPECT_EQ("-",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(nullValue()));
+  }
+
+  {
+    RequestHeaderFormatter formatter(":Method", "", absl::optional<size_t>(2));
+    EXPECT_EQ("GE",
+              formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("GE")));
   }
 }
 
@@ -617,24 +826,45 @@ TEST(AccessLogFormatterTest, responseHeaderFormatter) {
     ResponseHeaderFormatter formatter(":method", "", absl::optional<size_t>());
     EXPECT_EQ("PUT",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("PUT")));
   }
 
   {
     ResponseHeaderFormatter formatter("test", ":method", absl::optional<size_t>());
     EXPECT_EQ("test",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("test")));
   }
 
   {
     ResponseHeaderFormatter formatter(":path", ":method", absl::optional<size_t>());
     EXPECT_EQ("PUT",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("PUT")));
   }
 
   {
     ResponseHeaderFormatter formatter("does_not_exist", "", absl::optional<size_t>());
     EXPECT_EQ("-",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(nullValue()));
+  }
+
+  {
+    ResponseHeaderFormatter formatter(":method", "", absl::optional<size_t>(2));
+    EXPECT_EQ("PU",
+              formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("PU")));
   }
 }
 
@@ -648,24 +878,45 @@ TEST(AccessLogFormatterTest, responseTrailerFormatter) {
     ResponseTrailerFormatter formatter(":method", "", absl::optional<size_t>());
     EXPECT_EQ("POST",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("POST")));
   }
 
   {
     ResponseTrailerFormatter formatter("test-2", ":method", absl::optional<size_t>());
     EXPECT_EQ("test-2",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("test-2")));
   }
 
   {
     ResponseTrailerFormatter formatter(":path", ":method", absl::optional<size_t>());
     EXPECT_EQ("POST",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("POST")));
   }
 
   {
     ResponseTrailerFormatter formatter("does_not_exist", "", absl::optional<size_t>());
     EXPECT_EQ("-",
               formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(nullValue()));
+  }
+
+  {
+    ResponseTrailerFormatter formatter(":method", "", absl::optional<size_t>(2));
+    EXPECT_EQ("PO",
+              formatter.format(request_header, response_header, response_trailer, stream_info));
+    EXPECT_THAT(
+        formatter.formatValue(request_header, response_header, response_trailer, stream_info),
+        ProtoEq(stringValue("PO")));
   }
 }
 
@@ -675,66 +926,161 @@ TEST(AccessLogFormatterTest, responseTrailerFormatter) {
  */
 void populateMetadataTestData(envoy::api::v2::core::Metadata& metadata) {
   ProtobufWkt::Struct struct_obj;
-  ProtobufWkt::Value val;
   auto& fields_map = *struct_obj.mutable_fields();
-  val.set_string_value("test_value");
-  fields_map["test_key"] = val;
-  val.set_string_value("inner_value");
+  fields_map["test_key"] = stringValue("test_value");
   ProtobufWkt::Struct struct_inner;
-  (*struct_inner.mutable_fields())["inner_key"] = val;
-  val.clear_string_value();
+  (*struct_inner.mutable_fields())["inner_key"] = stringValue("inner_value");
+  ProtobufWkt::Value val;
   *val.mutable_struct_value() = struct_inner;
   fields_map["test_obj"] = val;
   (*metadata.mutable_filter_metadata())["com.test"] = struct_obj;
 }
 
-TEST(AccessLogFormatterTest, dynamicMetadataFormatter) {
+TEST(AccessLogFormatterTest, DynamicMetadataFormatter) {
   envoy::api::v2::core::Metadata metadata;
   populateMetadataTestData(metadata);
+  NiceMock<StreamInfo::MockStreamInfo> stream_info;
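+  // Stub both the const and non-const dynamicMetadata() accessors so either overload returns
+  // the test metadata.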
+  EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));
+  EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));
+  Http::TestHeaderMapImpl header;
 
   {
-    MetadataFormatter formatter("com.test", {}, absl::optional<size_t>());
-    std::string json = formatter.format(metadata);
-    EXPECT_TRUE(json.find("\"test_key\":\"test_value\"") != std::string::npos);
-    EXPECT_TRUE(json.find("\"test_obj\":{\"inner_key\":\"inner_value\"}") != std::string::npos);
+    DynamicMetadataFormatter formatter("com.test", {}, absl::optional<size_t>());
+    std::string val = formatter.format(header, header, header, stream_info);
+    EXPECT_TRUE(val.find("\"test_key\":\"test_value\"") != std::string::npos);
+    EXPECT_TRUE(val.find("\"test_obj\":{\"inner_key\":\"inner_value\"}") != std::string::npos);
+
+    ProtobufWkt::Value expected_val;
+    expected_val.mutable_struct_value()->CopyFrom(metadata.filter_metadata().at("com.test"));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(expected_val));
   }
   {
-    MetadataFormatter formatter("com.test", {"test_key"}, absl::optional<size_t>());
-    std::string json = formatter.format(metadata);
-    EXPECT_EQ("\"test_value\"", json);
+    DynamicMetadataFormatter formatter("com.test", {"test_key"}, absl::optional<size_t>());
+    EXPECT_EQ("\"test_value\"", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("test_value")));
   }
   {
-    MetadataFormatter formatter("com.test", {"test_obj"}, absl::optional<size_t>());
-    std::string json = formatter.format(metadata);
-    EXPECT_EQ("{\"inner_key\":\"inner_value\"}", json);
+    DynamicMetadataFormatter formatter("com.test", {"test_obj"}, absl::optional<size_t>());
+    EXPECT_EQ("{\"inner_key\":\"inner_value\"}",
+              formatter.format(header, header, header, stream_info));
+
+    ProtobufWkt::Value expected_val;
+    (*expected_val.mutable_struct_value()->mutable_fields())["inner_key"] =
+        stringValue("inner_value");
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(expected_val));
   }
   {
-    MetadataFormatter formatter("com.test", {"test_obj", "inner_key"}, absl::optional<size_t>());
-    std::string json = formatter.format(metadata);
-    EXPECT_EQ("\"inner_value\"", json);
+    DynamicMetadataFormatter formatter("com.test", {"test_obj", "inner_key"},
+                                       absl::optional<size_t>());
+    EXPECT_EQ("\"inner_value\"", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("inner_value")));
   }
+
   // not found cases
   {
-    MetadataFormatter formatter("com.notfound", {}, absl::optional<size_t>());
-    EXPECT_EQ("-", formatter.format(metadata));
+    DynamicMetadataFormatter formatter("com.notfound", {}, absl::optional<size_t>());
+    EXPECT_EQ("-", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(nullValue()));
+  }
+  {
+    DynamicMetadataFormatter formatter("com.test", {"notfound"}, absl::optional<size_t>());
+    EXPECT_EQ("-", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(nullValue()));
+  }
+  {
+    DynamicMetadataFormatter formatter("com.test", {"test_obj", "notfound"},
+                                       absl::optional<size_t>());
+    EXPECT_EQ("-", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(nullValue()));
+  }
+
+  // size limit
+  {
+    DynamicMetadataFormatter formatter("com.test", {"test_key"}, absl::optional<size_t>(5));
+    EXPECT_EQ("\"test", formatter.format(header, header, header, stream_info));
+
+    // N.B. Does not truncate.
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("test_value")));
+  }
+}
+
+TEST(AccessLogFormatterTest, FilterStateFormatter) {
+  Http::TestHeaderMapImpl header;
+  StreamInfo::MockStreamInfo stream_info;
+  stream_info.filter_state_.setData("key",
+                                    std::make_unique<Router::StringAccessorImpl>("test_value"),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
+  stream_info.filter_state_.setData("key-struct",
+                                    std::make_unique<TestSerializedStructFilterState>(),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
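+  // A bare FilterState::Object has no proto serialization, so this entry is expected to format
+  // as "-".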
+  stream_info.filter_state_.setData("key-no-serialization",
+                                    std::make_unique<StreamInfo::FilterState::Object>(),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
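+  // This duration (roughly -2^48 seconds) is far outside what a google.protobuf.Duration can
+  // represent, so serializing this entry is expected to fail.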
+  stream_info.filter_state_.setData(
+      "key-serialization-error",
+      std::make_unique<TestSerializedStructFilterState>(std::chrono::seconds(-281474976710656)),
+      StreamInfo::FilterState::StateType::ReadOnly);
+  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));
+
+  {
+    FilterStateFormatter formatter("key", absl::optional<size_t>());
+
+    EXPECT_EQ("\"test_value\"", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("test_value")));
+  }
+  {
+    FilterStateFormatter formatter("key-struct", absl::optional<size_t>());
+
+    EXPECT_EQ("{\"inner_key\":\"inner_value\"}",
+              formatter.format(header, header, header, stream_info));
+
+    ProtobufWkt::Value expected;
+    (*expected.mutable_struct_value()->mutable_fields())["inner_key"] = stringValue("inner_value");
+
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(expected));
+  }
+
+  // not found case
+  {
+    FilterStateFormatter formatter("key-not-found", absl::optional<size_t>());
+
+    EXPECT_EQ("-", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(nullValue()));
   }
+
+  // no serialization case
   {
-    MetadataFormatter formatter("com.test", {"notfound"}, absl::optional<size_t>());
-    EXPECT_EQ("-", formatter.format(metadata));
+    FilterStateFormatter formatter("key-no-serialization", absl::optional<size_t>());
+
+    EXPECT_EQ("-", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(nullValue()));
   }
+
+  // serialization error case
   {
-    MetadataFormatter formatter("com.test", {"test_obj", "notfound"}, absl::optional<size_t>());
-    EXPECT_EQ("-", formatter.format(metadata));
+    FilterStateFormatter formatter("key-serialization-error", absl::optional<size_t>());
+
+    EXPECT_EQ("-", formatter.format(header, header, header, stream_info));
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info), ProtoEq(nullValue()));
   }
+
   // size limit
   {
-    MetadataFormatter formatter("com.test", {"test_key"}, absl::optional<size_t>(5));
-    std::string json = formatter.format(metadata);
-    EXPECT_EQ("\"test", json);
+    FilterStateFormatter formatter("key", absl::optional<size_t>(5));
+
+    EXPECT_EQ("\"test", formatter.format(header, header, header, stream_info));
+
+    // N.B. Does not truncate.
+    EXPECT_THAT(formatter.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("test_value")));
   }
 }
 
-TEST(AccessLogFormatterTest, startTimeFormatter) {
+TEST(AccessLogFormatterTest, StartTimeFormatter) {
   NiceMock<StreamInfo::MockStreamInfo> stream_info;
   Http::TestHeaderMapImpl header{{":method", "GET"}, {":path", "/"}};
 
@@ -742,16 +1088,20 @@ TEST(AccessLogFormatterTest, startTimeFormatter) {
     StartTimeFormatter start_time_format("%Y/%m/%d");
     time_t test_epoch = 1522280158;
     SystemTime time = std::chrono::system_clock::from_time_t(test_epoch);
-    EXPECT_CALL(stream_info, startTime()).WillOnce(Return(time));
+    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time));
     EXPECT_EQ("2018/03/28", start_time_format.format(header, header, header, stream_info));
+    EXPECT_THAT(start_time_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue("2018/03/28")));
   }
 
   {
     StartTimeFormatter start_time_format("");
     SystemTime time;
-    EXPECT_CALL(stream_info, startTime()).WillOnce(Return(time));
+    EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time));
     EXPECT_EQ(AccessLogDateTimeFormatter::fromTime(time),
               start_time_format.format(header, header, header, stream_info));
+    EXPECT_THAT(start_time_format.formatValue(header, header, header, stream_info),
+                ProtoEq(stringValue(AccessLogDateTimeFormatter::fromTime(time))));
   }
 }
 
@@ -786,7 +1136,7 @@ TEST(AccessLogFormatterTest, JsonFormatterPlainStringTest) {
 
   std::unordered_map<std::string, std::string> key_mapping = {
       {"plain_string", "plain_string_value"}};
-  JsonFormatterImpl formatter(key_mapping);
+  JsonFormatterImpl formatter(key_mapping, false);
 
   verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info),
                    expected_json_map);
@@ -806,7 +1156,7 @@ TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) {
   std::unordered_map<std::string, std::string> expected_json_map = {{"protocol", "HTTP/1.1"}};
 
   std::unordered_map<std::string, std::string> key_mapping = {{"protocol", "%PROTOCOL%"}};
-  JsonFormatterImpl formatter(key_mapping);
+  JsonFormatterImpl formatter(key_mapping, false);
 
   verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info),
                    expected_json_map);
@@ -829,7 +1179,7 @@ TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) {
       {"some_request_header", "%REQ(some_request_header)%"},
       {"nonexistent_response_header", "%RESP(nonexistent_response_header)%"},
       {"some_response_header", "%RESP(some_response_header)%"}};
-  JsonFormatterImpl formatter(key_mapping);
+  JsonFormatterImpl formatter(key_mapping, false);
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;
   EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));
@@ -859,7 +1209,7 @@ TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) {
        "%RESP(response_absent_header?response_present_header)%"},
       {"response_present_header_or_response_absent_header",
        "%RESP(response_present_header?response_absent_header)%"}};
-  JsonFormatterImpl formatter(key_mapping);
+  JsonFormatterImpl formatter(key_mapping, false);
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;
   EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));
@@ -889,12 +1239,88 @@ TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) {
       {"test_obj", "%DYNAMIC_METADATA(com.test:test_obj)%"},
       {"test_obj.inner_key", "%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"}};
 
-  JsonFormatterImpl formatter(key_mapping);
+  JsonFormatterImpl formatter(key_mapping, false);
 
   verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info),
                    expected_json_map);
 }
 
+TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) {
+  StreamInfo::MockStreamInfo stream_info;
+  Http::TestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}};
+  Http::TestHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}};
+  Http::TestHeaderMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}};
+
+  envoy::api::v2::core::Metadata metadata;
+  populateMetadataTestData(metadata);
+  EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));
+  EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata));
+
+  std::unordered_map<std::string, std::string> key_mapping = {
+      {"test_key", "%DYNAMIC_METADATA(com.test:test_key)%"},
+      {"test_obj", "%DYNAMIC_METADATA(com.test:test_obj)%"},
+      {"test_obj.inner_key", "%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"}};
+
+  JsonFormatterImpl formatter(key_mapping, true);
+
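+  // With preserve_types enabled, the nested metadata struct is kept as structured JSON instead
+  // of a JSON-escaped string.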
+  const std::string json =
+      formatter.format(request_header, response_header, response_trailer, stream_info);
+  ProtobufWkt::Struct output;
+  MessageUtil::loadFromJson(json, output);
+
+  const auto& fields = output.fields();
+  EXPECT_EQ("test_value", fields.at("test_key").string_value());
+  EXPECT_EQ("inner_value", fields.at("test_obj.inner_key").string_value());
+  EXPECT_EQ("inner_value",
+            fields.at("test_obj").struct_value().fields().at("inner_key").string_value());
+}
+
+TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) {
+  Http::TestHeaderMapImpl header;
+  StreamInfo::MockStreamInfo stream_info;
+  stream_info.filter_state_.setData("test_key",
+                                    std::make_unique<Router::StringAccessorImpl>("test_value"),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
+  stream_info.filter_state_.setData("test_obj", std::make_unique<TestSerializedStructFilterState>(),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
+  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));
+
+  std::unordered_map<std::string, std::string> expected_json_map = {
+      {"test_key", "\"test_value\""}, {"test_obj", "{\"inner_key\":\"inner_value\"}"}};
+
+  std::unordered_map<std::string, std::string> key_mapping = {
+      {"test_key", "%FILTER_STATE(test_key)%"}, {"test_obj", "%FILTER_STATE(test_obj)%"}};
+
+  JsonFormatterImpl formatter(key_mapping, false);
+
+  verifyJsonOutput(formatter.format(header, header, header, stream_info), expected_json_map);
+}
+
+TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) {
+  Http::TestHeaderMapImpl header;
+  StreamInfo::MockStreamInfo stream_info;
+  stream_info.filter_state_.setData("test_key",
+                                    std::make_unique<Router::StringAccessorImpl>("test_value"),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
+  stream_info.filter_state_.setData("test_obj", std::make_unique<TestSerializedStructFilterState>(),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
+  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));
+
+  std::unordered_map<std::string, std::string> key_mapping = {
+      {"test_key", "%FILTER_STATE(test_key)%"}, {"test_obj", "%FILTER_STATE(test_obj)%"}};
+
+  JsonFormatterImpl formatter(key_mapping, true);
+
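+  // With preserve_types enabled, the struct-valued filter state comes through as nested JSON
+  // rather than an escaped string.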
+  std::string json = formatter.format(header, header, header, stream_info);
+  ProtobufWkt::Struct output;
+  MessageUtil::loadFromJson(json, output);
+
+  const auto& fields = output.fields();
+  EXPECT_EQ("test_value", fields.at("test_key").string_value());
+  EXPECT_EQ("inner_value",
+            fields.at("test_obj").struct_value().fields().at("inner_key").string_value());
+}
+
 TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) {
   StreamInfo::MockStreamInfo stream_info;
   Http::TestHeaderMapImpl request_header;
@@ -918,7 +1344,7 @@ TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) {
       {"bad_format", "%START_TIME(bad_format)%"},
       {"default", "%START_TIME%"},
       {"all_zeroes", "%START_TIME(%f.%1f.%2f.%3f)%"}};
-  JsonFormatterImpl formatter(key_mapping);
+  JsonFormatterImpl formatter(key_mapping, false);
 
   verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info),
                    expected_json_map);
@@ -937,19 +1363,61 @@ TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) {
     std::unordered_map<std::string, std::string> key_mapping = {
         {"multi_token_field",
          "%PROTOCOL% plainstring %REQ(some_request_header)% %RESP(some_response_header)%"}};
-    JsonFormatterImpl formatter(key_mapping);
 
-    absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;
-    EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));
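+    // Multi-token values are always rendered as strings, so typed and untyped output are
+    // expected to match; exercise both modes.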
+    for (const bool preserve_types : {false, true}) {
+      JsonFormatterImpl formatter(key_mapping, preserve_types);
+
+      absl::optional<Http::Protocol> protocol = Http::Protocol::Http11;
+      EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol));
 
-    const auto parsed = Json::Factory::loadFromString(
-        formatter.format(request_header, response_header, response_trailer, stream_info));
-    for (const auto& pair : expected_json_map) {
-      EXPECT_EQ(parsed->getString(pair.first), pair.second);
+      const auto parsed = Json::Factory::loadFromString(
+          formatter.format(request_header, response_header, response_trailer, stream_info));
+      for (const auto& pair : expected_json_map) {
+        EXPECT_EQ(parsed->getString(pair.first), pair.second);
+      }
     }
   }
 }
 
+TEST(AccessLogFormatterTest, JsonFormatterTypedTest) {
+  Http::TestHeaderMapImpl header;
+  StreamInfo::MockStreamInfo stream_info;
+  EXPECT_CALL(Const(stream_info), lastDownstreamRxByteReceived())
+      .WillRepeatedly(Return(std::chrono::nanoseconds(5000000)));
+
+  ProtobufWkt::Value list;
+  list.mutable_list_value()->add_values()->set_bool_value(true);
+  list.mutable_list_value()->add_values()->set_string_value("two");
+  list.mutable_list_value()->add_values()->set_number_value(3.14);
+
+  ProtobufWkt::Struct s;
+  (*s.mutable_fields())["list"] = list;
+
+  stream_info.filter_state_.setData("test_obj",
+                                    std::make_unique<TestSerializedStructFilterState>(s),
+                                    StreamInfo::FilterState::StateType::ReadOnly);
+  EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1));
+
+  std::unordered_map<std::string, std::string> key_mapping = {
+      {"request_duration", "%REQUEST_DURATION%"},
+      {"request_duration_multi", "%REQUEST_DURATION%ms"},
+      {"filter_state", "%FILTER_STATE(test_obj)%"},
+  };
+
+  JsonFormatterImpl formatter(key_mapping, true);
+
+  const auto json = formatter.format(header, header, header, stream_info);
+  ProtobufWkt::Struct output;
+  MessageUtil::loadFromJson(json, output);
+
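+  // A lone %REQUEST_DURATION% keeps its numeric type, while the multi-token
+  // "%REQUEST_DURATION%ms" entry is rendered as a string.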
+  EXPECT_THAT(output.fields().at("request_duration"), ProtoEq(numberValue(5.0)));
+  EXPECT_THAT(output.fields().at("request_duration_multi"), ProtoEq(stringValue("5ms")));
+
+  ProtobufWkt::Value expected;
+  expected.mutable_struct_value()->CopyFrom(s);
+  EXPECT_THAT(output.fields().at("filter_state"), ProtoEq(expected));
+}
+
 TEST(AccessLogFormatterTest, CompositeFormatterSuccess) {
   StreamInfo::MockStreamInfo stream_info;
   Http::TestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}};
diff --git a/test/common/common/regex_test.cc b/test/common/common/regex_test.cc
index b12454d7cdb0..40caf6fa34e4 100644
--- a/test/common/common/regex_test.cc
+++ b/test/common/common/regex_test.cc
@@ -50,9 +50,8 @@ TEST(Utility, ParseRegex) {
     envoy::type::matcher::RegexMatcher matcher;
     matcher.mutable_google_re2()->mutable_max_program_size()->set_value(1);
     matcher.set_regex("/asdf/.*");
-    EXPECT_THROW_WITH_MESSAGE(Utility::parseRegex(matcher), EnvoyException,
-                              "regex '/asdf/.*' RE2 program size of 24 > max program size of 1. "
-                              "Increase configured max program size if necessary.");
+    EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException,
+                            "RE2 program size of [0-9]+ > max program size of 1\\.");
   }
 }
 
diff --git a/test/common/config/BUILD b/test/common/config/BUILD
index 7367f62e35bb..651d43196f6e 100644
--- a/test/common/config/BUILD
+++ b/test/common/config/BUILD
@@ -10,6 +10,16 @@ load(
 
 envoy_package()
 
+envoy_cc_test(
+    name = "api_type_oracle_test",
+    srcs = ["api_type_oracle_test.cc"],
+    deps = [
+        "//source/common/config:api_type_oracle_lib",
+        "@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto",
+        "@envoy_api//envoy/config/filter/http/ip_tagging/v3alpha:pkg_cc_proto",
+    ],
+)
+
 envoy_cc_test(
     name = "delta_subscription_impl_test",
     srcs = ["delta_subscription_impl_test.cc"],
@@ -275,7 +285,7 @@ envoy_cc_test(
         "//test/test_common:environment_lib",
         "//test/test_common:logging_lib",
         "//test/test_common:utility_lib",
-        "@com_github_cncf_udpa//udpa/type/v1:typed_struct_cc",
+        "@com_github_cncf_udpa//udpa/type/v1:pkg_cc_proto",
         "@envoy_api//envoy/api/v2:pkg_cc_proto",
     ],
 )
@@ -284,6 +294,7 @@ envoy_cc_test(
     name = "registry_test",
     srcs = ["registry_test.cc"],
     deps = [
+        "//test/test_common:logging_lib",
         "//test/test_common:utility_lib",
     ],
 )
diff --git a/test/common/config/api_type_oracle_test.cc b/test/common/config/api_type_oracle_test.cc
new file mode 100644
index 000000000000..4b5f436fab33
--- /dev/null
+++ b/test/common/config/api_type_oracle_test.cc
@@ -0,0 +1,74 @@
+#include "common/config/api_type_oracle.h"
+
+// For proto descriptors only
+#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.h"
+#include "envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.pb.h"
+
+#include "gtest/gtest.h"
+#include "udpa/type/v1/typed_struct.pb.h"
+
+namespace Envoy {
+namespace Config {
+namespace {
+
+TEST(ApiTypeOracleTest, All) {
+  EXPECT_EQ(nullptr, ApiTypeOracle::inferEarlierVersionDescriptor("foo", {}, ""));
+  EXPECT_EQ(nullptr, ApiTypeOracle::inferEarlierVersionDescriptor("envoy.ip_tagging", {}, ""));
+
+  // Struct upgrade to v3alpha.
+  {
+    const auto* desc = ApiTypeOracle::inferEarlierVersionDescriptor(
+        "envoy.ip_tagging", {}, "envoy.config.filter.http.ip_tagging.v3alpha.IPTagging");
+    EXPECT_EQ("envoy.config.filter.http.ip_tagging.v2.IPTagging", desc->full_name());
+  }
+
+  // Any upgrade from v2 to v3alpha.
+  {
+    ProtobufWkt::Any typed_config;
+    typed_config.set_type_url("envoy.config.filter.http.ip_tagging.v2.IPTagging");
+    const auto* desc = ApiTypeOracle::inferEarlierVersionDescriptor(
+        "envoy.ip_tagging", typed_config, "envoy.config.filter.http.ip_tagging.v3alpha.IPTagging");
+    EXPECT_EQ("envoy.config.filter.http.ip_tagging.v2.IPTagging", desc->full_name());
+  }
+
+  // There is no upgrade when the Any type URL already matches the target type URL.
+  {
+    ProtobufWkt::Any typed_config;
+    typed_config.set_type_url("envoy.config.filter.http.ip_tagging.v3alpha.IPTagging");
+    EXPECT_EQ(nullptr, ApiTypeOracle::inferEarlierVersionDescriptor(
+                           "envoy.ip_tagging", typed_config,
+                           "envoy.config.filter.http.ip_tagging.v3alpha.IPTagging"));
+  }
+
+  // TypedStruct upgrade from v2 to v3alpha.
+  {
+    ProtobufWkt::Any typed_config;
+    udpa::type::v1::TypedStruct typed_struct;
+    typed_struct.set_type_url("envoy.config.filter.http.ip_tagging.v2.IPTagging");
+    typed_config.PackFrom(typed_struct);
+    const auto* desc = ApiTypeOracle::inferEarlierVersionDescriptor(
+        "envoy.ip_tagging", typed_config, "envoy.config.filter.http.ip_tagging.v3alpha.IPTagging");
+    EXPECT_EQ("envoy.config.filter.http.ip_tagging.v2.IPTagging", desc->full_name());
+  }
+
+  // There is no upgrade when the TypedStruct type URL already matches the target type URL.
+  {
+    ProtobufWkt::Any typed_config;
+    udpa::type::v1::TypedStruct typed_struct;
+    typed_struct.set_type_url(
+        "type.googleapis.com/envoy.config.filter.http.ip_tagging.v3alpha.IPTagging");
+    typed_config.PackFrom(typed_struct);
+    EXPECT_EQ(nullptr, ApiTypeOracle::inferEarlierVersionDescriptor(
+                           "envoy.ip_tagging", typed_config,
+                           "envoy.config.filter.http.ip_tagging.v3alpha.IPTagging"));
+  }
+
+  // There is no upgrade for v2.
+  EXPECT_EQ(nullptr,
+            ApiTypeOracle::inferEarlierVersionDescriptor(
+                "envoy.ip_tagging", {}, "envoy.config.filter.http.ip_tagging.v2.IPTagging"));
+}
+
+} // namespace
+} // namespace Config
+} // namespace Envoy
diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc
index 4621c2e521db..509a00d672af 100644
--- a/test/common/config/datasource_test.cc
+++ b/test/common/config/datasource_test.cc
@@ -374,6 +374,57 @@ TEST_F(AsyncDataSourceTest, loadRemoteDataSourceExpectInvalidData) {
   EXPECT_CALL(init_watcher_, ready());
 }
 
+TEST_F(AsyncDataSourceTest, datasourceReleasedBeforeFetchingData) {
+  const std::string body = "hello world";
+  std::string async_data = "non-empty";
+  std::unique_ptr<Config::DataSource::RemoteAsyncDataProvider> provider;
+
+  {
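+    // Build the provider inside a nested scope so the config proto is destroyed before the init
+    // target fires, verifying the provider does not keep references into the released config.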
+    AsyncDataSourcePb config;
+
+    std::string yaml = R"EOF(
+    remote:
+      http_uri:
+        uri: https://example.com/data
+        cluster: cluster_1
+      sha256:
+        b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
+  )EOF";
+    TestUtility::loadFromYaml(yaml, config);
+    EXPECT_TRUE(config.has_remote());
+
+    EXPECT_CALL(cm_, httpAsyncClientForCluster("cluster_1")).WillOnce(ReturnRef(cm_.async_client_));
+    EXPECT_CALL(cm_.async_client_, send_(_, _, _))
+        .WillOnce(
+            Invoke([&](Http::MessagePtr&, Http::AsyncClient::Callbacks& callbacks,
+                       const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* {
+              Http::MessagePtr response(new Http::ResponseMessageImpl(
+                  Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "200"}}}));
+              response->body() = std::make_unique<Buffer::OwnedImpl>(body);
+
+              callbacks.onSuccess(std::move(response));
+              return nullptr;
+            }));
+
+    EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) {
+      init_target_handle_ = target.createHandle("test");
+    }));
+
+    provider = std::make_unique<Config::DataSource::RemoteAsyncDataProvider>(
+        cm_, init_manager_, config.remote(), true, [&](const std::string& data) {
+          EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing);
+          EXPECT_EQ(data, body);
+          async_data = data;
+        });
+  }
+
+  EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing));
+  EXPECT_CALL(init_watcher_, ready());
+  init_target_handle_->initialize(init_watcher_);
+  EXPECT_EQ(async_data, body);
+  EXPECT_NE(nullptr, provider.get());
+}
+
 } // namespace
 } // namespace Config
 } // namespace Envoy
\ No newline at end of file
diff --git a/test/common/config/registry_test.cc b/test/common/config/registry_test.cc
index 10d5f4a314b2..3b951e561222 100644
--- a/test/common/config/registry_test.cc
+++ b/test/common/config/registry_test.cc
@@ -2,6 +2,10 @@
 
 #include "envoy/registry/registry.h"
 
+#include "common/common/fmt.h"
+
+#include "test/test_common/logging.h"
+
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
@@ -71,6 +75,28 @@ TEST(RegistryTest, DefaultFactoryPublished) {
             nullptr);
 }
 
+class TestWithDeprecatedPublishedFactory : public PublishedFactory {
+public:
+  std::string name() override { return "testing.published.instead_name"; }
+};
+
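+// The trailing braced list registers "testing.published.deprecated_name" as a deprecated name
+// for this factory.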
+REGISTER_FACTORY(TestWithDeprecatedPublishedFactory,
+                 PublishedFactory){"testing.published.deprecated_name"};
+
+TEST(RegistryTest, WithDeprecatedFactoryPublished) {
+  EXPECT_EQ("testing.published.instead_name",
+            Envoy::Registry::FactoryRegistry<PublishedFactory>::getFactory(
+                "testing.published.deprecated_name")
+                ->name());
+  EXPECT_LOG_CONTAINS("warn",
+                      fmt::format("{} is deprecated, use {} instead.",
+                                  "testing.published.deprecated_name",
+                                  "testing.published.instead_name"),
+                      Envoy::Registry::FactoryRegistry<PublishedFactory>::getFactory(
+                          "testing.published.deprecated_name")
+                          ->name());
+}
+
 } // namespace
 } // namespace Config
 } // namespace Envoy
diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc
index be54e5bf1d93..075f42828e5d 100644
--- a/test/common/config/utility_test.cc
+++ b/test/common/config/utility_test.cc
@@ -261,6 +261,20 @@ TEST(UtilityTest, PrepareDnsRefreshStrategy) {
   }
 }
 
+// Validate that an opaque config of the wrong type throws during conversion.
+TEST(UtilityTest, AnyWrongType) {
+  ProtobufWkt::Duration source_duration;
+  source_duration.set_seconds(42);
+  ProtobufWkt::Any typed_config;
+  typed_config.PackFrom(source_duration);
+  ProtobufWkt::Timestamp out;
+  EXPECT_THROW_WITH_REGEX(
+      Utility::translateOpaqueConfig("", typed_config, ProtobufWkt::Struct(),
+                                     ProtobufMessage::getStrictValidationVisitor(), out),
+      EnvoyException,
+      R"(Unable to unpack as google.protobuf.Timestamp: \[type.googleapis.com/google.protobuf.Duration\] .*)");
+}
+
 void packTypedStructIntoAny(ProtobufWkt::Any& typed_config, const Protobuf::Message& inner) {
   udpa::type::v1::TypedStruct typed_struct;
   (*typed_struct.mutable_type_url()) =
@@ -277,7 +291,7 @@ TEST(UtilityTest, TypedStructToStruct) {
   packTypedStructIntoAny(typed_config, untyped_struct);
 
   ProtobufWkt::Struct out;
-  Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),
+  Utility::translateOpaqueConfig("", typed_config, ProtobufWkt::Struct(),
                                  ProtobufMessage::getStrictValidationVisitor(), out);
 
   EXPECT_THAT(out, ProtoEq(untyped_struct));
@@ -298,7 +312,7 @@ TEST(UtilityTest, TypedStructToBootstrap) {
   packTypedStructIntoAny(typed_config, bootstrap);
 
   envoy::config::bootstrap::v2::Bootstrap out;
-  Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),
+  Utility::translateOpaqueConfig("", typed_config, ProtobufWkt::Struct(),
                                  ProtobufMessage::getStrictValidationVisitor(), out);
   EXPECT_THAT(out, ProtoEq(bootstrap));
 }
@@ -319,7 +333,7 @@ TEST(UtilityTest, TypedStructToInvalidType) {
 
   ProtobufWkt::Any out;
   EXPECT_THROW_WITH_MESSAGE(
-      Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(),
+      Utility::translateOpaqueConfig("", typed_config, ProtobufWkt::Struct(),
                                      ProtobufMessage::getStrictValidationVisitor(), out),
       EnvoyException,
       "Invalid proto type.\nExpected google.protobuf.Any\nActual: "
diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc
index 88761b402676..3059ed6a303b 100644
--- a/test/common/grpc/common_test.cc
+++ b/test/common/grpc/common_test.cc
@@ -107,28 +107,28 @@ TEST(GrpcCommonTest, GrpcStatusDetailsBin) {
 }
 
 TEST(GrpcContextTest, ToGrpcTimeout) {
-  Http::HeaderString value;
+  Http::TestHeaderMapImpl headers;
 
-  Common::toGrpcTimeout(std::chrono::milliseconds(0UL), value);
-  EXPECT_EQ("0m", value.getStringView());
+  Common::toGrpcTimeout(std::chrono::milliseconds(0UL), headers);
+  EXPECT_EQ("0m", headers.GrpcTimeout()->value().getStringView());
 
-  Common::toGrpcTimeout(std::chrono::milliseconds(1UL), value);
-  EXPECT_EQ("1m", value.getStringView());
+  Common::toGrpcTimeout(std::chrono::milliseconds(1UL), headers);
+  EXPECT_EQ("1m", headers.GrpcTimeout()->value().getStringView());
 
-  Common::toGrpcTimeout(std::chrono::milliseconds(100000000UL), value);
-  EXPECT_EQ("100000S", value.getStringView());
+  Common::toGrpcTimeout(std::chrono::milliseconds(100000000UL), headers);
+  EXPECT_EQ("100000S", headers.GrpcTimeout()->value().getStringView());
 
-  Common::toGrpcTimeout(std::chrono::milliseconds(100000000000UL), value);
-  EXPECT_EQ("1666666M", value.getStringView());
+  Common::toGrpcTimeout(std::chrono::milliseconds(100000000000UL), headers);
+  EXPECT_EQ("1666666M", headers.GrpcTimeout()->value().getStringView());
 
-  Common::toGrpcTimeout(std::chrono::milliseconds(9000000000000UL), value);
-  EXPECT_EQ("2500000H", value.getStringView());
+  Common::toGrpcTimeout(std::chrono::milliseconds(9000000000000UL), headers);
+  EXPECT_EQ("2500000H", headers.GrpcTimeout()->value().getStringView());
 
-  Common::toGrpcTimeout(std::chrono::milliseconds(360000000000000UL), value);
-  EXPECT_EQ("99999999H", value.getStringView());
+  Common::toGrpcTimeout(std::chrono::milliseconds(360000000000000UL), headers);
+  EXPECT_EQ("99999999H", headers.GrpcTimeout()->value().getStringView());
 
-  Common::toGrpcTimeout(std::chrono::milliseconds(UINT64_MAX), value);
-  EXPECT_EQ("99999999H", value.getStringView());
+  Common::toGrpcTimeout(std::chrono::milliseconds(UINT64_MAX), headers);
+  EXPECT_EQ("99999999H", headers.GrpcTimeout()->value().getStringView());
 }
 
 TEST(GrpcContextTest, PrepareHeaders) {
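
A minimal sketch of the new Common::toGrpcTimeout() calling convention shown above: the helper now takes a header map and writes the grpc-timeout header itself instead of filling a caller-supplied HeaderString. The 1500 ms case is hypothetical but assumes the same millisecond formatting as the cases above.

Http::TestHeaderMapImpl headers;
Common::toGrpcTimeout(std::chrono::milliseconds(1500), headers);
// Read the result back through the typed grpc-timeout accessor.
EXPECT_EQ("1500m", headers.GrpcTimeout()->value().getStringView());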
diff --git a/test/common/grpc/context_impl_test.cc b/test/common/grpc/context_impl_test.cc
index e134236ec5ae..cda0b850a9e8 100644
--- a/test/common/grpc/context_impl_test.cc
+++ b/test/common/grpc/context_impl_test.cc
@@ -61,7 +61,7 @@ TEST(GrpcContextTest, ResolveServiceAndMethod) {
   std::string service;
   std::string method;
   Http::HeaderMapImpl headers;
-  headers.setPath("/service_name/method_name");
+  headers.setPath("/service_name/method_name?a=b");
   const Http::HeaderEntry* path = headers.Path();
   Stats::TestSymbolTable symbol_table;
   ContextImpl context(*symbol_table);
diff --git a/test/common/grpc/google_grpc_creds_test.cc b/test/common/grpc/google_grpc_creds_test.cc
index a08fcf162567..19b6b735dc6d 100644
--- a/test/common/grpc/google_grpc_creds_test.cc
+++ b/test/common/grpc/google_grpc_creds_test.cc
@@ -122,6 +122,15 @@ TEST_F(CredsUtilityTest, DefaultChannelCredentials) {
     google_grpc->add_call_credentials()->mutable_from_plugin()->set_name("foo");
     EXPECT_NE(nullptr, CredsUtility::defaultChannelCredentials(config, *api_));
   }
+  {
+    envoy::api::v2::core::GrpcService config;
+    TestUtility::setTestSslGoogleGrpcConfig(config, true);
+    auto* sts_service = config.mutable_google_grpc()->add_call_credentials()->mutable_sts_service();
+    sts_service->set_token_exchange_service_uri("http://tokenexchangeservice.com");
+    sts_service->set_subject_token_path("/var/run/example_token");
+    sts_service->set_subject_token_type("urn:ietf:params:oauth:token-type:access_token");
+    EXPECT_NE(nullptr, CredsUtility::defaultChannelCredentials(config, *api_));
+  }
 }
 
 } // namespace
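
A configuration sketch for the STS call-credentials case added above; the URI, token path, and token type are placeholders, not values required by the API.

envoy::api::v2::core::GrpcService config;
auto* sts = config.mutable_google_grpc()->add_call_credentials()->mutable_sts_service();
sts->set_token_exchange_service_uri("https://sts.example.com/token"); // placeholder
sts->set_subject_token_path("/var/run/secrets/subject_token");        // placeholder
sts->set_subject_token_type("urn:ietf:params:oauth:token-type:access_token");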
diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc
index 21b9c719ef2f..3aa33b548100 100644
--- a/test/common/http/async_client_impl_test.cc
+++ b/test/common/http/async_client_impl_test.cc
@@ -191,6 +191,7 @@ TEST_F(AsyncClientImplTracingTest, Basic) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.1:443")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("200")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
@@ -229,6 +230,7 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.1:443")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("200")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
@@ -859,6 +861,7 @@ TEST_F(AsyncClientImplTracingTest, CancelRequest) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.1:443")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("0")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
@@ -918,6 +921,7 @@ TEST_F(AsyncClientImplTracingTest, DestroyWithActiveRequest) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.1:443")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("0")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
@@ -1073,6 +1077,7 @@ TEST_F(AsyncClientImplTracingTest, RequestTimeout) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.1:443")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("504")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("UT")));
diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc
index f137879b20c4..3dcc014b9f79 100644
--- a/test/common/http/conn_manager_impl_test.cc
+++ b/test/common/http/conn_manager_impl_test.cc
@@ -95,6 +95,12 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan
     filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();
   }
 
+  Tracing::CustomTagConstSharedPtr requestHeaderCustomTag(const std::string& header) {
+    envoy::type::tracing::v2::CustomTag::Header header_tag;
+    header_tag.set_name(header);
+    return std::make_shared<Tracing::RequestHeaderCustomTag>(header, header_tag);
+  }
+
   void setup(bool ssl, const std::string& server_name, bool tracing = true, bool use_srds = false) {
     use_srds_ = use_srds;
     if (ssl) {
@@ -121,7 +127,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan
       percent2.set_denominator(envoy::type::FractionalPercent::TEN_THOUSAND);
       tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(
           TracingConnectionManagerConfig{Tracing::OperationName::Ingress,
-                                         {LowerCaseString(":method")},
+                                         {{":method", requestHeaderCustomTag(":method")}},
                                          percent1,
                                          percent2,
                                          percent1,
@@ -795,10 +801,98 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) {
   // No decorator.
   EXPECT_CALL(*route_config_provider_.route_config_->route_, decorator())
       .WillRepeatedly(Return(nullptr));
+  envoy::type::FractionalPercent percent1;
+  percent1.set_numerator(100);
+  envoy::type::FractionalPercent percent2;
+  percent2.set_numerator(10000);
+  percent2.set_denominator(envoy::type::FractionalPercent::TEN_THOUSAND);
+
+  struct TracingTagMetaSuite {
+    using Factory =
+        std::function<Tracing::CustomTagConstSharedPtr(const std::string&, const std::string&)>;
+    std::string prefix;
+    Factory factory;
+  };
+  struct TracingTagSuite {
+    bool has_conn;
+    bool has_route;
+    std::list<Tracing::CustomTagConstSharedPtr> custom_tags;
+    std::string tag;
+    std::string tag_value;
+  };
+  std::vector<TracingTagMetaSuite> tracing_tag_meta_cases = {
+      {"l-tag",
+       [](const std::string& t, const std::string& v) {
+         envoy::type::tracing::v2::CustomTag::Literal literal;
+         literal.set_value(v);
+         return std::make_shared<Tracing::LiteralCustomTag>(t, literal);
+       }},
+      {"e-tag",
+       [](const std::string& t, const std::string& v) {
+         envoy::type::tracing::v2::CustomTag::Environment e;
+         e.set_default_value(v);
+         return std::make_shared<Tracing::EnvironmentCustomTag>(t, e);
+       }},
+      {"x-tag",
+       [](const std::string& t, const std::string& v) {
+         envoy::type::tracing::v2::CustomTag::Header h;
+         h.set_default_value(v);
+         return std::make_shared<Tracing::RequestHeaderCustomTag>(t, h);
+       }},
+      {"m-tag", [](const std::string& t, const std::string& v) {
+         envoy::type::tracing::v2::CustomTag::Metadata m;
+         m.mutable_kind()->mutable_host();
+         m.set_default_value(v);
+         return std::make_shared<Tracing::MetadataCustomTag>(t, m);
+       }}};
+  std::vector<TracingTagSuite> tracing_tag_cases;
+  for (const TracingTagMetaSuite& ms : tracing_tag_meta_cases) {
+    const std::string& t1 = ms.prefix + "-1";
+    const std::string& v1 = ms.prefix + "-v1";
+    tracing_tag_cases.push_back({true, false, {ms.factory(t1, v1)}, t1, v1});
+
+    const std::string& t2 = ms.prefix + "-2";
+    const std::string& v2 = ms.prefix + "-v2";
+    const std::string& rv2 = ms.prefix + "-r2";
+    tracing_tag_cases.push_back({true, true, {ms.factory(t2, v2), ms.factory(t2, rv2)}, t2, rv2});
+
+    const std::string& t3 = ms.prefix + "-3";
+    const std::string& rv3 = ms.prefix + "-r3";
+    tracing_tag_cases.push_back({false, true, {ms.factory(t3, rv3)}, t3, rv3});
+  }
+  Tracing::CustomTagMap conn_tracing_tags = {
+      {":method", requestHeaderCustomTag(":method")}}; // legacy test case
+  Tracing::CustomTagMap route_tracing_tags;
+  for (TracingTagSuite& s : tracing_tag_cases) {
+    if (s.has_conn) {
+      const Tracing::CustomTagConstSharedPtr& ptr = s.custom_tags.front();
+      conn_tracing_tags.emplace(ptr->tag(), ptr);
+      s.custom_tags.pop_front();
+    }
+    if (s.has_route) {
+      const Tracing::CustomTagConstSharedPtr& ptr = s.custom_tags.front();
+      route_tracing_tags.emplace(ptr->tag(), ptr);
+      s.custom_tags.pop_front();
+    }
+  }
+  tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(
+      TracingConnectionManagerConfig{Tracing::OperationName::Ingress, conn_tracing_tags, percent1,
+                                     percent2, percent1, false, 256});
+  NiceMock<Router::MockRouteTracing> route_tracing;
+  ON_CALL(route_tracing, getClientSampling()).WillByDefault(ReturnRef(percent1));
+  ON_CALL(route_tracing, getRandomSampling()).WillByDefault(ReturnRef(percent2));
+  ON_CALL(route_tracing, getOverallSampling()).WillByDefault(ReturnRef(percent1));
+  ON_CALL(route_tracing, getCustomTags()).WillByDefault(ReturnRef(route_tracing_tags));
+  ON_CALL(*route_config_provider_.route_config_->route_, tracingConfig())
+      .WillByDefault(Return(&route_tracing));
+
   EXPECT_CALL(*span, finishSpan());
   EXPECT_CALL(*span, setTag(_, _)).Times(testing::AnyNumber());
   // Verify tag is set based on the request headers.
   EXPECT_CALL(*span, setTag(Eq(":method"), Eq("GET")));
+  for (const TracingTagSuite& s : tracing_tag_cases) {
+    EXPECT_CALL(*span, setTag(Eq(s.tag), Eq(s.tag_value)));
+  }
   // Verify if the activeSpan interface returns reference to the current span.
   EXPECT_CALL(*span, setTag(Eq("service-cluster"), Eq("scoobydoo")));
   EXPECT_CALL(runtime_.snapshot_, featureEnabled("tracing.global_enabled",
@@ -986,7 +1080,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato
   percent2.set_denominator(envoy::type::FractionalPercent::TEN_THOUSAND);
   tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(
       TracingConnectionManagerConfig{Tracing::OperationName::Egress,
-                                     {LowerCaseString(":method")},
+                                     {{":method", requestHeaderCustomTag(":method")}},
                                      percent1,
                                      percent2,
                                      percent1,
@@ -1065,7 +1159,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato
   percent2.set_denominator(envoy::type::FractionalPercent::TEN_THOUSAND);
   tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(
       TracingConnectionManagerConfig{Tracing::OperationName::Egress,
-                                     {LowerCaseString(":method")},
+                                     {{":method", requestHeaderCustomTag(":method")}},
                                      percent1,
                                      percent2,
                                      percent1,
@@ -1139,7 +1233,7 @@ TEST_F(HttpConnectionManagerImplTest,
   percent2.set_denominator(envoy::type::FractionalPercent::TEN_THOUSAND);
   tracing_config_ = std::make_unique<TracingConnectionManagerConfig>(
       TracingConnectionManagerConfig{Tracing::OperationName::Egress,
-                                     {LowerCaseString(":method")},
+                                     {{":method", requestHeaderCustomTag(":method")}},
                                      percent1,
                                      percent2,
                                      percent1,
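
A minimal sketch of building the Tracing::CustomTagMap consumed by the updated TracingConnectionManagerConfig above; the tag names and values are placeholders.

Tracing::CustomTagMap custom_tags;
// Literal tag: a fixed value attached to every span.
envoy::type::tracing::v2::CustomTag::Literal literal;
literal.set_value("static-value");
custom_tags.emplace("example.literal",
                    std::make_shared<Tracing::LiteralCustomTag>("example.literal", literal));
// Request-header tag: the value is taken from the named request header.
envoy::type::tracing::v2::CustomTag::Header header;
header.set_name(":method");
custom_tags.emplace(":method",
                    std::make_shared<Tracing::RequestHeaderCustomTag>(":method", header));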
diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc
index 5353f9b02f8e..9b3257092b41 100644
--- a/test/common/http/conn_manager_utility_test.cc
+++ b/test/common/http/conn_manager_utility_test.cc
@@ -241,26 +241,8 @@ TEST_F(ConnectionManagerUtilityTest, SkipXffAppendPassThruUseRemoteAddress) {
   EXPECT_EQ("198.51.100.1", headers.ForwardedFor()->value().getStringView());
 }
 
-TEST_F(ConnectionManagerUtilityTest, ForwardedProtoLegacyBehavior) {
-  TestScopedRuntime scoped_runtime;
-  Runtime::LoaderSingleton::getExisting()->mergeValues(
-      {{"envoy.reloadable_features.trusted_forwarded_proto", "false"}});
-
-  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));
-  ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1));
-  EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true));
-  connection_.remote_address_ = std::make_shared<Network::Address::Ipv4Instance>("12.12.12.12");
-  ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));
-  TestHeaderMapImpl headers{{"x-forwarded-proto", "https"}};
-
-  callMutateRequestHeaders(headers, Protocol::Http2);
-  EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView());
-}
-
 TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) {
   TestScopedRuntime scoped_runtime;
-  Runtime::LoaderSingleton::getExisting()->mergeValues(
-      {{"envoy.reloadable_features.trusted_forwarded_proto", "true"}});
 
   ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));
   ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1));
diff --git a/test/common/http/header_map_impl_corpus/crash-5fb09ca426eb21db14151b94fd74d418b49042e4 b/test/common/http/header_map_impl_corpus/crash-5fb09ca426eb21db14151b94fd74d418b49042e4
new file mode 100644
index 000000000000..f095332be401
--- /dev/null
+++ b/test/common/http/header_map_impl_corpus/crash-5fb09ca426eb21db14151b94fd74d418b49042e4
@@ -0,0 +1,1900 @@
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: "foo"
+    value: "m"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference_key {
+    key: ":method"
+    string_value: "baz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  add_copy {
+    key: ":method"
+    string_value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference_key {
+    key: ":method"
+    uint64_value: 37
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference {
+    key: ":method"
+    value: "bar"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  lookup: "foo"
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "bar"
+  }
+}
+actions {
+  set_reference_key {
+    key: "baz"
+    value: "bar"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  lookup: "foo"
+}
+actions {
+  mutate_and_move {
+    key: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_copy {
+    key: ":method"
+    uint64_value: 42
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_copy {
+    key: "foo_string_key"
+    string_value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_copy {
+    key: "foo_uint64_key"
+    uint64_value: 42
+  }
+}
+actions {
+  set_reference_key {
+    key: "foo"
+    value: "baz"
+  }
+}
+actions {
+  set_reference_key {
+    key: "foo"
+    value: "m"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference_key {
+    key: "foo_string_key"
+    string_value: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  add_copy {
+    key: "foo_string_key"
+    string_value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  set_reference_key {
+    key: "foo"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: "baz"
+    value: "bar"
+  }
+}
+actions {
+  add_reference {
+    value: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  remove_prefix: "foo"
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    clear {
+    }
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference {
+    key: "foo"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    set_copy: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  lookup: "foo"
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference_key {
+    key: ":method"
+    string_value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  copy {
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  lookup: "foo"
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  remove: "f"
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    append: ""
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference_key {
+    key: ":method"
+    string_value: "baz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "aa"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: "foo"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference_key {
+    key: ":method"
+    uint64_value: 37
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+}
+actions {
+  add_reference {
+    key: "foo"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference_key {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  lookup: "_"
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: ""
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  lookup: ":method"
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_reference {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    clear {
+    }
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  add_reference {
+    key: "foo"
+    value: "baz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_copy {
+    key: "foo_string_key"
+    string_value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  lookup: ":method"
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_copy {
+    key: "foo_uint64_key"
+    uint64_value: 42
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    set_integer: 0
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  set_reference {
+    key: ":method"
+    value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  add_copy {
+    key: "foo_string_key"
+    string_value: "baz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+  get_and_mutate {
+    key: ":method"
+    append: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+  }
+}
+actions {
+}
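
Note: the corpus entries above are textproto-encoded `HeaderMapImplFuzzTestCase` messages, dominated by the `get_and_mutate` action that this change deprecates below. As a minimal sketch (not part of the patch), this is roughly how one such entry maps onto the fuzzer's input type; the generated header path is an assumption and may differ in the real tree.

```cpp
// Sketch only: decoding a textproto corpus entry into the fuzzer's input
// message. The include path for the generated code is hypothetical.
#include <iostream>
#include <string>

#include "google/protobuf/text_format.h"
#include "test/common/http/header_map_impl_fuzz.pb.h" // hypothetical include path

int main() {
  const std::string entry = R"pb(
    actions { lookup: ":method" }
    actions { set_reference_key { key: ":method" value: "baz" } }
  )pb";
  test::common::http::HeaderMapImplFuzzTestCase input;
  // Corpus files like the one above are plain textproto, so TextFormat decodes them.
  if (!google::protobuf::TextFormat::ParseFromString(entry, &input)) {
    std::cerr << "failed to parse corpus entry" << std::endl;
    return 1;
  }
  std::cout << "actions: " << input.actions().size() << std::endl;
  return 0;
}
```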
diff --git a/test/common/http/header_map_impl_fuzz.proto b/test/common/http/header_map_impl_fuzz.proto
index 9ff5279f022f..bebe373b6ae5 100644
--- a/test/common/http/header_map_impl_fuzz.proto
+++ b/test/common/http/header_map_impl_fuzz.proto
@@ -51,6 +51,25 @@ message GetAndMutate {
   }
 }
 
+message MutateAndMove {
+  string key = 1;
+  oneof mutate_selector {
+    string append = 2;
+    string set_copy = 3;
+    uint64 set_integer = 4;
+    string set_reference = 5;
+  }
+}
+
+message Append {
+  string key = 1;
+  string value = 2;
+}
+
+message Get {
+  string key = 1;
+}
+
 message Action {
   oneof action_selector {
     option (validate.required) = true;
@@ -59,7 +78,10 @@ message Action {
     AddCopy add_copy = 3;
     SetReference set_reference = 4;
     SetReferenceKey set_reference_key = 5;
-    GetAndMutate get_and_mutate = 6;
+    GetAndMutate get_and_mutate = 6 [deprecated = true];
+    Get get = 13;
+    MutateAndMove mutate_and_move = 12;
+    Append append = 11;
     google.protobuf.Empty copy = 7;
     string lookup = 8;
     string remove = 9;
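
The new `Get`, `MutateAndMove`, and `Append` actions split the old in-place `GetAndMutate` mutation into a read-only lookup, an out-of-place build-then-move, and a map-level append. A hedged sketch of building a test case that exercises the new actions through the generated C++ API follows; the accessor names (`add_actions`, `mutable_mutate_and_move`, `set_append`, ...) follow standard protoc conventions for the fields added above and should be treated as assumptions.

```cpp
// Sketch only: programmatically constructing a fuzz input that uses the new
// actions. Generated accessor names are assumed from protoc conventions.
#include "test/common/http/header_map_impl_fuzz.pb.h" // hypothetical include path

test::common::http::HeaderMapImplFuzzTestCase buildExample() {
  test::common::http::HeaderMapImplFuzzTestCase input;

  // Rough equivalent of the old get_and_mutate "append" corpus entries: the
  // value is built out-of-place and then handed to the map via mutate_and_move.
  auto* mutate = input.add_actions()->mutable_mutate_and_move();
  mutate->set_key(":method");
  mutate->set_append("zz");

  // Append to an existing header through the map itself.
  auto* append = input.add_actions()->mutable_append();
  append->set_key(":method");
  append->set_value("-suffix");

  // Read-only lookup of the same header.
  input.add_actions()->mutable_get()->set_key(":method");
  return input;
}
```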
diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc
index b10c2f695ad8..c5fe9f613d6d 100644
--- a/test/common/http/header_map_impl_fuzz_test.cc
+++ b/test/common/http/header_map_impl_fuzz_test.cc
@@ -15,24 +15,15 @@ namespace Envoy {
 // Fuzz the header map implementation.
 DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) {
   Http::HeaderMapImplPtr header_map = std::make_unique<Http::HeaderMapImpl>();
-  const auto predefined_exists = [&header_map](const std::string& s) -> bool {
-    const Http::HeaderEntry* entry;
-    return header_map->lookup(Http::LowerCaseString(replaceInvalidCharacters(s)), &entry) ==
-           Http::HeaderMap::Lookup::Found;
-  };
   std::vector<std::unique_ptr<Http::LowerCaseString>> lower_case_strings;
   std::vector<std::unique_ptr<std::string>> strings;
-  constexpr auto max_actions = 1024;
+  constexpr auto max_actions = 128;
   for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) {
     const auto& action = input.actions(i);
     ENVOY_LOG_MISC(debug, "Action {}", action.DebugString());
     switch (action.action_selector_case()) {
     case test::common::http::Action::kAddReference: {
       const auto& add_reference = action.add_reference();
-      // Workaround for https://github.com/envoyproxy/envoy/issues/3919.
-      if (predefined_exists(add_reference.key())) {
-        continue;
-      }
       lower_case_strings.emplace_back(
           std::make_unique<Http::LowerCaseString>(replaceInvalidCharacters(add_reference.key())));
       strings.emplace_back(
@@ -42,10 +33,6 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input)
     }
     case test::common::http::Action::kAddReferenceKey: {
       const auto& add_reference_key = action.add_reference_key();
-      // Workaround for https://github.com/envoyproxy/envoy/issues/3919.
-      if (predefined_exists(add_reference_key.key())) {
-        continue;
-      }
       lower_case_strings.emplace_back(std::make_unique<Http::LowerCaseString>(
           replaceInvalidCharacters(add_reference_key.key())));
       switch (add_reference_key.value_selector_case()) {
@@ -63,10 +50,6 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input)
     }
     case test::common::http::Action::kAddCopy: {
       const auto& add_copy = action.add_copy();
-      // Workaround for https://github.com/envoyproxy/envoy/issues/3919.
-      if (predefined_exists(add_copy.key())) {
-        continue;
-      }
       const Http::LowerCaseString key{replaceInvalidCharacters(add_copy.key())};
       switch (add_copy.value_selector_case()) {
       case test::common::http::AddCopy::kStringValue:
@@ -97,47 +80,58 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input)
                                   replaceInvalidCharacters(set_reference_key.value()));
       break;
     }
-    case test::common::http::Action::kGetAndMutate: {
-      const auto& get_and_mutate = action.get_and_mutate();
-      auto* header_entry =
-          header_map->get(Http::LowerCaseString(replaceInvalidCharacters(get_and_mutate.key())));
+    case test::common::http::Action::kGet: {
+      const auto& get = action.get();
+      const auto* header_entry =
+          header_map->get(Http::LowerCaseString(replaceInvalidCharacters(get.key())));
       if (header_entry != nullptr) {
         // Do some read-only stuff.
         (void)strlen(std::string(header_entry->key().getStringView()).c_str());
         (void)strlen(std::string(header_entry->value().getStringView()).c_str());
-        (void)strlen(header_entry->value().buffer());
         header_entry->key().empty();
         header_entry->value().empty();
-        // Do some mutation or parameterized action.
-        switch (get_and_mutate.mutate_selector_case()) {
-        case test::common::http::GetAndMutate::kAppend:
-          header_entry->value().append(replaceInvalidCharacters(get_and_mutate.append()).c_str(),
-                                       get_and_mutate.append().size());
-          break;
-        case test::common::http::GetAndMutate::kClear:
-          header_entry->value().clear();
-          break;
-        case test::common::http::GetAndMutate::kFind:
-          header_entry->value().getStringView().find(get_and_mutate.find());
-          break;
-        case test::common::http::GetAndMutate::kSetCopy:
-          header_entry->value().setCopy(replaceInvalidCharacters(get_and_mutate.set_copy()).c_str(),
-                                        get_and_mutate.set_copy().size());
-          break;
-        case test::common::http::GetAndMutate::kSetInteger:
-          header_entry->value().setInteger(get_and_mutate.set_integer());
-          break;
-        case test::common::http::GetAndMutate::kSetReference:
-          strings.emplace_back(std::make_unique<std::string>(
-              replaceInvalidCharacters(get_and_mutate.set_reference())));
-          header_entry->value().setReference(*strings.back());
-          break;
-        default:
-          break;
-        }
       }
       break;
     }
+    case test::common::http::Action::kMutateAndMove: {
+      const auto& mutate_and_move = action.mutate_and_move();
+      Http::HeaderString header_field(
+          Http::LowerCaseString(replaceInvalidCharacters(mutate_and_move.key())));
+      Http::HeaderString header_value;
+      // Do some mutation or parameterized action.
+      switch (mutate_and_move.mutate_selector_case()) {
+      case test::common::http::MutateAndMove::kAppend:
+        header_value.append(replaceInvalidCharacters(mutate_and_move.append()).c_str(),
+                            mutate_and_move.append().size());
+        break;
+      case test::common::http::MutateAndMove::kSetCopy:
+        header_value.setCopy(replaceInvalidCharacters(mutate_and_move.set_copy()));
+        break;
+      case test::common::http::MutateAndMove::kSetInteger:
+        header_value.setInteger(mutate_and_move.set_integer());
+        break;
+      case test::common::http::MutateAndMove::kSetReference:
+        strings.emplace_back(std::make_unique<std::string>(
+            replaceInvalidCharacters(mutate_and_move.set_reference())));
+        header_value.setReference(*strings.back());
+        break;
+      default:
+        break;
+      }
+      // Can't addViaMove on an empty header value.
+      if (!header_value.empty()) {
+        header_map->addViaMove(std::move(header_field), std::move(header_value));
+      }
+      break;
+    }
+    case test::common::http::Action::kAppend: {
+      const auto& append = action.append();
+      lower_case_strings.emplace_back(
+          std::make_unique<Http::LowerCaseString>(replaceInvalidCharacters(append.key())));
+      strings.emplace_back(std::make_unique<std::string>(replaceInvalidCharacters(append.value())));
+      header_map->appendCopy(*lower_case_strings.back(), *strings.back());
+      break;
+    }
     case test::common::http::Action::kCopy: {
       header_map = std::make_unique<Http::HeaderMapImpl>(
           *reinterpret_cast<Http::HeaderMap*>(header_map.get()));
@@ -158,13 +152,17 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input)
           Http::LowerCaseString(replaceInvalidCharacters(action.remove_prefix())));
       break;
     }
+    case test::common::http::Action::kGetAndMutate: {
+      // Deprecated. Cannot get and mutate entries.

+      break;
+    }
     default:
       // Maybe nothing is set?
       break;
     }
     // Exercise some read-only accessors.
-    header_map->byteSize();
     header_map->size();
+    header_map->byteSize();
     header_map->iterate(
         [](const Http::HeaderEntry& header, void * /*context*/) -> Http::HeaderMap::Iterate {
           header.key();
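
The `kMutateAndMove` branch above reflects the new contract: headers are built and mutated outside the map, then donated to it. A minimal standalone sketch of that pattern, assuming the `HeaderString`, `LowerCaseString`, and `HeaderMapImpl` interfaces shown in this diff and an assumed include path:

```cpp
// Minimal sketch of the addViaMove pattern the MutateAndMove action exercises;
// not part of the change.
#include "common/http/header_map_impl.h" // assumed include path

namespace Envoy {

void addMutatedHeader(Http::HeaderMapImpl& headers) {
  // Build key and value outside the map and mutate the value freely...
  Http::HeaderString key(Http::LowerCaseString("x-example"));
  Http::HeaderString value;
  value.setCopy("base");
  value.append("-suffix", 7);
  // ...then donate both to the map. As in the fuzzer, skip empty values,
  // since addViaMove cannot take an empty header value.
  if (!value.empty()) {
    headers.addViaMove(std::move(key), std::move(value));
  }
}

} // namespace Envoy
```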
diff --git a/test/common/http/header_map_impl_speed_test.cc b/test/common/http/header_map_impl_speed_test.cc
index b59cb5130c7f..f313d26b4a17 100644
--- a/test/common/http/header_map_impl_speed_test.cc
+++ b/test/common/http/header_map_impl_speed_test.cc
@@ -74,7 +74,7 @@ static void HeaderMapImplGetInline(benchmark::State& state) {
   const std::string value("01234567890123456789");
   HeaderMapImpl headers;
   addDummyHeaders(headers, state.range(0));
-  headers.insertConnection().value().setReference(value);
+  headers.setReferenceConnection(value);
   size_t size = 0;
   for (auto _ : state) {
     size += headers.Connection()->value().size();
@@ -83,21 +83,6 @@ static void HeaderMapImplGetInline(benchmark::State& state) {
 }
 BENCHMARK(HeaderMapImplGetInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50);
 
-/**
- * Measure the speed of writing to a header for which HeaderMapImpl is expected to
- * provide special optimizations.
- */
-static void HeaderMapImplSetInline(benchmark::State& state) {
-  const std::string value("01234567890123456789");
-  HeaderMapImpl headers;
-  addDummyHeaders(headers, state.range(0));
-  for (auto _ : state) {
-    headers.insertConnection().value().setReference(value);
-  }
-  benchmark::DoNotOptimize(headers.size());
-}
-BENCHMARK(HeaderMapImplSetInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50);
-
 /**
  * Measure the speed of writing to a header for which HeaderMapImpl is expected to
  * provide special optimizations.
@@ -134,7 +119,7 @@ static void HeaderMapImplGetByteSize(benchmark::State& state) {
   addDummyHeaders(headers, state.range(0));
   uint64_t size = 0;
   for (auto _ : state) {
-    size += headers.byteSize().value();
+    size += headers.byteSize();
   }
   benchmark::DoNotOptimize(size);
 }
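
The benchmark changes above track the API updates in this patch: `setReferenceConnection()` replaces the removed `insertConnection().value().setReference()` path, and `byteSize()` now returns a plain value rather than an optional. A hypothetical extra benchmark in the same style, covering the `appendCopy` path the fuzzer now exercises, might look like the sketch below; `addDummyHeaders` is the helper already defined in this file, and the benchmark itself is not part of the change.

```cpp
// Hypothetical benchmark in the style of header_map_impl_speed_test.cc,
// covering HeaderMap::appendCopy; not part of this change.
static void HeaderMapImplAppendCopy(benchmark::State& state) {
  const LowerCaseString key("example-key");
  const std::string value("01234567890123456789");
  for (auto _ : state) {
    // Use a fresh map each iteration so the appended value does not grow
    // without bound across iterations.
    HeaderMapImpl headers;
    addDummyHeaders(headers, state.range(0));
    headers.appendCopy(key, value);
    benchmark::DoNotOptimize(headers.byteSize());
  }
}
BENCHMARK(HeaderMapImplAppendCopy)->Arg(0)->Arg(1)->Arg(10)->Arg(50);
```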
diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc
index 4ade5f79bbce..74f2d225e308 100644
--- a/test/common/http/header_map_impl_test.cc
+++ b/test/common/http/header_map_impl_test.cc
@@ -14,6 +14,16 @@ using ::testing::InSequence;
 namespace Envoy {
 namespace Http {
 
+class VerifiedHeaderMapImpl : public HeaderMapImpl {
+public:
+  VerifiedHeaderMapImpl() = default;
+  explicit VerifiedHeaderMapImpl(
+      const std::initializer_list<std::pair<LowerCaseString, std::string>>& values)
+      : HeaderMapImpl(values) {}
+
+  void verifyByteSize() override { ASSERT(cached_byte_size_ == byteSizeInternal()); }
+};
+
 TEST(HeaderStringTest, All) {
   // Static LowerCaseString constructor
   {
@@ -57,7 +67,7 @@ TEST(HeaderStringTest, All) {
   // Inline move constructor
   {
     HeaderString string;
-    string.setCopy("hello", 5);
+    string.setCopy("hello");
     EXPECT_EQ(HeaderString::Type::Inline, string.type());
     HeaderString string2(std::move(string));
     EXPECT_TRUE(string.empty()); // NOLINT(bugprone-use-after-move)
@@ -74,7 +84,7 @@ TEST(HeaderStringTest, All) {
   {
     std::string large(4096, 'a');
     HeaderString string;
-    string.setCopy(large.c_str(), large.size());
+    string.setCopy(large);
     EXPECT_EQ(HeaderString::Type::Dynamic, string.type());
     HeaderString string2(std::move(string));
     EXPECT_TRUE(string.empty()); // NOLINT(bugprone-use-after-move)
@@ -100,7 +110,7 @@ TEST(HeaderStringTest, All) {
   {
     std::string static_string("HELLO");
     HeaderString string(static_string);
-    string.setCopy(static_string.c_str(), static_string.size());
+    string.setCopy(static_string);
     EXPECT_EQ(HeaderString::Type::Inline, string.type());
     EXPECT_EQ("HELLO", string.getStringView());
   }
@@ -127,7 +137,7 @@ TEST(HeaderStringTest, All) {
   // Copy inline
   {
     HeaderString string;
-    string.setCopy("hello", 5);
+    string.setCopy("hello");
     EXPECT_EQ("hello", string.getStringView());
     EXPECT_EQ(5U, string.size());
   }
@@ -136,7 +146,7 @@ TEST(HeaderStringTest, All) {
   {
     HeaderString string;
     std::string large_value(4096, 'a');
-    string.setCopy(large_value.c_str(), large_value.size());
+    string.setCopy(large_value);
     EXPECT_EQ(large_value, string.getStringView());
     EXPECT_NE(large_value.c_str(), string.getStringView().data());
     EXPECT_EQ(4096U, string.size());
@@ -146,9 +156,9 @@ TEST(HeaderStringTest, All) {
   {
     HeaderString string;
     std::string large_value1(4096, 'a');
-    string.setCopy(large_value1.c_str(), large_value1.size());
+    string.setCopy(large_value1);
     std::string large_value2(2048, 'b');
-    string.setCopy(large_value2.c_str(), large_value2.size());
+    string.setCopy(large_value2);
     EXPECT_EQ(large_value2, string.getStringView());
     EXPECT_NE(large_value2.c_str(), string.getStringView().data());
     EXPECT_EQ(2048U, string.size());
@@ -158,9 +168,9 @@ TEST(HeaderStringTest, All) {
   {
     HeaderString string;
     std::string large_value1(4096, 'a');
-    string.setCopy(large_value1.c_str(), large_value1.size());
+    string.setCopy(large_value1);
     std::string large_value2(16384, 'b');
-    string.setCopy(large_value2.c_str(), large_value2.size());
+    string.setCopy(large_value2);
     EXPECT_EQ(large_value2, string.getStringView());
     EXPECT_NE(large_value2.c_str(), string.getStringView().data());
     EXPECT_EQ(16384U, string.size());
@@ -170,9 +180,9 @@ TEST(HeaderStringTest, All) {
   {
     HeaderString string;
     std::string large_value1(16, 'a');
-    string.setCopy(large_value1.c_str(), large_value1.size());
+    string.setCopy(large_value1);
     std::string large_value2(16384, 'b');
-    string.setCopy(large_value2.c_str(), large_value2.size());
+    string.setCopy(large_value2);
     EXPECT_EQ(large_value2, string.getStringView());
     EXPECT_NE(large_value2.c_str(), string.getStringView().data());
     EXPECT_EQ(16384U, string.size());
@@ -187,7 +197,7 @@ TEST(HeaderStringTest, All) {
   {
     HeaderString string;
     std::string large(128, 'z');
-    string.setCopy(large.c_str(), large.size());
+    string.setCopy(large);
     EXPECT_EQ(string.type(), HeaderString::Type::Inline);
     EXPECT_EQ(string.getStringView(), large);
   }
@@ -196,11 +206,11 @@ TEST(HeaderStringTest, All) {
   {
     HeaderString string;
     std::string large(128, 'z');
-    string.setCopy(large.c_str(), large.size());
+    string.setCopy(large);
     EXPECT_EQ(string.type(), HeaderString::Type::Inline);
     EXPECT_EQ(string.getStringView(), large);
     std::string small(1, 'a');
-    string.setCopy(small.c_str(), small.size());
+    string.setCopy(small);
     EXPECT_EQ(string.type(), HeaderString::Type::Inline);
     EXPECT_EQ(string.getStringView(), small);
     // If we peek past the valid first character of the
@@ -218,13 +228,13 @@ TEST(HeaderStringTest, All) {
     HeaderString string;
     // Force Dynamic with setCopy of inline buffer size + 1.
     std::string large1(129, 'z');
-    string.setCopy(large1.c_str(), large1.size());
+    string.setCopy(large1);
     EXPECT_EQ(string.type(), HeaderString::Type::Dynamic);
     const void* dynamic_buffer_address = string.getStringView().data();
     // Dynamic capacity in setCopy is 2x required by the size.
     // So to fill it exactly setCopy with a total of 258 chars.
     std::string large2(258, 'z');
-    string.setCopy(large2.c_str(), large2.size());
+    string.setCopy(large2);
     EXPECT_EQ(string.type(), HeaderString::Type::Dynamic);
     // The actual buffer address should be the same as it was after
     // setCopy(large1), ensuring no reallocation occurred.
@@ -295,7 +305,7 @@ TEST(HeaderStringTest, All) {
     HeaderString string;
     // Force Dynamic with setCopy of inline buffer size + 1.
     std::string large1(129, 'z');
-    string.setCopy(large1.c_str(), large1.size());
+    string.setCopy(large1);
     EXPECT_EQ(string.type(), HeaderString::Type::Dynamic);
     const void* dynamic_buffer_address = string.getStringView().data();
     // Dynamic capacity in setCopy is 2x required by the size.
@@ -338,7 +348,7 @@ TEST(HeaderStringTest, All) {
     EXPECT_EQ(HeaderString::Type::Reference, string.type());
 
     const std::string large(129, 'a');
-    string.setCopy(large.c_str(), large.size());
+    string.setCopy(large);
     EXPECT_NE(string.getStringView().data(), large.c_str());
     EXPECT_EQ(HeaderString::Type::Dynamic, string.type());
 
@@ -363,12 +373,11 @@ TEST(HeaderStringTest, All) {
 }
 
 TEST(HeaderMapImplTest, InlineInsert) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   EXPECT_TRUE(headers.empty());
   EXPECT_EQ(0, headers.size());
-  EXPECT_EQ(headers.byteSize().value(), 0);
   EXPECT_EQ(nullptr, headers.Host());
-  headers.insertHost().value(std::string("hello"));
+  headers.setHost("hello");
   EXPECT_FALSE(headers.empty());
   EXPECT_EQ(1, headers.size());
   EXPECT_EQ(":authority", headers.Host()->key().getStringView());
@@ -376,47 +385,78 @@ TEST(HeaderMapImplTest, InlineInsert) {
   EXPECT_EQ("hello", headers.get(Headers::get().Host)->value().getStringView());
 }
 
-// Utility function for testing byteSize() against a manual byte count.
-uint64_t countBytesForTest(const HeaderMapImpl& headers) {
-  uint64_t byte_size = 0;
-  headers.iterate(
-      [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate {
-        auto* byte_size = static_cast<uint64_t*>(context);
-        *byte_size += header.key().getStringView().size() + header.value().getStringView().size();
-        return Http::HeaderMap::Iterate::Continue;
-      },
-      &byte_size);
-  return byte_size;
+TEST(HeaderMapImplTest, InlineAppend) {
+  {
+    VerifiedHeaderMapImpl headers;
+    // Create the via header and append to it.
+    headers.setVia("");
+    headers.appendVia("1.0 fred", ",");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred");
+    headers.appendVia("1.1 nowhere.com", ",");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com");
+  }
+  {
+    // Append to the via header without explicitly creating it first.
+    VerifiedHeaderMapImpl headers;
+    headers.appendVia("1.0 fred", ",");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred");
+    headers.appendVia("1.1 nowhere.com", ",");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com");
+  }
+  {
+    // Custom delimiter.
+    VerifiedHeaderMapImpl headers;
+    headers.setVia("");
+    headers.appendVia("1.0 fred", ", ");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred");
+    headers.appendVia("1.1 nowhere.com", ", ");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred, 1.1 nowhere.com");
+  }
+  {
+    // Append and then later set.
+    VerifiedHeaderMapImpl headers;
+    headers.appendVia("1.0 fred", ",");
+    headers.appendVia("1.1 nowhere.com", ",");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com");
+    headers.setVia("2.0 override");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "2.0 override");
+  }
+  {
+    // Set and then append. This mimics how GrpcTimeout is set.
+    VerifiedHeaderMapImpl headers;
+    headers.setGrpcTimeout(42);
+    EXPECT_EQ(headers.GrpcTimeout()->value().getStringView(), "42");
+    headers.appendGrpcTimeout("s", "");
+    EXPECT_EQ(headers.GrpcTimeout()->value().getStringView(), "42s");
+  }
 }
 
 TEST(HeaderMapImplTest, MoveIntoInline) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   HeaderString key;
   key.setCopy(Headers::get().CacheControl.get());
   HeaderString value;
-  value.setCopy("hello", 5);
+  value.setCopy("hello");
   headers.addViaMove(std::move(key), std::move(value));
   EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView());
   EXPECT_EQ("hello", headers.CacheControl()->value().getStringView());
 
   HeaderString key2;
-  key2.setCopy(Headers::get().CacheControl.get().c_str(), Headers::get().CacheControl.get().size());
+  key2.setCopy(Headers::get().CacheControl.get());
   HeaderString value2;
-  value2.setCopy("there", 5);
+  value2.setCopy("there");
   headers.addViaMove(std::move(key2), std::move(value2));
   EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView());
   EXPECT_EQ("hello,there", headers.CacheControl()->value().getStringView());
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
 }
 
 TEST(HeaderMapImplTest, Remove) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
 
   // Add random header and then remove by name.
   LowerCaseString static_key("hello");
   std::string ref_value("value");
   headers.addReference(static_key, ref_value);
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
   EXPECT_EQ("value", headers.get(static_key)->value().getStringView());
   EXPECT_EQ(HeaderString::Type::Reference, headers.get(static_key)->value().type());
   EXPECT_EQ(1UL, headers.size());
@@ -425,11 +465,9 @@ TEST(HeaderMapImplTest, Remove) {
   EXPECT_EQ(nullptr, headers.get(static_key));
   EXPECT_EQ(0UL, headers.size());
   EXPECT_TRUE(headers.empty());
-  EXPECT_EQ(headers.refreshByteSize(), 0);
 
   // Add and remove by inline.
-  headers.insertContentLength().value(5);
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
+  headers.setContentLength(5);
   EXPECT_EQ("5", headers.ContentLength()->value().getStringView());
   EXPECT_EQ(1UL, headers.size());
   EXPECT_FALSE(headers.empty());
@@ -437,19 +475,16 @@ TEST(HeaderMapImplTest, Remove) {
   EXPECT_EQ(nullptr, headers.ContentLength());
   EXPECT_EQ(0UL, headers.size());
   EXPECT_TRUE(headers.empty());
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
 
   // Add inline and remove by name.
-  headers.insertContentLength().value(5);
+  headers.setContentLength(5);
   EXPECT_EQ("5", headers.ContentLength()->value().getStringView());
   EXPECT_EQ(1UL, headers.size());
   EXPECT_FALSE(headers.empty());
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
   headers.remove(Headers::get().ContentLength);
   EXPECT_EQ(nullptr, headers.ContentLength());
   EXPECT_EQ(0UL, headers.size());
   EXPECT_TRUE(headers.empty());
-  EXPECT_EQ(headers.refreshByteSize(), 0);
 }
 
 TEST(HeaderMapImplTest, RemoveRegex) {
@@ -461,17 +496,15 @@ TEST(HeaderMapImplTest, RemoveRegex) {
   LowerCaseString key2 = LowerCaseString(" x-prefix-foo");
   LowerCaseString key4 = LowerCaseString("y-x-prefix-foo");
 
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   headers.addReference(key1, "value");
   headers.addReference(key2, "value");
   headers.addReference(key3, "value");
   headers.addReference(key4, "value");
   headers.addReference(key5, "value");
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
 
   // Test removing the first header, middle headers, and the end header.
   headers.removePrefix(LowerCaseString("x-prefix-"));
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
   EXPECT_EQ(nullptr, headers.get(key1));
   EXPECT_NE(nullptr, headers.get(key2));
   EXPECT_EQ(nullptr, headers.get(key3));
@@ -479,25 +512,21 @@ TEST(HeaderMapImplTest, RemoveRegex) {
   EXPECT_EQ(nullptr, headers.get(key5));
 
   // Remove all headers.
-  headers.refreshByteSize();
   headers.removePrefix(LowerCaseString(""));
-  EXPECT_EQ(headers.byteSize().value(), 0);
   EXPECT_EQ(nullptr, headers.get(key2));
   EXPECT_EQ(nullptr, headers.get(key4));
 
   // Add inline and remove by regex
-  headers.insertContentLength().value(5);
+  headers.setContentLength(5);
   EXPECT_EQ("5", headers.ContentLength()->value().getStringView());
   EXPECT_EQ(1UL, headers.size());
   EXPECT_FALSE(headers.empty());
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
   headers.removePrefix(LowerCaseString("content"));
   EXPECT_EQ(nullptr, headers.ContentLength());
-  EXPECT_EQ(headers.refreshByteSize(), 0);
 }
 
 TEST(HeaderMapImplTest, SetRemovesAllValues) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
 
   LowerCaseString key1("hello");
   LowerCaseString key2("olleh");
@@ -511,7 +540,6 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) {
   headers.addReference(key2, ref_value2);
   headers.addReference(key1, ref_value3);
   headers.addReference(key1, ref_value4);
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
 
   using MockCb = testing::MockFunction<void(const std::string&, const std::string&)>;
 
@@ -554,37 +582,33 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) {
 
 TEST(HeaderMapImplTest, DoubleInlineAdd) {
   {
-    HeaderMapImpl headers;
+    VerifiedHeaderMapImpl headers;
     const std::string foo("foo");
     const std::string bar("bar");
     headers.addReference(Headers::get().ContentLength, foo);
     headers.addReference(Headers::get().ContentLength, bar);
-    EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
     EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView());
     EXPECT_EQ(1UL, headers.size());
   }
   {
-    HeaderMapImpl headers;
+    VerifiedHeaderMapImpl headers;
     headers.addReferenceKey(Headers::get().ContentLength, "foo");
     headers.addReferenceKey(Headers::get().ContentLength, "bar");
-    EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
     EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView());
     EXPECT_EQ(1UL, headers.size());
   }
   {
-    HeaderMapImpl headers;
+    VerifiedHeaderMapImpl headers;
     headers.addReferenceKey(Headers::get().ContentLength, 5);
     headers.addReferenceKey(Headers::get().ContentLength, 6);
-    EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
     EXPECT_EQ("5,6", headers.ContentLength()->value().getStringView());
     EXPECT_EQ(1UL, headers.size());
   }
   {
-    HeaderMapImpl headers;
+    VerifiedHeaderMapImpl headers;
     const std::string foo("foo");
     headers.addReference(Headers::get().ContentLength, foo);
     headers.addReferenceKey(Headers::get().ContentLength, 6);
-    EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
     EXPECT_EQ("foo,6", headers.ContentLength()->value().getStringView());
     EXPECT_EQ(1UL, headers.size());
   }
@@ -593,14 +617,13 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) {
 // Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't
 // combine set-cookie headers
 TEST(HeaderMapImplTest, DoubleCookieAdd) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   const std::string foo("foo");
   const std::string bar("bar");
   const LowerCaseString& set_cookie = Http::Headers::get().SetCookie;
   headers.addReference(set_cookie, foo);
   headers.addReference(set_cookie, bar);
   EXPECT_EQ(2UL, headers.size());
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
 
   std::vector<absl::string_view> out;
   Http::HeaderUtility::getAllOfHeader(headers, "set-cookie", out);
@@ -610,45 +633,91 @@ TEST(HeaderMapImplTest, DoubleCookieAdd) {
 }
 
 TEST(HeaderMapImplTest, DoubleInlineSet) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   headers.setReferenceKey(Headers::get().ContentType, "blah");
   headers.setReferenceKey(Headers::get().ContentType, "text/html");
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
   EXPECT_EQ("text/html", headers.ContentType()->value().getStringView());
   EXPECT_EQ(1UL, headers.size());
 }
 
 TEST(HeaderMapImplTest, AddReferenceKey) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   LowerCaseString foo("hello");
   headers.addReferenceKey(foo, "world");
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
   EXPECT_NE("world", headers.get(foo)->value().getStringView().data());
   EXPECT_EQ("world", headers.get(foo)->value().getStringView());
 }
 
 TEST(HeaderMapImplTest, SetReferenceKey) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   LowerCaseString foo("hello");
   headers.setReferenceKey(foo, "world");
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
   EXPECT_NE("world", headers.get(foo)->value().getStringView().data());
   EXPECT_EQ("world", headers.get(foo)->value().getStringView());
-  headers.refreshByteSize();
 
   headers.setReferenceKey(foo, "monde");
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
   EXPECT_NE("monde", headers.get(foo)->value().getStringView().data());
   EXPECT_EQ("monde", headers.get(foo)->value().getStringView());
 }
 
+TEST(HeaderMapImplTest, SetCopy) {
+  VerifiedHeaderMapImpl headers;
+  LowerCaseString foo("hello");
+  headers.setCopy(foo, "world");
+  EXPECT_EQ("world", headers.get(foo)->value().getStringView());
+
+  // Overwrite value.
+  headers.setCopy(foo, "monde");
+  EXPECT_EQ("monde", headers.get(foo)->value().getStringView());
+
+  // Add another foo header.
+  headers.addCopy(foo, "monde2");
+  EXPECT_EQ(headers.size(), 2);
+
+  // Only the first foo header is overridden.
+  headers.setCopy(foo, "override-monde");
+  EXPECT_EQ(headers.size(), 2);
+
+  using MockCb = testing::MockFunction<void(const std::string&, const std::string&)>;
+  MockCb cb;
+
+  InSequence seq;
+  EXPECT_CALL(cb, Call("hello", "override-monde"));
+  EXPECT_CALL(cb, Call("hello", "monde2"));
+  headers.iterate(
+      [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate {
+        static_cast<MockCb*>(cb_v)->Call(std::string(header.key().getStringView()),
+                                         std::string(header.value().getStringView()));
+        return HeaderMap::Iterate::Continue;
+      },
+      &cb);
+
+  // Test setting an empty string and then overriding.
+  headers.remove(foo);
+  EXPECT_EQ(headers.size(), 0);
+  const std::string empty;
+  headers.setCopy(foo, empty);
+  EXPECT_EQ(headers.size(), 1);
+  headers.setCopy(foo, "not-empty");
+  EXPECT_EQ(headers.get(foo)->value().getStringView(), "not-empty");
+
+  // Use setCopy with inline headers both indirectly and directly.
+  headers.clear();
+  EXPECT_EQ(headers.size(), 0);
+  headers.setCopy(Headers::get().Path, "/");
+  EXPECT_EQ(headers.size(), 1);
+  EXPECT_EQ(headers.Path()->value().getStringView(), "/");
+  headers.setPath("/foo");
+  EXPECT_EQ(headers.size(), 1);
+  EXPECT_EQ(headers.Path()->value().getStringView(), "/foo");
+}
+
 TEST(HeaderMapImplTest, AddCopy) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
 
   // Start with a string value.
   std::unique_ptr<LowerCaseString> lcKeyPtr(new LowerCaseString("hello"));
   headers.addCopy(*lcKeyPtr, "world");
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
 
   const HeaderString& value = headers.get(*lcKeyPtr)->value();
 
@@ -669,18 +738,15 @@ TEST(HeaderMapImplTest, AddCopy) {
   // addReferenceKey and addCopy can both add multiple instances of a
   // given header, so we need to delete the old "hello" header.
   // Test that removing will return 0 byte size.
-  headers.refreshByteSize();
   headers.remove(LowerCaseString("hello"));
-  EXPECT_EQ(headers.byteSize().value(), 0);
+  EXPECT_EQ(headers.byteSize(), 0);
 
   // Build "hello" with string concatenation to make it unlikely that the
   // compiler is just reusing the same string constant for everything.
   lcKeyPtr = std::make_unique<LowerCaseString>(std::string("he") + "llo");
   EXPECT_STREQ("hello", lcKeyPtr->get().c_str());
 
-  headers.refreshByteSize();
   headers.addCopy(*lcKeyPtr, 42);
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
 
   const HeaderString& value3 = headers.get(*lcKeyPtr)->value();
 
@@ -706,48 +772,42 @@ TEST(HeaderMapImplTest, AddCopy) {
   headers.addCopy(cache_control, "max-age=1345");
   EXPECT_EQ("max-age=1345", headers.get(cache_control)->value().getStringView());
   EXPECT_EQ("max-age=1345", headers.CacheControl()->value().getStringView());
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
   headers.addCopy(cache_control, "public");
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
   EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView());
   headers.addCopy(cache_control, "");
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
   EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView());
   headers.addCopy(cache_control, 123);
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
   EXPECT_EQ("max-age=1345,public,123", headers.get(cache_control)->value().getStringView());
   headers.addCopy(cache_control, std::numeric_limits<uint64_t>::max());
   EXPECT_EQ("max-age=1345,public,123,18446744073709551615",
             headers.get(cache_control)->value().getStringView());
-  EXPECT_EQ(headers.refreshByteSize(), countBytesForTest(headers));
 }
 
 TEST(HeaderMapImplTest, Equality) {
-  TestHeaderMapImpl headers1;
-  TestHeaderMapImpl headers2;
+  VerifiedHeaderMapImpl headers1;
+  VerifiedHeaderMapImpl headers2;
   EXPECT_EQ(headers1, headers2);
 
-  headers1.addCopy("hello", "world");
+  headers1.addCopy(LowerCaseString("hello"), "world");
   EXPECT_FALSE(headers1 == headers2);
 
-  headers2.addCopy("foo", "bar");
+  headers2.addCopy(LowerCaseString("foo"), "bar");
   EXPECT_FALSE(headers1 == headers2);
 }
 
 TEST(HeaderMapImplTest, LargeCharInHeader) {
-  HeaderMapImpl headers;
+  VerifiedHeaderMapImpl headers;
   LowerCaseString static_key("\x90hello");
   std::string ref_value("value");
   headers.addReference(static_key, ref_value);
-  EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
   EXPECT_EQ("value", headers.get(static_key)->value().getStringView());
 }
 
 TEST(HeaderMapImplTest, Iterate) {
-  TestHeaderMapImpl headers;
-  headers.addCopy("hello", "world");
-  headers.addCopy("foo", "xxx");
-  headers.addCopy("world", "hello");
+  VerifiedHeaderMapImpl headers;
+  headers.addCopy(LowerCaseString("hello"), "world");
+  headers.addCopy(LowerCaseString("foo"), "xxx");
+  headers.addCopy(LowerCaseString("world"), "hello");
   LowerCaseString foo_key("foo");
   headers.setReferenceKey(foo_key, "bar"); // set moves key to end
 
@@ -768,9 +828,9 @@ TEST(HeaderMapImplTest, Iterate) {
 }
 
 TEST(HeaderMapImplTest, IterateReverse) {
-  TestHeaderMapImpl headers;
-  headers.addCopy("hello", "world");
-  headers.addCopy("foo", "bar");
+  VerifiedHeaderMapImpl headers;
+  headers.addCopy(LowerCaseString("hello"), "world");
+  headers.addCopy(LowerCaseString("foo"), "bar");
   LowerCaseString world_key("world");
   headers.setReferenceKey(world_key, "hello");
 
@@ -795,9 +855,9 @@ TEST(HeaderMapImplTest, IterateReverse) {
 }
 
 TEST(HeaderMapImplTest, Lookup) {
-  TestHeaderMapImpl headers;
-  headers.addCopy("hello", "world");
-  headers.insertContentLength().value(5);
+  VerifiedHeaderMapImpl headers;
+  headers.addCopy(LowerCaseString("hello"), "world");
+  headers.setContentLength(5);
 
   // Lookup is not supported for non predefined inline headers.
   {
@@ -823,18 +883,21 @@ TEST(HeaderMapImplTest, Lookup) {
 
 TEST(HeaderMapImplTest, Get) {
   {
-    const TestHeaderMapImpl headers{{":path", "/"}, {"hello", "world"}};
+    const VerifiedHeaderMapImpl headers{{Headers::get().Path, "/"},
+                                        {LowerCaseString("hello"), "world"}};
     EXPECT_EQ("/", headers.get(LowerCaseString(":path"))->value().getStringView());
     EXPECT_EQ("world", headers.get(LowerCaseString("hello"))->value().getStringView());
     EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo")));
   }
 
   {
-    TestHeaderMapImpl headers{{":path", "/"}, {"hello", "world"}};
-    headers.get(LowerCaseString(":path"))->value(std::string("/new_path"));
+    VerifiedHeaderMapImpl headers{{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}};
+    // There is no HeaderMap method that sets a header and copies both the key and value.
+    headers.setReferenceKey(LowerCaseString(":path"), "/new_path");
     EXPECT_EQ("/new_path", headers.get(LowerCaseString(":path"))->value().getStringView());
-    headers.get(LowerCaseString("hello"))->value(std::string("world2"));
-    EXPECT_EQ("world2", headers.get(LowerCaseString("hello"))->value().getStringView());
+    LowerCaseString foo("hello");
+    headers.setReferenceKey(foo, "world2");
+    EXPECT_EQ("world2", headers.get(foo)->value().getStringView());
     EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo")));
   }
 }
@@ -842,41 +905,57 @@ TEST(HeaderMapImplTest, Get) {
 TEST(HeaderMapImplTest, TestAppendHeader) {
   // Test appending to a string with a value.
   {
-    HeaderString value1;
-    value1.setCopy("some;", 5);
-    HeaderMapImpl::appendToHeader(value1, "test");
-    EXPECT_EQ(value1, "some;,test");
+    VerifiedHeaderMapImpl headers;
+    LowerCaseString foo("key1");
+    headers.addCopy(foo, "some;");
+    headers.appendCopy(foo, "test");
+    EXPECT_EQ(headers.get(foo)->value().getStringView(), "some;,test");
   }
 
   // Test appending to an empty string.
   {
-    HeaderString value2;
-    HeaderMapImpl::appendToHeader(value2, "my tag data");
-    EXPECT_EQ(value2, "my tag data");
+    VerifiedHeaderMapImpl headers;
+    LowerCaseString key2("key2");
+    headers.appendCopy(key2, "my tag data");
+    EXPECT_EQ(headers.get(key2)->value().getStringView(), "my tag data");
   }
 
   // Test empty data case.
   {
-    HeaderString value3;
-    value3.setCopy("empty", 5);
-    HeaderMapImpl::appendToHeader(value3, "");
-    EXPECT_EQ(value3, "empty");
+    VerifiedHeaderMapImpl headers;
+    LowerCaseString key3("key3");
+    headers.addCopy(key3, "empty");
+    headers.appendCopy(key3, "");
+    EXPECT_EQ(headers.get(key3)->value().getStringView(), "empty");
   }
   // Regression test for appending to an empty string with a short string, then
   // setting integer.
   {
+    VerifiedHeaderMapImpl headers;
     const std::string empty;
-    HeaderString value4(empty);
-    HeaderMapImpl::appendToHeader(value4, " ");
-    value4.setInteger(0);
-    EXPECT_EQ("0", value4.getStringView());
-    EXPECT_EQ(1U, value4.size());
+    headers.setPath(empty);
+    // Append with default delimiter.
+    headers.appendPath(" ", ",");
+    headers.setPath(0);
+    EXPECT_EQ("0", headers.Path()->value().getStringView());
+    EXPECT_EQ(1U, headers.Path()->value().size());
+  }
+  // Test appending to inline headers via both appendCopy() and append##name().
+  {
+    VerifiedHeaderMapImpl headers;
+    headers.addCopy(Headers::get().Via, "1.0 fred");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred");
+    headers.appendCopy(Headers::get().Via, "1.1 p.example.net");
+    EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 p.example.net");
+    headers.appendVia("1.1 new.example.net", ",");
+    EXPECT_EQ(headers.Via()->value().getStringView(),
+              "1.0 fred,1.1 p.example.net,1.1 new.example.net");
   }
 }
 
 TEST(HeaderMapImplDeathTest, TestHeaderLengthChecks) {
   HeaderString value;
-  value.setCopy("some;", 5);
+  value.setCopy("some;");
   EXPECT_DEATH_LOG_TO_STDERR(value.append(nullptr, std::numeric_limits<uint32_t>::max()),
                              "Trying to allocate overly large headers.");
 
@@ -893,8 +972,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) {
 
   {
     LowerCaseString foo("hello");
-    Http::TestHeaderMapImpl headers{};
-    EXPECT_EQ(headers.refreshByteSize(), 0);
+    Http::VerifiedHeaderMapImpl headers{};
     EXPECT_EQ(0UL, headers.size());
     EXPECT_TRUE(headers.empty());
 
@@ -1048,11 +1126,11 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) {
 
   // Starting with a normal header
   {
-    Http::TestHeaderMapImpl headers{{"content-type", "text/plain"},
-                                    {":method", "GET"},
-                                    {":path", "/"},
-                                    {"hello", "world"},
-                                    {":authority", "host"}};
+    Http::VerifiedHeaderMapImpl headers{{Headers::get().ContentType, "text/plain"},
+                                        {Headers::get().Method, "GET"},
+                                        {Headers::get().Path, "/"},
+                                        {LowerCaseString("hello"), "world"},
+                                        {Headers::get().Host, "host"}};
 
     InSequence seq;
     EXPECT_CALL(cb, Call(":method", "GET"));
@@ -1072,11 +1150,11 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) {
 
   // Starting with a pseudo-header
   {
-    Http::TestHeaderMapImpl headers{{":path", "/"},
-                                    {"content-type", "text/plain"},
-                                    {":method", "GET"},
-                                    {"hello", "world"},
-                                    {":authority", "host"}};
+    Http::VerifiedHeaderMapImpl headers{{Headers::get().Path, "/"},
+                                        {Headers::get().ContentType, "text/plain"},
+                                        {Headers::get().Method, "GET"},
+                                        {LowerCaseString("hello"), "world"},
+                                        {Headers::get().Host, "host"}};
 
     InSequence seq;
     EXPECT_CALL(cb, Call(":path", "/"));
@@ -1112,57 +1190,104 @@ TEST(HeaderMapImplTest, TestHeaderMapImplyCopy) {
 }
 
 TEST(HeaderMapImplTest, TestInlineHeaderAdd) {
-  TestHeaderMapImpl foo;
-  foo.addCopy(":path", "GET");
+  VerifiedHeaderMapImpl foo;
+  foo.addCopy(LowerCaseString(":path"), "GET");
   EXPECT_EQ(foo.size(), 1);
   EXPECT_TRUE(foo.Path() != nullptr);
 }
 
+TEST(HeaderMapImplTest, ClearHeaderMap) {
+  VerifiedHeaderMapImpl headers;
+  LowerCaseString static_key("hello");
+  std::string ref_value("value");
+
+  // Add random header and then clear.
+  headers.addReference(static_key, ref_value);
+  EXPECT_EQ("value", headers.get(static_key)->value().getStringView());
+  EXPECT_EQ(HeaderString::Type::Reference, headers.get(static_key)->value().type());
+  EXPECT_EQ(1UL, headers.size());
+  EXPECT_FALSE(headers.empty());
+  headers.clear();
+  EXPECT_EQ(nullptr, headers.get(static_key));
+  EXPECT_EQ(0UL, headers.size());
+  EXPECT_EQ(headers.byteSize(), 0);
+  EXPECT_TRUE(headers.empty());
+
+  // Add inline and clear.
+  headers.setContentLength(5);
+  EXPECT_EQ("5", headers.ContentLength()->value().getStringView());
+  EXPECT_EQ(1UL, headers.size());
+  EXPECT_FALSE(headers.empty());
+  headers.clear();
+  EXPECT_EQ(nullptr, headers.ContentLength());
+  EXPECT_EQ(0UL, headers.size());
+  EXPECT_EQ(headers.byteSize(), 0);
+  EXPECT_TRUE(headers.empty());
+
+  // Add mixture of headers.
+  headers.addReference(static_key, ref_value);
+  headers.setContentLength(5);
+  headers.addCopy(static_key, "new_value");
+  EXPECT_EQ(3UL, headers.size());
+  EXPECT_FALSE(headers.empty());
+  headers.clear();
+  EXPECT_EQ(nullptr, headers.ContentLength());
+  EXPECT_EQ(0UL, headers.size());
+  EXPECT_EQ(headers.byteSize(), 0);
+  EXPECT_TRUE(headers.empty());
+}
+
 // Validates byte size is properly accounted for in different inline header setting scenarios.
 TEST(HeaderMapImplTest, InlineHeaderByteSize) {
-  uint64_t hostKeySize = Headers::get().Host.get().size();
-  uint64_t statusKeySize = Headers::get().Status.get().size();
   {
-    HeaderMapImpl headers;
+    VerifiedHeaderMapImpl headers;
     std::string foo = "foo";
-    EXPECT_EQ(headers.byteSize().value(), 0);
     headers.setHost(foo);
-    EXPECT_EQ(headers.byteSize().value(), foo.size() + hostKeySize);
+    EXPECT_EQ(headers.byteSize(), 13);
   }
   {
-    // Overwrite an inline headers.
-    HeaderMapImpl headers;
+    // Overwrite an inline header with set.
+    VerifiedHeaderMapImpl headers;
     std::string foo = "foo";
-    EXPECT_EQ(headers.byteSize().value(), 0);
     headers.setHost(foo);
-    EXPECT_EQ(headers.byteSize().value(), foo.size() + hostKeySize);
     std::string big_foo = "big_foo";
     headers.setHost(big_foo);
-    EXPECT_EQ(headers.byteSize().value(), big_foo.size() + hostKeySize);
+    EXPECT_EQ(headers.byteSize(), 17);
   }
   {
-    // Overwrite an inline headers with reference value and clear.
-    HeaderMapImpl headers;
+    // Overwrite an inline header with setReference and clear.
+    VerifiedHeaderMapImpl headers;
     std::string foo = "foo";
-    EXPECT_EQ(headers.byteSize().value(), 0);
     headers.setHost(foo);
-    EXPECT_EQ(headers.byteSize().value(), foo.size() + hostKeySize);
     std::string big_foo = "big_foo";
     headers.setReferenceHost(big_foo);
-    EXPECT_EQ(headers.byteSize().value(), big_foo.size() + hostKeySize);
+    EXPECT_EQ(headers.byteSize(), 17);
     headers.removeHost();
-    EXPECT_EQ(headers.byteSize().value(), 0);
+    EXPECT_EQ(headers.byteSize(), 0);
+  }
+  {
+    // Overwrite an inline header with an integer value.
+    VerifiedHeaderMapImpl headers;
+    uint64_t status = 200;
+    headers.setStatus(status);
+    EXPECT_EQ(headers.byteSize(), 10);
+    uint64_t newStatus = 500;
+    headers.setStatus(newStatus);
+    EXPECT_EQ(headers.byteSize(), 10);
+    headers.removeStatus();
+    EXPECT_EQ(headers.byteSize(), 0);
   }
   {
-    // Overwrite an inline headers with integer value.
-    HeaderMapImpl headers;
+    // Set an inline header, remove it, and set it again.
+    VerifiedHeaderMapImpl headers;
     uint64_t status = 200;
-    EXPECT_EQ(headers.byteSize().value(), 0);
     headers.setStatus(status);
-    EXPECT_EQ(headers.byteSize().value(), 3 + statusKeySize);
+    EXPECT_EQ(headers.byteSize(), 10);
+    headers.removeStatus();
+    EXPECT_EQ(headers.byteSize(), 0);
     uint64_t newStatus = 500;
     headers.setStatus(newStatus);
-    EXPECT_EQ(headers.byteSize().value(), 3 + statusKeySize);
+    EXPECT_EQ(headers.byteSize(), 10);
   }
 }
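
For reference, the hard-coded expectations in InlineHeaderByteSize are simply key length plus value length: ":authority" (10) + "foo" (3) = 13, ":authority" (10) + "big_foo" (7) = 17, and ":status" (7) + "200"/"500" (3) = 10.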
 
diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc
index ea9d5c480956..c2784c8dcf19 100644
--- a/test/common/http/http2/codec_impl_test.cc
+++ b/test/common/http/http2/codec_impl_test.cc
@@ -1143,19 +1143,17 @@ TEST_P(Http2CodecImplTest, LargeRequestHeadersAtLimitAccepted) {
 
   TestHeaderMapImpl request_headers;
   HttpTestUtility::addDefaultHeaders(request_headers);
-  // Refresh byte size after adding default inline headers by reference.
-  request_headers.refreshByteSize();
   std::string key = "big";
   uint32_t head_room = 77;
   uint32_t long_string_length =
-      codec_limit_kb * 1024 - request_headers.byteSize().value() - key.length() - head_room;
+      codec_limit_kb * 1024 - request_headers.byteSize() - key.length() - head_room;
   std::string long_string = std::string(long_string_length, 'q');
   request_headers.addCopy(key, long_string);
 
   // The amount of data sent to the codec is not equivalent to the size of the
   // request headers that Envoy computes, as the codec limits based on the
   // entire http2 frame. The exact head room needed (76) was found through iteration.
-  ASSERT_EQ(request_headers.byteSize().value() + head_room, codec_limit_kb * 1024);
+  ASSERT_EQ(request_headers.byteSize() + head_room, codec_limit_kb * 1024);
 
   EXPECT_CALL(request_decoder_, decodeHeaders_(_, _));
   request_encoder_->encodeHeaders(request_headers, true);
diff --git a/test/common/http/path_utility_test.cc b/test/common/http/path_utility_test.cc
index 7f8274240388..c2a7230f6300 100644
--- a/test/common/http/path_utility_test.cc
+++ b/test/common/http/path_utility_test.cc
@@ -14,7 +14,7 @@ class PathUtilityTest : public testing::Test {
   // This is an indirect way to build a header entry for
   // PathUtil::canonicalPath(), since we don't have direct access to the
   // HeaderMapImpl constructor.
-  HeaderEntry& pathHeaderEntry(const std::string& path_value) {
+  const HeaderEntry& pathHeaderEntry(const std::string& path_value) {
     headers_.setPath(path_value);
     return *headers_.Path();
   }
@@ -26,7 +26,7 @@ TEST_F(PathUtilityTest, AlreadyNormalPaths) {
   const std::vector<std::string> normal_paths{"/xyz", "/x/y/z"};
   for (const auto& path : normal_paths) {
     auto& path_header = pathHeaderEntry(path);
-    const auto result = PathUtil::canonicalPath(path_header);
+    const auto result = PathUtil::canonicalPath(headers_);
     EXPECT_TRUE(result) << "original path: " << path;
     EXPECT_EQ(path_header.value().getStringView(), absl::string_view(path));
   }
@@ -37,8 +37,8 @@ TEST_F(PathUtilityTest, InvalidPaths) {
   const std::vector<std::string> invalid_paths{"/xyz/.%00../abc", "/xyz/%00.%00./abc",
                                                "/xyz/AAAAA%%0000/abc"};
   for (const auto& path : invalid_paths) {
-    auto& path_header = pathHeaderEntry(path);
-    EXPECT_FALSE(PathUtil::canonicalPath(path_header)) << "original path: " << path;
+    pathHeaderEntry(path);
+    EXPECT_FALSE(PathUtil::canonicalPath(headers_)) << "original path: " << path;
   }
 }
 
@@ -57,7 +57,7 @@ TEST_F(PathUtilityTest, NormalizeValidPaths) {
 
   for (const auto& path_pair : non_normal_pairs) {
     auto& path_header = pathHeaderEntry(path_pair.first);
-    const auto result = PathUtil::canonicalPath(path_header);
+    const auto result = PathUtil::canonicalPath(headers_);
     EXPECT_TRUE(result) << "original path: " << path_pair.first;
     EXPECT_EQ(path_header.value().getStringView(), path_pair.second)
         << "original path: " << path_pair.second;
@@ -75,7 +75,7 @@ TEST_F(PathUtilityTest, NormalizeCasePath) {
 
   for (const auto& path_pair : non_normal_pairs) {
     auto& path_header = pathHeaderEntry(path_pair.first);
-    const auto result = PathUtil::canonicalPath(path_header);
+    const auto result = PathUtil::canonicalPath(headers_);
     EXPECT_TRUE(result) << "original path: " << path_pair.first;
     EXPECT_EQ(path_header.value().getStringView(), path_pair.second)
         << "original path: " << path_pair.second;
@@ -89,7 +89,7 @@ TEST_F(PathUtilityTest, NormalizeCasePath) {
 TEST_F(PathUtilityTest, MergeSlashes) {
   auto mergeSlashes = [this](const std::string& path_value) {
     auto& path_header = pathHeaderEntry(path_value);
-    PathUtil::mergeSlashes(path_header);
+    PathUtil::mergeSlashes(headers_);
     auto sanitized_path_value = path_header.value().getStringView();
     return std::string(sanitized_path_value);
   };
diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc
index beddb6cce7c9..88b7b3781ee8 100644
--- a/test/common/http/utility_test.cc
+++ b/test/common/http/utility_test.cc
@@ -1,3 +1,4 @@
+#include <array>
 #include <cstdint>
 #include <string>
 
@@ -754,6 +755,37 @@ TEST(HttpUtility, GetMergedPerFilterConfig) {
   EXPECT_EQ(2, merged_cfg.value().state_);
 }
 
+TEST(HttpUtility, CheckIsIpAddress) {
+  std::array<std::tuple<bool, std::string, std::string, absl::optional<uint32_t>>, 14> patterns{
+      std::make_tuple(true, "1.2.3.4", "1.2.3.4", absl::nullopt),
+      std::make_tuple(true, "1.2.3.4:0", "1.2.3.4", 0),
+      std::make_tuple(true, "0.0.0.0:4000", "0.0.0.0", 4000),
+      std::make_tuple(true, "127.0.0.1:0", "127.0.0.1", 0),
+      std::make_tuple(true, "[::]:0", "::", 0),
+      std::make_tuple(true, "[::]", "::", absl::nullopt),
+      std::make_tuple(true, "[1::2:3]:0", "1::2:3", 0),
+      std::make_tuple(true, "[a::1]:0", "a::1", 0),
+      std::make_tuple(true, "[a:b:c:d::]:0", "a:b:c:d::", 0),
+      std::make_tuple(false, "example.com", "example.com", absl::nullopt),
+      std::make_tuple(false, "example.com:8000", "example.com", 8000),
+      std::make_tuple(false, "example.com:abc", "example.com:abc", absl::nullopt),
+      std::make_tuple(false, "localhost:10000", "localhost", 10000),
+      std::make_tuple(false, "localhost", "localhost", absl::nullopt)};
+
+  for (const auto& pattern : patterns) {
+    bool status_pattern = std::get<0>(pattern);
+    const auto& try_host = std::get<1>(pattern);
+    const auto& expect_host = std::get<2>(pattern);
+    const auto& expect_port = std::get<3>(pattern);
+
+    const auto host_attributes = Utility::parseAuthority(try_host);
+
+    EXPECT_EQ(status_pattern, host_attributes.is_ip_address_);
+    EXPECT_EQ(expect_host, host_attributes.host_);
+    EXPECT_EQ(expect_port, host_attributes.port_);
+  }
+}
+
 // Validates TE header is stripped if it contains an unsupported value
 // Also validate the behavior if a nominated header does not exist
 TEST(HttpUtility, TestTeHeaderGzipTrailersSanitized) {
diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc
index e0e74149627a..90493b7f7abf 100644
--- a/test/common/network/connection_impl_test.cc
+++ b/test/common/network/connection_impl_test.cc
@@ -334,6 +334,11 @@ TEST_P(ConnectionImplTest, SocketOptions) {
       }));
 
   dispatcher_->run(Event::Dispatcher::RunType::Block);
+
+  // Assert that the upstream connection gets the socket options.
+  ASSERT(upstream_connection_ != nullptr);
+  ASSERT(upstream_connection_->socketOptions() != nullptr);
+  ASSERT(upstream_connection_->socketOptions()->front() == option);
 }
 
 TEST_P(ConnectionImplTest, SocketOptionsFailureTest) {
diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc
index afcdb784daf2..9603e8d2748b 100644
--- a/test/common/network/dns_impl_test.cc
+++ b/test/common/network/dns_impl_test.cc
@@ -335,7 +335,7 @@ TEST_F(DnsImplConstructor, SupportsCustomResolvers) {
   auto addr4 = Network::Utility::parseInternetAddressAndPort("127.0.0.1:54");
   char addr6str[INET6_ADDRSTRLEN];
   auto addr6 = Network::Utility::parseInternetAddressAndPort("[::1]:54");
-  auto resolver = dispatcher_->createDnsResolver({addr4, addr6});
+  auto resolver = dispatcher_->createDnsResolver({addr4, addr6}, false);
   auto peer = std::make_unique<DnsResolverImplPeer>(dynamic_cast<DnsResolverImpl*>(resolver.get()));
   ares_addr_port_node* resolvers;
   int result = ares_get_servers_ports(peer->channel(), &resolvers);
@@ -380,7 +380,7 @@ class CustomInstance : public Address::Instance {
 TEST_F(DnsImplConstructor, SupportCustomAddressInstances) {
   auto test_instance(std::make_shared<CustomInstance>("127.0.0.1", 45));
   EXPECT_EQ(test_instance->asString(), "127.0.0.1:borked_port_45");
-  auto resolver = dispatcher_->createDnsResolver({test_instance});
+  auto resolver = dispatcher_->createDnsResolver({test_instance}, false);
   auto peer = std::make_unique<DnsResolverImplPeer>(dynamic_cast<DnsResolverImpl*>(resolver.get()));
   ares_addr_port_node* resolvers;
   int result = ares_get_servers_ports(peer->channel(), &resolvers);
@@ -396,7 +396,7 @@ TEST_F(DnsImplConstructor, BadCustomResolvers) {
   envoy::api::v2::core::Address pipe_address;
   pipe_address.mutable_pipe()->set_path("foo");
   auto pipe_instance = Network::Utility::protobufAddressToAddress(pipe_address);
-  EXPECT_THROW_WITH_MESSAGE(dispatcher_->createDnsResolver({pipe_instance}), EnvoyException,
+  EXPECT_THROW_WITH_MESSAGE(dispatcher_->createDnsResolver({pipe_instance}, false), EnvoyException,
                             "DNS resolver 'foo' is not an IP address");
 }
 
@@ -405,7 +405,7 @@ class DnsImplTest : public testing::TestWithParam<Address::IpVersion> {
   DnsImplTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()) {}
 
   void SetUp() override {
-    resolver_ = dispatcher_->createDnsResolver({});
+    resolver_ = dispatcher_->createDnsResolver({}, use_tcp_for_dns_lookups());
 
     // Instantiate TestDnsServer and listen on a random port on the loopback address.
     server_ = std::make_unique<TestDnsServer>(*dispatcher_);
@@ -415,7 +415,9 @@ class DnsImplTest : public testing::TestWithParam<Address::IpVersion> {
 
     // Point c-ares at the listener with no search domains and TCP-only.
     peer_ = std::make_unique<DnsResolverImplPeer>(dynamic_cast<DnsResolverImpl*>(resolver_.get()));
-    peer_->resetChannelTcpOnly(zero_timeout());
+    if (tcp_only()) {
+      peer_->resetChannelTcpOnly(zero_timeout());
+    }
     ares_set_servers_ports_csv(peer_->channel(), socket_->localAddress()->asString().c_str());
   }
 
@@ -437,6 +439,8 @@ class DnsImplTest : public testing::TestWithParam<Address::IpVersion> {
 protected:
   // Should the DnsResolverImpl use a zero timeout for c-ares queries?
   virtual bool zero_timeout() const { return false; }
+  virtual bool tcp_only() const { return true; }
+  virtual bool use_tcp_for_dns_lookups() const { return false; }
   std::unique_ptr<TestDnsServer> server_;
   std::unique_ptr<DnsResolverImplPeer> peer_;
   Network::MockConnectionHandler connection_handler_;
@@ -849,7 +853,7 @@ TEST(DnsImplUnitTest, PendingTimerEnable) {
   Event::MockDispatcher dispatcher;
   Event::MockTimer* timer = new NiceMock<Event::MockTimer>();
   EXPECT_CALL(dispatcher, createTimer_(_)).WillOnce(Return(timer));
-  DnsResolverImpl resolver(dispatcher, {});
+  DnsResolverImpl resolver(dispatcher, {}, false);
   Event::FileEvent* file_event = new NiceMock<Event::MockFileEvent>();
   EXPECT_CALL(dispatcher, createFileEvent_(_, _, _, _)).WillOnce(Return(file_event));
   EXPECT_CALL(*timer, enableTimer(_, _));
@@ -859,5 +863,64 @@ TEST(DnsImplUnitTest, PendingTimerEnable) {
                                       }));
 }
 
+class DnsImplAresFlagsForTcpTest : public DnsImplTest {
+protected:
+  bool tcp_only() const override { return false; }
+  bool use_tcp_for_dns_lookups() const override { return true; }
+};
+
+// Parameterize the DNS test server socket address.
+INSTANTIATE_TEST_SUITE_P(IpVersions, DnsImplAresFlagsForTcpTest,
+                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),
+                         TestUtility::ipTestParamsToString);
+
+// Validate that the c-ares flag `ARES_FLAG_USEVC` is set when the boolean property
+// `use_tcp_for_dns_lookups` is enabled.
+TEST_P(DnsImplAresFlagsForTcpTest, TcpLookupsEnabled) {
+  server_->addCName("root.cnam.domain", "result.cname.domain");
+  server_->addHosts("result.cname.domain", {"201.134.56.7"}, RecordType::A);
+  std::list<Address::InstanceConstSharedPtr> address_list;
+  struct ares_options opts;
+  memset(&opts, 0, sizeof(opts));
+  int optmask = 0;
+  EXPECT_EQ(ARES_SUCCESS, ares_save_options(peer_->channel(), &opts, &optmask));
+  EXPECT_TRUE((opts.flags & ARES_FLAG_USEVC) == ARES_FLAG_USEVC);
+  EXPECT_NE(nullptr, resolver_->resolve("root.cnam.domain", DnsLookupFamily::Auto,
+                                        [&](std::list<DnsResponse>&& results) -> void {
+                                          address_list = getAddressList(results);
+                                          dispatcher_->exit();
+                                        }));
+  ares_destroy_options(&opts);
+}
+
+class DnsImplAresFlagsForUdpTest : public DnsImplTest {
+protected:
+  bool tcp_only() const override { return false; }
+};
+
+// Parameterize the DNS test server socket address.
+INSTANTIATE_TEST_SUITE_P(IpVersions, DnsImplAresFlagsForUdpTest,
+                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),
+                         TestUtility::ipTestParamsToString);
+
+// Validate that the c-ares flag `ARES_FLAG_USEVC` is not set when the boolean property
+// `use_tcp_for_dns_lookups` is disabled.
+TEST_P(DnsImplAresFlagsForUdpTest, UdpLookupsEnabled) {
+  server_->addCName("root.cnam.domain", "result.cname.domain");
+  server_->addHosts("result.cname.domain", {"201.134.56.7"}, RecordType::A);
+  std::list<Address::InstanceConstSharedPtr> address_list;
+  struct ares_options opts;
+  memset(&opts, 0, sizeof(opts));
+  int optmask = 0;
+  EXPECT_EQ(ARES_SUCCESS, ares_save_options(peer_->channel(), &opts, &optmask));
+  EXPECT_FALSE((opts.flags & ARES_FLAG_USEVC) == ARES_FLAG_USEVC);
+  EXPECT_NE(nullptr, resolver_->resolve("root.cnam.domain", DnsLookupFamily::Auto,
+                                        [&](std::list<DnsResponse>&& results) -> void {
+                                          address_list = getAddressList(results);
+                                          dispatcher_->exit();
+                                        }));
+  ares_destroy_options(&opts);
+}
+
 } // namespace Network
 } // namespace Envoy
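
A note on the new second argument threaded through these DNS tests: it is the switch that puts c-ares into TCP ("virtual circuit") mode, which the `ARES_FLAG_USEVC` assertions above check from both sides. Below is a minimal caller-side sketch using only the signatures visible in this diff; the surrounding dispatcher setup is assumed and not shown.

// Fragment, not a complete test: `dispatcher` is assumed to be an Event::Dispatcher
// obtained the same way the tests above obtain one.
auto resolver = dispatcher.createDnsResolver(/*resolvers=*/{}, /*use_tcp_for_dns_lookups=*/true);
resolver->resolve("example.com", Network::DnsLookupFamily::Auto,
                  [](std::list<Network::DnsResponse>&& results) -> void {
                    // Resolved addresses, if any, arrive in `results`.
                  });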
diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc
index 2fead659b9ae..02c9d29e672a 100644
--- a/test/common/network/listener_impl_test.cc
+++ b/test/common/network/listener_impl_test.cc
@@ -219,6 +219,7 @@ TEST_P(ListenerImplTest, DisableAndEnableListener) {
   auto socket =
       std::make_shared<TcpListenSocket>(Network::Test::getAnyAddress(version_), nullptr, true);
   MockListenerCallbacks listener_callbacks;
+  MockConnectionCallbacks connection_callbacks;
   TestListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true);
 
   // When listener is disabled, the timer should fire before any connection is accepted.
@@ -227,15 +228,15 @@ TEST_P(ListenerImplTest, DisableAndEnableListener) {
   ClientConnectionPtr client_connection =
       dispatcher_->createClientConnection(socket->localAddress(), Address::InstanceConstSharedPtr(),
                                           Network::Test::createRawBufferSocket(), nullptr);
+  client_connection->addConnectionCallbacks(connection_callbacks);
   client_connection->connect();
-  Event::TimerPtr timer = dispatcher_->createTimer([&] {
-    client_connection->close(ConnectionCloseType::NoFlush);
-    dispatcher_->exit();
-  });
-  timer->enableTimer(std::chrono::milliseconds(2000));
 
   EXPECT_CALL(listener_callbacks, onAccept_(_)).Times(0);
-  time_system_.sleep(std::chrono::milliseconds(2000));
+  EXPECT_CALL(connection_callbacks, onEvent(_))
+      .WillOnce(Invoke([&](Network::ConnectionEvent event) -> void {
+        EXPECT_EQ(event, Network::ConnectionEvent::Connected);
+        dispatcher_->exit();
+      }));
   dispatcher_->run(Event::Dispatcher::RunType::Block);
 
   // When the listener is re-enabled, the pending connection should be accepted.
@@ -246,8 +247,12 @@ TEST_P(ListenerImplTest, DisableAndEnableListener) {
           [](int fd) -> Address::InstanceConstSharedPtr { return Address::addressFromFd(fd); }));
   EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce(Invoke([&](ConnectionSocketPtr&) -> void {
     client_connection->close(ConnectionCloseType::NoFlush);
-    dispatcher_->exit();
   }));
+  EXPECT_CALL(connection_callbacks, onEvent(_))
+      .WillOnce(Invoke([&](Network::ConnectionEvent event) -> void {
+        EXPECT_NE(event, Network::ConnectionEvent::Connected);
+        dispatcher_->exit();
+      }));
 
   dispatcher_->run(Event::Dispatcher::RunType::Block);
 }
diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc
index 7d782df391cb..7f63b2856ba2 100644
--- a/test/common/protobuf/utility_test.cc
+++ b/test/common/protobuf/utility_test.cc
@@ -428,13 +428,27 @@ TEST_F(ProtobufUtilityTest, HashedValueStdHash) {
   EXPECT_NE(set.find(hv3), set.end());
 }
 
+// MessageUtil::anyConvert() with the wrong type throws.
 TEST_F(ProtobufUtilityTest, AnyConvertWrongType) {
   ProtobufWkt::Duration source_duration;
   source_duration.set_seconds(42);
   ProtobufWkt::Any source_any;
   source_any.PackFrom(source_duration);
-  EXPECT_THROW_WITH_REGEX(TestUtility::anyConvert<ProtobufWkt::Timestamp>(source_any),
-                          EnvoyException, "Unable to unpack .*");
+  EXPECT_THROW_WITH_REGEX(
+      TestUtility::anyConvert<ProtobufWkt::Timestamp>(source_any), EnvoyException,
+      R"(Unable to unpack as google.protobuf.Timestamp: \[type.googleapis.com/google.protobuf.Duration\] .*)");
+}
+
+// MessageUtil::unpackTo() with the wrong type throws.
+TEST_F(ProtobufUtilityTest, UnpackToWrongType) {
+  ProtobufWkt::Duration source_duration;
+  source_duration.set_seconds(42);
+  ProtobufWkt::Any source_any;
+  source_any.PackFrom(source_duration);
+  ProtobufWkt::Timestamp dst;
+  EXPECT_THROW_WITH_REGEX(
+      MessageUtil::unpackTo(source_any, dst), EnvoyException,
+      R"(Unable to unpack as google.protobuf.Timestamp: \[type.googleapis.com/google.protobuf.Duration\] .*)");
 }
 
 TEST_F(ProtobufUtilityTest, JsonConvertSuccess) {
@@ -481,6 +495,30 @@ TEST_F(ProtobufUtilityTest, JsonConvertCamelSnake) {
                        .string_value());
 }
 
+// Test the jsonConvertValue happy path. Failure modes are covered by the jsonConvert tests.
+TEST_F(ProtobufUtilityTest, JsonConvertValueSuccess) {
+  {
+    envoy::config::bootstrap::v2::Bootstrap source;
+    source.set_flags_path("foo");
+    ProtobufWkt::Value tmp;
+    envoy::config::bootstrap::v2::Bootstrap dest;
+    MessageUtil::jsonConvertValue(source, tmp);
+    TestUtility::jsonConvert(tmp, dest);
+    EXPECT_EQ("foo", dest.flags_path());
+  }
+
+  {
+    ProtobufWkt::StringValue source;
+    source.set_value("foo");
+    ProtobufWkt::Value dest;
+    MessageUtil::jsonConvertValue(source, dest);
+
+    ProtobufWkt::Value expected;
+    expected.set_string_value("foo");
+    EXPECT_THAT(dest, ProtoEq(expected));
+  }
+}
+
 TEST_F(ProtobufUtilityTest, YamlLoadFromStringFail) {
   envoy::config::bootstrap::v2::Bootstrap bootstrap;
   // Verify loadFromYaml can parse valid YAML string.
@@ -809,4 +847,10 @@ TEST(StatusCode, Strings) {
             MessageUtil::CodeEnumToString(static_cast<ProtobufUtil::error::Code>(last_code + 1)));
 }
 
+TEST(TypeUtilTest, TypeUrlToDescriptorFullName) {
+  EXPECT_EQ("envoy.config.filter.http.ip_tagging.v2.IPTagging",
+            TypeUtil::typeUrlToDescriptorFullName(
+                "type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging"));
+}
+
 } // namespace Envoy
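
As a happy-path counterpart to AnyConvertWrongType and UnpackToWrongType above, the sketch below unpacks into the matching type; it reuses only the `PackFrom`/`MessageUtil::unpackTo` calls already shown in this diff and would live in the same test fixture.

// Sketch: unpacking an Any into the type it actually contains succeeds.
ProtobufWkt::Duration source_duration;
source_duration.set_seconds(42);
ProtobufWkt::Any source_any;
source_any.PackFrom(source_duration);

ProtobufWkt::Duration dst;
MessageUtil::unpackTo(source_any, dst); // No throw: payload type matches the target.
EXPECT_EQ(42, dst.seconds());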
diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc
index 1cdb2ae411cd..3d5216e82459 100644
--- a/test/common/router/config_impl_test.cc
+++ b/test/common/router/config_impl_test.cc
@@ -2197,6 +2197,26 @@ TEST_F(RouterMatcherHashPolicyTest, HashIpv6DifferentAddresses) {
   }
 }
 
+TEST_F(RouterMatcherHashPolicyTest, HashQueryParameters) {
+  firstRouteHashPolicy()->mutable_query_parameter()->set_name("param");
+  {
+    Http::TestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET");
+    Router::RouteConstSharedPtr route = config().route(headers, 0);
+    EXPECT_FALSE(
+        route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie_nop_));
+  }
+  {
+    Http::TestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo?param=xyz", "GET");
+    Router::RouteConstSharedPtr route = config().route(headers, 0);
+    EXPECT_TRUE(route->routeEntry()->hashPolicy()->generateHash(nullptr, headers, add_cookie_nop_));
+  }
+  {
+    Http::TestHeaderMapImpl headers = genHeaders("www.lyft.com", "/bar?param=xyz", "GET");
+    Router::RouteConstSharedPtr route = config().route(headers, 0);
+    EXPECT_FALSE(route->routeEntry()->hashPolicy());
+  }
+}
+
 TEST_F(RouterMatcherHashPolicyTest, HashMultiple) {
   auto route = route_config_.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_route();
   route->add_hash_policy()->mutable_header()->set_header_name("foo_header");
@@ -5444,6 +5464,22 @@ name: foo
             denominator: 1
           overall_sampling:
             numerator: 3
+          custom_tags:
+          - tag: ltag
+            literal:
+              value: lvalue
+          - tag: etag
+            environment:
+              name: E_TAG
+          - tag: rtag
+            request_header:
+              name: X-Tag
+          - tag: mtag
+            metadata:
+              kind: { route: {} }
+              metadata_key:
+                key: com.bar.foo
+                path: [ { key: xx }, { key: yy } ]
         route: { cluster: ww2 }
   )EOF";
   BazFactory baz_factory;
@@ -5470,6 +5506,12 @@ name: foo
   EXPECT_EQ(1, route3->tracingConfig()->getRandomSampling().denominator());
   EXPECT_EQ(3, route3->tracingConfig()->getOverallSampling().numerator());
   EXPECT_EQ(0, route3->tracingConfig()->getOverallSampling().denominator());
+
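+  // All four configured custom tags should be materialized in the route's tracing config.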
+  std::vector<std::string> custom_tags{"ltag", "etag", "rtag", "mtag"};
+  const Tracing::CustomTagMap& map = route3->tracingConfig()->getCustomTags();
+  for (const std::string& custom_tag : custom_tags) {
+    EXPECT_NE(map.find(custom_tag), map.end());
+  }
 }
 
 // Test to check Prefix Rewrite for redirects
diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc
index 0dc7df00f3da..4e7be26a9a8e 100644
--- a/test/common/router/rds_impl_test.cc
+++ b/test/common/router/rds_impl_test.cc
@@ -28,6 +28,7 @@ using testing::_;
 using testing::Eq;
 using testing::InSequence;
 using testing::Invoke;
+using testing::Return;
 using testing::ReturnRef;
 
 namespace Envoy {
@@ -263,6 +264,50 @@ TEST_F(RdsImplTest, FailureSubscription) {
   rds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {});
 }
 
+class RdsRouteConfigSubscriptionTest : public RdsTestBase {
+public:
+  RdsRouteConfigSubscriptionTest() {
+    EXPECT_CALL(server_factory_context_.admin_.config_tracker_, add_("routes", _));
+    route_config_provider_manager_ =
+        std::make_unique<RouteConfigProviderManagerImpl>(server_factory_context_.admin_);
+  }
+
+  ~RdsRouteConfigSubscriptionTest() override {
+    server_factory_context_.thread_local_.shutdownThread();
+  }
+
+  std::unique_ptr<RouteConfigProviderManagerImpl> route_config_provider_manager_;
+};
+
+// Verifies that maybeCreateInitManager() creates a noop init manager if the main init manager is
+// already in the Initialized state.
+TEST_F(RdsRouteConfigSubscriptionTest, CreatesNoopInitManager) {
+  const std::string rds_config = R"EOF(
+  route_config_name: my_route
+  config_source:
+    api_config_source:
+      api_type: GRPC
+      grpc_services:
+        envoy_grpc:
+          cluster_name: xds_cluster
+)EOF";
+  EXPECT_CALL(outer_init_manager_, state()).WillOnce(Return(Init::Manager::State::Initialized));
+  const auto rds =
+      TestUtility::parseYaml<envoy::config::filter::network::http_connection_manager::v2::Rds>(
+          rds_config);
+  const auto route_config_provider = route_config_provider_manager_->createRdsRouteConfigProvider(
+      rds, mock_factory_context_, "stat_prefix", outer_init_manager_);
+  RdsRouteConfigSubscription& subscription =
+      (dynamic_cast<RdsRouteConfigProviderImpl*>(route_config_provider.get()))->subscription();
+
+  std::unique_ptr<Init::ManagerImpl> noop_init_manager;
+  std::unique_ptr<Cleanup> init_vhds;
+  subscription.maybeCreateInitManager("version_info", noop_init_manager, init_vhds);
+
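+  // With the outer init manager already in the Initialized state, the subscription is expected to
+  // create its own noop init manager and the init_vhds cleanup.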
+  EXPECT_TRUE(init_vhds);
+  EXPECT_TRUE(noop_init_manager);
+}
+
 class RouteConfigProviderManagerImplTest : public RdsTestBase {
 public:
   void setup() {
diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc
index 8676fc3dd6ae..e1e585630844 100644
--- a/test/common/router/router_test.cc
+++ b/test/common/router/router_test.cc
@@ -4237,12 +4237,12 @@ TEST_F(RouterTest, AutoHostRewriteEnabled) {
 
   Http::TestHeaderMapImpl incoming_headers;
   HttpTestUtility::addDefaultHeaders(incoming_headers);
-  incoming_headers.Host()->value(req_host);
+  incoming_headers.setHost(req_host);
 
   cm_.conn_pool_.host_->hostname_ = "scooby.doo";
   Http::TestHeaderMapImpl outgoing_headers;
   HttpTestUtility::addDefaultHeaders(outgoing_headers);
-  outgoing_headers.Host()->value(cm_.conn_pool_.host_->hostname_);
+  outgoing_headers.setHost(cm_.conn_pool_.host_->hostname_);
 
   EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
       .WillOnce(Return(std::chrono::milliseconds(0)));
@@ -4275,7 +4275,7 @@ TEST_F(RouterTest, AutoHostRewriteDisabled) {
 
   Http::TestHeaderMapImpl incoming_headers;
   HttpTestUtility::addDefaultHeaders(incoming_headers);
-  incoming_headers.Host()->value(req_host);
+  incoming_headers.setHost(req_host);
 
   cm_.conn_pool_.host_->hostname_ = "scooby.doo";
 
@@ -4548,6 +4548,7 @@ TEST_F(RouterTestChildSpan, BasicFlow) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.0")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.5:9211")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("200")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
@@ -4591,6 +4592,7 @@ TEST_F(RouterTestChildSpan, ResetFlow) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.0")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.5:9211")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("200")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("UR")));
@@ -4630,6 +4632,7 @@ TEST_F(RouterTestChildSpan, CancelFlow) {
   EXPECT_CALL(*child_span,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.0")));
+  EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.5:9211")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("0")));
   EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
@@ -4669,6 +4672,7 @@ TEST_F(RouterTestChildSpan, ResetRetryFlow) {
   EXPECT_CALL(*child_span_1,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.0")));
+  EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.5:9211")));
   EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("0")));
   EXPECT_CALL(*child_span_1, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("UR")));
@@ -4705,6 +4709,7 @@ TEST_F(RouterTestChildSpan, ResetRetryFlow) {
   EXPECT_CALL(*child_span_2,
               setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
   EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.0")));
+  EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("10.0.0.5:9211")));
   EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster")));
   EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("200")));
   EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc
index e53097f5d74b..d591e3b6d763 100644
--- a/test/common/runtime/runtime_impl_test.cc
+++ b/test/common/runtime/runtime_impl_test.cc
@@ -48,6 +48,23 @@ TEST(Random, SanityCheckOfUniquenessRandom) {
   EXPECT_EQ(num_of_results, results.size());
 }
 
+TEST(Random, SanityCheckOfStdLibRandom) {
+  Runtime::RandomGeneratorImpl random;
+
+  static const auto num_of_items = 100;
+  std::vector<uint64_t> v(num_of_items);
+  std::iota(v.begin(), v.end(), 0);
+
+  static const auto num_of_checks = 10000;
+  for (size_t i = 0; i < num_of_checks; ++i) {
+    const auto prev = v;
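+    // RandomGeneratorImpl models the UniformRandomBitGenerator concept, so std::shuffle can
+    // consume it directly as its source of randomness.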
+    std::shuffle(v.begin(), v.end(), random);
+    EXPECT_EQ(v.size(), prev.size());
+    EXPECT_NE(v, prev);
+    EXPECT_FALSE(std::is_sorted(v.begin(), v.end()));
+  }
+}
+
 TEST(UUID, CheckLengthOfUUID) {
   RandomGeneratorImpl random;
 
diff --git a/test/common/tracing/BUILD b/test/common/tracing/BUILD
index 24ef626a3194..0ccd24be2adc 100644
--- a/test/common/tracing/BUILD
+++ b/test/common/tracing/BUILD
@@ -23,11 +23,13 @@ envoy_cc_test(
         "//source/common/tracing:http_tracer_lib",
         "//test/mocks/http:http_mocks",
         "//test/mocks/local_info:local_info_mocks",
+        "//test/mocks/router:router_mocks",
         "//test/mocks/runtime:runtime_mocks",
         "//test/mocks/stats:stats_mocks",
         "//test/mocks/thread_local:thread_local_mocks",
         "//test/mocks/tracing:tracing_mocks",
         "//test/mocks/upstream:upstream_mocks",
+        "//test/test_common:environment_lib",
         "//test/test_common:utility_lib",
     ],
 )
diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc
index ebb7af2b65fa..9bcf20b00377 100644
--- a/test/common/tracing/http_tracer_impl_test.cc
+++ b/test/common/tracing/http_tracer_impl_test.cc
@@ -12,11 +12,13 @@
 
 #include "test/mocks/http/mocks.h"
 #include "test/mocks/local_info/mocks.h"
+#include "test/mocks/router/mocks.h"
 #include "test/mocks/runtime/mocks.h"
 #include "test/mocks/stats/mocks.h"
 #include "test/mocks/thread_local/mocks.h"
 #include "test/mocks/tracing/mocks.h"
 #include "test/mocks/upstream/mocks.h"
+#include "test/test_common/environment.h"
 #include "test/test_common/printers.h"
 #include "test/test_common/utility.h"
 
@@ -110,11 +112,36 @@ TEST(HttpTracerUtilityTest, IsTracing) {
   }
 }
 
-TEST(HttpConnManFinalizerImpl, OriginalAndLongPath) {
+class HttpConnManFinalizerImplTest : public testing::Test {
+protected:
+  struct CustomTagCase {
+    std::string custom_tag;
+    bool set;
+    std::string value;
+  };
+
+  void expectSetCustomTags(const std::vector<CustomTagCase>& cases) {
+    for (const CustomTagCase& cas : cases) {
+      envoy::type::tracing::v2::CustomTag custom_tag;
+      TestUtility::loadFromYaml(cas.custom_tag, custom_tag);
+      config.custom_tags_.emplace(custom_tag.tag(), HttpTracerUtility::createCustomTag(custom_tag));
+      if (cas.set) {
+        EXPECT_CALL(span, setTag(Eq(custom_tag.tag()), Eq(cas.value)));
+      } else {
+        EXPECT_CALL(span, setTag(Eq(custom_tag.tag()), _)).Times(0);
+      }
+    }
+  }
+
+  NiceMock<MockSpan> span;
+  NiceMock<MockConfig> config;
+  NiceMock<StreamInfo::MockStreamInfo> stream_info;
+};
+
+TEST_F(HttpConnManFinalizerImplTest, OriginalAndLongPath) {
   const std::string path(300, 'a');
   const std::string path_prefix = "http://";
   const std::string expected_path(256, 'a');
-  NiceMock<MockSpan> span;
 
   Http::TestHeaderMapImpl request_headers{{"x-request-id", "id"},
                                           {"x-envoy-original-path", path},
@@ -122,7 +149,6 @@ TEST(HttpConnManFinalizerImpl, OriginalAndLongPath) {
                                           {"x-forwarded-proto", "http"}};
   Http::TestHeaderMapImpl response_headers;
   Http::TestHeaderMapImpl response_trailers;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;
   EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
@@ -136,22 +162,19 @@ TEST(HttpConnManFinalizerImpl, OriginalAndLongPath) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq("GET")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/2")));
 
-  NiceMock<MockConfig> config;
   HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,
                                             &response_trailers, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, NoGeneratedId) {
+TEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) {
   const std::string path(300, 'a');
   const std::string path_prefix = "http://";
   const std::string expected_path(256, 'a');
-  NiceMock<MockSpan> span;
 
   Http::TestHeaderMapImpl request_headers{
       {"x-envoy-original-path", path}, {":method", "GET"}, {"x-forwarded-proto", "http"}};
   Http::TestHeaderMapImpl response_headers;
   Http::TestHeaderMapImpl response_trailers;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;
   EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
@@ -165,20 +188,17 @@ TEST(HttpConnManFinalizerImpl, NoGeneratedId) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq("GET")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/2")));
 
-  NiceMock<MockConfig> config;
   HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,
                                             &response_trailers, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, NullRequestHeaders) {
-  NiceMock<MockSpan> span;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
-
+TEST_F(HttpConnManFinalizerImplTest, NullRequestHeadersAndNullRouteEntry) {
   EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
   EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11));
   absl::optional<uint32_t> response_code;
   EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));
-  EXPECT_CALL(stream_info, upstreamHost()).WillOnce(Return(nullptr));
+  EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr));
+  EXPECT_CALL(stream_info, routeEntry()).WillRepeatedly(Return(nullptr));
 
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("0")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True)));
@@ -186,15 +206,34 @@ TEST(HttpConnManFinalizerImpl, NullRequestHeaders) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq("10")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
+  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), _)).Times(0);
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), _)).Times(0);
 
-  NiceMock<MockConfig> config;
+  expectSetCustomTags({{"{ tag: a, request_header: { name: X-Ax } }", false, ""},
+                       {R"EOF(
+tag: b
+metadata:
+  kind: { route: {} }
+  metadata_key: { key: m.rot, path: [ {key: not-found } ] }
+  default_value: _c)EOF",
+                        true, "_c"},
+                       {R"EOF(
+tag: c
+metadata:
+  kind: { cluster: {} }
+  metadata_key: { key: m.cluster, path: [ {key: not-found } ] })EOF",
+                        false, ""},
+                       {R"EOF(
+tag: d
+metadata:
+  kind: { host: {} }
+  metadata_key: { key: m.host, path: [ {key: not-found } ] })EOF",
+                        false, ""}});
+
   HttpTracerUtility::finalizeDownstreamSpan(span, nullptr, nullptr, nullptr, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, StreamInfoLogs) {
-  NiceMock<MockSpan> span;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
+TEST_F(HttpConnManFinalizerImplTest, StreamInfoLogs) {
   stream_info.host_->cluster_.name_ = "my_upstream_cluster";
 
   EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
@@ -225,14 +264,11 @@ TEST(HttpConnManFinalizerImpl, StreamInfoLogs) {
   EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().FirstDownstreamTxByteSent));
   EXPECT_CALL(span, log(log_timestamp, Tracing::Logs::get().LastDownstreamTxByteSent));
 
-  NiceMock<MockConfig> config;
   EXPECT_CALL(config, verbose).WillOnce(Return(true));
   HttpTracerUtility::finalizeDownstreamSpan(span, nullptr, nullptr, nullptr, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, UpstreamClusterTagSet) {
-  NiceMock<MockSpan> span;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
+TEST_F(HttpConnManFinalizerImplTest, UpstreamClusterTagSet) {
   stream_info.host_->cluster_.name_ = "my_upstream_cluster";
 
   EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
@@ -249,25 +285,20 @@ TEST(HttpConnManFinalizerImpl, UpstreamClusterTagSet) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq("10")));
 
-  NiceMock<MockConfig> config;
   HttpTracerUtility::finalizeDownstreamSpan(span, nullptr, nullptr, nullptr, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, SpanOptionalHeaders) {
-  NiceMock<MockSpan> span;
-
+TEST_F(HttpConnManFinalizerImplTest, SpanOptionalHeaders) {
   Http::TestHeaderMapImpl request_headers{{"x-request-id", "id"},
                                           {":path", "/test"},
                                           {":method", "GET"},
                                           {"x-forwarded-proto", "https"}};
   Http::TestHeaderMapImpl response_headers;
   Http::TestHeaderMapImpl response_trailers;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http10;
   EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
   EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));
-  const std::string service_node = "i-453";
 
   // Check that span is populated correctly.
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXRequestId), Eq("id")));
@@ -288,22 +319,139 @@ TEST(HttpConnManFinalizerImpl, SpanOptionalHeaders) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseSize), Eq("100")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("-")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
+  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), _)).Times(0);
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), _)).Times(0);
 
-  NiceMock<MockConfig> config;
   HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,
                                             &response_trailers, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, SpanPopulatedFailureResponse) {
-  NiceMock<MockSpan> span;
+TEST_F(HttpConnManFinalizerImplTest, SpanCustomTags) {
+  TestEnvironment::setEnvVar("E_CC", "c", 1);
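+  // The environment-based custom tag cases ("cc-*") below read this variable.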
+
+  Http::TestHeaderMapImpl request_headers{{"x-request-id", "id"},
+                                          {":path", "/test"},
+                                          {":method", "GET"},
+                                          {"x-forwarded-proto", "https"},
+                                          {"x-bb", "b"}};
+
+  ProtobufWkt::Struct fake_struct;
+  std::string yaml = R"EOF(
+ree:
+  foo: bar
+  nuu: 1
+  boo: true
+  poo: false
+  stt: { some: thing }
+  lii: [ something ]
+  emp: "")EOF";
+  TestUtility::loadFromYaml(yaml, fake_struct);
+  (*stream_info.metadata_.mutable_filter_metadata())["m.req"].MergeFrom(fake_struct);
+  NiceMock<Router::MockRouteEntry> route_entry;
+  EXPECT_CALL(stream_info, routeEntry()).WillRepeatedly(Return(&route_entry));
+  (*route_entry.metadata_.mutable_filter_metadata())["m.rot"].MergeFrom(fake_struct);
+  std::shared_ptr<envoy::api::v2::core::Metadata> host_metadata =
+      std::make_shared<envoy::api::v2::core::Metadata>();
+  (*host_metadata->mutable_filter_metadata())["m.host"].MergeFrom(fake_struct);
+  (*stream_info.host_->cluster_.metadata_.mutable_filter_metadata())["m.cluster"].MergeFrom(
+      fake_struct);
+
+  absl::optional<Http::Protocol> protocol = Http::Protocol::Http10;
+  EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
+  EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));
+  absl::optional<uint32_t> response_code;
+  EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code));
+  EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(100));
+  EXPECT_CALL(*stream_info.host_, metadata()).WillRepeatedly(Return(host_metadata));
+
+  EXPECT_CALL(config, customTags());
+  EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber());
+
+  expectSetCustomTags(
+      {{"{ tag: aa, literal: { value: a } }", true, "a"},
+       {"{ tag: bb-1, request_header: { name: X-Bb, default_value: _b } }", true, "b"},
+       {"{ tag: bb-2, request_header: { name: X-Bb-Not-Found, default_value: b2 } }", true, "b2"},
+       {"{ tag: bb-3, request_header: { name: X-Bb-Not-Found } }", false, ""},
+       {"{ tag: cc-1, environment: { name: E_CC } }", true, "c"},
+       {"{ tag: cc-1-a, environment: { name: E_CC, default_value: _c } }", true, "c"},
+       {"{ tag: cc-2, environment: { name: E_CC_NOT_FOUND, default_value: c2 } }", true, "c2"},
+       {"{ tag: cc-3, environment: { name: E_CC_NOT_FOUND} }", false, ""},
+       {R"EOF(
+tag: dd-1,
+metadata:
+  kind: { request: {} }
+  metadata_key: { key: m.req, path: [ { key: ree }, { key: foo } ] })EOF",
+        true, "bar"},
+       {R"EOF(
+tag: dd-2,
+metadata:
+  kind: { request: {} }
+  metadata_key: { key: m.req, path: [ { key: not-found } ] }
+  default_value: d2)EOF",
+        true, "d2"},
+       {R"EOF(
+tag: dd-3,
+metadata:
+  kind: { request: {} }
+  metadata_key: { key: m.req, path: [ { key: not-found } ] })EOF",
+        false, ""},
+       {R"EOF(
+tag: dd-4,
+metadata:
+  kind: { request: {} }
+  metadata_key: { key: m.req, path: [ { key: ree }, { key: nuu } ] }
+  default_value: _d)EOF",
+        true, "1"},
+       {R"EOF(
+tag: dd-5,
+metadata:
+  kind: { route: {} }
+  metadata_key: { key: m.rot, path: [ { key: ree }, { key: boo } ] })EOF",
+        true, "true"},
+       {R"EOF(
+tag: dd-6,
+metadata:
+  kind: { route: {} }
+  metadata_key: { key: m.rot, path: [ { key: ree }, { key: poo } ] })EOF",
+        true, "false"},
+       {R"EOF(
+tag: dd-7,
+metadata:
+  kind: { cluster: {} }
+  metadata_key: { key: m.cluster, path: [ { key: ree }, { key: emp } ] }
+  default_value: _d)EOF",
+        true, ""},
+       {R"EOF(
+tag: dd-8,
+metadata:
+  kind: { cluster: {} }
+  metadata_key: { key: m.cluster, path: [ { key: ree }, { key: lii } ] }
+  default_value: _d)EOF",
+        true, "[\"something\"]"},
+       {R"EOF(
+tag: dd-9,
+metadata:
+  kind: { host: {} }
+  metadata_key: { key: m.host, path: [ { key: ree }, { key: stt } ] })EOF",
+        true, R"({"some":"thing"})"},
+       {R"EOF(
+tag: dd-10,
+metadata:
+  kind: { host: {} }
+  metadata_key: { key: m.host, path: [ { key: not-found } ] })EOF",
+        false, ""}});
+
+  HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, nullptr, nullptr, stream_info,
+                                            config);
+}
+
+TEST_F(HttpConnManFinalizerImplTest, SpanPopulatedFailureResponse) {
   Http::TestHeaderMapImpl request_headers{{"x-request-id", "id"},
                                           {":path", "/test"},
                                           {":method", "GET"},
                                           {"x-forwarded-proto", "http"}};
   Http::TestHeaderMapImpl response_headers;
   Http::TestHeaderMapImpl response_trailers;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
 
   request_headers.setHost("api");
   request_headers.setUserAgent("agent");
@@ -313,7 +461,6 @@ TEST(HttpConnManFinalizerImpl, SpanPopulatedFailureResponse) {
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http10;
   EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol));
   EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10));
-  const std::string service_node = "i-453";
 
   // Check that span is populated correctly.
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXRequestId), Eq("id")));
@@ -325,17 +472,6 @@ TEST(HttpConnManFinalizerImpl, SpanPopulatedFailureResponse) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().RequestSize), Eq("10")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXClientTraceId), Eq("client_trace_id")));
 
-  // Check that span has tags from custom headers.
-  request_headers.addCopy(Http::LowerCaseString("aa"), "a");
-  request_headers.addCopy(Http::LowerCaseString("bb"), "b");
-  request_headers.addCopy(Http::LowerCaseString("cc"), "c");
-  MockConfig config;
-  config.headers_.push_back(Http::LowerCaseString("aa"));
-  config.headers_.push_back(Http::LowerCaseString("cc"));
-  config.headers_.push_back(Http::LowerCaseString("ee"));
-  EXPECT_CALL(span, setTag(Eq("aa"), Eq("a")));
-  EXPECT_CALL(span, setTag(Eq("cc"), Eq("c")));
-  EXPECT_CALL(config, requestHeadersForTags());
   EXPECT_CALL(config, verbose).WillOnce(Return(false));
   EXPECT_CALL(config, maxPathTagLength).WillOnce(Return(256));
 
@@ -351,15 +487,15 @@ TEST(HttpConnManFinalizerImpl, SpanPopulatedFailureResponse) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseSize), Eq("100")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().ResponseFlags), Eq("UT")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy)));
+  EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), _)).Times(0);
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), _)).Times(0);
 
   HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,
                                             &response_trailers, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, GrpcOkStatus) {
+TEST_F(HttpConnManFinalizerImplTest, GrpcOkStatus) {
   const std::string path_prefix = "http://";
-  NiceMock<MockSpan> span;
 
   Http::TestHeaderMapImpl request_headers{{":method", "POST"},
                                           {":scheme", "http"},
@@ -371,7 +507,6 @@ TEST(HttpConnManFinalizerImpl, GrpcOkStatus) {
   Http::TestHeaderMapImpl response_headers{{":status", "200"},
                                            {"content-type", "application/grpc"}};
   Http::TestHeaderMapImpl response_trailers{{"grpc-status", "0"}, {"grpc-message", ""}};
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;
   absl::optional<uint32_t> response_code(200);
@@ -387,14 +522,12 @@ TEST(HttpConnManFinalizerImpl, GrpcOkStatus) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("0")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcMessage), Eq("")));
 
-  NiceMock<MockConfig> config;
   HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,
                                             &response_trailers, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, GrpcErrorTag) {
+TEST_F(HttpConnManFinalizerImplTest, GrpcErrorTag) {
   const std::string path_prefix = "http://";
-  NiceMock<MockSpan> span;
 
   Http::TestHeaderMapImpl request_headers{{":method", "POST"},
                                           {":scheme", "http"},
@@ -407,7 +540,6 @@ TEST(HttpConnManFinalizerImpl, GrpcErrorTag) {
                                            {"content-type", "application/grpc"}};
   Http::TestHeaderMapImpl response_trailers{{"grpc-status", "7"},
                                             {"grpc-message", "permission denied"}};
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;
   absl::optional<uint32_t> response_code(200);
@@ -424,14 +556,12 @@ TEST(HttpConnManFinalizerImpl, GrpcErrorTag) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("7")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcMessage), Eq("permission denied")));
 
-  NiceMock<MockConfig> config;
   HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,
                                             &response_trailers, stream_info, config);
 }
 
-TEST(HttpConnManFinalizerImpl, GrpcTrailersOnly) {
+TEST_F(HttpConnManFinalizerImplTest, GrpcTrailersOnly) {
   const std::string path_prefix = "http://";
-  NiceMock<MockSpan> span;
 
   Http::TestHeaderMapImpl request_headers{{":method", "POST"},
                                           {":scheme", "http"},
@@ -445,7 +575,6 @@ TEST(HttpConnManFinalizerImpl, GrpcTrailersOnly) {
                                            {"grpc-status", "7"},
                                            {"grpc-message", "permission denied"}};
   Http::TestHeaderMapImpl response_trailers;
-  NiceMock<StreamInfo::MockStreamInfo> stream_info;
 
   absl::optional<Http::Protocol> protocol = Http::Protocol::Http2;
   absl::optional<uint32_t> response_code(200);
@@ -462,7 +591,6 @@ TEST(HttpConnManFinalizerImpl, GrpcTrailersOnly) {
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("7")));
   EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GrpcMessage), Eq("permission denied")));
 
-  NiceMock<MockConfig> config;
   HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers,
                                             &response_trailers, stream_info, config);
 }
diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD
index 0e07bab57399..aea0dc82191a 100644
--- a/test/common/upstream/BUILD
+++ b/test/common/upstream/BUILD
@@ -32,42 +32,7 @@ envoy_cc_test(
         "abseil_optional",
     ],
     deps = [
-        ":utility_lib",
-        "//include/envoy/stats:stats_interface",
-        "//include/envoy/upstream:upstream_interface",
-        "//source/common/api:api_lib",
-        "//source/common/config:utility_lib",
-        "//source/common/event:dispatcher_lib",
-        "//source/common/network:socket_option_lib",
-        "//source/common/network:transport_socket_options_lib",
-        "//source/common/network:utility_lib",
-        "//source/common/protobuf:utility_lib",
-        "//source/common/stats:stats_lib",
-        "//source/common/upstream:cluster_factory_lib",
-        "//source/common/upstream:cluster_manager_lib",
-        "//source/common/upstream:subset_lb_lib",
-        "//source/extensions/transport_sockets/raw_buffer:config",
-        "//source/extensions/transport_sockets/tls:context_lib",
-        "//test/integration/clusters:custom_static_cluster",
-        "//test/mocks/access_log:access_log_mocks",
-        "//test/mocks/api:api_mocks",
-        "//test/mocks/http:http_mocks",
-        "//test/mocks/local_info:local_info_mocks",
-        "//test/mocks/network:network_mocks",
-        "//test/mocks/protobuf:protobuf_mocks",
-        "//test/mocks/runtime:runtime_mocks",
-        "//test/mocks/secret:secret_mocks",
-        "//test/mocks/server:server_mocks",
-        "//test/mocks/tcp:tcp_mocks",
-        "//test/mocks/thread_local:thread_local_mocks",
-        "//test/mocks/upstream:upstream_mocks",
-        "//test/test_common:registry_lib",
-        "//test/test_common:simulated_time_system_lib",
-        "//test/test_common:threadsafe_singleton_injector_lib",
-        "//test/test_common:utility_lib",
-        "@envoy_api//envoy/admin/v2alpha:pkg_cc_proto",
-        "@envoy_api//envoy/api/v2:pkg_cc_proto",
-        "@envoy_api//envoy/api/v2/core:pkg_cc_proto",
+        ":test_cluster_manager",
     ],
 )
 
@@ -437,6 +402,49 @@ envoy_cc_test_library(
     ],
 )
 
+envoy_cc_test_library(
+    name = "test_cluster_manager",
+    hdrs = ["test_cluster_manager.h"],
+    deps = [
+        ":utility_lib",
+        "//include/envoy/stats:stats_interface",
+        "//include/envoy/upstream:upstream_interface",
+        "//source/common/api:api_lib",
+        "//source/common/config:utility_lib",
+        "//source/common/event:dispatcher_lib",
+        "//source/common/network:socket_option_lib",
+        "//source/common/network:transport_socket_options_lib",
+        "//source/common/network:utility_lib",
+        "//source/common/protobuf:utility_lib",
+        "//source/common/stats:stats_lib",
+        "//source/common/upstream:cluster_factory_lib",
+        "//source/common/upstream:cluster_manager_lib",
+        "//source/common/upstream:subset_lb_lib",
+        "//source/extensions/transport_sockets/raw_buffer:config",
+        "//source/extensions/transport_sockets/tls:context_lib",
+        "//test/integration/clusters:custom_static_cluster",
+        "//test/mocks/access_log:access_log_mocks",
+        "//test/mocks/api:api_mocks",
+        "//test/mocks/http:http_mocks",
+        "//test/mocks/local_info:local_info_mocks",
+        "//test/mocks/network:network_mocks",
+        "//test/mocks/protobuf:protobuf_mocks",
+        "//test/mocks/runtime:runtime_mocks",
+        "//test/mocks/secret:secret_mocks",
+        "//test/mocks/server:server_mocks",
+        "//test/mocks/tcp:tcp_mocks",
+        "//test/mocks/thread_local:thread_local_mocks",
+        "//test/mocks/upstream:upstream_mocks",
+        "//test/test_common:registry_lib",
+        "//test/test_common:simulated_time_system_lib",
+        "//test/test_common:threadsafe_singleton_injector_lib",
+        "//test/test_common:utility_lib",
+        "@envoy_api//envoy/admin/v2alpha:pkg_cc_proto",
+        "@envoy_api//envoy/api/v2:pkg_cc_proto",
+        "@envoy_api//envoy/api/v2/core:pkg_cc_proto",
+    ],
+)
+
 envoy_cc_test(
     name = "cluster_factory_impl_test",
     srcs = ["cluster_factory_impl_test.cc"],
diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc
index 7798cfc69c8d..d94b4d1a2562 100644
--- a/test/common/upstream/cluster_manager_impl_test.cc
+++ b/test/common/upstream/cluster_manager_impl_test.cc
@@ -1,50 +1,4 @@
-#include <memory>
-#include <string>
-
-#include "envoy/admin/v2alpha/config_dump.pb.h"
-#include "envoy/api/v2/cds.pb.h"
-#include "envoy/api/v2/core/base.pb.h"
-#include "envoy/network/listen_socket.h"
-#include "envoy/upstream/upstream.h"
-
-#include "common/api/api_impl.h"
-#include "common/config/utility.h"
-#include "common/http/context_impl.h"
-#include "common/network/socket_option_factory.h"
-#include "common/network/socket_option_impl.h"
-#include "common/network/transport_socket_options_impl.h"
-#include "common/network/utility.h"
-#include "common/protobuf/utility.h"
-#include "common/singleton/manager_impl.h"
-#include "common/upstream/cluster_factory_impl.h"
-#include "common/upstream/cluster_manager_impl.h"
-#include "common/upstream/subset_lb.h"
-
-#include "extensions/transport_sockets/tls/context_manager_impl.h"
-
-#include "test/common/upstream/utility.h"
-#include "test/integration/clusters/custom_static_cluster.h"
-#include "test/mocks/access_log/mocks.h"
-#include "test/mocks/api/mocks.h"
-#include "test/mocks/http/mocks.h"
-#include "test/mocks/local_info/mocks.h"
-#include "test/mocks/network/mocks.h"
-#include "test/mocks/protobuf/mocks.h"
-#include "test/mocks/runtime/mocks.h"
-#include "test/mocks/secret/mocks.h"
-#include "test/mocks/server/mocks.h"
-#include "test/mocks/tcp/mocks.h"
-#include "test/mocks/thread_local/mocks.h"
-#include "test/mocks/upstream/mocks.h"
-#include "test/test_common/registry.h"
-#include "test/test_common/simulated_time_system.h"
-#include "test/test_common/threadsafe_singleton_injector.h"
-#include "test/test_common/utility.h"
-
-#include "absl/strings/str_join.h"
-#include "absl/strings/str_replace.h"
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
+#include "test/common/upstream/test_cluster_manager.h"
 
 using testing::_;
 using testing::Eq;
@@ -60,161 +14,6 @@ namespace Envoy {
 namespace Upstream {
 namespace {
 
-// The tests in this file are split between testing with real clusters and some with mock clusters.
-// By default we setup to call the real cluster creation function. Individual tests can override
-// the expectations when needed.
-class TestClusterManagerFactory : public ClusterManagerFactory {
-public:
-  TestClusterManagerFactory() : api_(Api::createApiForTest(stats_)) {
-    ON_CALL(*this, clusterFromProto_(_, _, _, _))
-        .WillByDefault(Invoke(
-            [&](const envoy::api::v2::Cluster& cluster, ClusterManager& cm,
-                Outlier::EventLoggerSharedPtr outlier_event_logger,
-                bool added_via_api) -> std::pair<ClusterSharedPtr, ThreadAwareLoadBalancer*> {
-              auto result = ClusterFactoryImplBase::create(
-                  cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_,
-                  dispatcher_, log_manager_, local_info_, admin_, singleton_manager_,
-                  outlier_event_logger, added_via_api, validation_visitor_, *api_);
-              // Convert from load balancer unique_ptr -> raw pointer -> unique_ptr.
-              return std::make_pair(result.first, result.second.release());
-            }));
-  }
-
-  Http::ConnectionPool::InstancePtr allocateConnPool(
-      Event::Dispatcher&, HostConstSharedPtr host, ResourcePriority, Http::Protocol,
-      const Network::ConnectionSocket::OptionsSharedPtr& options,
-      const Network::TransportSocketOptionsSharedPtr& transport_socket_options) override {
-    return Http::ConnectionPool::InstancePtr{
-        allocateConnPool_(host, options, transport_socket_options)};
-  }
-
-  Tcp::ConnectionPool::InstancePtr
-  allocateTcpConnPool(Event::Dispatcher&, HostConstSharedPtr host, ResourcePriority,
-                      const Network::ConnectionSocket::OptionsSharedPtr&,
-                      Network::TransportSocketOptionsSharedPtr) override {
-    return Tcp::ConnectionPool::InstancePtr{allocateTcpConnPool_(host)};
-  }
-
-  std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>
-  clusterFromProto(const envoy::api::v2::Cluster& cluster, ClusterManager& cm,
-                   Outlier::EventLoggerSharedPtr outlier_event_logger,
-                   bool added_via_api) override {
-    auto result = clusterFromProto_(cluster, cm, outlier_event_logger, added_via_api);
-    return std::make_pair(result.first, ThreadAwareLoadBalancerPtr(result.second));
-  }
-
-  CdsApiPtr createCds(const envoy::api::v2::core::ConfigSource&, ClusterManager&) override {
-    return CdsApiPtr{createCds_()};
-  }
-
-  ClusterManagerPtr
-  clusterManagerFromProto(const envoy::config::bootstrap::v2::Bootstrap& bootstrap) override {
-    return ClusterManagerPtr{clusterManagerFromProto_(bootstrap)};
-  }
-
-  Secret::SecretManager& secretManager() override { return secret_manager_; }
-
-  MOCK_METHOD1(clusterManagerFromProto_,
-               ClusterManager*(const envoy::config::bootstrap::v2::Bootstrap& bootstrap));
-  MOCK_METHOD3(allocateConnPool_,
-               Http::ConnectionPool::Instance*(HostConstSharedPtr host,
-                                               Network::ConnectionSocket::OptionsSharedPtr,
-                                               Network::TransportSocketOptionsSharedPtr));
-  MOCK_METHOD1(allocateTcpConnPool_, Tcp::ConnectionPool::Instance*(HostConstSharedPtr host));
-  MOCK_METHOD4(clusterFromProto_,
-               std::pair<ClusterSharedPtr, ThreadAwareLoadBalancer*>(
-                   const envoy::api::v2::Cluster& cluster, ClusterManager& cm,
-                   Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api));
-  MOCK_METHOD0(createCds_, CdsApi*());
-
-  Stats::IsolatedStoreImpl stats_;
-  NiceMock<ThreadLocal::MockInstance> tls_;
-  std::shared_ptr<NiceMock<Network::MockDnsResolver>> dns_resolver_{
-      new NiceMock<Network::MockDnsResolver>};
-  NiceMock<Runtime::MockLoader> runtime_;
-  NiceMock<Runtime::MockRandomGenerator> random_;
-  NiceMock<Event::MockDispatcher> dispatcher_;
-  Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_{
-      dispatcher_.timeSource()};
-  NiceMock<LocalInfo::MockLocalInfo> local_info_;
-  NiceMock<Server::MockAdmin> admin_;
-  NiceMock<Secret::MockSecretManager> secret_manager_;
-  NiceMock<AccessLog::MockAccessLogManager> log_manager_;
-  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};
-  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;
-  Api::ApiPtr api_;
-};
-
-// Helper to intercept calls to postThreadLocalClusterUpdate.
-class MockLocalClusterUpdate {
-public:
-  MOCK_METHOD3(post, void(uint32_t priority, const HostVector& hosts_added,
-                          const HostVector& hosts_removed));
-};
-
-class MockLocalHostsRemoved {
-public:
-  MOCK_METHOD1(post, void(const HostVector&));
-};
-
-// A test version of ClusterManagerImpl that provides a way to get a non-const handle to the
-// clusters, which is necessary in order to call updateHosts on the priority set.
-class TestClusterManagerImpl : public ClusterManagerImpl {
-public:
-  using ClusterManagerImpl::ClusterManagerImpl;
-
-  TestClusterManagerImpl(const envoy::config::bootstrap::v2::Bootstrap& bootstrap,
-                         ClusterManagerFactory& factory, Stats::Store& stats,
-                         ThreadLocal::Instance& tls, Runtime::Loader& runtime,
-                         Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info,
-                         AccessLog::AccessLogManager& log_manager,
-                         Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin,
-                         ProtobufMessage::ValidationContext& validation_context, Api::Api& api,
-                         Http::Context& http_context)
-      : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, random, local_info, log_manager,
-                           main_thread_dispatcher, admin, validation_context, api, http_context) {}
-
-  std::map<std::string, std::reference_wrapper<Cluster>> activeClusters() {
-    std::map<std::string, std::reference_wrapper<Cluster>> clusters;
-    for (auto& cluster : active_clusters_) {
-      clusters.emplace(cluster.first, *cluster.second->cluster_);
-    }
-    return clusters;
-  }
-};
-
-// Override postThreadLocalClusterUpdate so we can test that merged updates calls
-// it with the right values at the right times.
-class MockedUpdatedClusterManagerImpl : public TestClusterManagerImpl {
-public:
-  MockedUpdatedClusterManagerImpl(
-      const envoy::config::bootstrap::v2::Bootstrap& bootstrap, ClusterManagerFactory& factory,
-      Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime,
-      Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info,
-      AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher,
-      Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api,
-      MockLocalClusterUpdate& local_cluster_update, MockLocalHostsRemoved& local_hosts_removed,
-      Http::Context& http_context)
-      : TestClusterManagerImpl(bootstrap, factory, stats, tls, runtime, random, local_info,
-                               log_manager, main_thread_dispatcher, admin, validation_context, api,
-                               http_context),
-        local_cluster_update_(local_cluster_update), local_hosts_removed_(local_hosts_removed) {}
-
-protected:
-  void postThreadLocalClusterUpdate(const Cluster&, uint32_t priority,
-                                    const HostVector& hosts_added,
-                                    const HostVector& hosts_removed) override {
-    local_cluster_update_.post(priority, hosts_added, hosts_removed);
-  }
-
-  void postThreadLocalDrainConnections(const Cluster&, const HostVector& hosts_removed) override {
-    local_hosts_removed_.post(hosts_removed);
-  }
-
-  MockLocalClusterUpdate& local_cluster_update_;
-  MockLocalHostsRemoved& local_hosts_removed_;
-};
-
 envoy::config::bootstrap::v2::Bootstrap parseBootstrapFromV2Yaml(const std::string& yaml) {
   envoy::config::bootstrap::v2::Bootstrap bootstrap;
   TestUtility::loadFromYaml(yaml, bootstrap);
@@ -560,7 +359,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction) {
 
 TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction2) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -614,7 +413,7 @@ class ClusterManagerSubsetInitializationTest
 // Test initialization of subset load balancer with every possible load balancer policy.
 TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) {
   const std::string yamlPattern = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -648,7 +447,6 @@ TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization)
     // This custom cluster type is registered by linking test/integration/custom/static_cluster.cc.
     cluster_type = "cluster_type: { name: envoy.clusters.custom_static_with_lb }";
   }
-
   const std::string yaml = fmt::format(yamlPattern, cluster_type, policy_name);
 
   if (GetParam() == envoy::api::v2::Cluster_LbPolicy_ORIGINAL_DST_LB ||
@@ -681,7 +479,7 @@ INSTANTIATE_TEST_SUITE_P(ClusterManagerSubsetInitializationTest,
 
 TEST_F(ClusterManagerImplTest, SubsetLoadBalancerOriginalDstRestriction) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -700,7 +498,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerOriginalDstRestriction) {
 
 TEST_F(ClusterManagerImplTest, SubsetLoadBalancerClusterProvidedLbRestriction) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -719,7 +517,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerClusterProvidedLbRestriction) {
 
 TEST_F(ClusterManagerImplTest, SubsetLoadBalancerLocalityAware) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -752,7 +550,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerLocalityAware) {
 
 TEST_F(ClusterManagerImplTest, RingHashLoadBalancerInitialization) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: redis_cluster
     lb_policy: RING_HASH
@@ -889,7 +687,7 @@ TEST_F(ClusterManagerImplThreadAwareLbTest, MaglevLoadBalancerThreadAwareUpdate)
 
 TEST_F(ClusterManagerImplTest, TcpHealthChecker) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -926,7 +724,7 @@ TEST_F(ClusterManagerImplTest, TcpHealthChecker) {
 
 TEST_F(ClusterManagerImplTest, HttpHealthChecker) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -986,7 +784,7 @@ TEST_F(ClusterManagerImplTest, UnknownCluster) {
  */
 TEST_F(ClusterManagerImplTest, VerifyBufferLimits) {
   const std::string yaml = R"EOF(
-static_resources:
+ static_resources:
   clusters:
   - name: cluster_1
     connect_timeout: 0.250s
@@ -1124,8 +922,8 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) {
   cds->initialized_callback_();
   EXPECT_CALL(*cds, versionInfo()).WillOnce(Return("version3"));
   checkConfigDump(R"EOF(
-version_info: version3
-static_clusters:
+ version_info: version3
+ static_clusters:
   - cluster:
       name: "cds_cluster"
       type: "STATIC"
@@ -1159,7 +957,7 @@ version_info: version3
     last_updated:
       seconds: 1234567891
       nanos: 234000000
-dynamic_active_clusters:
+ dynamic_active_clusters:
   - version_info: "version1"
     cluster:
       name: "cluster3"
@@ -1196,7 +994,7 @@ version_info: version3
     last_updated:
       seconds: 1234567891
       nanos: 234000000
-dynamic_warming_clusters:
+ dynamic_warming_clusters:
 )EOF");
 
   EXPECT_CALL(*cluster3, initialize(_));
@@ -1712,8 +1510,8 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionPoolsOnHealthFailure) {
   EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get()));
 }
 
-// Test that we close all TCP connection pool connections when there is a host health failure, when
-// configured to do so.
+// Test that we close all TCP connection pool connections when there is a host health failure,
+// when configured to do so.
 TEST_F(ClusterManagerImplTest, CloseTcpConnectionsOnHealthFailure) {
   const std::string yaml = R"EOF(
   static_resources:
@@ -1865,7 +1663,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) {
   )EOF";
 
   std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
-  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_)).WillOnce(Return(dns_resolver));
+  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));
 
   Network::DnsResolver::ResolveCb dns_callback;
   Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);
@@ -2005,7 +1803,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) {
   )EOF";
 
   std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
-  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_)).WillOnce(Return(dns_resolver));
+  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));
 
   Network::DnsResolver::ResolveCb dns_callback;
   Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);
@@ -2203,6 +2001,86 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) {
   factory_.tls_.shutdownThread();
 }
 
+// Test that the default DNS resolver with TCP lookups is used when no custom DNS resolvers are
+// configured per cluster and `use_tcp_for_dns_lookups` is set in the bootstrap config.
+TEST_F(ClusterManagerImplTest, UseTcpInDefaultDnsResolver) {
+  const std::string yaml = R"EOF(
+  use_tcp_for_dns_lookups: true
+  static_resources:
+    clusters:
+    - name: cluster_1
+      connect_timeout: 0.250s
+      type: STRICT_DNS
+  )EOF";
+
+  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
+  // Since no custom resolvers are specified in the config, this method should not be called;
+  // the resolver from the context should be used instead.
+  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).Times(0);
+
+  Network::DnsResolver::ResolveCb dns_callback;
+  Network::MockActiveDnsQuery active_dns_query;
+  EXPECT_CALL(*dns_resolver, resolve(_, _, _))
+      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));
+  create(parseBootstrapFromV2Yaml(yaml));
+  factory_.tls_.shutdownThread();
+}
+
+// Test that a custom DNS resolver with UDP lookups is used when a custom resolver is configured
+// per cluster and `use_tcp_for_dns_lookups` is not specified.
+TEST_F(ClusterManagerImplTest, UseUdpWithCustomDnsResolver) {
+  const std::string yaml = R"EOF(
+  static_resources:
+    clusters:
+    - name: cluster_1
+      connect_timeout: 0.250s
+      type: STRICT_DNS
+      dns_resolvers:
+      - socket_address:
+          address: 1.2.3.4
+          port_value: 80
+  )EOF";
+
+  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
+  // The `false` argument means UDP is used for DNS lookups.
+  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, false)).WillOnce(Return(dns_resolver));
+
+  Network::DnsResolver::ResolveCb dns_callback;
+  Network::MockActiveDnsQuery active_dns_query;
+  EXPECT_CALL(*dns_resolver, resolve(_, _, _))
+      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));
+  create(parseBootstrapFromV2Yaml(yaml));
+  factory_.tls_.shutdownThread();
+}
+
+// Test that a custom DNS resolver with TCP lookups is used when a custom resolver is configured
+// per cluster and `use_tcp_for_dns_lookups` is enabled for that cluster.
+TEST_F(ClusterManagerImplTest, UseTcpWithCustomDnsResolver) {
+  const std::string yaml = R"EOF(
+  static_resources:
+    clusters:
+    - name: cluster_1
+      use_tcp_for_dns_lookups: true
+      connect_timeout: 0.250s
+      type: STRICT_DNS
+      dns_resolvers:
+      - socket_address:
+          address: 1.2.3.4
+          port_value: 80
+  )EOF";
+
+  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
+  // The `true` argument means TCP is used for DNS lookups.
+  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, true)).WillOnce(Return(dns_resolver));
+
+  Network::DnsResolver::ResolveCb dns_callback;
+  Network::MockActiveDnsQuery active_dns_query;
+  EXPECT_CALL(*dns_resolver, resolve(_, _, _))
+      .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query)));
+  create(parseBootstrapFromV2Yaml(yaml));
+  factory_.tls_.shutdownThread();
+}
+
 // This is a regression test for a use-after-free in
 // ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(), where a removal at one
 // priority from the ConnPoolsContainer would delete the ConnPoolsContainer mid-iteration over the
@@ -2230,7 +2108,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) {
   )EOF";
 
   std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
-  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_)).WillOnce(Return(dns_resolver));
+  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));
 
   Network::DnsResolver::ResolveCb dns_callback;
   Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);
@@ -2309,7 +2187,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) {
   )EOF";
 
   std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
-  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_)).WillOnce(Return(dns_resolver));
+  EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver));
 
   Network::DnsResolver::ResolveCb dns_callback;
   Event::MockTimer* dns_timer_ = new NiceMock<Event::MockTimer>(&factory_.dispatcher_);
diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc
index 4bc13e843a6e..edc2eb53d9d2 100644
--- a/test/common/upstream/load_balancer_benchmark.cc
+++ b/test/common/upstream/load_balancer_benchmark.cc
@@ -61,6 +61,19 @@ class RoundRobinTester : public BaseTester {
   std::unique_ptr<RoundRobinLoadBalancer> lb_;
 };
 
+class LeastRequestTester : public BaseTester {
+public:
+  LeastRequestTester(uint64_t num_hosts, uint32_t choice_count) : BaseTester(num_hosts) {
+    envoy::api::v2::Cluster::LeastRequestLbConfig lr_lb_config;
+    lr_lb_config.mutable_choice_count()->set_value(choice_count);
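+    // choice_count is the number of randomly sampled healthy hosts from which the host with the
+    // fewest active requests is chosen on each pick.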
+    lb_ =
+        std::make_unique<LeastRequestLoadBalancer>(priority_set_, &local_priority_set_, stats_,
+                                                   runtime_, random_, common_config_, lr_lb_config);
+  }
+
+  std::unique_ptr<LeastRequestLoadBalancer> lb_;
+};
+
 void BM_RoundRobinLoadBalancerBuild(benchmark::State& state) {
   for (auto _ : state) {
     state.PauseTiming();
@@ -212,6 +225,36 @@ void computeHitStats(benchmark::State& state,
   state.counters["relative_stddev_hits"] = (stddev / mean);
 }
 
+void BM_LeastRequestLoadBalancerChooseHost(benchmark::State& state) {
+  for (auto _ : state) {
+    state.PauseTiming();
+    const uint64_t num_hosts = state.range(0);
+    const uint64_t choice_count = state.range(1);
+    const uint64_t keys_to_simulate = state.range(2);
+    LeastRequestTester tester(num_hosts, choice_count);
+    std::unordered_map<std::string, uint64_t> hit_counter;
+    TestLoadBalancerContext context;
+    state.ResumeTiming();
+
+    for (uint64_t i = 0; i < keys_to_simulate; ++i) {
+      hit_counter[tester.lb_->chooseHost(&context)->address()->asString()] += 1;
+    }
+
+    // Do not time computation of mean, standard deviation, and relative standard deviation.
+    state.PauseTiming();
+    computeHitStats(state, hit_counter);
+    state.ResumeTiming();
+  }
+}
+BENCHMARK(BM_LeastRequestLoadBalancerChooseHost)
+    ->Args({100, 1, 1000000})
+    ->Args({100, 2, 1000000})
+    ->Args({100, 3, 1000000})
+    ->Args({100, 10, 1000000})
+    ->Args({100, 50, 1000000})
+    ->Args({100, 100, 1000000})
+    ->Unit(benchmark::kMillisecond);
+
 void BM_RingHashLoadBalancerChooseHost(benchmark::State& state) {
   for (auto _ : state) {
     // Do not time the creation of the ring.
diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h
new file mode 100644
index 000000000000..e59f43c1175d
--- /dev/null
+++ b/test/common/upstream/test_cluster_manager.h
@@ -0,0 +1,214 @@
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "envoy/admin/v2alpha/config_dump.pb.h"
+#include "envoy/api/v2/cds.pb.h"
+#include "envoy/api/v2/core/base.pb.h"
+#include "envoy/network/listen_socket.h"
+#include "envoy/upstream/upstream.h"
+
+#include "common/api/api_impl.h"
+#include "common/config/utility.h"
+#include "common/http/context_impl.h"
+#include "common/network/socket_option_factory.h"
+#include "common/network/socket_option_impl.h"
+#include "common/network/transport_socket_options_impl.h"
+#include "common/network/utility.h"
+#include "common/protobuf/utility.h"
+#include "common/singleton/manager_impl.h"
+#include "common/upstream/cluster_factory_impl.h"
+#include "common/upstream/cluster_manager_impl.h"
+#include "common/upstream/subset_lb.h"
+
+#include "extensions/transport_sockets/tls/context_manager_impl.h"
+
+#include "test/common/upstream/utility.h"
+#include "test/integration/clusters/custom_static_cluster.h"
+#include "test/mocks/access_log/mocks.h"
+#include "test/mocks/api/mocks.h"
+#include "test/mocks/http/mocks.h"
+#include "test/mocks/local_info/mocks.h"
+#include "test/mocks/network/mocks.h"
+#include "test/mocks/protobuf/mocks.h"
+#include "test/mocks/runtime/mocks.h"
+#include "test/mocks/secret/mocks.h"
+#include "test/mocks/server/mocks.h"
+#include "test/mocks/tcp/mocks.h"
+#include "test/mocks/thread_local/mocks.h"
+#include "test/mocks/upstream/mocks.h"
+#include "test/test_common/registry.h"
+#include "test/test_common/simulated_time_system.h"
+#include "test/test_common/threadsafe_singleton_injector.h"
+#include "test/test_common/utility.h"
+
+#include "absl/strings/str_join.h"
+#include "absl/strings/str_replace.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using testing::_;
+using testing::Invoke;
+using testing::NiceMock;
+
+namespace Envoy {
+namespace Upstream {
+
+// The tests using these helpers are split between tests that use real clusters and tests that use
+// mock clusters. By default we set up calls to the real cluster creation function; individual
+// tests can override the expectations when needed.
+class TestClusterManagerFactory : public ClusterManagerFactory {
+public:
+  TestClusterManagerFactory() : api_(Api::createApiForTest(stats_)) {
+    ON_CALL(*this, clusterFromProto_(_, _, _, _))
+        .WillByDefault(Invoke(
+            [&](const envoy::api::v2::Cluster& cluster, ClusterManager& cm,
+                Outlier::EventLoggerSharedPtr outlier_event_logger,
+                bool added_via_api) -> std::pair<ClusterSharedPtr, ThreadAwareLoadBalancer*> {
+              auto result = ClusterFactoryImplBase::create(
+                  cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_,
+                  dispatcher_, log_manager_, local_info_, admin_, singleton_manager_,
+                  outlier_event_logger, added_via_api, validation_visitor_, *api_);
+              // Convert from load balancer unique_ptr -> raw pointer -> unique_ptr.
+              return std::make_pair(result.first, result.second.release());
+            }));
+  }
+
+  Http::ConnectionPool::InstancePtr allocateConnPool(
+      Event::Dispatcher&, HostConstSharedPtr host, ResourcePriority, Http::Protocol,
+      const Network::ConnectionSocket::OptionsSharedPtr& options,
+      const Network::TransportSocketOptionsSharedPtr& transport_socket_options) override {
+    return Http::ConnectionPool::InstancePtr{
+        allocateConnPool_(host, options, transport_socket_options)};
+  }
+
+  Tcp::ConnectionPool::InstancePtr
+  allocateTcpConnPool(Event::Dispatcher&, HostConstSharedPtr host, ResourcePriority,
+                      const Network::ConnectionSocket::OptionsSharedPtr&,
+                      Network::TransportSocketOptionsSharedPtr) override {
+    return Tcp::ConnectionPool::InstancePtr{allocateTcpConnPool_(host)};
+  }
+
+  std::pair<ClusterSharedPtr, ThreadAwareLoadBalancerPtr>
+  clusterFromProto(const envoy::api::v2::Cluster& cluster, ClusterManager& cm,
+                   Outlier::EventLoggerSharedPtr outlier_event_logger,
+                   bool added_via_api) override {
+    auto result = clusterFromProto_(cluster, cm, outlier_event_logger, added_via_api);
+    return std::make_pair(result.first, ThreadAwareLoadBalancerPtr(result.second));
+  }
+
+  CdsApiPtr createCds(const envoy::api::v2::core::ConfigSource&, ClusterManager&) override {
+    return CdsApiPtr{createCds_()};
+  }
+
+  ClusterManagerPtr
+  clusterManagerFromProto(const envoy::config::bootstrap::v2::Bootstrap& bootstrap) override {
+    return ClusterManagerPtr{clusterManagerFromProto_(bootstrap)};
+  }
+
+  Secret::SecretManager& secretManager() override { return secret_manager_; }
+
+  MOCK_METHOD1(clusterManagerFromProto_,
+               ClusterManager*(const envoy::config::bootstrap::v2::Bootstrap& bootstrap));
+  MOCK_METHOD3(allocateConnPool_,
+               Http::ConnectionPool::Instance*(HostConstSharedPtr host,
+                                               Network::ConnectionSocket::OptionsSharedPtr,
+                                               Network::TransportSocketOptionsSharedPtr));
+  MOCK_METHOD1(allocateTcpConnPool_, Tcp::ConnectionPool::Instance*(HostConstSharedPtr host));
+  MOCK_METHOD4(clusterFromProto_,
+               std::pair<ClusterSharedPtr, ThreadAwareLoadBalancer*>(
+                   const envoy::api::v2::Cluster& cluster, ClusterManager& cm,
+                   Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api));
+  MOCK_METHOD0(createCds_, CdsApi*());
+
+  Stats::IsolatedStoreImpl stats_;
+  NiceMock<ThreadLocal::MockInstance> tls_;
+  std::shared_ptr<NiceMock<Network::MockDnsResolver>> dns_resolver_{
+      new NiceMock<Network::MockDnsResolver>};
+  NiceMock<Runtime::MockLoader> runtime_;
+  NiceMock<Runtime::MockRandomGenerator> random_;
+  NiceMock<Event::MockDispatcher> dispatcher_;
+  Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_{
+      dispatcher_.timeSource()};
+  NiceMock<LocalInfo::MockLocalInfo> local_info_;
+  NiceMock<Server::MockAdmin> admin_;
+  NiceMock<Secret::MockSecretManager> secret_manager_;
+  NiceMock<AccessLog::MockAccessLogManager> log_manager_;
+  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};
+  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;
+  Api::ApiPtr api_;
+};
+
+// Helper to intercept calls to postThreadLocalClusterUpdate.
+class MockLocalClusterUpdate {
+public:
+  MOCK_METHOD3(post, void(uint32_t priority, const HostVector& hosts_added,
+                          const HostVector& hosts_removed));
+};
+
+class MockLocalHostsRemoved {
+public:
+  MOCK_METHOD1(post, void(const HostVector&));
+};
+
+// A test version of ClusterManagerImpl that provides a way to get a non-const handle to the
+// clusters, which is necessary in order to call updateHosts on the priority set.
+class TestClusterManagerImpl : public ClusterManagerImpl {
+public:
+  using ClusterManagerImpl::ClusterManagerImpl;
+
+  TestClusterManagerImpl(const envoy::config::bootstrap::v2::Bootstrap& bootstrap,
+                         ClusterManagerFactory& factory, Stats::Store& stats,
+                         ThreadLocal::Instance& tls, Runtime::Loader& runtime,
+                         Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info,
+                         AccessLog::AccessLogManager& log_manager,
+                         Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin,
+                         ProtobufMessage::ValidationContext& validation_context, Api::Api& api,
+                         Http::Context& http_context)
+      : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, random, local_info, log_manager,
+                           main_thread_dispatcher, admin, validation_context, api, http_context) {}
+
+  std::map<std::string, std::reference_wrapper<Cluster>> activeClusters() {
+    std::map<std::string, std::reference_wrapper<Cluster>> clusters;
+    for (auto& cluster : active_clusters_) {
+      clusters.emplace(cluster.first, *cluster.second->cluster_);
+    }
+    return clusters;
+  }
+};
+
+// Override postThreadLocalClusterUpdate so we can test that merged updates call
+// it with the right values at the right times.
+class MockedUpdatedClusterManagerImpl : public TestClusterManagerImpl {
+public:
+  MockedUpdatedClusterManagerImpl(
+      const envoy::config::bootstrap::v2::Bootstrap& bootstrap, ClusterManagerFactory& factory,
+      Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime,
+      Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info,
+      AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher,
+      Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api,
+      MockLocalClusterUpdate& local_cluster_update, MockLocalHostsRemoved& local_hosts_removed,
+      Http::Context& http_context)
+      : TestClusterManagerImpl(bootstrap, factory, stats, tls, runtime, random, local_info,
+                               log_manager, main_thread_dispatcher, admin, validation_context, api,
+                               http_context),
+        local_cluster_update_(local_cluster_update), local_hosts_removed_(local_hosts_removed) {}
+
+protected:
+  void postThreadLocalClusterUpdate(const Cluster&, uint32_t priority,
+                                    const HostVector& hosts_added,
+                                    const HostVector& hosts_removed) override {
+    local_cluster_update_.post(priority, hosts_added, hosts_removed);
+  }
+
+  void postThreadLocalDrainConnections(const Cluster&, const HostVector& hosts_removed) override {
+    local_hosts_removed_.post(hosts_removed);
+  }
+
+  MockLocalClusterUpdate& local_cluster_update_;
+  MockLocalHostsRemoved& local_hosts_removed_;
+};
+
+} // namespace Upstream
+} // namespace Envoy
\ No newline at end of file
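A condensed sketch of how a test might wire these helpers together, mirroring the constructor signature above and the usage in cluster_update_test.cc later in this patch (bootstrap, log_manager, admin, validation_context, api and http_context are assumed to be objects the test fixture already provides):

    NiceMock<Upstream::TestClusterManagerFactory> factory;
    auto cluster_manager = std::make_unique<Upstream::TestClusterManagerImpl>(
        bootstrap, factory, factory.stats_, factory.tls_, factory.runtime_, factory.random_,
        factory.local_info_, log_manager, factory.dispatcher_, admin, validation_context, *api,
        http_context);
    // activeClusters() hands back non-const Cluster references, so the test can push host updates
    // directly into a cluster's priority set.
    Upstream::Cluster& primary = cluster_manager->activeClusters().find("primary")->second;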
diff --git a/test/config/integration/server.yaml b/test/config/integration/server.yaml
index a96f37db2094..3813fa137bdf 100644
--- a/test/config/integration/server.yaml
+++ b/test/config/integration/server.yaml
@@ -4,6 +4,7 @@ static_resources:
       socket_address:
         address: {{ ip_loopback_address }}
         port_value: 0
+    reuse_port: {{ reuse_port }}
     filter_chains:
     - filters:
       - name: envoy.http_connection_manager
diff --git a/test/config/utility.cc b/test/config/utility.cc
index 17804e22de83..6260f6e85e50 100644
--- a/test/config/utility.cc
+++ b/test/config/utility.cc
@@ -116,6 +116,41 @@ const std::string ConfigHelper::HTTP_PROXY_CONFIG = BASE_CONFIG + R"EOF(
             name: route_config_0
 )EOF";
 
+// TODO(danzh): For better compatibility with the HTTP integration test framework,
+// combine this with HTTP_PROXY_CONFIG and use config modifiers to specify
+// QUIC-specific settings.
+const std::string ConfigHelper::QUIC_HTTP_PROXY_CONFIG = BASE_UDP_LISTENER_CONFIG + R"EOF(
+    filter_chains:
+      transport_socket:
+        name: envoy.transport_sockets.quic
+      filters:
+        name: envoy.http_connection_manager
+        typed_config:
+          "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+          stat_prefix: config_test
+          http_filters:
+            name: envoy.router
+          codec_type: HTTP3
+          access_log:
+            name: envoy.file_access_log
+            filter:
+              not_health_check_filter:  {}
+            config:
+              path: /dev/null
+          route_config:
+            virtual_hosts:
+              name: integration
+              routes:
+                route:
+                  cluster: cluster_0
+                match:
+                  prefix: "/"
+              domains: "*"
+            name: route_config_0
+    udp_listener_config:
+      udp_listener_name: "quiche_quic_listener"
+)EOF";
+
 const std::string ConfigHelper::DEFAULT_BUFFER_FILTER =
     R"EOF(
 name: envoy.buffer
@@ -603,7 +638,7 @@ void ConfigHelper::addSslConfig(const ServerSslOptions& options) {
   filter_chain->mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context);
 }
 
-bool ConfigHelper::setAccessLog(const std::string& filename) {
+bool ConfigHelper::setAccessLog(const std::string& filename, absl::string_view format) {
   if (getFilterFromListener("envoy.http_connection_manager") == nullptr) {
     return false;
   }
@@ -611,6 +646,9 @@ bool ConfigHelper::setAccessLog(const std::string& filename) {
   envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager hcm_config;
   loadHttpConnectionManager(hcm_config);
   envoy::config::accesslog::v2::FileAccessLog access_log_config;
+  if (!format.empty()) {
+    access_log_config.set_format(std::string(format));
+  }
   access_log_config.set_path(filename);
   hcm_config.mutable_access_log(0)->mutable_typed_config()->PackFrom(access_log_config);
   storeHttpConnectionManager(hcm_config);
diff --git a/test/config/utility.h b/test/config/utility.h
index 8359fa0edeb1..2150e797b554 100644
--- a/test/config/utility.h
+++ b/test/config/utility.h
@@ -78,7 +78,8 @@ class ConfigHelper {
   static const std::string TCP_PROXY_CONFIG;
   // A basic configuration for L7 proxying.
   static const std::string HTTP_PROXY_CONFIG;
-
+  // A basic configuration for L7 proxying with QUIC transport.
+  static const std::string QUIC_HTTP_PROXY_CONFIG;
   // A string for a basic buffer filter, which can be used with addFilter()
   static const std::string DEFAULT_BUFFER_FILTER;
   // A string for a small buffer filter, which can be used with addFilter()
@@ -139,7 +140,7 @@ class ConfigHelper {
 
   // Set the HTTP access log for the first HCM (if present) to a given file. The default is
   // /dev/null. An empty format string leaves the default access log format in place.
-  bool setAccessLog(const std::string& filename);
+  bool setAccessLog(const std::string& filename, absl::string_view format = "");
 
   // Renames the first listener to the name specified.
   void renameListener(const std::string& name);
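The new optional format argument lets an integration test install a custom access-log format while still directing output to a file; an empty string keeps the default format. A hypothetical call site (config_helper_ and access_log_path_ are assumed fixture members, and the format string is only an example built from standard access-log command operators):

    config_helper_.setAccessLog(access_log_path_, "%RESPONSE_CODE% %PROTOCOL%\n");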
diff --git a/test/extensions/access_loggers/file/config_test.cc b/test/extensions/access_loggers/file/config_test.cc
index a44146beb3b5..181c8665c498 100644
--- a/test/extensions/access_loggers/file/config_test.cc
+++ b/test/extensions/access_loggers/file/config_test.cc
@@ -108,6 +108,31 @@ TEST(FileAccessLogConfigTest, FileAccessLogJsonTest) {
                             "Didn't find a registered implementation for name: 'INVALID'");
 }
 
+TEST(FileAccessLogConfigTest, FileAccessLogTypedJsonTest) {
+  envoy::config::filter::accesslog::v2::AccessLog config;
+
+  envoy::config::accesslog::v2::FileAccessLog fal_config;
+  fal_config.set_path("/dev/null");
+
+  ProtobufWkt::Value string_value;
+  string_value.set_string_value("%PROTOCOL%");
+
+  auto json_format = fal_config.mutable_typed_json_format();
+  (*json_format->mutable_fields())["protocol"] = string_value;
+
+  EXPECT_EQ(fal_config.access_log_format_case(),
+            envoy::config::accesslog::v2::FileAccessLog::kTypedJsonFormat);
+  TestUtility::jsonConvert(fal_config, *config.mutable_config());
+
+  config.set_name(AccessLogNames::get().File);
+
+  NiceMock<Server::Configuration::MockFactoryContext> context;
+  AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context);
+
+  EXPECT_NE(nullptr, log);
+  EXPECT_NE(nullptr, dynamic_cast<FileAccessLog*>(log.get()));
+}
+
 TEST(FileAccessLogConfigTest, FileAccessLogJsonWithBoolValueTest) {
   {
     // Make sure we fail if you set a bool value in the format dictionary
diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD
new file mode 100644
index 000000000000..670023ef8cdd
--- /dev/null
+++ b/test/extensions/clusters/aggregate/BUILD
@@ -0,0 +1,67 @@
+licenses(["notice"])  # Apache 2
+
+load(
+    "//bazel:envoy_build_system.bzl",
+    "envoy_package",
+)
+load(
+    "//test/extensions:extensions_build_system.bzl",
+    "envoy_extension_cc_test",
+)
+
+envoy_package()
+
+envoy_extension_cc_test(
+    name = "cluster_test",
+    srcs = ["cluster_test.cc"],
+    extension_name = "envoy.clusters.aggregate",
+    deps = [
+        "//source/extensions/clusters/aggregate:cluster",
+        "//source/extensions/transport_sockets/raw_buffer:config",
+        "//test/common/upstream:utility_lib",
+        "//test/mocks/protobuf:protobuf_mocks",
+        "//test/mocks/server:server_mocks",
+        "//test/mocks/ssl:ssl_mocks",
+        "//test/test_common:environment_lib",
+    ],
+)
+
+envoy_extension_cc_test(
+    name = "cluster_update_test",
+    srcs = ["cluster_update_test.cc"],
+    extension_name = "envoy.clusters.aggregate",
+    deps = [
+        "//source/common/upstream:cluster_factory_lib",
+        "//source/common/upstream:cluster_manager_lib",
+        "//source/extensions/clusters/aggregate:cluster",
+        "//source/extensions/transport_sockets/raw_buffer:config",
+        "//test/common/upstream:test_cluster_manager",
+        "//test/common/upstream:utility_lib",
+        "//test/mocks/protobuf:protobuf_mocks",
+        "//test/mocks/server:server_mocks",
+        "//test/mocks/ssl:ssl_mocks",
+        "//test/test_common:environment_lib",
+        "//test/test_common:simulated_time_system_lib",
+    ],
+)
+
+envoy_extension_cc_test(
+    name = "cluster_integration_test",
+    srcs = ["cluster_integration_test.cc"],
+    extension_name = "envoy.clusters.aggregate",
+    deps = [
+        "//source/common/config:protobuf_link_hacks",
+        "//source/common/config:resources_lib",
+        "//source/common/protobuf:utility_lib",
+        "//source/extensions/clusters/aggregate:cluster",
+        "//source/extensions/filters/network/tcp_proxy:config",
+        "//test/common/grpc:grpc_client_integration_lib",
+        "//test/integration:http_integration_lib",
+        "//test/integration:integration_lib",
+        "//test/mocks/runtime:runtime_mocks",
+        "//test/mocks/server:server_mocks",
+        "//test/test_common:network_utility_lib",
+        "//test/test_common:utility_lib",
+        "@envoy_api//envoy/api/v2:pkg_cc_proto",
+    ],
+)
diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc
new file mode 100644
index 000000000000..1eb82136ff55
--- /dev/null
+++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc
@@ -0,0 +1,249 @@
+#include "envoy/api/v2/cds.pb.h"
+#include "envoy/api/v2/discovery.pb.h"
+#include "envoy/grpc/status.h"
+#include "envoy/stats/scope.h"
+
+#include "common/config/protobuf_link_hacks.h"
+#include "common/config/resources.h"
+#include "common/protobuf/protobuf.h"
+#include "common/protobuf/utility.h"
+
+#include "test/common/grpc/grpc_client_integration.h"
+#include "test/integration/http_integration.h"
+#include "test/integration/utility.h"
+#include "test/mocks/server/mocks.h"
+#include "test/test_common/network_utility.h"
+#include "test/test_common/simulated_time_system.h"
+#include "test/test_common/utility.h"
+
+#include "absl/synchronization/notification.h"
+#include "gtest/gtest.h"
+
+using testing::AssertionResult;
+
+namespace Envoy {
+namespace {
+
+const char FirstClusterName[] = "cluster_1";
+const char SecondClusterName[] = "cluster_2";
+// Index in fake_upstreams_
+const int FirstUpstreamIndex = 2;
+const int SecondUpstreamIndex = 3;
+
+const std::string& config() {
+  CONSTRUCT_ON_FIRST_USE(std::string, R"EOF(
+admin:
+  access_log_path: /dev/null
+  address:
+    socket_address:
+      address: 127.0.0.1
+      port_value: 0
+dynamic_resources:
+  cds_config:
+    api_config_source:
+      api_type: GRPC
+      grpc_services:
+        envoy_grpc:
+          cluster_name: my_cds_cluster
+      set_node_on_first_message_only: false
+static_resources:
+  clusters:
+  - name: my_cds_cluster
+    http2_protocol_options: {}
+    hosts:
+      socket_address:
+        address: 127.0.0.1
+        port_value: 0
+  - name: aggregate_cluster
+    connect_timeout: 0.25s
+    lb_policy: CLUSTER_PROVIDED
+    cluster_type:
+      name: envoy.clusters.aggregate
+      typed_config:
+        "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig
+        clusters:
+        - cluster_1
+        - cluster_2
+  listeners:
+  - name: http
+    address:
+      socket_address:
+        address: 127.0.0.1
+        port_value: 0
+    filter_chains:
+      filters:
+        name: envoy.http_connection_manager
+        typed_config:
+          "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+          stat_prefix: config_test
+          http_filters:
+            name: envoy.router
+          codec_type: HTTP1
+          route_config:
+            name: route_config_0
+            validate_clusters: false
+            virtual_hosts:
+              name: integration
+              routes:
+              - route:
+                  cluster: cluster_1
+                match:
+                  prefix: "/cluster1"
+              - route:
+                  cluster: cluster_2
+                match:
+                  prefix: "/cluster2"
+              - route:
+                  cluster: aggregate_cluster
+                match:
+                  prefix: "/aggregatecluster"
+              domains: "*"
+)EOF");
+}
+
+class AggregateIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,
+                                 public HttpIntegrationTest {
+public:
+  AggregateIntegrationTest()
+      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), config()) {
+    use_lds_ = false;
+  }
+
+  void TearDown() override {
+    cleanUpXdsConnection();
+    test_server_.reset();
+    fake_upstreams_.clear();
+  }
+
+  void initialize() override {
+    use_lds_ = false;
+    setUpstreamCount(2);                                  // the CDS cluster
+    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC, which uses HTTP2.
+
+    defer_listener_finalization_ = true;
+    HttpIntegrationTest::initialize();
+
+    fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP1, version_,
+                                                  timeSystem(), enable_half_close_));
+    fake_upstreams_[FirstUpstreamIndex]->set_allow_unexpected_disconnects(false);
+    fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP1, version_,
+                                                  timeSystem(), enable_half_close_));
+    fake_upstreams_[SecondUpstreamIndex]->set_allow_unexpected_disconnects(false);
+    cluster1_ = ConfigHelper::buildCluster(
+        FirstClusterName, fake_upstreams_[FirstUpstreamIndex]->localAddress()->ip()->port(),
+        Network::Test::getLoopbackAddressString(GetParam()));
+    cluster2_ = ConfigHelper::buildCluster(
+        SecondClusterName, fake_upstreams_[SecondUpstreamIndex]->localAddress()->ip()->port(),
+        Network::Test::getLoopbackAddressString(GetParam()));
+
+    // Let Envoy establish its connection to the CDS server.
+    acceptXdsConnection();
+
+    // Do the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_1.
+    EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true));
+    sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {cluster1_},
+                                                   {cluster1_}, {}, "55");
+
+    test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
+
+    // Wait for our statically specified listener to become ready, and register its port in the
+    // test framework's downstream listener port map.
+    test_server_->waitUntilListenersReady();
+    registerTestServerPorts({"http"});
+  }
+
+  void acceptXdsConnection() {
+    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.
+        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);
+    RELEASE_ASSERT(result, result.message());
+    result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);
+    RELEASE_ASSERT(result, result.message());
+    xds_stream_->startGrpcStream();
+    fake_upstreams_[0]->set_allow_unexpected_disconnects(true);
+  }
+
+  envoy::api::v2::Cluster cluster1_;
+  envoy::api::v2::Cluster cluster2_;
+};
+
+INSTANTIATE_TEST_SUITE_P(IpVersions, AggregateIntegrationTest,
+                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));
+
+TEST_P(AggregateIntegrationTest, ClusterUpDownUp) {
+  // Calls our initialize(), which includes establishing a listener, route, and cluster.
+  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster");
+
+  // Tell Envoy that cluster_1 is gone.
+  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {}));
+  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {}, {},
+                                                 {FirstClusterName}, "42");
+  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
+  // the DiscoveryResponse that says cluster_1 is gone.
+  test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1);
+
+  // Now that cluster_1 is gone, the listener (with its routing to cluster_1) should 503.
+  BufferingStreamDecoderPtr response =
+      IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/aggregatecluster", "",
+                                         downstream_protocol_, version_, "foo.com");
+  ASSERT_TRUE(response->complete());
+  EXPECT_EQ("503", response->headers().Status()->value().getStringView());
+
+  cleanupUpstreamAndDownstream();
+  codec_client_->waitForDisconnect();
+
+  // Tell Envoy that cluster_1 is back.
+  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {}));
+  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {cluster1_},
+                                                 {cluster1_}, {}, "413");
+
+  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
+  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster");
+
+  cleanupUpstreamAndDownstream();
+}
+
+// Tests adding a cluster, adding another, then removing the first.
+TEST_P(AggregateIntegrationTest, TwoClusters) {
+  // Calls our initialize(), which includes establishing a listener, route, and cluster.
+  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster");
+
+  cleanupUpstreamAndDownstream();
+  codec_client_->waitForDisconnect();
+
+  // Tell Envoy that cluster_2 is here.
+  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {}));
+  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster,
+                                                 {cluster1_, cluster2_}, {cluster2_}, {}, "42");
+  // The '4' counts the fake CDS server cluster and the aggregate cluster, plus cluster_1 and cluster_2.
+  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 4);
+
+  // A request for aggregate cluster should be fine.
+  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster");
+  cleanupUpstreamAndDownstream();
+  codec_client_->waitForDisconnect();
+
+  // Tell Envoy that cluster_1 is gone.
+  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {}));
+  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster, {cluster2_}, {},
+                                                 {FirstClusterName}, "42");
+  // We can continue the test once we're sure that Envoy's ClusterManager has made use of
+  // the DiscoveryResponse that says cluster_1 is gone.
+  test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1);
+
+  testRouterHeaderOnlyRequestAndResponse(nullptr, SecondUpstreamIndex, "/aggregatecluster");
+  cleanupUpstreamAndDownstream();
+  codec_client_->waitForDisconnect();
+
+  // Tell Envoy that cluster_1 is back.
+  EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {}));
+  sendDiscoveryResponse<envoy::api::v2::Cluster>(Config::TypeUrl::get().Cluster,
+                                                 {cluster1_, cluster2_}, {cluster1_}, {}, "413");
+
+  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 4);
+  testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster");
+
+  cleanupUpstreamAndDownstream();
+}
+
+} // namespace
+} // namespace Envoy
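For reference, the aggregate_cluster entry in the bootstrap YAML above corresponds roughly to the following programmatic construction (a sketch only, using the usual protoc-generated accessors; it is not taken from this patch):

    envoy::api::v2::Cluster cluster;
    cluster.set_name("aggregate_cluster");
    cluster.mutable_connect_timeout()->set_nanos(250000000); // 0.25s
    cluster.set_lb_policy(envoy::api::v2::Cluster::CLUSTER_PROVIDED);
    auto* custom_type = cluster.mutable_cluster_type();
    custom_type->set_name("envoy.clusters.aggregate");
    envoy::config::cluster::aggregate::v2alpha::ClusterConfig aggregate_config;
    aggregate_config.add_clusters("cluster_1");
    aggregate_config.add_clusters("cluster_2");
    custom_type->mutable_typed_config()->PackFrom(aggregate_config);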
diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc
new file mode 100644
index 000000000000..bf0c2d5dd6b7
--- /dev/null
+++ b/test/extensions/clusters/aggregate/cluster_test.cc
@@ -0,0 +1,301 @@
+#include "common/singleton/manager_impl.h"
+
+#include "extensions/clusters/aggregate/cluster.h"
+
+#include "test/common/upstream/utility.h"
+#include "test/mocks/protobuf/mocks.h"
+#include "test/mocks/server/mocks.h"
+#include "test/mocks/ssl/mocks.h"
+#include "test/test_common/environment.h"
+
+using testing::Eq;
+using testing::Return;
+using testing::ReturnRef;
+
+namespace Envoy {
+namespace Extensions {
+namespace Clusters {
+namespace Aggregate {
+
+class AggregateClusterTest : public testing::Test {
+public:
+  AggregateClusterTest() : stats_(Upstream::ClusterInfoImpl::generateStats(stats_store_)) {}
+
+  Upstream::HostVector setupHostSet(int healthy_hosts, int degraded_hosts, int unhealthy_hosts) {
+    Upstream::HostVector hosts;
+    for (int i = 0; i < healthy_hosts; ++i) {
+      hosts.emplace_back(Upstream::makeTestHost(info_, "tcp://127.0.0.1:80"));
+    }
+
+    for (int i = 0; i < degraded_hosts; ++i) {
+      Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.2:80");
+      host->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);
+      hosts.emplace_back(host);
+    }
+
+    for (int i = 0; i < unhealthy_hosts; ++i) {
+      Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.3:80");
+      host->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);
+      hosts.emplace_back(host);
+    }
+
+    return hosts;
+  }
+
+  void setupPrimary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) {
+    auto hosts = setupHostSet(healthy_hosts, degraded_hosts, unhealthy_hosts);
+    primary_ps_.updateHosts(
+        priority,
+        Upstream::HostSetImpl::partitionHosts(std::make_shared<Upstream::HostVector>(hosts),
+                                              Upstream::HostsPerLocalityImpl::empty()),
+        nullptr, hosts, {}, 100);
+    cluster_->refresh();
+  }
+
+  void setupSecondary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) {
+    auto hosts = setupHostSet(healthy_hosts, degraded_hosts, unhealthy_hosts);
+    secondary_ps_.updateHosts(
+        priority,
+        Upstream::HostSetImpl::partitionHosts(std::make_shared<Upstream::HostVector>(hosts),
+                                              Upstream::HostsPerLocalityImpl::empty()),
+        nullptr, hosts, {}, 100);
+    cluster_->refresh();
+  }
+
+  void setupPrioritySet() {
+    setupPrimary(0, 1, 1, 1);
+    setupPrimary(1, 2, 2, 2);
+    setupSecondary(0, 2, 2, 2);
+    setupSecondary(1, 1, 1, 1);
+  }
+
+  void initialize(const std::string& yaml_config) {
+    envoy::api::v2::Cluster cluster_config = Upstream::parseClusterFromV2Yaml(yaml_config);
+    envoy::config::cluster::aggregate::v2alpha::ClusterConfig config;
+    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().name(),
+                                           cluster_config.cluster_type().typed_config(),
+                                           ProtobufWkt::Struct::default_instance(),
+                                           ProtobufMessage::getStrictValidationVisitor(), config);
+    Stats::ScopePtr scope = stats_store_.createScope("cluster.name.");
+    Server::Configuration::TransportSocketFactoryContextImpl factory_context(
+        admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_store_,
+        singleton_manager_, tls_, validation_visitor_, *api_);
+
+    cluster_ = std::make_shared<Cluster>(cluster_config, config, cm_, runtime_, random_,
+                                         factory_context, std::move(scope), tls_, false);
+
+    thread_aware_lb_ = std::make_unique<AggregateThreadAwareLoadBalancer>(*cluster_);
+    lb_factory_ = thread_aware_lb_->factory();
+    lb_ = lb_factory_->create();
+
+    EXPECT_CALL(cm_, get(Eq("aggregate_cluster"))).WillRepeatedly(Return(&aggregate_cluster_));
+    EXPECT_CALL(cm_, get(Eq("primary"))).WillRepeatedly(Return(&primary_));
+    EXPECT_CALL(cm_, get(Eq("secondary"))).WillRepeatedly(Return(&secondary_));
+    EXPECT_CALL(cm_, get(Eq("tertiary"))).WillRepeatedly(Return(nullptr));
+    ON_CALL(primary_, prioritySet()).WillByDefault(ReturnRef(primary_ps_));
+    ON_CALL(secondary_, prioritySet()).WillByDefault(ReturnRef(secondary_ps_));
+    ON_CALL(aggregate_cluster_, loadBalancer()).WillByDefault(ReturnRef(*lb_));
+
+    setupPrioritySet();
+
+    ON_CALL(primary_, loadBalancer()).WillByDefault(ReturnRef(primary_load_balancer_));
+    ON_CALL(secondary_, loadBalancer()).WillByDefault(ReturnRef(secondary_load_balancer_));
+  }
+
+  Stats::IsolatedStoreImpl stats_store_;
+  Ssl::MockContextManager ssl_context_manager_;
+  NiceMock<Upstream::MockClusterManager> cm_;
+  NiceMock<Runtime::MockRandomGenerator> random_;
+  NiceMock<ThreadLocal::MockInstance> tls_;
+  NiceMock<Runtime::MockLoader> runtime_;
+  NiceMock<Event::MockDispatcher> dispatcher_;
+  NiceMock<LocalInfo::MockLocalInfo> local_info_;
+  NiceMock<Server::MockAdmin> admin_;
+  Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()};
+  NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor_;
+  Api::ApiPtr api_{Api::createApiForTest(stats_store_)};
+  std::shared_ptr<Cluster> cluster_;
+  Upstream::ThreadAwareLoadBalancerPtr thread_aware_lb_;
+  Upstream::LoadBalancerFactorySharedPtr lb_factory_;
+  Upstream::LoadBalancerPtr lb_;
+  Upstream::ClusterStats stats_;
+  std::shared_ptr<Upstream::MockClusterInfo> info_{new NiceMock<Upstream::MockClusterInfo>()};
+  NiceMock<Upstream::MockThreadLocalCluster> aggregate_cluster_, primary_, secondary_;
+  Upstream::PrioritySetImpl primary_ps_, secondary_ps_;
+  NiceMock<Upstream::MockLoadBalancer> primary_load_balancer_, secondary_load_balancer_;
+
+  const std::string default_yaml_config_ = R"EOF(
+    name: aggregate_cluster
+    connect_timeout: 0.25s
+    lb_policy: CLUSTER_PROVIDED
+    cluster_type:
+      name: envoy.clusters.aggregate
+      typed_config:
+        "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig
+        clusters:
+        - primary
+        - secondary
+)EOF";
+};
+
+TEST_F(AggregateClusterTest, LoadBalancerTest) {
+  initialize(default_yaml_config_);
+  // Health value:
+  // Cluster 1:
+  //     Priority 0: 33.3%
+  //     Priority 1: 33.3%
+  // Cluster 2:
+  //     Priority 0: 33.3%
+  //     Priority 1: 33.3%
+  Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80");
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+
+  for (int i = 0; i <= 65; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+  for (int i = 66; i < 100; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+
+  // Set up the HostSet with 1 healthy, 1 degraded and 2 unhealthy.
+  setupPrimary(0, 1, 1, 2);
+
+  // Health value:
+  // Cluster 1:
+  //     Priority 0: 25%
+  //     Priority 1: 33.3%
+  // Cluster 2:
+  //     Priority 0: 33.3%
+  //     Priority 1: 33.3%
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+
+  for (int i = 0; i <= 57; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+  for (int i = 58; i < 100; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+}
+
+TEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) {
+  initialize(default_yaml_config_);
+  Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80");
+  // Set up the HostSet with 0 healthy, 0 degraded and 2 unhealthy.
+  setupPrimary(0, 0, 0, 2);
+  setupPrimary(1, 0, 0, 2);
+
+  // Set up the HostSet with 0 healthy, 0 degraded and 2 unhealthy.
+  setupSecondary(0, 0, 0, 2);
+  setupSecondary(1, 0, 0, 2);
+  // Health value:
+  // Cluster 1:
+  //     Priority 0: 0%
+  //     Priority 1: 0%
+  // Cluster 2:
+  //     Priority 0: 0%
+  //     Priority 1: 0%
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+
+  for (int i = 0; i < 100; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+}
+
+TEST_F(AggregateClusterTest, ClusterInPanicTest) {
+  initialize(default_yaml_config_);
+  Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80");
+  setupPrimary(0, 1, 0, 4);
+  setupPrimary(1, 1, 0, 4);
+  setupSecondary(0, 1, 0, 4);
+  setupSecondary(1, 1, 0, 4);
+  // Health value:
+  // Cluster 1:
+  //     Priority 0: 20%
+  //     Priority 1: 20%
+  // Cluster 2:
+  //     Priority 0: 20%
+  //     Priority 1: 20%
+  // All priorities are in panic mode. Traffic will be distributed evenly among four priorities.
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+
+  for (int i = 0; i < 50; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+
+  for (int i = 50; i < 100; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+
+  setupPrimary(0, 1, 0, 9);
+  setupPrimary(1, 1, 0, 9);
+  setupSecondary(0, 1, 0, 9);
+  setupSecondary(1, 1, 0, 1);
+  // Health value:
+  // Cluster 1:
+  //     Priority 0: 10%
+  //     Priority 1: 10%
+  // Cluster 2:
+  //     Priority 0: 10%
+  //     Priority 1: 50%
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+
+  for (int i = 0; i <= 25; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+
+  EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr));
+  EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host));
+
+  for (int i = 26; i < 100; ++i) {
+    EXPECT_CALL(random_, random()).WillOnce(Return(i));
+    Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr);
+    EXPECT_EQ(host.get(), target.get());
+  }
+}
+
+TEST_F(AggregateClusterTest, LBContextTest) {
+  AggregateLoadBalancerContext context(nullptr,
+                                       Upstream::LoadBalancerBase::HostAvailability::Healthy, 0);
+
+  EXPECT_EQ(context.computeHashKey().has_value(), false);
+  EXPECT_EQ(context.downstreamConnection(), nullptr);
+  EXPECT_EQ(context.metadataMatchCriteria(), nullptr);
+  EXPECT_EQ(context.downstreamHeaders(), nullptr);
+  EXPECT_EQ(context.upstreamSocketOptions(), nullptr);
+  EXPECT_EQ(context.upstreamTransportSocketOptions(), nullptr);
+}
+
+} // namespace Aggregate
+} // namespace Clusters
+} // namespace Extensions
+} // namespace Envoy
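The random() thresholds in LoadBalancerTest above (66/34, then 58/42) appear to encode a cumulative priority-load scheme: the priorities of the member clusters are linearized and each takes its health percentage of whatever load is still unassigned. A standalone sketch of that arithmetic as we read the expectations (an illustration only, not Envoy's priority-load code):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // Health per linearized priority: cluster1 P0, cluster1 P1, cluster2 P0, cluster2 P1.
      // {33, 33, 33, 33} yields loads 33/33/33/1  -> ~66% primary, ~34% secondary.
      // {25, 33, 33, 33} yields loads 25/33/33/9  -> ~58% primary, ~42% secondary.
      const std::vector<uint32_t> health = {33, 33, 33, 33};
      uint32_t remaining = 100;
      for (std::size_t i = 0; i < health.size(); ++i) {
        const uint32_t assigned = std::min(health[i], remaining);
        remaining -= assigned;
        std::cout << "priority " << i << " gets " << assigned << "% of the traffic\n";
      }
      return 0;
    }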
diff --git a/test/extensions/clusters/aggregate/cluster_update_test.cc b/test/extensions/clusters/aggregate/cluster_update_test.cc
new file mode 100644
index 000000000000..119eaa613893
--- /dev/null
+++ b/test/extensions/clusters/aggregate/cluster_update_test.cc
@@ -0,0 +1,295 @@
+#include "common/singleton/manager_impl.h"
+#include "common/upstream/cluster_factory_impl.h"
+#include "common/upstream/cluster_manager_impl.h"
+
+#include "extensions/clusters/aggregate/cluster.h"
+
+#include "test/common/upstream/test_cluster_manager.h"
+#include "test/common/upstream/utility.h"
+#include "test/mocks/protobuf/mocks.h"
+#include "test/mocks/server/mocks.h"
+#include "test/mocks/ssl/mocks.h"
+#include "test/test_common/environment.h"
+#include "test/test_common/simulated_time_system.h"
+
+using testing::Return;
+
+namespace Envoy {
+namespace Extensions {
+namespace Clusters {
+namespace Aggregate {
+
+envoy::config::bootstrap::v2::Bootstrap parseBootstrapFromV2Yaml(const std::string& yaml) {
+  envoy::config::bootstrap::v2::Bootstrap bootstrap;
+  TestUtility::loadFromYaml(yaml, bootstrap);
+  return bootstrap;
+}
+
+class AggregateClusterUpdateTest : public testing::Test {
+public:
+  AggregateClusterUpdateTest() : http_context_(stats_store_.symbolTable()) {}
+
+  void initialize(const std::string& yaml_config) {
+    cluster_manager_ = std::make_unique<Upstream::TestClusterManagerImpl>(
+        parseBootstrapFromV2Yaml(yaml_config), factory_, factory_.stats_, factory_.tls_,
+        factory_.runtime_, factory_.random_, factory_.local_info_, log_manager_,
+        factory_.dispatcher_, admin_, validation_context_, *api_, http_context_);
+    EXPECT_EQ(cluster_manager_->activeClusters().size(), 1);
+    cluster_ = cluster_manager_->get("aggregate_cluster");
+  }
+
+  Stats::IsolatedStoreImpl stats_store_;
+  NiceMock<Server::MockAdmin> admin_;
+  Api::ApiPtr api_{Api::createApiForTest(stats_store_)};
+  Upstream::ThreadLocalCluster* cluster_;
+
+  Event::SimulatedTimeSystem time_system_;
+  NiceMock<Upstream::TestClusterManagerFactory> factory_;
+  NiceMock<ProtobufMessage::MockValidationContext> validation_context_;
+  std::unique_ptr<Upstream::TestClusterManagerImpl> cluster_manager_;
+  AccessLog::MockAccessLogManager log_manager_;
+  Http::ContextImpl http_context_;
+
+  const std::string default_yaml_config_ = R"EOF(
+ static_resources:
+  clusters:
+  - name: aggregate_cluster
+    connect_timeout: 0.25s
+    lb_policy: CLUSTER_PROVIDED
+    cluster_type:
+      name: envoy.clusters.aggregate
+      typed_config:
+        "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig
+        clusters:
+        - primary
+        - secondary
+  )EOF";
+};
+
+TEST_F(AggregateClusterUpdateTest, NoHealthyUpstream) {
+  initialize(default_yaml_config_);
+  EXPECT_EQ(nullptr, cluster_->loadBalancer().chooseHost(nullptr));
+}
+
+TEST_F(AggregateClusterUpdateTest, BasicFlow) {
+  initialize(default_yaml_config_);
+
+  std::unique_ptr<Upstream::MockClusterUpdateCallbacks> callbacks(
+      new NiceMock<Upstream::MockClusterUpdateCallbacks>());
+  Upstream::ClusterUpdateCallbacksHandlePtr cb =
+      cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks);
+
+  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster("primary"), ""));
+  auto primary = cluster_manager_->get("primary");
+  EXPECT_NE(nullptr, primary);
+  auto host = cluster_->loadBalancer().chooseHost(nullptr);
+  EXPECT_NE(nullptr, host);
+  EXPECT_EQ("primary", host->cluster().name());
+  EXPECT_EQ("127.0.0.1:11001", host->address()->asString());
+
+  EXPECT_TRUE(
+      cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster("secondary"), ""));
+  auto secondary = cluster_manager_->get("secondary");
+  EXPECT_NE(nullptr, secondary);
+  host = cluster_->loadBalancer().chooseHost(nullptr);
+  EXPECT_NE(nullptr, host);
+  EXPECT_EQ("primary", host->cluster().name());
+  EXPECT_EQ("127.0.0.1:11001", host->address()->asString());
+
+  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster("tertiary"), ""));
+  auto tertiary = cluster_manager_->get("tertiary");
+  EXPECT_NE(nullptr, tertiary);
+  host = cluster_->loadBalancer().chooseHost(nullptr);
+  EXPECT_NE(nullptr, host);
+  EXPECT_EQ("primary", host->cluster().name());
+  EXPECT_EQ("127.0.0.1:11001", host->address()->asString());
+
+  EXPECT_TRUE(cluster_manager_->removeCluster("primary"));
+  EXPECT_EQ(nullptr, cluster_manager_->get("primary"));
+  host = cluster_->loadBalancer().chooseHost(nullptr);
+  EXPECT_NE(nullptr, host);
+  EXPECT_EQ("secondary", host->cluster().name());
+  EXPECT_EQ("127.0.0.1:11001", host->address()->asString());
+  EXPECT_EQ(3, cluster_manager_->activeClusters().size());
+
+  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster("primary"), ""));
+  primary = cluster_manager_->get("primary");
+  EXPECT_NE(nullptr, primary);
+  host = cluster_->loadBalancer().chooseHost(nullptr);
+  EXPECT_NE(nullptr, host);
+  EXPECT_EQ("primary", host->cluster().name());
+  EXPECT_EQ("127.0.0.1:11001", host->address()->asString());
+}
+
+TEST_F(AggregateClusterUpdateTest, LoadBalancingTest) {
+  initialize(default_yaml_config_);
+  EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster("primary"), ""));
+  auto primary = cluster_manager_->get("primary");
+  EXPECT_NE(nullptr, primary);
+  EXPECT_TRUE(
+      cluster_manager_->addOrUpdateCluster(Upstream::defaultStaticCluster("secondary"), ""));
+  auto secondary = cluster_manager_->get("secondary");
+  EXPECT_NE(nullptr, secondary);
+
+  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.
+  Upstream::HostSharedPtr host1 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.1:80");
+  host1->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);
+  Upstream::HostSharedPtr host2 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.2:80");
+  host2->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);
+  Upstream::HostSharedPtr host3 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.3:80");
+  Upstream::Cluster& cluster = cluster_manager_->activeClusters().find("primary")->second;
+  cluster.prioritySet().updateHosts(
+      0,
+      Upstream::HostSetImpl::partitionHosts(
+          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host1, host2, host3}),
+          Upstream::HostsPerLocalityImpl::empty()),
+      nullptr, {host1, host2, host3}, {}, 100);
+
+  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.
+  Upstream::HostSharedPtr host4 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.4:80");
+  host4->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);
+  Upstream::HostSharedPtr host5 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.5:80");
+  host5->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);
+  Upstream::HostSharedPtr host6 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.6:80");
+  Upstream::Cluster& cluster1 = cluster_manager_->activeClusters().find("secondary")->second;
+  cluster1.prioritySet().updateHosts(
+      0,
+      Upstream::HostSetImpl::partitionHosts(
+          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host4, host5, host6}),
+          Upstream::HostsPerLocalityImpl::empty()),
+      nullptr, {host4, host5, host6}, {}, 100);
+
+  Upstream::HostConstSharedPtr host;
+  for (int i = 0; i < 33; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host3, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  for (int i = 33; i < 66; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host6, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  for (int i = 66; i < 99; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host1, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  for (int i = 99; i < 100; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host4, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  EXPECT_TRUE(cluster_manager_->removeCluster("primary"));
+  EXPECT_EQ(nullptr, cluster_manager_->get("primary"));
+
+  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.
+  Upstream::HostSharedPtr host7 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.7:80");
+  host7->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);
+  Upstream::HostSharedPtr host8 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.8:80");
+  host8->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);
+  Upstream::HostSharedPtr host9 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.9:80");
+  cluster1.prioritySet().updateHosts(
+      1,
+      Upstream::HostSetImpl::partitionHosts(
+          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host7, host8, host9}),
+          Upstream::HostsPerLocalityImpl::empty()),
+      nullptr, {host7, host8, host9}, {}, 100);
+
+  // Priority set
+  //   Priority 0: 1/3 healthy, 1/3 degraded
+  //   Priority 1: 1/3 healthy, 1/3 degraded
+  for (int i = 0; i < 33; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    host = cluster_->loadBalancer().chooseHost(nullptr);
+    EXPECT_EQ(host6, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  for (int i = 33; i < 66; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    host = cluster_->loadBalancer().chooseHost(nullptr);
+    EXPECT_EQ(host9, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  for (int i = 66; i < 99; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host4, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  for (int i = 99; i < 100; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host7, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+}
+
+TEST_F(AggregateClusterUpdateTest, InitializeAggregateClusterAfterOtherClusters) {
+  const std::string config = R"EOF(
+ static_resources:
+  clusters:
+  - name: primary
+    connect_timeout: 5s
+    type: STATIC
+    load_assignment:
+      cluster_name: primary
+      endpoints:
+      - lb_endpoints:
+        - endpoint:
+            address:
+              socket_address:
+                address: 127.0.0.1
+                port_value: 80
+    lb_policy: ROUND_ROBIN
+  - name: aggregate_cluster
+    connect_timeout: 0.25s
+    lb_policy: CLUSTER_PROVIDED
+    cluster_type:
+      name: envoy.clusters.aggregate
+      typed_config:
+        "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig
+        clusters:
+        - primary
+        - secondary
+  )EOF";
+
+  cluster_manager_ = std::make_unique<Upstream::TestClusterManagerImpl>(
+      parseBootstrapFromV2Yaml(config), factory_, factory_.stats_, factory_.tls_, factory_.runtime_,
+      factory_.random_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_,
+      validation_context_, *api_, http_context_);
+  EXPECT_EQ(cluster_manager_->activeClusters().size(), 2);
+  cluster_ = cluster_manager_->get("aggregate_cluster");
+  auto primary = cluster_manager_->get("primary");
+  EXPECT_NE(nullptr, primary);
+  auto host = cluster_->loadBalancer().chooseHost(nullptr);
+  EXPECT_NE(nullptr, host);
+  EXPECT_EQ("primary", host->cluster().name());
+  EXPECT_EQ("127.0.0.1:80", host->address()->asString());
+
+  // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy.
+  Upstream::HostSharedPtr host1 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.1:80");
+  host1->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC);
+  Upstream::HostSharedPtr host2 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.2:80");
+  host2->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC);
+  Upstream::HostSharedPtr host3 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.3:80");
+  Upstream::Cluster& cluster = cluster_manager_->activeClusters().find("primary")->second;
+  cluster.prioritySet().updateHosts(
+      0,
+      Upstream::HostSetImpl::partitionHosts(
+          std::make_shared<Upstream::HostVector>(Upstream::HostVector{host1, host2, host3}),
+          Upstream::HostsPerLocalityImpl::empty()),
+      nullptr, {host1, host2, host3}, {}, 100);
+
+  for (int i = 0; i < 50; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host3, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+
+  for (int i = 50; i < 100; ++i) {
+    EXPECT_CALL(factory_.random_, random()).WillRepeatedly(Return(i));
+    EXPECT_EQ(host1, cluster_->loadBalancer().chooseHost(nullptr));
+  }
+}
+
+} // namespace Aggregate
+} // namespace Clusters
+} // namespace Extensions
+} // namespace Envoy
diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc
index cc87c48fdde4..3a792f3f604c 100644
--- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc
+++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc
@@ -26,7 +26,8 @@ class ClusterTest : public testing::Test,
   void initialize(const std::string& yaml_config, bool uses_tls) {
     envoy::api::v2::Cluster cluster_config = Upstream::parseClusterFromV2Yaml(yaml_config);
     envoy::config::cluster::dynamic_forward_proxy::v2alpha::ClusterConfig config;
-    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(),
+    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().name(),
+                                           cluster_config.cluster_type().typed_config(),
                                            ProtobufWkt::Struct::default_instance(),
                                            ProtobufMessage::getStrictValidationVisitor(), config);
     Stats::ScopePtr scope = stats_store_.createScope("cluster.name.");
diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc
index b0179a1d887e..d377220ab05b 100644
--- a/test/extensions/clusters/redis/redis_cluster_test.cc
+++ b/test/extensions/clusters/redis/redis_cluster_test.cc
@@ -92,7 +92,8 @@ class RedisClusterTest : public testing::Test,
         singleton_manager_, tls_, validation_visitor_, *api_);
 
     envoy::config::cluster::redis::RedisClusterConfig config;
-    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(),
+    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().name(),
+                                           cluster_config.cluster_type().typed_config(),
                                            ProtobufWkt::Struct::default_instance(),
                                            ProtobufMessage::getStrictValidationVisitor(), config);
     cluster_callback_ = std::make_shared<NiceMock<MockClusterSlotUpdateCallBack>>();
@@ -122,9 +123,9 @@ class RedisClusterTest : public testing::Test,
         singleton_manager_, tls_, validation_visitor_, *api_);
 
     envoy::config::cluster::redis::RedisClusterConfig config;
-    Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(),
-                                           ProtobufWkt::Struct::default_instance(),
-                                           validation_visitor_, config);
+    Config::Utility::translateOpaqueConfig(
+        cluster_config.cluster_type().name(), cluster_config.cluster_type().typed_config(),
+        ProtobufWkt::Struct::default_instance(), validation_visitor_, config);
 
     NiceMock<AccessLog::MockAccessLogManager> log_manager;
     NiceMock<Upstream::Outlier::EventLoggerSharedPtr> outlier_event_logger;
@@ -162,7 +163,7 @@ class RedisClusterTest : public testing::Test,
       EXPECT_CALL(*client_, addConnectionCallbacks(_));
       EXPECT_CALL(*client_, close());
     }
-    EXPECT_CALL(*client_, makeRequest(Ref(RedisCluster::ClusterSlotsRequest::instance_), _))
+    EXPECT_CALL(*client_, makeRequest_(Ref(RedisCluster::ClusterSlotsRequest::instance_), _))
         .WillOnce(Return(&pool_request_));
   }
 
@@ -506,10 +507,11 @@ class RedisClusterTest : public testing::Test,
     EXPECT_EQ(discovery_session.bufferFlushTimeoutInMs(), std::chrono::milliseconds(0));
     EXPECT_EQ(discovery_session.maxUpstreamUnknownConnections(), 0);
 
-    NetworkFilters::Common::Redis::RespValue dummy_value;
-    dummy_value.type(NetworkFilters::Common::Redis::RespType::Error);
-    dummy_value.asString() = "dummy text";
-    EXPECT_TRUE(discovery_session.onRedirection(dummy_value));
+    NetworkFilters::Common::Redis::RespValuePtr dummy_value{
+        new NetworkFilters::Common::Redis::RespValue()};
+    dummy_value->type(NetworkFilters::Common::Redis::RespType::Error);
+    dummy_value->asString() = "dummy text";
+    EXPECT_TRUE(discovery_session.onRedirection(std::move(dummy_value), "dummy ip", false));
 
     RedisCluster::RedisDiscoveryClient discovery_client(discovery_session);
     EXPECT_NO_THROW(discovery_client.onAboveWriteBufferHighWatermark());
@@ -550,7 +552,7 @@ class RedisClusterTest : public testing::Test,
   Event::MockTimer* interval_timer_{};
   Extensions::NetworkFilters::Common::Redis::Client::MockClient* client_{};
   Extensions::NetworkFilters::Common::Redis::Client::MockPoolRequest pool_request_;
-  Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks* pool_callbacks_{};
+  Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks* pool_callbacks_{};
   std::shared_ptr<RedisCluster> cluster_;
   std::shared_ptr<NiceMock<MockClusterSlotUpdateCallBack>> cluster_callback_;
   Network::MockActiveDnsQuery active_dns_query_;
diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc
index 13fb4f6ac04c..c2062232879a 100644
--- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc
+++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc
@@ -23,7 +23,7 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT
     config_.set_name("foo");
     config_.set_dns_lookup_family(envoy::api::v2::Cluster::V4_ONLY);
 
-    EXPECT_CALL(dispatcher_, createDnsResolver(_)).WillOnce(Return(resolver_));
+    EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_));
     dns_cache_ = std::make_unique<DnsCacheImpl>(dispatcher_, tls_, store_, config_);
     update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_);
   }
diff --git a/test/extensions/filters/common/expr/BUILD b/test/extensions/filters/common/expr/BUILD
index 8ce5555328bf..c6af64c0a0f1 100644
--- a/test/extensions/filters/common/expr/BUILD
+++ b/test/extensions/filters/common/expr/BUILD
@@ -2,7 +2,9 @@ licenses(["notice"])  # Apache 2
 
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_fuzz_test",
     "envoy_package",
+    "envoy_proto_library",
 )
 load(
     "//test/extensions:extensions_build_system.bzl",
@@ -23,3 +25,25 @@ envoy_extension_cc_test(
         "//test/test_common:utility_lib",
     ],
 )
+
+envoy_proto_library(
+    name = "evaluator_fuzz_proto",
+    srcs = ["evaluator_fuzz.proto"],
+    deps = [
+        "//test/fuzz:common_proto",
+        "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto",
+    ],
+)
+
+envoy_cc_fuzz_test(
+    name = "evaluator_fuzz_test",
+    srcs = ["evaluator_fuzz_test.cc"],
+    corpus = ":evaluator_corpus",
+    deps = [
+        ":evaluator_fuzz_proto_cc_proto",
+        "//source/extensions/filters/common/expr:evaluator_lib",
+        "//test/common/stream_info:test_util",
+        "//test/fuzz:utility_lib",
+        "//test/test_common:utility_lib",
+    ],
+)
diff --git a/test/extensions/filters/common/expr/context_test.cc b/test/extensions/filters/common/expr/context_test.cc
index 5edbc3ec07a6..61f0bcbca070 100644
--- a/test/extensions/filters/common/expr/context_test.cc
+++ b/test/extensions/filters/common/expr/context_test.cc
@@ -45,6 +45,7 @@ TEST(Context, RequestAttributes) {
   EXPECT_CALL(info, startTime()).WillRepeatedly(Return(start_time));
   absl::optional<std::chrono::nanoseconds> dur = std::chrono::nanoseconds(15000000);
   EXPECT_CALL(info, requestComplete()).WillRepeatedly(Return(dur));
+  EXPECT_CALL(info, protocol()).WillRepeatedly(Return(Http::Protocol::Http2));
 
   // stub methods
   EXPECT_EQ(0, request.size());
@@ -157,6 +158,13 @@ TEST(Context, RequestAttributes) {
     ASSERT_TRUE(value.value().IsDuration());
     EXPECT_EQ("15ms", absl::FormatDuration(value.value().DurationOrDie()));
   }
+
+  {
+    auto value = request[CelValue::CreateStringView(Protocol)];
+    EXPECT_TRUE(value.has_value());
+    ASSERT_TRUE(value.value().IsString());
+    EXPECT_EQ("HTTP/2", value.value().StringOrDie().value());
+  }
 }
 
 TEST(Context, RequestFallbackAttributes) {
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/crash-67e48e44650e25b93159729a7a4dd386625bb5c2 b/test/extensions/filters/common/expr/evaluator_corpus/crash-67e48e44650e25b93159729a7a4dd386625bb5c2
new file mode 100644
index 000000000000..eaf60aaa6d29
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/crash-67e48e44650e25b93159729a7a4dd386625bb5c2
@@ -0,0 +1,207 @@
+expression {
+  id: 17179869184
+  struct_expr {
+    entries {
+      map_key {
+        id: 27424471945274724
+        list_expr {
+        }
+      }
+    }
+    entries {
+      map_key {
+        id: 27424471945274724
+        list_expr {
+          elements {
+            id: 50331648
+          }
+        }
+      }
+    }
+    entries {
+      map_key {
+        id: 27424471945274724
+        list_expr {
+          elements {
+            id: 50331648
+          }
+        }
+      }
+    }
+    entries {
+      map_key {
+        id: 27424471945274724
+        list_expr {
+          elements {
+            id: 50331648
+          }
+        }
+      }
+    }
+    entries {
+      map_key {
+        id: 27424471945274724
+        list_expr {
+          elements {
+            select_expr {
+              operand {
+                ident_expr {
+                  name: "\004\000\000\000"
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+    entries {
+      map_key {
+        id: 27424471945274724
+        list_expr {
+          elements {
+            id: 50331648
+          }
+        }
+      }
+    }
+    entries {
+      map_key {
+        id: 27424471945274724
+        list_expr {
+          elements {
+            id: 50331648
+          }
+        }
+      }
+    }
+    entries {
+      map_key {
+        id: 27424471945274724
+      }
+      value {
+        comprehension_expr {
+          iter_var: "\001\000\000\000\000\000\000\031"
+        }
+      }
+    }
+  }
+}
+request_headers {
+  headers {
+    key: "\t\000\000\000"
+    value: "&&"
+  }
+}
+response_headers {
+}
+trailers {
+  headers {
+    key: "\000\000\000\001"
+    value: "\000\000\000\001"
+  }
+  headers {
+    key: "\000\000\000\001"
+    value: "\000\000\000\001"
+  }
+  headers {
+    key: "\000\000\000\001"
+    value: "\000\000\000\001"
+  }
+  headers {
+    key: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177"
+    value: "\000\000\000\0010\000"
+  }
+}
+stream_info {
+  dynamic_metadata {
+    filter_metadata {
+      key: ""
+      value {
+      }
+    }
+    filter_metadata {
+      key: ""
+      value {
+        fields {
+          key: "("
+          value {
+          }
+        }
+      }
+    }
+    filter_metadata {
+      key: ""
+      value {
+        fields {
+          key: "N5Envoy24ProtoValidation"
+          value {
+          }
+        }
+      }
+    }
+    filter_metadata {
+      key: ""
+      value {
+        fields {
+          key: ""
+          value {
+            number_value: 1.3262473693533e-315
+          }
+        }
+        fields {
+          key: "("
+          value {
+          }
+        }
+      }
+    }
+    filter_metadata {
+      key: ""
+      value {
+        fields {
+          key: ""
+          value {
+            number_value: 1.3262473693533e-315
+          }
+        }
+        fields {
+          key: "("
+          value {
+          }
+        }
+      }
+    }
+    filter_metadata {
+      key: ""
+      value {
+        fields {
+          key: ""
+          value {
+            number_value: 1.3262473693533e-315
+          }
+        }
+        fields {
+          key: "("
+          value {
+          }
+        }
+      }
+    }
+  }
+  response_code {
+    value: 134219776
+  }
+  upstream_metadata {
+    filter_metadata {
+      key: ""
+      value {
+        fields {
+          key: ""
+          value {
+            number_value: 9.56944336513491e-315
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/crash-87e3c780acf4403ddd8b182496e6cad5ac5efd66 b/test/extensions/filters/common/expr/evaluator_corpus/crash-87e3c780acf4403ddd8b182496e6cad5ac5efd66
new file mode 100644
index 000000000000..33e49e8b02c9
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/crash-87e3c780acf4403ddd8b182496e6cad5ac5efd66
@@ -0,0 +1,6 @@
+trailers {
+}
+stream_info {
+  address {
+  }
+}
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/crash-d6a9858c9b8e8b60845af9f5adc9eaead58147bd b/test/extensions/filters/common/expr/evaluator_corpus/crash-d6a9858c9b8e8b60845af9f5adc9eaead58147bd
new file mode 100644
index 000000000000..e8fd63ab0c8d
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/crash-d6a9858c9b8e8b60845af9f5adc9eaead58147bd
@@ -0,0 +1,27 @@
+expression {
+  comprehension_expr {
+    iter_range {
+      ident_expr {
+        name: "request"
+      }
+    }
+    result {
+      id: 3530822107858468864
+    }
+  }
+}
+trailers {
+  headers {
+    key: "\r\000"
+  }
+}
+stream_info {
+  dynamic_metadata {
+    filter_metadata {
+      key: ""
+      value {
+      }
+    }
+  }
+  requested_server_name: "/"
+}
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/errorcondition b/test/extensions/filters/common/expr/evaluator_corpus/errorcondition
new file mode 100644
index 000000000000..a57bc9920898
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/errorcondition
@@ -0,0 +1,39 @@
+expression {
+  call_expr {
+    function: "_[_]"
+    args {
+      select_expr {
+        operand {
+	  ident_expr {
+	    name: "request"
+	  }
+        }
+	field: "undefined"
+      }
+      const_expr {
+        string_value: "foo"
+      }
+    }
+  }
+}
+request_headers {
+  headers{key: ":method" value : "GET"}
+  headers{key: ":path" value : "/"}
+  headers{key: ":scheme" value : "http"}
+  headers{key: ":authority" value : "foo.com"}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+}
+response_headers {
+  headers {
+    key: ":status"
+    value : "200"
+  }
+}
+trailers {
+  headers {}
+}
+stream_info {}
\ No newline at end of file
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/example b/test/extensions/filters/common/expr/evaluator_corpus/example
new file mode 100644
index 000000000000..18fff18a2a43
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/example
@@ -0,0 +1,26 @@
+expression {
+  const_expr {
+    bool_value: false
+  }
+}
+request_headers {
+  headers{key: ":method" value : "GET"}
+  headers{key: ":path" value : "/"}
+  headers{key: ":scheme" value : "http"}
+  headers{key: ":authority" value : "foo.com"}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+}
+response_headers {
+  headers {
+    key: ":status"
+    value : "200"
+  }
+}
+trailers {
+  headers {}
+}
+stream_info {}
\ No newline at end of file
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/example1 b/test/extensions/filters/common/expr/evaluator_corpus/example1
new file mode 100644
index 000000000000..0d021a92ac50
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/example1
@@ -0,0 +1,31 @@
+expression {
+  call_expr {
+    function: "undefined_extent"
+    args {
+       const_expr {
+         bool_value: false
+       }
+    }
+  }
+}
+request_headers {
+  headers{key: ":method" value : "GET"}
+  headers{key: ":path" value : "/"}
+  headers{key: ":scheme" value : "http"}
+  headers{key: ":authority" value : "foo.com"}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+}
+response_headers {
+  headers {
+    key: ":status"
+    value : "200"
+  }
+}
+trailers {
+  headers {}
+}
+stream_info {}
\ No newline at end of file
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/headercondition b/test/extensions/filters/common/expr/evaluator_corpus/headercondition
new file mode 100644
index 000000000000..641e1a14a5c5
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/headercondition
@@ -0,0 +1,47 @@
+expression {
+  call_expr {
+    function: "_==_"
+    args {
+      call_expr {
+        function: "_[_]"
+	args {
+	  select_expr {
+	    operand {
+	      ident_expr {
+	        name: "request"
+	      }
+	    }
+	    field: "headers"
+	  }
+	  const_expr {
+	    string_value: "foo"
+	  }
+        }
+      }
+      const_expr {
+        string_value: "bar"
+      }
+    }
+  }
+}
+request_headers {
+  headers{key: ":method" value : "GET"}
+  headers{key: ":path" value : "/"}
+  headers{key: ":scheme" value : "http"}
+  headers{key: ":authority" value : "foo.com"}
+  headers {key: "foo" value: "bar"}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+}
+response_headers {
+  headers {
+    key: ":status"
+    value : "200"
+  }
+}
+trailers {
+  headers {}
+}
+stream_info {}
\ No newline at end of file
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/metadatacondition b/test/extensions/filters/common/expr/evaluator_corpus/metadatacondition
new file mode 100644
index 000000000000..d109f18b1fdf
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/metadatacondition
@@ -0,0 +1,63 @@
+expression {
+  call_expr {
+    function: "_==_"
+    args {
+      call_expr {
+        function: "_[_]"
+	args {
+	  select_expr {
+	    operand {
+	      ident_expr {
+	        name: "metadata"
+	      }
+	    }
+	    field: "filter_metadata"
+	  }
+	  const_expr {
+	    string_value: "other"
+	  }
+        }
+	const_expr {
+	  string_value: "label"
+	}
+      }
+      const_expr {
+        string_value: "prod"
+      }
+    }
+  }
+}
+request_headers {
+  headers{key: ":method" value : "GET"}
+  headers{key: ":path" value : "/"}
+  headers{key: ":scheme" value : "http"}
+  headers{key: ":authority" value : "foo.com"}
+  headers {key: "foo" value: "bar"}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+}
+response_headers {
+  headers {
+    key: ":status"
+    value : "200"
+  }
+}
+trailers {
+  headers {}
+}
+stream_info {
+  start_time: 1522796769123
+  upstream_metadata {
+    filter_metadata {
+      key: "other"
+      value: {
+        fields {
+          key: "label"
+          value: { string_value: "prod" }
+        }
+      }
+    }
+  }
+}
diff --git a/test/extensions/filters/common/expr/evaluator_corpus/mistypedcondition b/test/extensions/filters/common/expr/evaluator_corpus/mistypedcondition
new file mode 100644
index 000000000000..c65359cbf66d
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_corpus/mistypedcondition
@@ -0,0 +1,26 @@
+expression {
+  const_expr {
+    int64_value: 13
+  }
+}
+request_headers {
+  headers{key: ":method" value : "GET"}
+  headers{key: ":path" value : "/"}
+  headers{key: ":scheme" value : "http"}
+  headers{key: ":authority" value : "foo.com"}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+  headers {}
+}
+response_headers {
+  headers {
+    key: ":status"
+    value : "200"
+  }
+}
+trailers {
+  headers {}
+}
+stream_info {}
\ No newline at end of file
diff --git a/test/extensions/filters/common/expr/evaluator_fuzz.proto b/test/extensions/filters/common/expr/evaluator_fuzz.proto
new file mode 100644
index 000000000000..7b149e73eeb2
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_fuzz.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+
+package test.extensions.filters.common.expr;
+
+import "google/api/expr/v1alpha1/syntax.proto";
+import "test/fuzz/common.proto";
+import "validate/validate.proto";
+
+// Structured input for fuzz test.
+
+message EvaluatorTestCase {
+  google.api.expr.v1alpha1.Expr expression = 1 [(validate.rules).message.required = true];
+  test.fuzz.Headers request_headers = 2;
+  test.fuzz.Headers response_headers = 3;
+  test.fuzz.Headers trailers = 4;
+  test.fuzz.StreamInfo stream_info = 5;
+}
\ No newline at end of file
diff --git a/test/extensions/filters/common/expr/evaluator_fuzz_test.cc b/test/extensions/filters/common/expr/evaluator_fuzz_test.cc
new file mode 100644
index 000000000000..6374e6e9f04a
--- /dev/null
+++ b/test/extensions/filters/common/expr/evaluator_fuzz_test.cc
@@ -0,0 +1,57 @@
+#include "common/network/utility.h"
+
+#include "extensions/filters/common/expr/evaluator.h"
+
+#include "test/common/stream_info/test_util.h"
+#include "test/extensions/filters/common/expr/evaluator_fuzz.pb.validate.h"
+#include "test/fuzz/fuzz_runner.h"
+#include "test/fuzz/utility.h"
+#include "test/test_common/network_utility.h"
+#include "test/test_common/utility.h"
+
+#include "gtest/gtest.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace Filters {
+namespace Common {
+namespace Expr {
+namespace {
+
+DEFINE_PROTO_FUZZER(const test::extensions::filters::common::expr::EvaluatorTestCase& input) {
+  // Create builder without constant folding.
+  static Expr::BuilderPtr builder = Expr::createBuilder(nullptr);
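+  // The builder is static so it is constructed once and reused across fuzz iterations.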
+
+  try {
+    // Validate that the input has an expression.
+    TestUtility::validate(input);
+  } catch (const EnvoyException& e) {
+    ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what());
+    return;
+  }
+
+  // Create the headers and stream_info to test against.
+  TestStreamInfo stream_info = Fuzz::fromStreamInfo(input.stream_info());
+  Http::TestHeaderMapImpl request_headers = Fuzz::fromHeaders(input.request_headers());
+  Http::TestHeaderMapImpl response_headers = Fuzz::fromHeaders(input.response_headers());
+  Http::TestHeaderMapImpl response_trailers = Fuzz::fromHeaders(input.trailers());
+
+  try {
+    // Create the CEL expression.
+    Expr::ExpressionPtr expr = Expr::createExpression(*builder, input.expression());
+
+    // Evaluate the CEL expression.
+    Protobuf::Arena arena;
+    Expr::evaluate(*expr, nullptr, stream_info, &request_headers, &response_headers,
+                   &response_trailers);
+  } catch (const CelException& e) {
+    ENVOY_LOG_MISC(debug, "CelException: {}", e.what());
+  }
+}
+
+} // namespace
+} // namespace Expr
+} // namespace Common
+} // namespace Filters
+} // namespace Extensions
+} // namespace Envoy
diff --git a/test/extensions/filters/common/rbac/matchers_test.cc b/test/extensions/filters/common/rbac/matchers_test.cc
index 699b53d8b4e7..9b6a79ef595d 100644
--- a/test/extensions/filters/common/rbac/matchers_test.cc
+++ b/test/extensions/filters/common/rbac/matchers_test.cc
@@ -193,15 +193,22 @@ TEST(AuthenticatedMatcher, uriSanPeerCertificate) {
   Envoy::Network::MockConnection conn;
   auto ssl = std::make_shared<Ssl::MockConnectionInfo>();
 
-  const std::vector<std::string> sans{"foo", "baz"};
-  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(sans));
+  const std::vector<std::string> uri_sans{"foo", "baz"};
+  const std::vector<std::string> dns_sans;
+  EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(uri_sans));
+  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans));
+  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef("subject"));
+
   EXPECT_CALL(Const(conn), ssl()).WillRepeatedly(Return(ssl));
 
-  // We should get the first URI SAN.
+  // We should check if any URI SAN matches.
   envoy::config::rbac::v2::Principal_Authenticated auth;
   auth.mutable_principal_name()->set_exact("foo");
   checkMatcher(AuthenticatedMatcher(auth), true, conn);
 
+  auth.mutable_principal_name()->set_exact("baz");
+  checkMatcher(AuthenticatedMatcher(auth), true, conn);
+
   auth.mutable_principal_name()->set_exact("bar");
   checkMatcher(AuthenticatedMatcher(auth), false, conn);
 }
@@ -210,7 +217,7 @@ TEST(AuthenticatedMatcher, dnsSanPeerCertificate) {
   Envoy::Network::MockConnection conn;
   auto ssl = std::make_shared<Ssl::MockConnectionInfo>();
 
-  const std::vector<std::string> uri_sans;
+  const std::vector<std::string> uri_sans{"uri_foo"};
   const std::vector<std::string> dns_sans{"foo", "baz"};
 
   EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(uri_sans));
@@ -219,11 +226,16 @@ TEST(AuthenticatedMatcher, dnsSanPeerCertificate) {
   EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans));
   EXPECT_CALL(Const(conn), ssl()).WillRepeatedly(Return(ssl));
 
-  // We should get the first DNS SAN as URI SAN is not available.
+  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef("subject"));
+
+  // We should check if any DNS SAN matches as the URI SAN does not match.
   envoy::config::rbac::v2::Principal_Authenticated auth;
   auth.mutable_principal_name()->set_exact("foo");
   checkMatcher(AuthenticatedMatcher(auth), true, conn);
 
+  auth.mutable_principal_name()->set_exact("baz");
+  checkMatcher(AuthenticatedMatcher(auth), true, conn);
+
   auth.mutable_principal_name()->set_exact("bar");
   checkMatcher(AuthenticatedMatcher(auth), false, conn);
 }
@@ -302,8 +314,12 @@ TEST(PolicyMatcher, PolicyMatcher) {
   Envoy::Network::Address::InstanceConstSharedPtr addr =
       Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false);
 
-  const std::vector<std::string> sans{"bar", "baz"};
-  EXPECT_CALL(*ssl, uriSanPeerCertificate()).Times(2).WillRepeatedly(Return(sans));
+  const std::vector<std::string> uri_sans{"bar", "baz"};
+  const std::vector<std::string> dns_sans;
+  EXPECT_CALL(*ssl, uriSanPeerCertificate()).Times(4).WillRepeatedly(Return(uri_sans));
+  EXPECT_CALL(*ssl, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans));
+  EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef("subject"));
+
   EXPECT_CALL(Const(conn), ssl()).Times(2).WillRepeatedly(Return(ssl));
   EXPECT_CALL(conn, localAddress()).Times(2).WillRepeatedly(ReturnRef(addr));
 
diff --git a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc
index 4c490321c40c..a237eb08a1de 100644
--- a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc
+++ b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc
@@ -41,7 +41,7 @@ request_type: internal
 )EOF";
 
   void initializeFilter(const std::string& yaml) {
-    envoy::config::filter::http::ip_tagging::v2::IPTagging config;
+    envoy::config::filter::http::ip_tagging::v3alpha::IPTagging config;
     TestUtility::loadFromYaml(yaml, config);
     config_.reset(new IpTaggingFilterConfig(config, "prefix.", stats_, runtime_));
     filter_ = std::make_unique<IpTaggingFilter>(config_);
diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc
index ae97e4f54110..db3f0b35d116 100644
--- a/test/extensions/filters/network/common/redis/client_impl_test.cc
+++ b/test/extensions/filters/network/common/redis/client_impl_test.cc
@@ -149,7 +149,7 @@ TEST_F(RedisClientImplTest, BatchWithZeroBufferAndTimeout) {
 
   // Make the dummy request
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -196,7 +196,7 @@ TEST_F(RedisClientImplTest, BatchWithTimerFiring) {
 
   // Make the dummy request
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enableTimer(_, _));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -238,7 +238,7 @@ TEST_F(RedisClientImplTest, BatchWithTimerCancelledByBufferFlush) {
 
   // Make the dummy request (doesn't fill buffer, starts timer)
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enableTimer(_, _));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -246,7 +246,7 @@ TEST_F(RedisClientImplTest, BatchWithTimerCancelledByBufferFlush) {
 
   // Make a second dummy request (fills buffer, cancels timer)
   Common::Redis::RespValue request2;
-  MockPoolCallbacks callbacks2;
+  MockClientCallbacks callbacks2;
   EXPECT_CALL(*encoder_, encode(Ref(request2), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(true));
   ;
@@ -287,7 +287,7 @@ TEST_F(RedisClientImplTest, Basic) {
   client_->initialize(auth_password_);
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -296,7 +296,7 @@ TEST_F(RedisClientImplTest, Basic) {
   onConnected();
 
   Common::Redis::RespValue request2;
-  MockPoolCallbacks callbacks2;
+  MockClientCallbacks callbacks2;
   EXPECT_CALL(*encoder_, encode(Ref(request2), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);
@@ -378,7 +378,7 @@ TEST_F(RedisClientImplTest, Cancel) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -387,7 +387,7 @@ TEST_F(RedisClientImplTest, Cancel) {
   onConnected();
 
   Common::Redis::RespValue request2;
-  MockPoolCallbacks callbacks2;
+  MockClientCallbacks callbacks2;
   EXPECT_CALL(*encoder_, encode(Ref(request2), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);
@@ -431,7 +431,7 @@ TEST_F(RedisClientImplTest, FailAll) {
   client_->addConnectionCallbacks(connection_callbacks);
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -459,7 +459,7 @@ TEST_F(RedisClientImplTest, FailAllWithCancel) {
   client_->addConnectionCallbacks(connection_callbacks);
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -484,7 +484,7 @@ TEST_F(RedisClientImplTest, ProtocolError) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -513,7 +513,7 @@ TEST_F(RedisClientImplTest, ConnectFail) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -549,7 +549,7 @@ TEST_F(RedisClientImplTest, OutlierDisabled) {
   setup(std::make_unique<ConfigOutlierDisabled>());
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -570,7 +570,7 @@ TEST_F(RedisClientImplTest, ConnectTimeout) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -593,7 +593,7 @@ TEST_F(RedisClientImplTest, OpTimeout) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -638,7 +638,7 @@ TEST_F(RedisClientImplTest, AskRedirection) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -647,7 +647,7 @@ TEST_F(RedisClientImplTest, AskRedirection) {
   onConnected();
 
   Common::Redis::RespValue request2;
-  MockPoolCallbacks callbacks2;
+  MockClientCallbacks callbacks2;
   EXPECT_CALL(*encoder_, encode(Ref(request2), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);
@@ -666,8 +666,8 @@ TEST_F(RedisClientImplTest, AskRedirection) {
     // The exact values of the hash slot and IP info are not important.
     response1->asString() = "ASK 1111 10.1.2.3:4321";
     // Simulate redirection failure.
-    EXPECT_CALL(callbacks1, onRedirection(Ref(*response1))).WillOnce(Return(false));
-    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));
+    EXPECT_CALL(callbacks1, onRedirection_(Ref(response1), "10.1.2.3:4321", true))
+        .WillOnce(Return(false));
     EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));
     EXPECT_CALL(host_->outlier_detector_,
                 putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));
@@ -679,7 +679,8 @@ TEST_F(RedisClientImplTest, AskRedirection) {
     response2->type(Common::Redis::RespType::Error);
     // The exact values of the hash slot and IP info are not important.
     response2->asString() = "ASK 2222 10.1.2.4:4321";
-    EXPECT_CALL(callbacks2, onRedirection(Ref(*response2))).WillOnce(Return(true));
+    EXPECT_CALL(callbacks2, onRedirection_(Ref(response2), "10.1.2.4:4321", true))
+        .WillOnce(Return(true));
     EXPECT_CALL(*connect_or_op_timer_, disableTimer());
     EXPECT_CALL(host_->outlier_detector_,
                 putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));
@@ -700,7 +701,7 @@ TEST_F(RedisClientImplTest, MovedRedirection) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -709,7 +710,7 @@ TEST_F(RedisClientImplTest, MovedRedirection) {
   onConnected();
 
   Common::Redis::RespValue request2;
-  MockPoolCallbacks callbacks2;
+  MockClientCallbacks callbacks2;
   EXPECT_CALL(*encoder_, encode(Ref(request2), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);
@@ -728,8 +729,8 @@ TEST_F(RedisClientImplTest, MovedRedirection) {
     // The exact values of the hash slot and IP info are not important.
     response1->asString() = "MOVED 1111 10.1.2.3:4321";
     // Simulate redirection failure.
-    EXPECT_CALL(callbacks1, onRedirection(Ref(*response1))).WillOnce(Return(false));
-    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));
+    EXPECT_CALL(callbacks1, onRedirection_(Ref(response1), "10.1.2.3:4321", false))
+        .WillOnce(Return(false));
     EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));
     EXPECT_CALL(host_->outlier_detector_,
                 putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));
@@ -741,7 +742,8 @@ TEST_F(RedisClientImplTest, MovedRedirection) {
     response2->type(Common::Redis::RespType::Error);
     // The exact values of the hash slot and IP info are not important.
     response2->asString() = "MOVED 2222 10.1.2.4:4321";
-    EXPECT_CALL(callbacks2, onRedirection(Ref(*response2))).WillOnce(Return(true));
+    EXPECT_CALL(callbacks2, onRedirection_(Ref(response2), "10.1.2.4:4321", false))
+        .WillOnce(Return(true));
     EXPECT_CALL(*connect_or_op_timer_, disableTimer());
     EXPECT_CALL(host_->outlier_detector_,
                 putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));
@@ -756,13 +758,78 @@ TEST_F(RedisClientImplTest, MovedRedirection) {
   client_->close();
 }
 
+TEST_F(RedisClientImplTest, RedirectionFailure) {
+  InSequence s;
+
+  setup();
+
+  Common::Redis::RespValue request1;
+  MockClientCallbacks callbacks1;
+  EXPECT_CALL(*encoder_, encode(Ref(request1), _));
+  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
+  PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
+  EXPECT_NE(nullptr, handle1);
+
+  onConnected();
+
+  Common::Redis::RespValue request2;
+  MockClientCallbacks callbacks2;
+  EXPECT_CALL(*encoder_, encode(Ref(request2), _));
+  EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
+  PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);
+  EXPECT_NE(nullptr, handle2);
+
+  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value());
+  EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value());
+  EXPECT_EQ(2UL, host_->stats_.rq_total_.value());
+  EXPECT_EQ(2UL, host_->stats_.rq_active_.value());
+
+  Buffer::OwnedImpl fake_data;
+  EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void {
+    InSequence s;
+
+    // Test an error that looks like it might be a MOVED or ASK redirection error except for the
+    // first non-whitespace substring.
+    Common::Redis::RespValuePtr response1{new Common::Redis::RespValue()};
+    response1->type(Common::Redis::RespType::Error);
+    response1->asString() = "NOTMOVEDORASK 1111 1.1.1.1:1";
+
+    EXPECT_CALL(callbacks1, onResponse_(Ref(response1)));
+    EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _));
+    EXPECT_CALL(host_->outlier_detector_,
+                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));
+    callbacks_->onRespValue(std::move(response1));
+
+    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());
+    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());
+
+    // Test a truncated MOVED error response that cannot be parsed properly.
+    Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
+    response2->type(Common::Redis::RespType::Error);
+    response2->asString() = "MOVED 1111";
+    EXPECT_CALL(callbacks2, onResponse_(Ref(response2)));
+    EXPECT_CALL(*connect_or_op_timer_, disableTimer());
+    EXPECT_CALL(host_->outlier_detector_,
+                putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _));
+    callbacks_->onRespValue(std::move(response2));
+
+    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value());
+    EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value());
+  }));
+  upstream_read_filter_->onData(fake_data, false);
+
+  EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush));
+  EXPECT_CALL(*connect_or_op_timer_, disableTimer());
+  client_->close();
+}
+
 TEST_F(RedisClientImplTest, AskRedirectionNotEnabled) {
   InSequence s;
 
   setup(std::make_unique<ConfigImpl>(createConnPoolSettings(20, true, false)));
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -771,7 +838,7 @@ TEST_F(RedisClientImplTest, AskRedirectionNotEnabled) {
   onConnected();
 
   Common::Redis::RespValue request2;
-  MockPoolCallbacks callbacks2;
+  MockClientCallbacks callbacks2;
   EXPECT_CALL(*encoder_, encode(Ref(request2), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);
@@ -825,7 +892,7 @@ TEST_F(RedisClientImplTest, MovedRedirectionNotEnabled) {
   setup(std::make_unique<ConfigImpl>(createConnPoolSettings(20, true, false)));
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -834,7 +901,7 @@ TEST_F(RedisClientImplTest, MovedRedirectionNotEnabled) {
   onConnected();
 
   Common::Redis::RespValue request2;
-  MockPoolCallbacks callbacks2;
+  MockClientCallbacks callbacks2;
   EXPECT_CALL(*encoder_, encode(Ref(request2), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle2 = client_->makeRequest(request2, callbacks2);
@@ -889,7 +956,7 @@ TEST_F(RedisClientImplTest, RemoveFailedHealthCheck) {
   setup();
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
@@ -923,7 +990,7 @@ TEST_F(RedisClientImplTest, RemoveFailedHost) {
   client_->addConnectionCallbacks(connection_callbacks);
 
   Common::Redis::RespValue request1;
-  MockPoolCallbacks callbacks1;
+  MockClientCallbacks callbacks1;
   EXPECT_CALL(*encoder_, encode(Ref(request1), _));
   EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false));
   PoolRequest* handle1 = client_->makeRequest(request1, callbacks1);
diff --git a/test/extensions/filters/network/common/redis/mocks.cc b/test/extensions/filters/network/common/redis/mocks.cc
index 29d0a35725ea..0def373cffad 100644
--- a/test/extensions/filters/network/common/redis/mocks.cc
+++ b/test/extensions/filters/network/common/redis/mocks.cc
@@ -50,8 +50,8 @@ MockClient::~MockClient() = default;
 MockPoolRequest::MockPoolRequest() = default;
 MockPoolRequest::~MockPoolRequest() = default;
 
-MockPoolCallbacks::MockPoolCallbacks() = default;
-MockPoolCallbacks::~MockPoolCallbacks() = default;
+MockClientCallbacks::MockClientCallbacks() = default;
+MockClientCallbacks::~MockClientCallbacks() = default;
 
 } // namespace Client
 
diff --git a/test/extensions/filters/network/common/redis/mocks.h b/test/extensions/filters/network/common/redis/mocks.h
index a44e41ef63e9..c7e8505f8a82 100644
--- a/test/extensions/filters/network/common/redis/mocks.h
+++ b/test/extensions/filters/network/common/redis/mocks.h
@@ -45,6 +45,14 @@ class MockDecoder : public Common::Redis::Decoder {
 
 namespace Client {
 
+class MockPoolRequest : public PoolRequest {
+public:
+  MockPoolRequest();
+  ~MockPoolRequest() override;
+
+  MOCK_METHOD0(cancel, void());
+};
+
 class MockClient : public Client {
 public:
   MockClient();
@@ -68,34 +76,38 @@ class MockClient : public Client {
     }
   }
 
+  PoolRequest* makeRequest(const Common::Redis::RespValue& request,
+                           ClientCallbacks& callbacks) override {
+    client_callbacks_.push_back(&callbacks);
+    return makeRequest_(request, callbacks);
+  }
+
   MOCK_METHOD1(addConnectionCallbacks, void(Network::ConnectionCallbacks& callbacks));
   MOCK_METHOD0(active, bool());
   MOCK_METHOD0(close, void());
-  MOCK_METHOD2(makeRequest,
-               PoolRequest*(const Common::Redis::RespValue& request, PoolCallbacks& callbacks));
+  MOCK_METHOD2(makeRequest_,
+               PoolRequest*(const Common::Redis::RespValue& request, ClientCallbacks& callbacks));
   MOCK_METHOD1(initialize, void(const std::string& password));
 
   std::list<Network::ConnectionCallbacks*> callbacks_;
+  std::list<ClientCallbacks*> client_callbacks_;
 };
 
-class MockPoolRequest : public PoolRequest {
+class MockClientCallbacks : public ClientCallbacks {
 public:
-  MockPoolRequest();
-  ~MockPoolRequest() override;
-
-  MOCK_METHOD0(cancel, void());
-};
-
-class MockPoolCallbacks : public PoolCallbacks {
-public:
-  MockPoolCallbacks();
-  ~MockPoolCallbacks() override;
+  MockClientCallbacks();
+  ~MockClientCallbacks() override;
 
   void onResponse(Common::Redis::RespValuePtr&& value) override { onResponse_(value); }
+  bool onRedirection(Common::Redis::RespValuePtr&& value, const std::string& host_address,
+                     bool ask_redirection) override {
+    return onRedirection_(value, host_address, ask_redirection);
+  }
 
   MOCK_METHOD1(onResponse_, void(Common::Redis::RespValuePtr& value));
   MOCK_METHOD0(onFailure, void());
-  MOCK_METHOD1(onRedirection, bool(const Common::Redis::RespValue& value));
+  MOCK_METHOD3(onRedirection_, bool(Common::Redis::RespValuePtr& value,
+                                    const std::string& host_address, bool ask_redirection));
 };
 
 } // namespace Client
diff --git a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc
index 1099862c5417..2c4261e8f9c2 100644
--- a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc
+++ b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc
@@ -58,7 +58,7 @@ interface: org.apache.dubbo.demo.DemoService
         parseRouteConfigurationFromV2Yaml(yaml);
 
     NiceMock<Server::Configuration::MockFactoryContext> context;
-    SignleRouteMatcherImpl matcher(config, context);
+    SingleRouteMatcherImpl matcher(config, context);
     auto invo = std::make_shared<RpcInvocationImpl>();
     MessageMetadata metadata;
     metadata.setInvocationInfo(invo);
@@ -107,7 +107,7 @@ group: test
         parseRouteConfigurationFromV2Yaml(yaml);
 
     NiceMock<Server::Configuration::MockFactoryContext> context;
-    SignleRouteMatcherImpl matcher(config, context);
+    SingleRouteMatcherImpl matcher(config, context);
     auto invo = std::make_shared<RpcInvocationImpl>();
     MessageMetadata metadata;
     metadata.setInvocationInfo(invo);
@@ -143,7 +143,7 @@ version: 1.0.0
         parseRouteConfigurationFromV2Yaml(yaml);
 
     NiceMock<Server::Configuration::MockFactoryContext> context;
-    SignleRouteMatcherImpl matcher(config, context);
+    SingleRouteMatcherImpl matcher(config, context);
     auto invo = std::make_shared<RpcInvocationImpl>();
     MessageMetadata metadata;
     metadata.setInvocationInfo(invo);
@@ -184,7 +184,7 @@ group: HSF
         parseRouteConfigurationFromV2Yaml(yaml);
 
     NiceMock<Server::Configuration::MockFactoryContext> context;
-    SignleRouteMatcherImpl matcher(config, context);
+    SingleRouteMatcherImpl matcher(config, context);
     auto invo = std::make_shared<RpcInvocationImpl>();
     MessageMetadata metadata;
     metadata.setInvocationInfo(invo);
@@ -224,7 +224,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->setServiceName("org.apache.dubbo.demo.DemoService");
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ(nullptr, matcher.route(metadata, 0));
 
   invo->setMethodName("sub");
@@ -255,7 +255,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->setServiceName("org.apache.dubbo.demo.DemoService");
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ(nullptr, matcher.route(metadata, 0));
 
   invo->setMethodName("sub");
@@ -286,7 +286,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->setServiceName("org.apache.dubbo.demo.DemoService");
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ(nullptr, matcher.route(metadata, 0));
 
   invo->setMethodName("ab12test");
@@ -322,7 +322,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->setServiceName("org.apache.dubbo.demo.DemoService");
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ(nullptr, matcher.route(metadata, 0));
 
   invo->setMethodName("12test");
@@ -363,7 +363,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->addParameterValue(0, "150");
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ("user_service_dubbo_server", matcher.route(metadata, 0)->routeEntry()->clusterName());
 }
 
@@ -393,7 +393,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->addParameterValue(1, "user_id:94562");
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ("user_service_dubbo_server", matcher.route(metadata, 0)->routeEntry()->clusterName());
 }
 
@@ -430,7 +430,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->addHeaderReference(test_key, test_value);
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ(nullptr, matcher.route(metadata, 0));
 
   test_value = "456";
@@ -516,7 +516,7 @@ interface: org.apache.dubbo.demo.DemoService
 
   // There is no parameter information in metadata.
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
   EXPECT_EQ(nullptr, matcher.route(metadata, 0));
 
   // The parameter is empty.
@@ -575,7 +575,7 @@ interface: org.apache.dubbo.demo.DemoService
   invo->setServiceName("org.apache.dubbo.demo.DemoService");
 
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  SignleRouteMatcherImpl matcher(config, context);
+  SingleRouteMatcherImpl matcher(config, context);
 
   {
     invo->setMethodName("method1");
@@ -623,7 +623,7 @@ name: config
   envoy::config::filter::network::dubbo_proxy::v2alpha1::RouteConfiguration config =
       parseRouteConfigurationFromV2Yaml(yaml);
   NiceMock<Server::Configuration::MockFactoryContext> context;
-  EXPECT_THROW(SignleRouteMatcherImpl m(config, context), EnvoyException);
+  EXPECT_THROW(SingleRouteMatcherImpl m(config, context), EnvoyException);
 }
 
 } // namespace Router
diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc
index 0a66a3294ef3..3a8689c1c404 100644
--- a/test/extensions/filters/network/http_connection_manager/config_test.cc
+++ b/test/extensions/filters/network/http_connection_manager/config_test.cc
@@ -17,7 +17,6 @@
 
 using testing::_;
 using testing::An;
-using testing::ContainerEq;
 using testing::Return;
 using testing::ReturnRef;
 
@@ -34,6 +33,15 @@ parseHttpConnectionManagerFromV2Yaml(const std::string& yaml) {
   return http_connection_manager;
 }
 
+// TODO(yittg): always validate the config and split out all cases that use deprecated features.
+envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager
+parseHttpConnectionManagerFromV2YamlAndValidate(const std::string& yaml) {
+  envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager
+      http_connection_manager;
+  TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager);
+  return http_connection_manager;
+}
+
 class HttpConnectionManagerConfigTest : public testing::Test {
 public:
   HttpConnectionManagerConfigTest() {
@@ -156,8 +164,6 @@ stat_prefix: router
         cluster: cluster
 tracing:
   operation_name: ingress
-  request_headers_for_tags:
-  - foo
   max_path_tag_length: 128
 http_filters:
 - name: envoy.router
@@ -168,8 +174,6 @@ stat_prefix: router
                                      date_provider_, route_config_provider_manager_,
                                      scoped_routes_config_provider_manager_);
 
-  EXPECT_THAT(std::vector<Http::LowerCaseString>({Http::LowerCaseString("foo")}),
-              ContainerEq(config.tracingConfig()->request_headers_for_tags_));
   EXPECT_EQ(128, config.tracingConfig()->max_path_tag_length_);
   EXPECT_EQ(*context_.local_info_.address_, config.localAddress());
   EXPECT_EQ("foo", config.serverName());
@@ -178,6 +182,59 @@ stat_prefix: router
   EXPECT_EQ(5 * 60 * 1000, config.streamIdleTimeout().count());
 }
 
+TEST_F(HttpConnectionManagerConfigTest, TracingCustomTagsConfig) {
+  const std::string yaml_string = R"EOF(
+stat_prefix: router
+route_config:
+  name: local_route
+tracing:
+  custom_tags:
+  - tag: ltag
+    literal:
+      value: lvalue
+  - tag: etag
+    environment:
+      name: E_TAG
+  - tag: rtag
+    request_header:
+      name: X-Tag
+  - tag: mtag
+    metadata:
+      kind: { request: {} }
+      metadata_key:
+        key: com.bar.foo
+        path: [ { key: xx }, { key: yy } ]
+  )EOF";
+  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2YamlAndValidate(yaml_string),
+                                     context_, date_provider_, route_config_provider_manager_,
+                                     scoped_routes_config_provider_manager_);
+
+  std::vector<std::string> custom_tags{"ltag", "etag", "rtag", "mtag"};
+  const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_;
+  for (const std::string& custom_tag : custom_tags) {
+    EXPECT_NE(custom_tag_map.find(custom_tag), custom_tag_map.end());
+  }
+}
+
+TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(RequestHeaderForTagsConfig)) {
+  const std::string yaml_string = R"EOF(
+route_config:
+  name: local_route
+tracing:
+  request_headers_for_tags:
+  - foo
+  )EOF";
+  HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_,
+                                     date_provider_, route_config_provider_manager_,
+                                     scoped_routes_config_provider_manager_);
+
+  const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_;
+  const Tracing::RequestHeaderCustomTag* foo = dynamic_cast<const Tracing::RequestHeaderCustomTag*>(
+      custom_tag_map.find("foo")->second.get());
+  EXPECT_NE(foo, nullptr);
+  EXPECT_EQ(foo->tag(), "foo");
+}
+
 TEST_F(HttpConnectionManagerConfigTest, ListenerDirectionOutboundOverride) {
   const std::string yaml_string = R"EOF(
 stat_prefix: router
diff --git a/test/extensions/filters/network/redis_proxy/command_split_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_split_speed_test.cc
index 93927de22c41..1ee376e32c30 100644
--- a/test/extensions/filters/network/redis_proxy/command_split_speed_test.cc
+++ b/test/extensions/filters/network/redis_proxy/command_split_speed_test.cc
@@ -43,6 +43,23 @@ class CommandSplitSpeedTest {
 
     return request;
   }
+  using ValueOrPointer =
+      absl::variant<const Common::Redis::RespValue, Common::Redis::RespValueConstSharedPtr>;
+
+  void createShared(Common::Redis::RespValueSharedPtr request) {
+    for (uint64_t i = 1; i < request->asArray().size(); i += 2) {
+      auto single_set = std::make_shared<const Common::Redis::RespValue>(
+          request, Common::Redis::Utility::SetRequest::instance(), i, i + 2);
+    }
+  }
+
+  void createVariant(Common::Redis::RespValueSharedPtr request) {
+    for (uint64_t i = 1; i < request->asArray().size(); i += 2) {
+      Common::Redis::RespValue single_set(request, Common::Redis::Utility::SetRequest::instance(),
+                                          i, i + 1);
+      ValueOrPointer variant(single_set);
+    }
+  }
 
   void createLocalCompositeArray(Common::Redis::RespValueSharedPtr& request) {
     for (uint64_t i = 1; i < request->asArray().size(); i += 2) {
@@ -92,6 +109,28 @@ static void BM_Split_Copy(benchmark::State& state) {
 }
 BENCHMARK(BM_Split_Copy)->Ranges({{1, 100}, {64, 8 << 14}});
 
+static void BM_Split_CreateShared(benchmark::State& state) {
+  Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitSpeedTest context;
+  Envoy::Extensions::NetworkFilters::Common::Redis::RespValueSharedPtr request =
+      context.makeSharedBulkStringArray(state.range(0), 36, state.range(1));
+  for (auto _ : state) {
+    context.createShared(request);
+  }
+  state.counters["use_count"] = request.use_count();
+}
+BENCHMARK(BM_Split_CreateShared)->Ranges({{1, 100}, {64, 8 << 14}});
+
+static void BM_Split_CreateVariant(benchmark::State& state) {
+  Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitSpeedTest context;
+  Envoy::Extensions::NetworkFilters::Common::Redis::RespValueSharedPtr request =
+      context.makeSharedBulkStringArray(state.range(0), 36, state.range(1));
+  for (auto _ : state) {
+    context.createVariant(request);
+  }
+  state.counters["use_count"] = request.use_count();
+}
+BENCHMARK(BM_Split_CreateVariant)->Ranges({{1, 100}, {64, 8 << 14}});
+
 // Boilerplate main(), which discovers benchmarks in the same file and runs them.
 int main(int argc, char** argv) {
   benchmark::Initialize(&argc, argv);
diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc
index 7a0c03ad942e..b5bcc1c6fccb 100644
--- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc
+++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc
@@ -16,16 +16,11 @@
 #include "test/test_common/simulated_time_system.h"
 
 using testing::_;
-using testing::ByRef;
 using testing::DoAll;
-using testing::Eq;
 using testing::InSequence;
-using testing::Invoke;
 using testing::NiceMock;
 using testing::Property;
-using testing::Ref;
 using testing::Return;
-using testing::SaveArg;
 using testing::WithArg;
 
 namespace Envoy {
@@ -34,19 +29,10 @@ namespace NetworkFilters {
 namespace RedisProxy {
 namespace CommandSplitter {
 
-class PassthruRouter : public Router {
-public:
-  PassthruRouter(ConnPool::InstanceSharedPtr conn_pool)
-      : route_(std::make_shared<testing::NiceMock<MockRoute>>(conn_pool)) {}
-
-  RouteSharedPtr upstreamPool(std::string&) override { return route_; }
-
-private:
-  RouteSharedPtr route_;
-};
-
 class RedisCommandSplitterImplTest : public testing::Test {
 public:
+  RedisCommandSplitterImplTest() : RedisCommandSplitterImplTest(false) {}
+  RedisCommandSplitterImplTest(bool latency_in_micros) : latency_in_micros_(latency_in_micros) {}
   void makeBulkStringArray(Common::Redis::RespValue& value,
                            const std::vector<std::string>& strings) {
     std::vector<Common::Redis::RespValue> values(strings.size());
@@ -59,11 +45,21 @@ class RedisCommandSplitterImplTest : public testing::Test {
     value.asArray().swap(values);
   }
 
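+  // Attach a mirror policy to the mock route so requests are also sent to mirror_conn_pool_.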
+  void setupMirrorPolicy() {
+    auto mirror_policy = std::make_shared<NiceMock<MockMirrorPolicy>>(mirror_conn_pool_shared_ptr_);
+    route_->policies_.push_back(mirror_policy);
+  }
+
+  const bool latency_in_micros_;
   ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()};
+  ConnPool::MockInstance* mirror_conn_pool_{new ConnPool::MockInstance()};
+  ConnPool::InstanceSharedPtr mirror_conn_pool_shared_ptr_{mirror_conn_pool_};
+  std::shared_ptr<NiceMock<MockRoute>> route_{
+      new NiceMock<MockRoute>(ConnPool::InstanceSharedPtr{conn_pool_})};
   NiceMock<Stats::MockIsolatedStatsStore> store_;
   Event::SimulatedTimeSystem time_system_;
-  InstanceImpl splitter_{std::make_unique<PassthruRouter>(ConnPool::InstanceSharedPtr{conn_pool_}),
-                         store_, "redis.foo.", time_system_, false};
+  InstanceImpl splitter_{std::make_unique<NiceMock<MockRouter>>(route_), store_, "redis.foo.",
+                         time_system_, latency_in_micros_};
   MockSplitCallbacks callbacks_;
   SplitRequestPtr handle_;
 };
@@ -153,13 +149,29 @@ TEST_F(RedisCommandSplitterImplTest, UnsupportedCommand) {
   EXPECT_EQ(1UL, store_.counter("redis.foo.splitter.unsupported_command").value());
 }
 
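+// Matches a ConnPool::RespVariant holding a RespValueConstSharedPtr whose pointee equals rhs.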
+MATCHER_P(RespVariantEq, rhs, "RespVariant should be equal") {
+  const ConnPool::RespVariant& obj = arg;
+  EXPECT_EQ(obj.index(), 1);
+  EXPECT_EQ(*(absl::get<Common::Redis::RespValueConstSharedPtr>(obj)), rhs);
+  return true;
+}
+
 class RedisSingleServerRequestTest : public RedisCommandSplitterImplTest,
                                      public testing::WithParamInterface<std::string> {
 public:
-  void makeRequest(const std::string& hash_key, Common::Redis::RespValuePtr&& request) {
+  RedisSingleServerRequestTest() : RedisSingleServerRequestTest(false) {}
+  RedisSingleServerRequestTest(bool latency_in_micros)
+      : RedisCommandSplitterImplTest(latency_in_micros) {}
+  void makeRequest(const std::string& hash_key, Common::Redis::RespValuePtr&& request,
+                   bool mirrored = false) {
     EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));
-    EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(*request), _))
+    EXPECT_CALL(*conn_pool_, makeRequest_(hash_key, RespVariantEq(*request), _))
         .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));
+    if (mirrored) {
+      EXPECT_CALL(*mirror_conn_pool_, makeRequest_(hash_key, RespVariantEq(*request), _))
+          .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&mirror_pool_callbacks_)),
+                          Return(&mirror_pool_request_)));
+    }
     handle_ = splitter_.makeRequest(std::move(request), callbacks_);
   }
 
@@ -171,15 +183,23 @@ class RedisSingleServerRequestTest : public RedisCommandSplitterImplTest,
     pool_callbacks_->onFailure();
   }
 
-  void respond() {
+  void respond(bool mirrored = false) {
     Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
     Common::Redis::RespValue* response1_ptr = response1.get();
-    EXPECT_CALL(callbacks_, onResponse_(PointeesEq(response1_ptr)));
-    pool_callbacks_->onResponse(std::move(response1));
+    if (mirrored) {
+      // Expect a no-op: responses on the mirrored path are not propagated to the downstream
+      // callbacks.
+      mirror_pool_callbacks_->onResponse(std::move(response1));
+    } else {
+      EXPECT_CALL(callbacks_, onResponse_(PointeesEq(response1_ptr)));
+      pool_callbacks_->onResponse(std::move(response1));
+    }
   }
 
-  Common::Redis::Client::PoolCallbacks* pool_callbacks_;
+  ConnPool::PoolCallbacks* pool_callbacks_;
   Common::Redis::Client::MockPoolRequest pool_request_;
+
+  ConnPool::PoolCallbacks* mirror_pool_callbacks_;
+  Common::Redis::Client::MockPoolRequest mirror_pool_request_;
 };
 
 TEST_P(RedisSingleServerRequestTest, Success) {
@@ -206,6 +226,61 @@ TEST_P(RedisSingleServerRequestTest, Success) {
             store_.counter(fmt::format("redis.foo.command.{}.success", lower_command)).value());
 };
 
+TEST_P(RedisSingleServerRequestTest, Mirrored) {
+  InSequence s;
+
+  setupMirrorPolicy();
+
+  ToLowerTable table;
+  std::string lower_command(GetParam());
+  table.toLowerCase(lower_command);
+
+  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
+  makeBulkStringArray(*request, {GetParam(), "hello"});
+  makeRequest("hello", std::move(request), true);
+  EXPECT_NE(nullptr, handle_);
+
+  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
+  EXPECT_CALL(store_, deliverHistogramToSinks(
+                          Property(&Stats::Metric::name,
+                                   fmt::format("redis.foo.command.{}.latency", lower_command)),
+                          10));
+  respond();
+  respond(true);
+
+  EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value());
+  EXPECT_EQ(1UL,
+            store_.counter(fmt::format("redis.foo.command.{}.success", lower_command)).value());
+};
+
+TEST_P(RedisSingleServerRequestTest, MirroredFailed) {
+  InSequence s;
+
+  setupMirrorPolicy();
+
+  ToLowerTable table;
+  std::string lower_command(GetParam());
+  table.toLowerCase(lower_command);
+
+  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
+  makeBulkStringArray(*request, {GetParam(), "hello"});
+  makeRequest("hello", std::move(request), true);
+  EXPECT_NE(nullptr, handle_);
+
+  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
+  EXPECT_CALL(store_, deliverHistogramToSinks(
+                          Property(&Stats::Metric::name,
+                                   fmt::format("redis.foo.command.{}.latency", lower_command)),
+                          10));
+  // A mirrored request failure should not result in a main path failure.
+  mirror_pool_callbacks_->onFailure();
+  respond();
+
+  EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value());
+  EXPECT_EQ(1UL,
+            store_.counter(fmt::format("redis.foo.command.{}.success", lower_command)).value());
+};
+
 TEST_P(RedisSingleServerRequestTest, SuccessMultipleArgs) {
   InSequence s;
 
@@ -271,7 +346,8 @@ TEST_P(RedisSingleServerRequestTest, NoUpstream) {
   EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));
   Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
   makeBulkStringArray(*request, {GetParam(), "hello"});
-  EXPECT_CALL(*conn_pool_, makeRequest("hello", Ref(*request), _)).WillOnce(Return(nullptr));
+  EXPECT_CALL(*conn_pool_, makeRequest_("hello", RespVariantEq(*request), _))
+      .WillOnce(Return(nullptr));
 
   Common::Redis::RespValue response;
   response.type(Common::Redis::RespType::Error);
@@ -378,7 +454,8 @@ TEST_F(RedisSingleServerRequestTest, EvalNoUpstream) {
   EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));
   Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
   makeBulkStringArray(*request, {"eval", "return {ARGV[1]}", "1", "key", "arg"});
-  EXPECT_CALL(*conn_pool_, makeRequest("key", Ref(*request), _)).WillOnce(Return(nullptr));
+  EXPECT_CALL(*conn_pool_, makeRequest_("key", RespVariantEq(*request), _))
+      .WillOnce(Return(nullptr));
 
   Common::Redis::RespValue response;
   response.type(Common::Redis::RespType::Error);
@@ -391,208 +468,9 @@ TEST_F(RedisSingleServerRequestTest, EvalNoUpstream) {
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.eval.error").value());
 };
 
-TEST_F(RedisSingleServerRequestTest, MovedRedirectionSuccess) {
-  InSequence s;
-
-  Common::Redis::Client::MockPoolRequest pool_request2;
-  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
-  makeBulkStringArray(*request, {"get", "foo"});
-  makeRequest("foo", std::move(request));
-  EXPECT_NE(nullptr, handle_);
-
-  Common::Redis::RespValue moved_response;
-  moved_response.type(Common::Redis::RespType::Error);
-  moved_response.asString() = "MOVED 1111 10.1.2.3:4000";
-  std::string host_address;
-  Common::Redis::RespValue request_copy;
-  EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_)))
-      .WillOnce(
-          DoAll(SaveArg<0>(&host_address), SaveArg<1>(&request_copy), Return(&pool_request2)));
-  EXPECT_CALL(*conn_pool_, onRedirection());
-  EXPECT_TRUE(pool_callbacks_->onRedirection(moved_response));
-  EXPECT_EQ(host_address, "10.1.2.3:4000");
-  EXPECT_EQ(request_copy.type(), Common::Redis::RespType::Array);
-  EXPECT_EQ(request_copy.asArray().size(), 2);
-  EXPECT_EQ(request_copy.asArray()[0].type(), Common::Redis::RespType::BulkString);
-  EXPECT_EQ(request_copy.asArray()[0].asString(), "get");
-  EXPECT_EQ(request_copy.asArray()[1].type(), Common::Redis::RespType::BulkString);
-  EXPECT_EQ(request_copy.asArray()[1].asString(), "foo");
-
-  respond();
-};
-
-TEST_F(RedisSingleServerRequestTest, MovedRedirectionFailure) {
-  InSequence s;
-
-  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
-  makeBulkStringArray(*request, {"get", "foo"});
-  makeRequest("foo", std::move(request));
-  EXPECT_NE(nullptr, handle_);
-
-  // Test a truncated MOVED error response that cannot be parsed properly.
-  Common::Redis::RespValue moved_response;
-  moved_response.type(Common::Redis::RespType::Error);
-  moved_response.asString() = "MOVED 1111";
-  EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response));
-  moved_response.type(Common::Redis::RespType::Integer);
-  moved_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response));
-
-  // Test an upstream error preventing the request from being sent.
-  moved_response.type(Common::Redis::RespType::Error);
-  moved_response.asString() = "MOVED 1111 10.1.2.3:4000";
-  std::string host_address;
-  Common::Redis::RespValue request_copy;
-  EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)).WillOnce(Return(nullptr));
-  EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response));
-
-  respond();
-};
-
-TEST_F(RedisSingleServerRequestTest, RedirectionFailure) {
-  InSequence s;
-
-  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
-  makeBulkStringArray(*request, {"get", "foo"});
-  makeRequest("foo", std::move(request));
-  EXPECT_NE(nullptr, handle_);
-
-  // Test an error that looks like it might be a MOVED or ASK redirection error except for the first
-  // non-whitespace substring.
-  Common::Redis::RespValue moved_response;
-  moved_response.type(Common::Redis::RespType::Error);
-  moved_response.asString() = "NOTMOVEDORASK 1111 1.1.1.1:1";
-  EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response));
-  moved_response.type(Common::Redis::RespType::Integer);
-  moved_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response));
-
-  respond();
-};
-
-TEST_F(RedisSingleServerRequestTest, AskRedirectionSuccess) {
-  InSequence s;
-
-  Common::Redis::Client::MockPoolRequest pool_request2, pool_request3;
-  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
-  makeBulkStringArray(*request, {"get", "foo"});
-  makeRequest("foo", std::move(request));
-  EXPECT_NE(nullptr, handle_);
-
-  Common::Redis::RespValue ask_response;
-  ask_response.type(Common::Redis::RespType::Error);
-  ask_response.asString() = "ASK 1111 10.1.2.3:4000";
-  EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _))
-      .WillOnce(
-          Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request,
-                     Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-            // Verify that the request has been properly prepended with an "asking" command.
-            std::vector<std::string> commands = {"asking"};
-            EXPECT_EQ(host_address, "10.1.2.3:4000");
-            EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-            EXPECT_EQ(request.asArray().size(), commands.size());
-            for (unsigned int i = 0; i < commands.size(); i++) {
-              EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[i].asString(), commands[i]);
-            }
-            return &pool_request2;
-          }));
-  EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_)))
-      .WillOnce(
-          Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request,
-                     Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-            std::vector<std::string> commands = {"get", "foo"};
-            EXPECT_EQ(host_address, "10.1.2.3:4000");
-            EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-            EXPECT_EQ(request.asArray().size(), commands.size());
-            for (unsigned int i = 0; i < commands.size(); i++) {
-              EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[i].asString(), commands[i]);
-            }
-            return &pool_request3;
-          }));
-  EXPECT_CALL(*conn_pool_, onRedirection());
-  EXPECT_TRUE(pool_callbacks_->onRedirection(ask_response));
-  respond();
-};
-
-TEST_F(RedisSingleServerRequestTest, AskRedirectionFailure) {
-  InSequence s;
-
-  Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
-  makeBulkStringArray(*request, {"get", "foo"});
-  makeRequest("foo", std::move(request));
-  EXPECT_NE(nullptr, handle_);
-
-  Common::Redis::RespValue ask_response;
-
-  // Test a truncated ASK error response that cannot be parsed properly.
-  ask_response.type(Common::Redis::RespType::Error);
-  ask_response.asString() = "ASK 1111";
-  EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response));
-  ask_response.type(Common::Redis::RespType::Integer);
-  ask_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response));
-
-  // Test an upstream error from trying to send an "asking" command upstream.
-  ask_response.type(Common::Redis::RespType::Error);
-  ask_response.asString() = "ASK 1111 10.1.2.3:4000";
-  EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _))
-      .WillOnce(
-          Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request,
-                     Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-            // Verify that the request has been properly prepended with an "asking" command.
-            std::vector<std::string> commands = {"asking"};
-            EXPECT_EQ(host_address, "10.1.2.3:4000");
-            EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-            EXPECT_EQ(request.asArray().size(), commands.size());
-            for (unsigned int i = 0; i < commands.size(); i++) {
-              EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[i].asString(), commands[i]);
-            }
-            return nullptr;
-          }));
-  EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response));
-
-  // Test an upstream error from trying to send the original request after the "asking" command is
-  // sent successfully.
-  Common::Redis::Client::MockPoolRequest pool_request;
-  EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _))
-      .WillOnce(
-          Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request,
-                     Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-            // Verify that the request has been properly prepended with an "asking" command.
-            std::vector<std::string> commands = {"asking"};
-            EXPECT_EQ(host_address, "10.1.2.3:4000");
-            EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-            EXPECT_EQ(request.asArray().size(), commands.size());
-            for (unsigned int i = 0; i < commands.size(); i++) {
-              EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[i].asString(), commands[i]);
-            }
-            return &pool_request;
-          }));
-  EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_)))
-      .WillOnce(
-          Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request,
-                     Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-            std::vector<std::string> commands = {"get", "foo"};
-            EXPECT_EQ(host_address, "10.1.2.3:4000");
-            EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-            EXPECT_EQ(request.asArray().size(), commands.size());
-            for (unsigned int i = 0; i < commands.size(); i++) {
-              EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[i].asString(), commands[i]);
-            }
-            return nullptr;
-          }));
-  EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response));
-
-  respond();
-};
-
 MATCHER_P(CompositeArrayEq, rhs, "CompositeArray should be equal") {
-  const Common::Redis::RespValue& lhs = arg;
+  const ConnPool::RespVariant& obj = arg;
+  const auto& lhs = absl::get<const Common::Redis::RespValue>(obj);
   EXPECT_TRUE(lhs.type() == Common::Redis::RespType::CompositeArray);
   EXPECT_EQ(lhs.asCompositeArray().size(), rhs.size());
   std::vector<std::string> array;
@@ -603,42 +481,75 @@ MATCHER_P(CompositeArrayEq, rhs, "CompositeArray should be equal") {
   return true;
 }
 
-class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest {
+class FragmentedRequestCommandHandlerTest : public RedisCommandSplitterImplTest {
 public:
-  void setup(uint32_t num_gets, const std::list<uint64_t>& null_handle_indexes) {
-    std::vector<std::string> request_strings = {"mget"};
-    for (uint32_t i = 0; i < num_gets; i++) {
-      request_strings.push_back(std::to_string(i));
-    }
+  void makeRequest(std::vector<std::string>& request_strings,
+                   const std::list<uint64_t>& null_handle_indexes, bool mirrored) {
+    uint32_t num_gets = expected_requests_.size();
 
     Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
     makeBulkStringArray(*request, request_strings);
 
-    expected_requests_.reserve(num_gets);
     pool_callbacks_.resize(num_gets);
+    mirror_pool_callbacks_.resize(num_gets);
     std::vector<Common::Redis::Client::MockPoolRequest> tmp_pool_requests(num_gets);
     pool_requests_.swap(tmp_pool_requests);
+    std::vector<Common::Redis::Client::MockPoolRequest> tmp_mirrored_pool_requests(num_gets);
+    mirror_pool_requests_.swap(tmp_mirrored_pool_requests);
 
     EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));
 
     for (uint32_t i = 0; i < num_gets; i++) {
-      expected_requests_.push_back({"get", std::to_string(i)});
       Common::Redis::Client::PoolRequest* request_to_use = nullptr;
       if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) ==
           null_handle_indexes.end()) {
         request_to_use = &pool_requests_[i];
       }
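+      // The mirror path gets its own pool request unless the index is a null handle.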
+      Common::Redis::Client::PoolRequest* mirror_request_to_use = nullptr;
+      if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) ==
+          null_handle_indexes.end()) {
+        mirror_request_to_use = &mirror_pool_requests_[i];
+      }
       EXPECT_CALL(*conn_pool_,
-                  makeRequest(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _))
+                  makeRequest_(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _))
           .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use)));
+      if (mirrored) {
+        EXPECT_CALL(*mirror_conn_pool_,
+                    makeRequest_(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _))
+            .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&mirror_pool_callbacks_[i])),
+                            Return(mirror_request_to_use)));
+      }
     }
 
     handle_ = splitter_.makeRequest(std::move(request), callbacks_);
   }
 
   std::vector<std::vector<std::string>> expected_requests_;
-  std::vector<Common::Redis::Client::PoolCallbacks*> pool_callbacks_;
+  std::vector<ConnPool::PoolCallbacks*> pool_callbacks_;
   std::vector<Common::Redis::Client::MockPoolRequest> pool_requests_;
+  std::vector<ConnPool::PoolCallbacks*> mirror_pool_callbacks_;
+  std::vector<Common::Redis::Client::MockPoolRequest> mirror_pool_requests_;
+};
+
+class RedisMGETCommandHandlerTest : public FragmentedRequestCommandHandlerTest {
+public:
+  void setup(uint32_t num_gets, const std::list<uint64_t>& null_handle_indexes,
+             bool mirrored = false) {
+    expected_requests_.reserve(num_gets);
+    std::vector<std::string> request_strings = {"mget"};
+    for (uint32_t i = 0; i < num_gets; i++) {
+      request_strings.push_back(std::to_string(i));
+      expected_requests_.push_back({"get", std::to_string(i)});
+    }
+    makeRequest(request_strings, null_handle_indexes, mirrored);
+  }
+
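+  // Convenience helper: builds a BulkString response holding the given result.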
+  Common::Redis::RespValuePtr response(const std::string& result) {
+    Common::Redis::RespValuePtr response = std::make_unique<Common::Redis::RespValue>();
+    response->type(Common::Redis::RespType::BulkString);
+    response->asString() = result;
+    return response;
+  }
 };
 
 TEST_F(RedisMGETCommandHandlerTest, Normal) {
@@ -656,19 +567,43 @@ TEST_F(RedisMGETCommandHandlerTest, Normal) {
   elements[1].asString() = "5";
   expected_response.asArray().swap(elements);
 
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::BulkString);
-  response2->asString() = "5";
-  pool_callbacks_[1]->onResponse(std::move(response2));
+  pool_callbacks_[1]->onResponse(response("5"));
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::BulkString);
-  response1->asString() = "response";
   time_system_.setMonotonicTime(std::chrono::milliseconds(10));
   EXPECT_CALL(store_, deliverHistogramToSinks(
                           Property(&Stats::Metric::name, "redis.foo.command.mget.latency"), 10));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
+  pool_callbacks_[0]->onResponse(response("response"));
+
+  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.total").value());
+  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.success").value());
+};
+
+TEST_F(RedisMGETCommandHandlerTest, Mirrored) {
+  InSequence s;
+
+  setupMirrorPolicy();
+  setup(2, {}, true);
+  EXPECT_NE(nullptr, handle_);
+
+  Common::Redis::RespValue expected_response;
+  expected_response.type(Common::Redis::RespType::Array);
+  std::vector<Common::Redis::RespValue> elements(2);
+  elements[0].type(Common::Redis::RespType::BulkString);
+  elements[0].asString() = "response";
+  elements[1].type(Common::Redis::RespType::BulkString);
+  elements[1].asString() = "5";
+  expected_response.asArray().swap(elements);
+
+  pool_callbacks_[1]->onResponse(response("5"));
+  mirror_pool_callbacks_[1]->onResponse(response("5"));
+
+  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
+  EXPECT_CALL(store_, deliverHistogramToSinks(
+                          Property(&Stats::Metric::name, "redis.foo.command.mget.latency"), 10));
+  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
+  pool_callbacks_[0]->onResponse(response("response"));
+  mirror_pool_callbacks_[0]->onResponse(response("response"));
 
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.total").value());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.success").value());
@@ -690,11 +625,8 @@ TEST_F(RedisMGETCommandHandlerTest, NormalWithNull) {
   Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
   pool_callbacks_[1]->onResponse(std::move(response2));
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::BulkString);
-  response1->asString() = "response";
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
+  pool_callbacks_[0]->onResponse(response("response"));
 };
 
 TEST_F(RedisMGETCommandHandlerTest, NoUpstreamHostForAll) {
@@ -754,14 +686,11 @@ TEST_F(RedisMGETCommandHandlerTest, Failure) {
 
   pool_callbacks_[1]->onFailure();
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::BulkString);
-  response1->asString() = "response";
   time_system_.setMonotonicTime(std::chrono::milliseconds(5));
   EXPECT_CALL(store_, deliverHistogramToSinks(
                           Property(&Stats::Metric::name, "redis.foo.command.mget.latency"), 5));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
+  pool_callbacks_[0]->onResponse(response("response"));
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.total").value());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.error").value());
 };
@@ -806,224 +735,74 @@ TEST_F(RedisMGETCommandHandlerTest, Cancel) {
   handle_->cancel();
 };
 
-TEST_F(RedisMGETCommandHandlerTest, NormalWithMovedRedirection) {
-  InSequence s;
+class RedisMSETCommandHandlerTest : public FragmentedRequestCommandHandlerTest {
+public:
+  void setup(uint32_t num_sets, const std::list<uint64_t>& null_handle_indexes,
+             bool mirrored = false) {
 
-  setup(2, {});
-  EXPECT_NE(nullptr, handle_);
+    expected_requests_.reserve(num_sets);
+    std::vector<std::string> request_strings = {"mset"};
+    for (uint32_t i = 0; i < num_sets; i++) {
+      // key
+      request_strings.push_back(std::to_string(i));
+      // value
+      request_strings.push_back(std::to_string(i));
 
-  // Test with a non-error response.
-  Common::Redis::RespValue bad_moved_response;
-  bad_moved_response.type(Common::Redis::RespType::Integer);
-  bad_moved_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_moved_response));
-
-  // Test with a valid MOVED response.
-  Common::Redis::RespValue moved_response;
-  moved_response.type(Common::Redis::RespType::Error);
-  moved_response.asString() = "MOVED 1234 192.168.0.1:5000"; // Exact values are not important.
-
-  // Test with simulated upstream failures. This exercises code in
-  // FragmentedRequest::onChildRedirection() common to MGET, MSET, and SplitKeysSumResult commands.
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_,
-                makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                  Ref(*pool_callbacks_[i])))
-        .WillOnce(Return(nullptr));
-    EXPECT_FALSE(pool_callbacks_[i]->onRedirection(moved_response));
+      expected_requests_.push_back({"set", std::to_string(i), std::to_string(i)});
+    }
+    makeRequest(request_strings, null_handle_indexes, mirrored);
   }
 
-  // Test "successful" redirection.
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_,
-                makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                  Ref(*pool_callbacks_[i])))
-        .WillOnce(Return(&pool_requests_[i]));
-    EXPECT_CALL(*conn_pool_, onRedirection());
-    EXPECT_TRUE(pool_callbacks_[i]->onRedirection(moved_response));
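+  // Convenience helper: builds a SimpleString "OK" response.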
+  Common::Redis::RespValuePtr okResponse() {
+    Common::Redis::RespValuePtr response = std::make_unique<Common::Redis::RespValue>();
+    response->type(Common::Redis::RespType::SimpleString);
+    response->asString() = Response::get().OK;
+    return response;
   }
-
-  Common::Redis::RespValue expected_response;
-  expected_response.type(Common::Redis::RespType::Array);
-  std::vector<Common::Redis::RespValue> elements(2);
-  elements[0].type(Common::Redis::RespType::BulkString);
-  elements[0].asString() = "response";
-  elements[1].type(Common::Redis::RespType::BulkString);
-  elements[1].asString() = "5";
-  expected_response.asArray().swap(elements);
-
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::BulkString);
-  response2->asString() = "5";
-  pool_callbacks_[1]->onResponse(std::move(response2));
-
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::BulkString);
-  response1->asString() = "response";
-  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
-  EXPECT_CALL(store_, deliverHistogramToSinks(
-                          Property(&Stats::Metric::name, "redis.foo.command.mget.latency"), 10));
-  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
-
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.total").value());
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.success").value());
 };
 
-TEST_F(RedisMGETCommandHandlerTest, NormalWithAskRedirection) {
+TEST_F(RedisMSETCommandHandlerTest, Normal) {
   InSequence s;
 
   setup(2, {});
   EXPECT_NE(nullptr, handle_);
 
-  // Test with an non-error response.
-  Common::Redis::RespValue bad_ask_response;
-  bad_ask_response.type(Common::Redis::RespType::Integer);
-  bad_ask_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_ask_response));
-
-  // Test with a valid ASK response.
-  Common::Redis::RespValue ask_response;
-  ask_response.type(Common::Redis::RespType::Error);
-  ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important.
-  Common::Redis::Client::MockPoolRequest dummy_poolrequest;
-
-  // Test redirection with simulated upstream failures. This exercises code in
-  // FragmentedRequest::onChildRedirection() common to MGET, MSET, and SplitKeysSumResult commands.
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _))
-        .WillOnce(Invoke(
-            [&](const std::string& host_address, const Common::Redis::RespValue& request,
-                Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-              EXPECT_EQ(host_address, "192.168.0.1:5000");
-              EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-              EXPECT_EQ(request.asArray().size(), 1);
-              EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[0].asString(), "asking");
-              return (i == 0 ? nullptr : &dummy_poolrequest);
-            }));
-    if (i == 1) {
-      EXPECT_CALL(*conn_pool_,
-                  makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                    Ref(*pool_callbacks_[i])))
-          .WillOnce(Return(i == 1 ? nullptr : &pool_requests_[i]));
-    }
-    EXPECT_FALSE(pool_callbacks_[i]->onRedirection(ask_response));
-  }
-
-  // Test "successful" redirection.
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _))
-        .WillOnce(Invoke(
-            [&](const std::string& host_address, const Common::Redis::RespValue& request,
-                Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-              EXPECT_EQ(host_address, "192.168.0.1:5000");
-              EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-              EXPECT_EQ(request.asArray().size(), 1);
-              EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[0].asString(), "asking");
-              return &dummy_poolrequest;
-            }));
-    EXPECT_CALL(*conn_pool_,
-                makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                  Ref(*pool_callbacks_[i])))
-        .WillOnce(Return(&pool_requests_[i]));
-    EXPECT_CALL(*conn_pool_, onRedirection());
-    EXPECT_TRUE(pool_callbacks_[i]->onRedirection(ask_response));
-  }
-
   Common::Redis::RespValue expected_response;
-  expected_response.type(Common::Redis::RespType::Array);
-  std::vector<Common::Redis::RespValue> elements(2);
-  elements[0].type(Common::Redis::RespType::BulkString);
-  elements[0].asString() = "response";
-  elements[1].type(Common::Redis::RespType::BulkString);
-  elements[1].asString() = "5";
-  expected_response.asArray().swap(elements);
+  expected_response.type(Common::Redis::RespType::SimpleString);
+  expected_response.asString() = Response::get().OK;
 
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::BulkString);
-  response2->asString() = "5";
-  pool_callbacks_[1]->onResponse(std::move(response2));
+  pool_callbacks_[1]->onResponse(okResponse());
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::BulkString);
-  response1->asString() = "response";
   time_system_.setMonotonicTime(std::chrono::milliseconds(10));
   EXPECT_CALL(store_, deliverHistogramToSinks(
-                          Property(&Stats::Metric::name, "redis.foo.command.mget.latency"), 10));
+                          Property(&Stats::Metric::name, "redis.foo.command.mset.latency"), 10));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
-
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.total").value());
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.success").value());
-};
+  pool_callbacks_[0]->onResponse(okResponse());
 
-class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest {
-public:
-  void setup(uint32_t num_sets, const std::list<uint64_t>& null_handle_indexes) {
-    std::vector<std::string> request_strings = {"mset"};
-    for (uint32_t i = 0; i < num_sets; i++) {
-      // key
-      request_strings.push_back(std::to_string(i));
-      // value
-      request_strings.push_back(std::to_string(i));
-    }
-
-    Common::Redis::RespValuePtr request{new Common::Redis::RespValue()};
-    makeBulkStringArray(*request, request_strings);
-
-    expected_requests_.reserve(num_sets);
-    pool_callbacks_.resize(num_sets);
-    std::vector<Common::Redis::Client::MockPoolRequest> tmp_pool_requests(num_sets);
-    pool_requests_.swap(tmp_pool_requests);
-
-    EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));
-
-    for (uint32_t i = 0; i < num_sets; i++) {
-      expected_requests_.push_back({"set", std::to_string(i), std::to_string(i)});
-      Common::Redis::Client::PoolRequest* request_to_use = nullptr;
-      if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) ==
-          null_handle_indexes.end()) {
-        request_to_use = &pool_requests_[i];
-      }
-      EXPECT_CALL(*conn_pool_,
-                  makeRequest(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _))
-          .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use)));
-    }
-
-    handle_ = splitter_.makeRequest(std::move(request), callbacks_);
-  }
-
-  std::vector<std::vector<std::string>> expected_requests_;
-  std::vector<Common::Redis::Client::PoolCallbacks*> pool_callbacks_;
-  std::vector<Common::Redis::Client::MockPoolRequest> pool_requests_;
+  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value());
+  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.success").value());
 };
 
-TEST_F(RedisMSETCommandHandlerTest, Normal) {
+TEST_F(RedisMSETCommandHandlerTest, Mirrored) {
   InSequence s;
 
-  setup(2, {});
+  setupMirrorPolicy();
+  setup(2, {}, true);
   EXPECT_NE(nullptr, handle_);
 
   Common::Redis::RespValue expected_response;
   expected_response.type(Common::Redis::RespType::SimpleString);
   expected_response.asString() = Response::get().OK;
 
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::SimpleString);
-  response2->asString() = Response::get().OK;
-  pool_callbacks_[1]->onResponse(std::move(response2));
-
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::SimpleString);
-  response1->asString() = Response::get().OK;
+  pool_callbacks_[1]->onResponse(okResponse());
+  mirror_pool_callbacks_[1]->onResponse(okResponse());
 
   time_system_.setMonotonicTime(std::chrono::milliseconds(10));
   EXPECT_CALL(store_, deliverHistogramToSinks(
                           Property(&Stats::Metric::name, "redis.foo.command.mset.latency"), 10));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
+  pool_callbacks_[0]->onResponse(okResponse());
+  mirror_pool_callbacks_[0]->onResponse(okResponse());
 
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.success").value());
@@ -1053,11 +832,8 @@ TEST_F(RedisMSETCommandHandlerTest, NoUpstreamHostForOne) {
   expected_response.type(Common::Redis::RespType::Error);
   expected_response.asString() = "finished with 1 error(s)";
 
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::SimpleString);
-  response2->asString() = Response::get().OK;
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[1]->onResponse(std::move(response2));
+  pool_callbacks_[1]->onResponse(okResponse());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.error").value());
 };
@@ -1088,151 +864,27 @@ TEST_F(RedisMSETCommandHandlerTest, WrongNumberOfArgs) {
   EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.error").value());
 };
 
-TEST_F(RedisMSETCommandHandlerTest, NormalWithMovedRedirection) {
-  InSequence s;
-
-  setup(2, {});
-  EXPECT_NE(nullptr, handle_);
-
-  // Test with a non-error response.
-  Common::Redis::RespValue bad_moved_response;
-  bad_moved_response.type(Common::Redis::RespType::Integer);
-  bad_moved_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_moved_response));
-
-  // Test with a valid MOVED response.
-  Common::Redis::RespValue moved_response;
-  moved_response.type(Common::Redis::RespType::Error);
-  moved_response.asString() = "MOVED 1234 192.168.0.1:5000"; // Exact values are not important.
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_,
-                makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                  Ref(*pool_callbacks_[i])))
-        .WillOnce(Return(&pool_requests_[i]));
-    EXPECT_CALL(*conn_pool_, onRedirection());
-    EXPECT_TRUE(pool_callbacks_[i]->onRedirection(moved_response));
-  }
-
-  Common::Redis::RespValue expected_response;
-  expected_response.type(Common::Redis::RespType::SimpleString);
-  expected_response.asString() = Response::get().OK;
-
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::SimpleString);
-  response2->asString() = Response::get().OK;
-  pool_callbacks_[1]->onResponse(std::move(response2));
-
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::SimpleString);
-  response1->asString() = Response::get().OK;
-
-  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
-  EXPECT_CALL(store_, deliverHistogramToSinks(
-                          Property(&Stats::Metric::name, "redis.foo.command.mset.latency"), 10));
-  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
-
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value());
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.success").value());
-};
-
-TEST_F(RedisMSETCommandHandlerTest, NormalWithAskRedirection) {
-  InSequence s;
-
-  setup(2, {});
-  EXPECT_NE(nullptr, handle_);
-
-  // Test with a non-error response.
-  Common::Redis::RespValue bad_ask_response;
-  bad_ask_response.type(Common::Redis::RespType::Integer);
-  bad_ask_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_ask_response));
-
-  // Test with a valid ASK response.
-  Common::Redis::RespValue ask_response;
-  ask_response.type(Common::Redis::RespType::Error);
-  ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important.
-  Common::Redis::Client::MockPoolRequest dummy_poolrequest;
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _))
-        .WillOnce(Invoke(
-            [&](const std::string& host_address, const Common::Redis::RespValue& request,
-                Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-              EXPECT_EQ(host_address, "192.168.0.1:5000");
-              EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-              EXPECT_EQ(request.asArray().size(), 1);
-              EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[0].asString(), "asking");
-              return &dummy_poolrequest;
-            }));
-    EXPECT_CALL(*conn_pool_,
-                makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                  Ref(*pool_callbacks_[i])))
-        .WillOnce(Return(&pool_requests_[i]));
-    EXPECT_CALL(*conn_pool_, onRedirection());
-    EXPECT_TRUE(pool_callbacks_[i]->onRedirection(ask_response));
-  }
-
-  Common::Redis::RespValue expected_response;
-  expected_response.type(Common::Redis::RespType::SimpleString);
-  expected_response.asString() = Response::get().OK;
-
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::SimpleString);
-  response2->asString() = Response::get().OK;
-  pool_callbacks_[1]->onResponse(std::move(response2));
-
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::SimpleString);
-  response1->asString() = Response::get().OK;
-
-  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
-  EXPECT_CALL(store_, deliverHistogramToSinks(
-                          Property(&Stats::Metric::name, "redis.foo.command.mset.latency"), 10));
-  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
-
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value());
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.success").value());
-};
-
-class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest,
+class RedisSplitKeysSumResultHandlerTest : public FragmentedRequestCommandHandlerTest,
                                            public testing::WithParamInterface<std::string> {
 public:
-  void setup(uint32_t num_commands, const std::list<uint64_t>& null_handle_indexes) {
-    std::vector<std::string> request_strings = {GetParam()};
-    for (uint32_t i = 0; i < num_commands; i++) {
-      request_strings.push_back(std::to_string(i));
-    }
-
-    Common::Redis::RespValuePtr request(new Common::Redis::RespValue());
-    makeBulkStringArray(*request, request_strings);
+  void setup(uint32_t num_commands, const std::list<uint64_t>& null_handle_indexes,
+             bool mirrored = false) {
 
     expected_requests_.reserve(num_commands);
-    pool_callbacks_.resize(num_commands);
-    std::vector<Common::Redis::Client::MockPoolRequest> tmp_pool_requests(num_commands);
-    pool_requests_.swap(tmp_pool_requests);
-
-    EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));
-
+    std::vector<std::string> request_strings = {GetParam()};
     for (uint32_t i = 0; i < num_commands; i++) {
+      request_strings.push_back(std::to_string(i));
       expected_requests_.push_back({GetParam(), std::to_string(i)});
-      Common::Redis::Client::PoolRequest* request_to_use = nullptr;
-      if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) ==
-          null_handle_indexes.end()) {
-        request_to_use = &pool_requests_[i];
-      }
-      EXPECT_CALL(*conn_pool_,
-                  makeRequest(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _))
-          .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use)));
     }
-
-    handle_ = splitter_.makeRequest(std::move(request), callbacks_);
+    makeRequest(request_strings, null_handle_indexes, mirrored);
   }
 
-  std::vector<std::vector<std::string>> expected_requests_;
-  std::vector<Common::Redis::Client::PoolCallbacks*> pool_callbacks_;
-  std::vector<Common::Redis::Client::MockPoolRequest> pool_requests_;
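+  // Convenience helper: builds an Integer response with the given value.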
+  Common::Redis::RespValuePtr response(int64_t value) {
+    Common::Redis::RespValuePtr response = std::make_unique<Common::Redis::RespValue>();
+    response->type(Common::Redis::RespType::Integer);
+    response->asInteger() = value;
+    return response;
+  }
 };
 
 TEST_P(RedisSplitKeysSumResultHandlerTest, Normal) {
@@ -1245,173 +897,78 @@ TEST_P(RedisSplitKeysSumResultHandlerTest, Normal) {
   expected_response.type(Common::Redis::RespType::Integer);
   expected_response.asInteger() = 2;
 
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::Integer);
-  response2->asInteger() = 1;
-  pool_callbacks_[1]->onResponse(std::move(response2));
+  pool_callbacks_[1]->onResponse(response(1));
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::Integer);
-  response1->asInteger() = 1;
   time_system_.setMonotonicTime(std::chrono::milliseconds(10));
   EXPECT_CALL(
       store_,
       deliverHistogramToSinks(
           Property(&Stats::Metric::name, "redis.foo.command." + GetParam() + ".latency"), 10));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
+  pool_callbacks_[0]->onResponse(response(1));
 
   EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".total").value());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".success").value());
 };
 
-TEST_P(RedisSplitKeysSumResultHandlerTest, NormalOneZero) {
+TEST_P(RedisSplitKeysSumResultHandlerTest, Mirrored) {
   InSequence s;
 
-  setup(2, {});
+  setupMirrorPolicy();
+  setup(2, {}, true);
   EXPECT_NE(nullptr, handle_);
 
   Common::Redis::RespValue expected_response;
   expected_response.type(Common::Redis::RespType::Integer);
-  expected_response.asInteger() = 1;
+  expected_response.asInteger() = 2;
 
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::Integer);
-  response2->asInteger() = 0;
-  pool_callbacks_[1]->onResponse(std::move(response2));
+  pool_callbacks_[1]->onResponse(response(1));
+  mirror_pool_callbacks_[1]->onResponse(response(1));
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::Integer);
-  response1->asInteger() = 1;
+  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
+  EXPECT_CALL(
+      store_,
+      deliverHistogramToSinks(
+          Property(&Stats::Metric::name, "redis.foo.command." + GetParam() + ".latency"), 10));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
+  pool_callbacks_[0]->onResponse(response(1));
+  mirror_pool_callbacks_[0]->onResponse(response(1));
 
   EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".total").value());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".success").value());
 };
 
-TEST_P(RedisSplitKeysSumResultHandlerTest, NoUpstreamHostForAll) {
-  // No InSequence to avoid making setup() more complicated.
-
-  Common::Redis::RespValue expected_response;
-  expected_response.type(Common::Redis::RespType::Error);
-  expected_response.asString() = "finished with 2 error(s)";
-
-  EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  setup(2, {0, 1});
-  EXPECT_EQ(nullptr, handle_);
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".total").value());
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".error").value());
-};
-
-TEST_P(RedisSplitKeysSumResultHandlerTest, NormalWithMovedRedirection) {
+TEST_P(RedisSplitKeysSumResultHandlerTest, NormalOneZero) {
   InSequence s;
 
   setup(2, {});
   EXPECT_NE(nullptr, handle_);
 
-  // Test with a non-error response.
-  Common::Redis::RespValue bad_moved_response;
-  bad_moved_response.type(Common::Redis::RespType::Integer);
-  bad_moved_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_moved_response));
-
-  // Test with a valid MOVED response.
-  Common::Redis::RespValue moved_response;
-  moved_response.type(Common::Redis::RespType::Error);
-  moved_response.asString() = "MOVED 1234 192.168.0.1:5000"; // Exact values are not important.
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_,
-                makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                  Ref(*pool_callbacks_[i])))
-        .WillOnce(Return(&pool_requests_[i]));
-    EXPECT_CALL(*conn_pool_, onRedirection());
-    EXPECT_TRUE(pool_callbacks_[i]->onRedirection(moved_response));
-  }
-
   Common::Redis::RespValue expected_response;
   expected_response.type(Common::Redis::RespType::Integer);
-  expected_response.asInteger() = 2;
+  expected_response.asInteger() = 1;
 
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::Integer);
-  response2->asInteger() = 1;
-  pool_callbacks_[1]->onResponse(std::move(response2));
+  pool_callbacks_[1]->onResponse(response(0));
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::Integer);
-  response1->asInteger() = 1;
-  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
-  EXPECT_CALL(
-      store_,
-      deliverHistogramToSinks(
-          Property(&Stats::Metric::name, "redis.foo.command." + GetParam() + ".latency"), 10));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
+  pool_callbacks_[0]->onResponse(response(1));
 
   EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".total").value());
   EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".success").value());
 };
 
-TEST_P(RedisSplitKeysSumResultHandlerTest, NormalWithAskRedirection) {
-  InSequence s;
-
-  setup(2, {});
-  EXPECT_NE(nullptr, handle_);
-
-  // Test with a non-error response.
-  Common::Redis::RespValue bad_ask_response;
-  bad_ask_response.type(Common::Redis::RespType::Integer);
-  bad_ask_response.asInteger() = 1;
-  EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_ask_response));
-
-  // Test with a valid ASK response.
-  Common::Redis::RespValue ask_response;
-  ask_response.type(Common::Redis::RespType::Error);
-  ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important.
-  Common::Redis::Client::MockPoolRequest dummy_poolrequest;
-  for (unsigned int i = 0; i < 2; i++) {
-    EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _))
-        .WillOnce(Invoke(
-            [&](const std::string& host_address, const Common::Redis::RespValue& request,
-                Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* {
-              EXPECT_EQ(host_address, "192.168.0.1:5000");
-              EXPECT_TRUE(request.type() == Common::Redis::RespType::Array);
-              EXPECT_EQ(request.asArray().size(), 1);
-              EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString);
-              EXPECT_EQ(request.asArray()[0].asString(), "asking");
-              return &dummy_poolrequest;
-            }));
-    EXPECT_CALL(*conn_pool_,
-                makeRequestToHost(Eq("192.168.0.1:5000"), CompositeArrayEq(expected_requests_[i]),
-                                  Ref(*pool_callbacks_[i])))
-        .WillOnce(Return(&pool_requests_[i]));
-    EXPECT_CALL(*conn_pool_, onRedirection());
-    EXPECT_TRUE(pool_callbacks_[i]->onRedirection(ask_response));
-  }
+TEST_P(RedisSplitKeysSumResultHandlerTest, NoUpstreamHostForAll) {
+  // No InSequence to avoid making setup() more complicated.
 
   Common::Redis::RespValue expected_response;
-  expected_response.type(Common::Redis::RespType::Integer);
-  expected_response.asInteger() = 2;
-
-  Common::Redis::RespValuePtr response2(new Common::Redis::RespValue());
-  response2->type(Common::Redis::RespType::Integer);
-  response2->asInteger() = 1;
-  pool_callbacks_[1]->onResponse(std::move(response2));
+  expected_response.type(Common::Redis::RespType::Error);
+  expected_response.asString() = "finished with 2 error(s)";
 
-  Common::Redis::RespValuePtr response1(new Common::Redis::RespValue());
-  response1->type(Common::Redis::RespType::Integer);
-  response1->asInteger() = 1;
-  time_system_.setMonotonicTime(std::chrono::milliseconds(10));
-  EXPECT_CALL(
-      store_,
-      deliverHistogramToSinks(
-          Property(&Stats::Metric::name, "redis.foo.command." + GetParam() + ".latency"), 10));
   EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response)));
-  pool_callbacks_[0]->onResponse(std::move(response1));
-
+  setup(2, {0, 1});
+  EXPECT_EQ(nullptr, handle_);
   EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".total").value());
-  EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".success").value());
+  EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".error").value());
 };
 
 INSTANTIATE_TEST_SUITE_P(
@@ -1420,17 +977,7 @@ INSTANTIATE_TEST_SUITE_P(
 
 class RedisSingleServerRequestWithLatencyMicrosTest : public RedisSingleServerRequestTest {
 public:
-  void makeRequest(const std::string& hash_key, Common::Redis::RespValuePtr&& request) {
-    EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true));
-    EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(*request), _))
-        .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));
-    handle_ = splitter_.makeRequest(std::move(request), callbacks_);
-  }
-
-  ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()};
-  NiceMock<Stats::MockIsolatedStatsStore> store_;
-  InstanceImpl splitter_{std::make_unique<PassthruRouter>(ConnPool::InstanceSharedPtr{conn_pool_}),
-                         store_, "redis.foo.", time_system_, true};
+  RedisSingleServerRequestWithLatencyMicrosTest() : RedisSingleServerRequestTest(true) {}
 };
 
 TEST_P(RedisSingleServerRequestWithLatencyMicrosTest, Success) {
diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc
index 04e15b802dc8..221d6944aaad 100644
--- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc
+++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc
@@ -97,16 +97,49 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client
       client_ = new NiceMock<Common::Redis::Client::MockClient>();
       EXPECT_CALL(*this, create_(_)).WillOnce(Return(client_));
     }
-    Common::Redis::RespValue value;
-    Common::Redis::Client::MockPoolCallbacks callbacks;
+    Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
+    MockPoolCallbacks callbacks;
+    std::list<Common::Redis::Client::ClientCallbacks*> client_callbacks;
     Common::Redis::Client::MockPoolRequest active_request;
     EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())
         .WillRepeatedly(Return(test_address_));
-    EXPECT_CALL(*client_, makeRequest(Ref(value), Ref(callbacks)))
-        .WillOnce(Return(&active_request));
+    EXPECT_CALL(*client_, makeRequest_(Ref(*value), _))
+        .WillOnce(Invoke(
+            [&](const Common::Redis::RespValue&, Common::Redis::Client::ClientCallbacks& callbacks)
+                -> Common::Redis::Client::PoolRequest* {
+              client_callbacks.push_back(&callbacks);
+              return &active_request;
+            }));
     Common::Redis::Client::PoolRequest* request =
         conn_pool_->makeRequest(hash_key, value, callbacks);
-    EXPECT_EQ(&active_request, request);
+    EXPECT_NE(nullptr, request);
+    EXPECT_NE(nullptr, client_callbacks.back());
+
+    EXPECT_CALL(active_request, cancel());
+    request->cancel();
+  }
+
+  void makeRequest(Common::Redis::Client::MockClient* client,
+                   Common::Redis::RespValueSharedPtr& value, MockPoolCallbacks& callbacks,
+                   Common::Redis::Client::MockPoolRequest& active_request,
+                   bool create_client = true) {
+    EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))
+        .WillOnce(
+            Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {
+              EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key"));
+              EXPECT_EQ(context->metadataMatchCriteria(), nullptr);
+              EXPECT_EQ(context->downstreamConnection(), nullptr);
+              return this->cm_.thread_local_cluster_.lb_.host_;
+            }));
+    if (create_client) {
+      EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));
+    }
+    EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())
+        .WillRepeatedly(Return(this->test_address_));
+    EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));
+    Common::Redis::Client::PoolRequest* request =
+        this->conn_pool_->makeRequest("hash_key", value, callbacks);
+    EXPECT_NE(nullptr, request);
   }
 
   std::unordered_map<Upstream::HostConstSharedPtr, InstanceImpl::ThreadLocalActiveClientPtr>&
@@ -174,9 +207,9 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client
     read_policy_ = read_policy;
     setup();
 
-    Common::Redis::RespValue value;
+    Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
     Common::Redis::Client::MockPoolRequest auth_request, active_request, readonly_request;
-    Common::Redis::Client::MockPoolCallbacks callbacks;
+    MockPoolCallbacks callbacks;
     Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
 
     EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))
@@ -193,21 +226,44 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client
     EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));
     EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())
         .WillRepeatedly(Return(test_address_));
-    EXPECT_CALL(*client, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request));
+    EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));
     Common::Redis::Client::PoolRequest* request =
         conn_pool_->makeRequest("hash_key", value, callbacks);
-    EXPECT_EQ(&active_request, request);
+    EXPECT_NE(nullptr, request);
 
+    EXPECT_CALL(active_request, cancel());
+    EXPECT_CALL(callbacks, onFailure_());
     EXPECT_CALL(*client, close());
     tls_.shutdownThread();
   }
 
+  void respond(MockPoolCallbacks& callbacks, Common::Redis::Client::MockClient* client) {
+    EXPECT_CALL(callbacks, onResponse_(_));
+    client->client_callbacks_.back()->onResponse(std::make_unique<Common::Redis::RespValue>());
+    EXPECT_EQ(0,
+              conn_pool_->tls_->getTyped<InstanceImpl::ThreadLocalPool>().pending_requests_.size());
+  }
+
+  void verifyInvalidMoveResponse(Common::Redis::Client::MockClient* client,
+                                 const std::string& host_address, bool create_client) {
+    Common::Redis::RespValueSharedPtr request_value = std::make_shared<Common::Redis::RespValue>();
+    Common::Redis::Client::MockPoolRequest active_request;
+    MockPoolCallbacks callbacks;
+    makeRequest(client, request_value, callbacks, active_request, create_client);
+    Common::Redis::RespValuePtr moved_response{new Common::Redis::RespValue()};
+    moved_response->type(Common::Redis::RespType::Error);
+    moved_response->asString() = "MOVE 1111 " + host_address;
+    EXPECT_CALL(callbacks, onResponse_(Ref(moved_response)));
+    EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(moved_response),
+                                                                 host_address, false));
+  }
+
   MOCK_METHOD1(create_, Common::Redis::Client::Client*(Upstream::HostConstSharedPtr host));
 
   const std::string cluster_name_{"fake_cluster"};
   NiceMock<Upstream::MockClusterManager> cm_;
   NiceMock<ThreadLocal::MockInstance> tls_;
-  InstanceSharedPtr conn_pool_;
+  std::shared_ptr<InstanceImpl> conn_pool_;
   Upstream::ClusterUpdateCallbacks* update_callbacks_{};
   Common::Redis::Client::MockClient* client_{};
   Network::Address::InstanceConstSharedPtr test_address_;
@@ -226,9 +282,9 @@ TEST_F(RedisConnPoolImplTest, Basic) {
 
   setup();
 
-  Common::Redis::RespValue value;
+  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
   Common::Redis::Client::MockPoolRequest active_request;
-  Common::Redis::Client::MockPoolCallbacks callbacks;
+  MockPoolCallbacks callbacks;
   Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
 
   EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))
@@ -241,11 +297,73 @@ TEST_F(RedisConnPoolImplTest, Basic) {
   EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));
   EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())
       .WillRepeatedly(Return(test_address_));
-  EXPECT_CALL(*client, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request));
+  EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));
   Common::Redis::Client::PoolRequest* request =
       conn_pool_->makeRequest("hash_key", value, callbacks);
-  EXPECT_EQ(&active_request, request);
+  EXPECT_NE(nullptr, request);
 
+  EXPECT_CALL(active_request, cancel());
+  EXPECT_CALL(callbacks, onFailure_());
+  EXPECT_CALL(*client, close());
+  tls_.shutdownThread();
+};
+
+TEST_F(RedisConnPoolImplTest, BasicRespVariant) {
+  InSequence s;
+
+  setup();
+
+  Common::Redis::RespValue value;
+  Common::Redis::Client::MockPoolRequest active_request;
+  MockPoolCallbacks callbacks;
+  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
+
+  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))
+      .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {
+        EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key"));
+        EXPECT_EQ(context->metadataMatchCriteria(), nullptr);
+        EXPECT_EQ(context->downstreamConnection(), nullptr);
+        return cm_.thread_local_cluster_.lb_.host_;
+      }));
+  EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));
+  EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())
+      .WillRepeatedly(Return(test_address_));
+  EXPECT_CALL(*client, makeRequest_(Eq(value), _)).WillOnce(Return(&active_request));
+  Common::Redis::Client::PoolRequest* request =
+      conn_pool_->makeRequest("hash_key", ConnPool::RespVariant(value), callbacks);
+  EXPECT_NE(nullptr, request);
+
+  EXPECT_CALL(active_request, cancel());
+  EXPECT_CALL(callbacks, onFailure_());
+  EXPECT_CALL(*client, close());
+  tls_.shutdownThread();
+};
+
+TEST_F(RedisConnPoolImplTest, ClientRequestFailed) {
+  InSequence s;
+
+  setup();
+
+  Common::Redis::RespValue value;
+  MockPoolCallbacks callbacks;
+  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
+
+  EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_))
+      .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {
+        EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key"));
+        EXPECT_EQ(context->metadataMatchCriteria(), nullptr);
+        EXPECT_EQ(context->downstreamConnection(), nullptr);
+        return cm_.thread_local_cluster_.lb_.host_;
+      }));
+  EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));
+  EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())
+      .WillRepeatedly(Return(test_address_));
+  EXPECT_CALL(*client, makeRequest_(Eq(value), _)).WillOnce(Return(nullptr));
+  Common::Redis::Client::PoolRequest* request =
+      conn_pool_->makeRequest("hash_key", ConnPool::RespVariant(value), callbacks);
+
+  // The request should be null and the callback should not be invoked.
+  EXPECT_EQ(nullptr, request);
   EXPECT_CALL(*client, close());
   tls_.shutdownThread();
 };
@@ -270,8 +388,8 @@ TEST_F(RedisConnPoolImplTest, Hashtagging) {
 
   setup();
 
-  Common::Redis::RespValue value;
-  Common::Redis::Client::MockPoolCallbacks callbacks;
+  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
+  MockPoolCallbacks callbacks;
 
   auto expectHashKey = [](const std::string& s) {
     return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {
@@ -301,8 +419,8 @@ TEST_F(RedisConnPoolImplTest, HashtaggingNotEnabled) {
 
   setup(true, false); // Test with hashtagging not enabled.
 
-  Common::Redis::RespValue value;
-  Common::Redis::Client::MockPoolCallbacks callbacks;
+  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
+  MockPoolCallbacks callbacks;
 
   auto expectHashKey = [](const std::string& s) {
     return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr {
@@ -337,8 +455,8 @@ TEST_F(RedisConnPoolImplTest, NoClusterAtConstruction) {
 
   setup(false);
 
-  Common::Redis::RespValue value;
-  Common::Redis::Client::MockPoolCallbacks callbacks;
+  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
+  MockPoolCallbacks callbacks;
   Common::Redis::Client::PoolRequest* request =
       conn_pool_->makeRequest("hash_key", value, callbacks);
   EXPECT_EQ(nullptr, request);
@@ -384,12 +502,10 @@ TEST_F(RedisConnPoolImplTest, NoClusterAtConstruction) {
 // This test removes a single host from the ConnPool after learning about 2 hosts from the
 // associated load balancer.
 TEST_F(RedisConnPoolImplTest, HostRemove) {
-  InSequence s;
-
   setup();
 
-  Common::Redis::Client::MockPoolCallbacks callbacks;
-  Common::Redis::RespValue value;
+  MockPoolCallbacks callbacks;
+  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
   std::shared_ptr<Upstream::MockHost> host1(new Upstream::MockHost());
   std::shared_ptr<Upstream::MockHost> host2(new Upstream::MockHost());
   Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();
@@ -400,25 +516,28 @@ TEST_F(RedisConnPoolImplTest, HostRemove) {
 
   Common::Redis::Client::MockPoolRequest active_request1;
   EXPECT_CALL(*host1, address()).WillRepeatedly(Return(test_address_));
-  EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request1));
+  EXPECT_CALL(*client1, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request1));
   Common::Redis::Client::PoolRequest* request1 =
       conn_pool_->makeRequest("hash_key", value, callbacks);
-  EXPECT_EQ(&active_request1, request1);
+  EXPECT_NE(nullptr, request1);
 
   EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(host2));
   EXPECT_CALL(*this, create_(Eq(host2))).WillOnce(Return(client2));
 
   Common::Redis::Client::MockPoolRequest active_request2;
   EXPECT_CALL(*host2, address()).WillRepeatedly(Return(test_address_));
-  EXPECT_CALL(*client2, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request2));
+  EXPECT_CALL(*client2, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request2));
   Common::Redis::Client::PoolRequest* request2 = conn_pool_->makeRequest("bar", value, callbacks);
-  EXPECT_EQ(&active_request2, request2);
+  EXPECT_NE(nullptr, request2);
 
   EXPECT_CALL(*client2, close());
   EXPECT_CALL(*host2, address()).WillRepeatedly(Return(test_address_));
   cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, {host2});
 
+  EXPECT_CALL(active_request1, cancel());
+  EXPECT_CALL(active_request2, cancel());
   EXPECT_CALL(*client1, close());
+  EXPECT_CALL(callbacks, onFailure_()).Times(2);
   tls_.shutdownThread();
 
   ASSERT_TRUE(testing::Mock::VerifyAndClearExpectations(host1.get()));
@@ -457,8 +576,8 @@ TEST_F(RedisConnPoolImplTest, NoHost) {
 
   setup();
 
-  Common::Redis::RespValue value;
-  Common::Redis::Client::MockPoolCallbacks callbacks;
+  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
+  MockPoolCallbacks callbacks;
   EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(nullptr));
   Common::Redis::Client::PoolRequest* request =
       conn_pool_->makeRequest("hash_key", value, callbacks);
@@ -472,16 +591,16 @@ TEST_F(RedisConnPoolImplTest, RemoteClose) {
 
   setup();
 
-  Common::Redis::RespValue value;
+  Common::Redis::RespValueSharedPtr value = std::make_shared<Common::Redis::RespValue>();
   Common::Redis::Client::MockPoolRequest active_request;
-  Common::Redis::Client::MockPoolCallbacks callbacks;
+  MockPoolCallbacks callbacks;
   Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
 
   EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_));
   EXPECT_CALL(*this, create_(_)).WillOnce(Return(client));
   EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address())
       .WillRepeatedly(Return(test_address_));
-  EXPECT_CALL(*client, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request));
+  EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request));
   conn_pool_->makeRequest("hash_key", value, callbacks);
 
   EXPECT_CALL(tls_.dispatcher_, deferredDelete_(_));
@@ -489,6 +608,8 @@ TEST_F(RedisConnPoolImplTest, RemoteClose) {
   client->runLowWatermarkCallbacks();
   client->raiseEvent(Network::ConnectionEvent::RemoteClose);
 
+  EXPECT_CALL(active_request, cancel());
+  EXPECT_CALL(callbacks, onFailure_());
   tls_.shutdownThread();
 }
 
@@ -500,8 +621,8 @@ TEST_F(RedisConnPoolImplTest, MakeRequestToHost) {
   Common::Redis::RespValue value;
   Common::Redis::Client::MockPoolRequest active_request1;
   Common::Redis::Client::MockPoolRequest active_request2;
-  Common::Redis::Client::MockPoolCallbacks callbacks1;
-  Common::Redis::Client::MockPoolCallbacks callbacks2;
+  Common::Redis::Client::MockClientCallbacks callbacks1;
+  Common::Redis::Client::MockClientCallbacks callbacks2;
   Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();
   Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
   Upstream::HostConstSharedPtr host1;
@@ -513,7 +634,7 @@ TEST_F(RedisConnPoolImplTest, MakeRequestToHost) {
   update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_);
 
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));
-  EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks1)))
+  EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))
       .WillOnce(Return(&active_request1));
   Common::Redis::Client::PoolRequest* request1 =
       conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1);
@@ -524,7 +645,7 @@ TEST_F(RedisConnPoolImplTest, MakeRequestToHost) {
   // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around
   // the address.
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));
-  EXPECT_CALL(*client2, makeRequest(Ref(value), Ref(callbacks2)))
+  EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))
       .WillOnce(Return(&active_request2));
   Common::Redis::Client::PoolRequest* request2 =
       conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2);
@@ -559,7 +680,7 @@ TEST_F(RedisConnPoolImplTest, MakeRequestToHostWithZeroMaxUnknownUpstreamConnect
   setup(true, true, 0);
 
   Common::Redis::RespValue value;
-  Common::Redis::Client::MockPoolCallbacks callbacks1;
+  Common::Redis::Client::MockClientCallbacks callbacks1;
 
   // The max_unknown_upstream_connections is set to 0. Request should fail.
   EXPECT_EQ(nullptr, conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1));
@@ -577,15 +698,15 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndRemovedWithDraining) {
   Common::Redis::RespValue value;
   Common::Redis::Client::MockPoolRequest auth_request1, active_request1;
   Common::Redis::Client::MockPoolRequest auth_request2, active_request2;
-  Common::Redis::Client::MockPoolCallbacks callbacks1;
-  Common::Redis::Client::MockPoolCallbacks callbacks2;
+  Common::Redis::Client::MockClientCallbacks callbacks1;
+  Common::Redis::Client::MockClientCallbacks callbacks2;
   Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();
   Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
   Upstream::HostConstSharedPtr host1;
   Upstream::HostConstSharedPtr host2;
 
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));
-  EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks1)))
+  EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))
       .WillOnce(Return(&active_request1));
   Common::Redis::Client::PoolRequest* request1 =
       conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1);
@@ -596,7 +717,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndRemovedWithDraining) {
   // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around
   // the address.
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));
-  EXPECT_CALL(*client2, makeRequest(Ref(value), Ref(callbacks2)))
+  EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))
       .WillOnce(Return(&active_request2));
   Common::Redis::Client::PoolRequest* request2 =
       conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2);
@@ -676,15 +797,15 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithNoDraining) {
   Common::Redis::RespValue value;
   Common::Redis::Client::MockPoolRequest auth_request1, active_request1;
   Common::Redis::Client::MockPoolRequest auth_request2, active_request2;
-  Common::Redis::Client::MockPoolCallbacks callbacks1;
-  Common::Redis::Client::MockPoolCallbacks callbacks2;
+  Common::Redis::Client::MockClientCallbacks callbacks1;
+  Common::Redis::Client::MockClientCallbacks callbacks2;
   Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();
   Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
   Upstream::HostConstSharedPtr host1;
   Upstream::HostConstSharedPtr host2;
 
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));
-  EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks1)))
+  EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))
       .WillOnce(Return(&active_request1));
   Common::Redis::Client::PoolRequest* request1 =
       conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1);
@@ -695,7 +816,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithNoDraining) {
   // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around
   // the address.
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));
-  EXPECT_CALL(*client2, makeRequest(Ref(value), Ref(callbacks2)))
+  EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))
       .WillOnce(Return(&active_request2));
   Common::Redis::Client::PoolRequest* request2 =
       conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2);
@@ -754,15 +875,15 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithClusterRemoval) {
   Common::Redis::RespValue value;
   Common::Redis::Client::MockPoolRequest auth_request1, active_request1;
   Common::Redis::Client::MockPoolRequest auth_request2, active_request2;
-  Common::Redis::Client::MockPoolCallbacks callbacks1;
-  Common::Redis::Client::MockPoolCallbacks callbacks2;
+  Common::Redis::Client::MockClientCallbacks callbacks1;
+  Common::Redis::Client::MockClientCallbacks callbacks2;
   Common::Redis::Client::MockClient* client1 = new NiceMock<Common::Redis::Client::MockClient>();
   Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
   Upstream::HostConstSharedPtr host1;
   Upstream::HostConstSharedPtr host2;
 
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1)));
-  EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks1)))
+  EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1)))
       .WillOnce(Return(&active_request1));
   Common::Redis::Client::PoolRequest* request1 =
       conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1);
@@ -773,7 +894,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithClusterRemoval) {
   // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around
   // the address.
   EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2)));
-  EXPECT_CALL(*client2, makeRequest(Ref(value), Ref(callbacks2)))
+  EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2)))
       .WillOnce(Return(&active_request2));
   Common::Redis::Client::PoolRequest* request2 =
       conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2);
@@ -867,6 +988,163 @@ TEST_F(RedisConnPoolImplTest, MakeRequestToRedisClusterHashtag) {
   tls_.shutdownThread();
 };
 
+TEST_F(RedisConnPoolImplTest, MovedRedirectionSuccess) {
+  InSequence s;
+
+  setup();
+
+  Common::Redis::RespValueSharedPtr request_value = std::make_shared<Common::Redis::RespValue>();
+  Common::Redis::Client::MockPoolRequest active_request;
+  MockPoolCallbacks callbacks;
+  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
+  makeRequest(client, request_value, callbacks, active_request);
+
+  Common::Redis::Client::MockPoolRequest active_request2;
+  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
+  Upstream::HostConstSharedPtr host1;
+
+  Common::Redis::RespValuePtr moved_response{new Common::Redis::RespValue()};
+  moved_response->type(Common::Redis::RespType::Error);
+  moved_response->asString() = "MOVED 1111 10.1.2.3:4000";
+
+  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));
+  EXPECT_CALL(*client2, makeRequest_(Ref(*request_value), _)).WillOnce(Return(&active_request2));
+  EXPECT_TRUE(client->client_callbacks_.back()->onRedirection(std::move(moved_response),
+                                                              "10.1.2.3:4000", false));
+  EXPECT_EQ(host1->address()->asString(), "10.1.2.3:4000");
+
+  respond(callbacks, client2);
+
+  EXPECT_CALL(*client, close());
+  tls_.shutdownThread();
+}
+
+TEST_F(RedisConnPoolImplTest, MovedRedirectionFailure) {
+  InSequence s;
+
+  setup();
+
+  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
+
+  // Test with a badly specified host address (no colon, no address, no port).
+  verifyInvalidMoveResponse(client, "bad", true);
+
+  // Test with a badly specified IPv4 address.
+  verifyInvalidMoveResponse(client, "10.0.bad:3000", false);
+
+  // Test with a badly specified TCP port.
+  verifyInvalidMoveResponse(client, "10.0.bad:3000", false);
+
+  // Test with a TCP port outside of the acceptable range for a 32-bit integer.
+  verifyInvalidMoveResponse(client, "10.0.0.1:4294967297", false); // 2^32 + 1
+
+  // Test with a TCP port outside of the acceptable range for a TCP port (0 .. 65535).
+  verifyInvalidMoveResponse(client, "10.0.0.1:65536", false);
+
+  // Test with a badly specified IPv6-like address.
+  verifyInvalidMoveResponse(client, "bad:ipv6:3000", false);
+
+  // Test with a valid IPv6 address and a badly specified TCP port (out of range).
+  verifyInvalidMoveResponse(client, "2001:470:813b:::70000", false);
+
+  // Test an upstream error preventing the request from being sent.
+  MockPoolCallbacks callbacks;
+  Common::Redis::RespValueSharedPtr request3 = std::make_shared<Common::Redis::RespValue>();
+  Common::Redis::Client::MockPoolRequest active_request3;
+  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
+  Upstream::HostConstSharedPtr host1;
+  makeRequest(client, request3, callbacks, active_request3, false);
+  Common::Redis::RespValuePtr moved_response3{new Common::Redis::RespValue()};
+  moved_response3->type(Common::Redis::RespType::Error);
+  moved_response3->asString() = "MOVED 1111 10.1.2.3:4000";
+  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));
+  EXPECT_CALL(*client2, makeRequest_(Ref(*request3), _)).WillOnce(Return(nullptr));
+  EXPECT_CALL(callbacks, onResponse_(Ref(moved_response3)));
+  EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(moved_response3),
+                                                               "10.1.2.3:4000", false));
+  EXPECT_EQ(host1->address()->asString(), "10.1.2.3:4000");
+
+  EXPECT_CALL(*client, close());
+  tls_.shutdownThread();
+}
+
+TEST_F(RedisConnPoolImplTest, AskRedirectionSuccess) {
+  InSequence s;
+
+  setup();
+
+  Common::Redis::RespValueSharedPtr request_value = std::make_shared<Common::Redis::RespValue>();
+  Common::Redis::Client::MockPoolRequest active_request;
+  MockPoolCallbacks callbacks;
+  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
+  makeRequest(client, request_value, callbacks, active_request);
+
+  Common::Redis::Client::MockPoolRequest ask_request, active_request2;
+  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
+  Upstream::HostConstSharedPtr host1;
+
+  Common::Redis::RespValuePtr ask_response{new Common::Redis::RespValue()};
+  ask_response->type(Common::Redis::RespType::Error);
+  ask_response->asString() = "ASK 1111 10.1.2.3:4000";
+  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));
+  // Verify that the request has been properly prepended with an "asking" command.
+  EXPECT_CALL(*client2, makeRequest_(Ref(Common::Redis::Utility::AskingRequest::instance()), _))
+      .WillOnce(Return(&ask_request));
+  EXPECT_CALL(*client2, makeRequest_(Ref(*request_value), _)).WillOnce(Return(&active_request2));
+  EXPECT_TRUE(client->client_callbacks_.back()->onRedirection(std::move(ask_response),
+                                                              "10.1.2.3:4000", true));
+  EXPECT_EQ(host1->address()->asString(), "10.1.2.3:4000");
+
+  respond(callbacks, client2);
+
+  EXPECT_CALL(*client, close());
+  tls_.shutdownThread();
+}
+
+TEST_F(RedisConnPoolImplTest, AskRedirectionFailure) {
+  InSequence s;
+
+  setup();
+
+  MockPoolCallbacks callbacks;
+  Common::Redis::Client::MockClient* client = new NiceMock<Common::Redis::Client::MockClient>();
+
+  // Test an upstream error from trying to send an "asking" command upstream.
+  Common::Redis::Client::MockPoolRequest active_request3;
+  Common::Redis::RespValueSharedPtr request3 = std::make_shared<Common::Redis::RespValue>();
+  Common::Redis::Client::MockClient* client2 = new NiceMock<Common::Redis::Client::MockClient>();
+  Upstream::HostConstSharedPtr host1;
+  makeRequest(client, request3, callbacks, active_request3);
+  Common::Redis::RespValuePtr ask_response3{new Common::Redis::RespValue()};
+  ask_response3->type(Common::Redis::RespType::Error);
+  ask_response3->asString() = "ASK 1111 10.1.2.3:4000";
+  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));
+  EXPECT_CALL(*client2, makeRequest_(Ref(Common::Redis::Utility::AskingRequest::instance()), _))
+      .WillOnce(Return(nullptr));
+  EXPECT_CALL(callbacks, onResponse_(Ref(ask_response3)));
+  EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(ask_response3),
+                                                               "10.1.2.3:4000", true));
+  EXPECT_EQ(host1->address()->asString(), "10.1.2.3:4000");
+
+  // Test an upstream error from trying to send the original request after the "asking" command is
+  // sent successfully.
+  Common::Redis::Client::MockPoolRequest active_request4, active_request5;
+  Common::Redis::RespValueSharedPtr request4 = std::make_shared<Common::Redis::RespValue>();
+  makeRequest(client, request4, callbacks, active_request4, false);
+  Common::Redis::RespValuePtr ask_response4{new Common::Redis::RespValue()};
+  ask_response4->type(Common::Redis::RespType::Error);
+  ask_response4->asString() = "ASK 1111 10.1.2.3:4000";
+  EXPECT_CALL(*client2, makeRequest_(Ref(Common::Redis::Utility::AskingRequest::instance()), _))
+      .WillOnce(Return(&active_request5));
+  EXPECT_CALL(*client2, makeRequest_(Ref(*request4), _)).WillOnce(Return(nullptr));
+  EXPECT_CALL(callbacks, onResponse_(Ref(ask_response4)));
+  EXPECT_FALSE(client->client_callbacks_.back()->onRedirection(std::move(ask_response4),
+                                                               "10.1.2.3:4000", true));
+
+  EXPECT_CALL(*client, close());
+  tls_.shutdownThread();
+}
+
 } // namespace ConnPool
 } // namespace RedisProxy
 } // namespace NetworkFilters
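The redirection tests above exercise the new ClientCallbacks::onRedirection(response, host_address, ask_redirection) flow: the pool resolves a client for the redirection target, sends an "asking" command first for ASK redirections, and then re-issues the original request; if any of those steps fails, the error response is handed back through onResponse and onRedirection returns false. A minimal sketch of that control flow is shown below (assumed behaviour distilled from the tests, not Envoy's actual implementation; Request, SendFn and handleRedirection are illustrative names):

    #include <functional>
    #include <string>

    struct Request {};
    using SendFn = std::function<bool(const std::string& host, const Request&)>;

    // Returns true if the request was successfully re-issued to host_address.
    bool handleRedirection(const std::string& host_address, bool ask_redirection,
                           const Request& original, const Request& asking,
                           const SendFn& send) {
      if (host_address.empty()) {
        return false; // Malformed or missing redirection target.
      }
      if (ask_redirection && !send(host_address, asking)) {
        return false; // The "asking" command could not be sent.
      }
      return send(host_address, original); // Re-issue the original request.
    }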
diff --git a/test/extensions/filters/network/redis_proxy/mocks.cc b/test/extensions/filters/network/redis_proxy/mocks.cc
index 9c1457258377..d51809ba27f6 100644
--- a/test/extensions/filters/network/redis_proxy/mocks.cc
+++ b/test/extensions/filters/network/redis_proxy/mocks.cc
@@ -1,5 +1,6 @@
 #include "mocks.h"
 
+using testing::_;
 using testing::Return;
 using testing::ReturnRef;
 
@@ -8,7 +9,9 @@ namespace Extensions {
 namespace NetworkFilters {
 namespace RedisProxy {
 
-MockRouter::MockRouter() = default;
+MockRouter::MockRouter(RouteSharedPtr route) : route_(std::move(route)) {
+  ON_CALL(*this, upstreamPool(_)).WillByDefault(Return(route_));
+}
 MockRouter::~MockRouter() = default;
 
 MockRoute::MockRoute(ConnPool::InstanceSharedPtr conn_pool) : conn_pool_(std::move(conn_pool)) {
@@ -17,8 +20,17 @@ MockRoute::MockRoute(ConnPool::InstanceSharedPtr conn_pool) : conn_pool_(std::mo
 }
 MockRoute::~MockRoute() = default;
 
+MockMirrorPolicy::MockMirrorPolicy(ConnPool::InstanceSharedPtr conn_pool)
+    : conn_pool_(std::move(conn_pool)) {
+  ON_CALL(*this, upstream()).WillByDefault(Return(conn_pool_));
+  ON_CALL(*this, shouldMirror(_)).WillByDefault(Return(true));
+}
+
 namespace ConnPool {
 
+MockPoolCallbacks::MockPoolCallbacks() = default;
+MockPoolCallbacks::~MockPoolCallbacks() = default;
+
 MockInstance::MockInstance() = default;
 MockInstance::~MockInstance() = default;
 
diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h
index e90fb2e616c0..fb96d0dd646e 100644
--- a/test/extensions/filters/network/redis_proxy/mocks.h
+++ b/test/extensions/filters/network/redis_proxy/mocks.h
@@ -22,10 +22,11 @@ namespace RedisProxy {
 
 class MockRouter : public Router {
 public:
-  MockRouter();
+  MockRouter(RouteSharedPtr route);
   ~MockRouter() override;
 
   MOCK_METHOD1(upstreamPool, RouteSharedPtr(std::string& key));
+  RouteSharedPtr route_;
 };
 
 class MockRoute : public Route {
@@ -36,27 +37,50 @@ class MockRoute : public Route {
   MOCK_CONST_METHOD0(upstream, ConnPool::InstanceSharedPtr());
   MOCK_CONST_METHOD0(mirrorPolicies, const MirrorPolicies&());
   ConnPool::InstanceSharedPtr conn_pool_;
-  const MirrorPolicies policies_;
+  MirrorPolicies policies_;
+};
+
+class MockMirrorPolicy : public MirrorPolicy {
+public:
+  MockMirrorPolicy(ConnPool::InstanceSharedPtr);
+  ~MockMirrorPolicy() = default;
+
+  MOCK_CONST_METHOD0(upstream, ConnPool::InstanceSharedPtr());
+  MOCK_CONST_METHOD1(shouldMirror, bool(const std::string&));
+
+  ConnPool::InstanceSharedPtr conn_pool_;
 };
 
 namespace ConnPool {
 
+class MockPoolCallbacks : public PoolCallbacks {
+public:
+  MockPoolCallbacks();
+  ~MockPoolCallbacks() override;
+
+  void onResponse(Common::Redis::RespValuePtr&& value) override { onResponse_(value); }
+  void onFailure() override { onFailure_(); }
+
+  MOCK_METHOD1(onResponse_, void(Common::Redis::RespValuePtr& value));
+  MOCK_METHOD0(onFailure_, void());
+};
+
 class MockInstance : public Instance {
 public:
   MockInstance();
   ~MockInstance() override;
 
-  MOCK_METHOD3(makeRequest,
-               Common::Redis::Client::PoolRequest*(
-                   const std::string& hash_key, const Common::Redis::RespValue& request,
-                   Common::Redis::Client::PoolCallbacks& callbacks));
-  MOCK_METHOD3(makeRequestToHost,
-               Common::Redis::Client::PoolRequest*(
-                   const std::string& host_address, const Common::Redis::RespValue& request,
-                   Common::Redis::Client::PoolCallbacks& callbacks));
+  Common::Redis::Client::PoolRequest* makeRequest(const std::string& hash_key,
+                                                  RespVariant&& request,
+                                                  PoolCallbacks& callbacks) override {
+    return makeRequest_(hash_key, request, callbacks);
+  }
+
+  MOCK_METHOD3(makeRequest_,
+               Common::Redis::Client::PoolRequest*(const std::string& hash_key,
+                                                   RespVariant& request, PoolCallbacks& callbacks));
   MOCK_METHOD0(onRedirection, bool());
 };
-
 } // namespace ConnPool
 
 namespace CommandSplitter {
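MockPoolCallbacks and MockInstance above follow a common gMock idiom for rvalue-reference and move-only parameters: the virtual override accepts the rvalue and forwards it to a mockable method that takes an lvalue reference, which the legacy MOCK_METHODn macros can match against. A stripped-down sketch of the pattern (ValuePtr and FakeCallbacks are illustrative names, not Envoy types):

    #include <memory>
    #include "gmock/gmock.h"

    using ValuePtr = std::unique_ptr<int>;

    class Callbacks {
    public:
      virtual ~Callbacks() = default;
      virtual void onResponse(ValuePtr&& value) = 0;
    };

    class FakeCallbacks : public Callbacks {
    public:
      // Consume the rvalue in the override and hand gMock an lvalue reference.
      void onResponse(ValuePtr&& value) override { onResponse_(value); }
      MOCK_METHOD1(onResponse_, void(ValuePtr& value));
    };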
diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc
index 5b29318b4997..a3f28a518362 100644
--- a/test/extensions/filters/network/thrift_proxy/router_test.cc
+++ b/test/extensions/filters/network/thrift_proxy/router_test.cc
@@ -85,7 +85,7 @@ class ThriftRouterTestBase {
     route_ = new NiceMock<MockRoute>();
     route_ptr_.reset(route_);
 
-    router_ = std::make_unique<Router>(context_.clusterManager());
+    router_ = std::make_unique<Router>(context_.clusterManager(), "test", context_.scope());
 
     EXPECT_EQ(nullptr, router_->downstreamConnection());
 
@@ -437,6 +437,7 @@ TEST_F(ThriftRouterTest, NoRoute) {
         EXPECT_TRUE(end_stream);
       }));
   EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));
+  EXPECT_EQ(1U, context_.scope().counter("test.route_missing").value());
 }
 
 TEST_F(ThriftRouterTest, NoCluster) {
@@ -455,6 +456,7 @@ TEST_F(ThriftRouterTest, NoCluster) {
         EXPECT_TRUE(end_stream);
       }));
   EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));
+  EXPECT_EQ(1U, context_.scope().counter("test.unknown_cluster").value());
 }
 
 TEST_F(ThriftRouterTest, ClusterMaintenanceMode) {
@@ -475,6 +477,7 @@ TEST_F(ThriftRouterTest, ClusterMaintenanceMode) {
         EXPECT_TRUE(end_stream);
       }));
   EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));
+  EXPECT_EQ(1U, context_.scope().counter("test.upstream_rq_maintenance_mode").value());
 }
 
 TEST_F(ThriftRouterTest, NoHealthyHosts) {
@@ -496,6 +499,7 @@ TEST_F(ThriftRouterTest, NoHealthyHosts) {
       }));
 
   EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_));
+  EXPECT_EQ(1U, context_.scope().counter("test.no_healthy_upstream").value());
 }
 
 TEST_F(ThriftRouterTest, TruncatedResponse) {
diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc
index e82eee9931a0..306f1fa3369e 100644
--- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc
+++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc
@@ -12,6 +12,7 @@ using testing::AtLeast;
 using testing::ByMove;
 using testing::InSequence;
 using testing::Return;
+using testing::ReturnNew;
 using testing::SaveArg;
 
 namespace Envoy {
@@ -60,37 +61,47 @@ class UdpProxyFilterTest : public testing::Test {
               }));
     }
 
-    void recvDataFromUpstream(const std::string& data, int send_sys_errno = 0) {
+    void recvDataFromUpstream(const std::string& data, int recv_sys_errno = 0,
+                              int send_sys_errno = 0) {
       EXPECT_CALL(*idle_timer_, enableTimer(parent_.config_->sessionTimeout(), nullptr));
 
       // Return the datagram.
       EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _))
-          .WillOnce(Invoke(
-              [this, data](Buffer::RawSlice* slices, const uint64_t, uint32_t,
-                           Network::IoHandle::RecvMsgOutput& output) -> Api::IoCallUint64Result {
-                ASSERT(data.size() <= slices[0].len_);
-                memcpy(slices[0].mem_, data.data(), data.size());
-                output.peer_address_ = upstream_address_;
-                return makeNoError(data.size());
+          .WillOnce(
+              Invoke([this, data, recv_sys_errno](
+                         Buffer::RawSlice* slices, const uint64_t, uint32_t,
+                         Network::IoHandle::RecvMsgOutput& output) -> Api::IoCallUint64Result {
+                if (recv_sys_errno != 0) {
+                  return makeError(recv_sys_errno);
+                } else {
+                  ASSERT(data.size() <= slices[0].len_);
+                  memcpy(slices[0].mem_, data.data(), data.size());
+                  output.peer_address_ = upstream_address_;
+                  return makeNoError(data.size());
+                }
               }));
-      // Send the datagram downstream.
-      EXPECT_CALL(parent_.callbacks_.udp_listener_, send(_))
-          .WillOnce(Invoke([data, send_sys_errno](
-                               const Network::UdpSendData& send_data) -> Api::IoCallUint64Result {
-            // TODO(mattklein123): Verify peer/local address.
-            EXPECT_EQ(send_data.buffer_.toString(), data);
-            if (send_sys_errno == 0) {
-              send_data.buffer_.drain(send_data.buffer_.length());
-              return makeNoError(data.size());
-            } else {
-              return makeError(send_sys_errno);
-            }
-          }));
-      // Return an EAGAIN result.
-      EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _))
-          .WillOnce(Return(ByMove(Api::IoCallUint64Result(
-              0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),
-                                 Network::IoSocketError::deleteIoError)))));
+
+      if (recv_sys_errno == 0) {
+        // Send the datagram downstream.
+        EXPECT_CALL(parent_.callbacks_.udp_listener_, send(_))
+            .WillOnce(Invoke([data, send_sys_errno](
+                                 const Network::UdpSendData& send_data) -> Api::IoCallUint64Result {
+              // TODO(mattklein123): Verify peer/local address.
+              EXPECT_EQ(send_data.buffer_.toString(), data);
+              if (send_sys_errno == 0) {
+                send_data.buffer_.drain(send_data.buffer_.length());
+                return makeNoError(data.size());
+              } else {
+                return makeError(send_sys_errno);
+              }
+            }));
+        // Return an EAGAIN result.
+        EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _))
+            .WillOnce(Return(ByMove(Api::IoCallUint64Result(
+                0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),
+                                   Network::IoSocketError::deleteIoError)))));
+      }
+
       // Kick off the receive.
       file_event_cb_(Event::FileReadyType::Read);
     }
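The refactored recvDataFromUpstream() above distinguishes a failed recvmsg (recv_sys_errno) from a failed downstream send (send_sys_errno), and only arms the send and EAGAIN expectations when the receive succeeds. The error-or-data shape of the mocked syscall can be sketched generically as follows (Result and fakeRecv are stand-ins; makeError/makeNoError and Api::IoCallUint64Result belong to the real test fixture):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <string>

    struct Result {
      uint64_t rc_{0};
      int error_{0};
      bool ok() const { return error_ == 0; }
    };

    Result fakeRecv(const std::string& data, int recv_sys_errno, char* out, std::size_t out_len) {
      if (recv_sys_errno != 0) {
        return {0, recv_sys_errno}; // Simulated syscall failure, no data copied.
      }
      const std::size_t n = std::min(data.size(), out_len);
      std::memcpy(out, data.data(), n); // Copy the canned datagram into the caller's buffer.
      return {n, 0};
    }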
@@ -108,15 +119,25 @@ class UdpProxyFilterTest : public testing::Test {
     EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0));
     EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, address())
         .WillRepeatedly(Return(upstream_address_));
+    EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, health())
+        .WillRepeatedly(Return(Upstream::Host::Health::Healthy));
   }
 
   ~UdpProxyFilterTest() { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); }
 
-  void setup(const std::string& yaml) {
+  void setup(const std::string& yaml, bool has_cluster = true) {
     envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig config;
     TestUtility::loadFromYamlAndValidate(yaml, config);
     config_ = std::make_shared<UdpProxyFilterConfig>(cluster_manager_, time_system_, stats_store_,
                                                      config);
+    EXPECT_CALL(cluster_manager_, addThreadLocalClusterUpdateCallbacks_(_))
+        .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks_),
+                        ReturnNew<Upstream::MockClusterUpdateCallbacksHandle>()));
+    if (has_cluster) {
+      EXPECT_CALL(cluster_manager_, get(_));
+    } else {
+      EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(nullptr));
+    }
     filter_ = std::make_unique<TestUdpProxyFilter>(callbacks_, config_);
   }
 
@@ -130,10 +151,9 @@ class UdpProxyFilterTest : public testing::Test {
     filter_->onData(data);
   }
 
-  void expectSessionCreate() {
-    test_sessions_.emplace_back(*this, upstream_address_);
+  void expectSessionCreate(const Network::Address::InstanceConstSharedPtr& address) {
+    test_sessions_.emplace_back(*this, address);
     TestSession& new_session = test_sessions_.back();
-    EXPECT_CALL(cluster_manager_, get(_));
     new_session.idle_timer_ = new Event::MockTimer(&callbacks_.udp_listener_.dispatcher_);
     EXPECT_CALL(*filter_, createIoHandle(_))
         .WillOnce(Return(ByMove(Network::IoHandlePtr{test_sessions_.back().io_handle_})));
@@ -156,9 +176,9 @@ class UdpProxyFilterTest : public testing::Test {
   Stats::IsolatedStoreImpl stats_store_;
   UdpProxyFilterConfigSharedPtr config_;
   Network::MockUdpReadFilterCallbacks callbacks_;
+  Upstream::ClusterUpdateCallbacks* cluster_update_callbacks_{};
   std::unique_ptr<TestUdpProxyFilter> filter_;
   std::vector<TestSession> test_sessions_;
-  // If a test ever support more than 1 upstream host this will need to move to the session/test.
   const Network::Address::InstanceConstSharedPtr upstream_address_;
 };
 
@@ -171,7 +191,7 @@ stat_prefix: foo
 cluster: fake_cluster
   )EOF");
 
-  expectSessionCreate();
+  expectSessionCreate(upstream_address_);
   test_sessions_[0].expectUpstreamWrite("hello");
   recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
   EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
@@ -202,7 +222,7 @@ stat_prefix: foo
 cluster: fake_cluster
   )EOF");
 
-  expectSessionCreate();
+  expectSessionCreate(upstream_address_);
   test_sessions_[0].expectUpstreamWrite("hello");
   recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
   EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
@@ -212,7 +232,7 @@ cluster: fake_cluster
   EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
   EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());
 
-  expectSessionCreate();
+  expectSessionCreate(upstream_address_);
   test_sessions_[1].expectUpstreamWrite("hello");
   recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
   EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());
@@ -231,14 +251,223 @@ cluster: fake_cluster
   filter_->onReceiveError(Api::IoError::IoErrorCode::UnknownError);
   EXPECT_EQ(1, config_->stats().downstream_sess_rx_errors_.value());
 
-  expectSessionCreate();
+  expectSessionCreate(upstream_address_);
   test_sessions_[0].expectUpstreamWrite("hello");
   recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
   checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);
+  EXPECT_EQ(5, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_
+                   .upstream_cx_tx_bytes_total_.value());
 
-  test_sessions_[0].recvDataFromUpstream("world2", EMSGSIZE);
+  test_sessions_[0].recvDataFromUpstream("world2", 0, EMSGSIZE);
   checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);
+  EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_
+                   .upstream_cx_rx_bytes_total_.value());
   EXPECT_EQ(1, config_->stats().downstream_sess_tx_errors_.value());
+
+  test_sessions_[0].recvDataFromUpstream("world2", EMSGSIZE, 0);
+  checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);
+  EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_
+                   .upstream_cx_rx_bytes_total_.value());
+  EXPECT_EQ(1, TestUtility::findCounter(
+                   cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_,
+                   "udp.sess_rx_errors")
+                   ->value());
+
+  test_sessions_[0].expectUpstreamWrite("hello", EMSGSIZE);
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  checkTransferStats(10 /*rx_bytes*/, 2 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/);
+  EXPECT_EQ(5, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_
+                   .upstream_cx_tx_bytes_total_.value());
+  EXPECT_EQ(1, TestUtility::findCounter(
+                   cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_,
+                   "udp.sess_tx_errors")
+                   ->value());
+}
+
+// No upstream host handling.
+TEST_F(UdpProxyFilterTest, NoUpstreamHost) {
+  InSequence s;
+
+  setup(R"EOF(
+stat_prefix: foo
+cluster: fake_cluster
+  )EOF");
+
+  EXPECT_CALL(cluster_manager_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(nullptr));
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_
+                   .upstream_cx_none_healthy_.value());
+}
+
+// No cluster at filter creation.
+TEST_F(UdpProxyFilterTest, NoUpstreamClusterAtCreation) {
+  InSequence s;
+
+  setup(R"EOF(
+stat_prefix: foo
+cluster: fake_cluster
+  )EOF",
+        false);
+
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, config_->stats().downstream_sess_no_route_.value());
+}
+
+// Dynamic cluster addition and removal handling.
+TEST_F(UdpProxyFilterTest, ClusterDynamicAddAndRemoval) {
+  InSequence s;
+
+  setup(R"EOF(
+stat_prefix: foo
+cluster: fake_cluster
+  )EOF",
+        false);
+
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, config_->stats().downstream_sess_no_route_.value());
+  EXPECT_EQ(0, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());
+
+  // Add a cluster that we don't care about.
+  NiceMock<Upstream::MockThreadLocalCluster> other_thread_local_cluster;
+  other_thread_local_cluster.cluster_.info_->name_ = "other_cluster";
+  cluster_update_callbacks_->onClusterAddOrUpdate(other_thread_local_cluster);
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(2, config_->stats().downstream_sess_no_route_.value());
+  EXPECT_EQ(0, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());
+
+  // Now add the cluster we care about.
+  cluster_update_callbacks_->onClusterAddOrUpdate(cluster_manager_.thread_local_cluster_);
+  expectSessionCreate(upstream_address_);
+  test_sessions_[0].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+
+  // Remove a cluster we don't care about.
+  cluster_update_callbacks_->onClusterRemoval("other_cluster");
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+
+  // Remove the cluster we do care about. This should purge all sessions.
+  cluster_update_callbacks_->onClusterRemoval("fake_cluster");
+  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());
+}
+
+// Hitting the maximum per-cluster connection/session circuit breaker.
+TEST_F(UdpProxyFilterTest, MaxSessionsCircuitBreaker) {
+  InSequence s;
+
+  setup(R"EOF(
+stat_prefix: foo
+cluster: fake_cluster
+  )EOF");
+
+  // Allow only a single session.
+  cluster_manager_.thread_local_cluster_.cluster_.info_->resetResourceManager(1, 0, 0, 0, 0);
+
+  expectSessionCreate(upstream_address_);
+  test_sessions_[0].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+
+  // This should hit the session circuit breaker.
+  recvDataFromDownstream("10.0.0.2:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(
+      1,
+      cluster_manager_.thread_local_cluster_.cluster_.info_->stats_.upstream_cx_overflow_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+
+  // Timing out the 1st session should allow us to create another.
+  test_sessions_[0].idle_timer_->invokeCallback();
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());
+  expectSessionCreate(upstream_address_);
+  test_sessions_[1].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.2:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+}
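MaxSessionsCircuitBreaker relies on the per-cluster connection resource being charged one unit per UDP session: once the limit is reached, new sessions are rejected and upstream_cx_overflow is incremented, and releasing a session (here via the idle timeout) frees a slot. A tiny sketch of that accounting (assumed semantics; SessionGauge is an illustrative name, not an Envoy class):

    #include <cstdint>

    class SessionGauge {
    public:
      explicit SessionGauge(uint64_t max) : max_(max) {}

      bool tryAllocate() {
        if (active_ >= max_) {
          ++overflow_; // Mirrors the upstream_cx_overflow counter in the test.
          return false;
        }
        ++active_;
        return true;
      }

      void release() { --active_; } // Called when a session times out or is purged.

      uint64_t overflow() const { return overflow_; }

    private:
      const uint64_t max_;
      uint64_t active_{0};
      uint64_t overflow_{0};
    };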
+
+// Verify that all sessions for a host are removed when a host is removed.
+TEST_F(UdpProxyFilterTest, RemoveHostSessions) {
+  InSequence s;
+
+  setup(R"EOF(
+stat_prefix: foo
+cluster: fake_cluster
+  )EOF");
+
+  expectSessionCreate(upstream_address_);
+  test_sessions_[0].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+
+  cluster_manager_.thread_local_cluster_.cluster_.priority_set_.runUpdateCallbacks(
+      0, {}, {cluster_manager_.thread_local_cluster_.lb_.host_});
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(0, config_->stats().downstream_sess_active_.value());
+
+  expectSessionCreate(upstream_address_);
+  test_sessions_[1].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+}
+
+// In this case the host becomes unhealthy, but we get the same host back, so just keep using the
+// current session.
+TEST_F(UdpProxyFilterTest, HostUnhealthyPickSameHost) {
+  InSequence s;
+
+  setup(R"EOF(
+stat_prefix: foo
+cluster: fake_cluster
+  )EOF");
+
+  expectSessionCreate(upstream_address_);
+  test_sessions_[0].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+
+  EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, health())
+      .WillRepeatedly(Return(Upstream::Host::Health::Unhealthy));
+  test_sessions_[0].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+}
+
+// Make sure that we are able to create a new session if there is an available healthy host and
+// our current host is unhealthy.
+TEST_F(UdpProxyFilterTest, HostUnhealthyPickDifferentHost) {
+  InSequence s;
+
+  setup(R"EOF(
+stat_prefix: foo
+cluster: fake_cluster
+  )EOF");
+
+  expectSessionCreate(upstream_address_);
+  test_sessions_[0].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(1, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
+
+  EXPECT_CALL(*cluster_manager_.thread_local_cluster_.lb_.host_, health())
+      .WillRepeatedly(Return(Upstream::Host::Health::Unhealthy));
+  auto new_host = std::make_shared<NiceMock<Upstream::MockHost>>();
+  auto new_host_address = Network::Utility::parseInternetAddressAndPort("20.0.0.2:443");
+  ON_CALL(*new_host, address()).WillByDefault(Return(new_host_address));
+  ON_CALL(*new_host, health()).WillByDefault(Return(Upstream::Host::Health::Healthy));
+  EXPECT_CALL(cluster_manager_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(new_host));
+  expectSessionCreate(new_host_address);
+  test_sessions_[1].expectUpstreamWrite("hello");
+  recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello");
+  EXPECT_EQ(2, config_->stats().downstream_sess_total_.value());
+  EXPECT_EQ(1, config_->stats().downstream_sess_active_.value());
 }
 
 } // namespace
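The last three tests pin down how the filter reacts to host health and membership changes: a still-healthy session host keeps being reused, an unhealthy host is kept only if the load balancer hands the same host back, and otherwise a fresh session is created toward the newly chosen host (removed hosts purge their sessions outright). The decision can be sketched roughly as below (assumed behaviour inferred from the tests, with illustrative types; this is not the filter's actual code):

    #include <memory>
    #include <string>

    enum class Health { Healthy, Unhealthy };

    struct Host {
      std::string address;
      Health health;
    };

    // Returns the host the next datagram's session should use, or nullptr to drop it.
    std::shared_ptr<Host> pickSessionHost(const std::shared_ptr<Host>& current,
                                          const std::shared_ptr<Host>& lb_choice) {
      if (current != nullptr && current->health == Health::Healthy) {
        return current; // Healthy session host: keep the existing session.
      }
      if (lb_choice == nullptr) {
        return nullptr; // No healthy upstream available (upstream_cx_none_healthy).
      }
      if (current != nullptr && lb_choice->address == current->address) {
        return current; // Same host came back: keep using the current session.
      }
      return lb_choice; // Different healthy host: a new session will be created.
    }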
diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc
index 727b21ab0870..56901f2d25f0 100644
--- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc
+++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc
@@ -84,7 +84,7 @@ class UdpProxyIntegrationTest : public testing::TestWithParam<Network::Address::
     EXPECT_EQ("hello", request_datagram.buffer_->toString());
 
     // Respond from the upstream.
-    fake_upstreams_[0]->sendUdpDatagram("world1", *request_datagram.addresses_.peer_);
+    fake_upstreams_[0]->sendUdpDatagram("world1", request_datagram.addresses_.peer_);
     Network::UdpRecvData response_datagram;
     client.recv(response_datagram);
     EXPECT_EQ("world1", response_datagram.buffer_->toString());
@@ -92,8 +92,16 @@ class UdpProxyIntegrationTest : public testing::TestWithParam<Network::Address::
 
     EXPECT_EQ(5, test_server_->counter("udp.foo.downstream_sess_rx_bytes")->value());
     EXPECT_EQ(1, test_server_->counter("udp.foo.downstream_sess_rx_datagrams")->value());
-    EXPECT_EQ(6, test_server_->counter("udp.foo.downstream_sess_tx_bytes")->value());
-    EXPECT_EQ(1, test_server_->counter("udp.foo.downstream_sess_tx_datagrams")->value());
+    EXPECT_EQ(5, test_server_->counter("cluster.cluster_0.upstream_cx_tx_bytes_total")->value());
+    EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.udp.sess_tx_datagrams")->value());
+
+    EXPECT_EQ(6, test_server_->counter("cluster.cluster_0.upstream_cx_rx_bytes_total")->value());
+    EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.udp.sess_rx_datagrams")->value());
+    // The stat is incremented after the send, so there is a race condition and we must wait for
+    // the counter to be incremented.
+    test_server_->waitForCounterEq("udp.foo.downstream_sess_tx_bytes", 6);
+    test_server_->waitForCounterEq("udp.foo.downstream_sess_tx_datagrams", 1);
+
     EXPECT_EQ(1, test_server_->counter("udp.foo.downstream_sess_total")->value());
     EXPECT_EQ(1, test_server_->gauge("udp.foo.downstream_sess_active")->value());
   }
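The downstream tx counters are bumped only after the datagram has been written back to the test client, so the integration test polls instead of asserting immediately. waitForCounterEq is the test server's helper for that; a generic polling wait of the same shape looks roughly like this (illustrative only):

    #include <chrono>
    #include <cstdint>
    #include <functional>
    #include <thread>

    // Poll read() until it returns expected or the timeout expires.
    bool waitForValueEq(const std::function<uint64_t()>& read, uint64_t expected,
                        std::chrono::milliseconds timeout = std::chrono::milliseconds(5000)) {
      const auto deadline = std::chrono::steady_clock::now() + timeout;
      while (std::chrono::steady_clock::now() < deadline) {
        if (read() == expected) {
          return true;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
      }
      return false;
    }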
@@ -167,9 +175,8 @@ TEST_P(UdpProxyIntegrationTest, MultipleClients) {
   EXPECT_NE(*client1_request_datagram.addresses_.peer_, *client2_request_datagram.addresses_.peer_);
 
   // Send two datagrams back to client 2.
-  fake_upstreams_[0]->sendUdpDatagram("client2_world", *client2_request_datagram.addresses_.peer_);
-  fake_upstreams_[0]->sendUdpDatagram("client2_world_2",
-                                      *client2_request_datagram.addresses_.peer_);
+  fake_upstreams_[0]->sendUdpDatagram("client2_world", client2_request_datagram.addresses_.peer_);
+  fake_upstreams_[0]->sendUdpDatagram("client2_world_2", client2_request_datagram.addresses_.peer_);
   Network::UdpRecvData response_datagram;
   client2.recv(response_datagram);
   EXPECT_EQ("client2_world", response_datagram.buffer_->toString());
@@ -177,7 +184,7 @@ TEST_P(UdpProxyIntegrationTest, MultipleClients) {
   EXPECT_EQ("client2_world_2", response_datagram.buffer_->toString());
 
   // Send 1 datagram back to client 1.
-  fake_upstreams_[0]->sendUdpDatagram("client1_world", *client1_request_datagram.addresses_.peer_);
+  fake_upstreams_[0]->sendUdpDatagram("client1_world", client1_request_datagram.addresses_.peer_);
   client1.recv(response_datagram);
   EXPECT_EQ("client1_world", response_datagram.buffer_->toString());
 }
@@ -199,8 +206,8 @@ TEST_P(UdpProxyIntegrationTest, MultipleUpstreams) {
   ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(request_datagram));
   EXPECT_EQ("hello2", request_datagram.buffer_->toString());
 
-  fake_upstreams_[0]->sendUdpDatagram("world1", *request_datagram.addresses_.peer_);
-  fake_upstreams_[0]->sendUdpDatagram("world2", *request_datagram.addresses_.peer_);
+  fake_upstreams_[0]->sendUdpDatagram("world1", request_datagram.addresses_.peer_);
+  fake_upstreams_[0]->sendUdpDatagram("world2", request_datagram.addresses_.peer_);
   Network::UdpRecvData response_datagram;
   client.recv(response_datagram);
   EXPECT_EQ("world1", response_datagram.buffer_->toString());
diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc
index c55616220361..010c455acd30 100644
--- a/test/extensions/health_checkers/redis/redis_test.cc
+++ b/test/extensions/health_checkers/redis/redis_test.cc
@@ -146,13 +146,13 @@ class RedisHealthCheckerTest
   }
 
   void expectExistsRequestCreate() {
-    EXPECT_CALL(*client_, makeRequest(Ref(RedisHealthChecker::existsHealthCheckRequest("")), _))
+    EXPECT_CALL(*client_, makeRequest_(Ref(RedisHealthChecker::existsHealthCheckRequest("")), _))
         .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));
     EXPECT_CALL(*timeout_timer_, enableTimer(_, _));
   }
 
   void expectPingRequestCreate() {
-    EXPECT_CALL(*client_, makeRequest(Ref(RedisHealthChecker::pingHealthCheckRequest()), _))
+    EXPECT_CALL(*client_, makeRequest_(Ref(RedisHealthChecker::pingHealthCheckRequest()), _))
         .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_)));
     EXPECT_CALL(*timeout_timer_, enableTimer(_, _));
   }
@@ -183,7 +183,7 @@ class RedisHealthCheckerTest
   Event::MockTimer* interval_timer_{};
   Extensions::NetworkFilters::Common::Redis::Client::MockClient* client_{};
   Extensions::NetworkFilters::Common::Redis::Client::MockPoolRequest pool_request_;
-  Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks* pool_callbacks_{};
+  Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks* pool_callbacks_{};
   std::shared_ptr<RedisHealthChecker> health_checker_;
   Api::ApiPtr api_;
 };
@@ -437,10 +437,11 @@ TEST_F(RedisHealthCheckerTest, ExistsRedirected) {
   // Success with moved redirection
   EXPECT_CALL(*timeout_timer_, disableTimer());
   EXPECT_CALL(*interval_timer_, enableTimer(_, _));
-  NetworkFilters::Common::Redis::RespValue moved_response;
-  moved_response.type(NetworkFilters::Common::Redis::RespType::Error);
-  moved_response.asString() = "MOVED 1111 127.0.0.1:81"; // exact values not important
-  pool_callbacks_->onRedirection(moved_response);
+  NetworkFilters::Common::Redis::RespValuePtr moved_response{
+      new NetworkFilters::Common::Redis::RespValue()};
+  moved_response->type(NetworkFilters::Common::Redis::RespType::Error);
+  moved_response->asString() = "MOVED 1111 127.0.0.1:81"; // exact values not important
+  pool_callbacks_->onRedirection(std::move(moved_response), "127.0.0.1:81", false);
 
   expectExistsRequestCreate();
   interval_timer_->invokeCallback();
@@ -448,10 +449,11 @@ TEST_F(RedisHealthCheckerTest, ExistsRedirected) {
   // Success with ask redirection
   EXPECT_CALL(*timeout_timer_, disableTimer());
   EXPECT_CALL(*interval_timer_, enableTimer(_, _));
-  NetworkFilters::Common::Redis::RespValue ask_response;
-  ask_response.type(NetworkFilters::Common::Redis::RespType::Error);
-  ask_response.asString() = "ASK 1111 127.0.0.1:81"; // exact values not important
-  pool_callbacks_->onRedirection(ask_response);
+  NetworkFilters::Common::Redis::RespValuePtr ask_response{
+      new NetworkFilters::Common::Redis::RespValue()};
+  ask_response->type(NetworkFilters::Common::Redis::RespType::Error);
+  ask_response->asString() = "ASK 1111 127.0.0.1:81"; // exact values not important
+  pool_callbacks_->onRedirection(std::move(ask_response), "127.0.0.1:81", true);
 
   EXPECT_CALL(*client_, close());
 
diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD
index e225d839e0be..2b34c7904094 100644
--- a/test/extensions/quic_listeners/quiche/BUILD
+++ b/test/extensions/quic_listeners/quiche/BUILD
@@ -69,6 +69,26 @@ envoy_cc_test(
     ],
 )
 
+envoy_cc_test(
+    name = "envoy_quic_client_stream_test",
+    srcs = ["envoy_quic_client_stream_test.cc"],
+    tags = ["nofips"],
+    deps = [
+        ":quic_test_utils_for_envoy_lib",
+        ":test_utils_lib",
+        "//source/common/http:headers_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_client_connection_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_client_session_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib",
+        "//test/mocks/http:http_mocks",
+        "//test/mocks/http:stream_decoder_mock",
+        "//test/mocks/network:network_mocks",
+        "//test/test_common:utility_lib",
+        "@com_googlesource_quiche//:quic_core_http_spdy_session_lib",
+    ],
+)
+
 envoy_cc_test(
     name = "envoy_quic_server_session_test",
     srcs = ["envoy_quic_server_session_test.cc"],
@@ -96,6 +116,27 @@ envoy_cc_test(
     ],
 )
 
+envoy_cc_test(
+    name = "envoy_quic_client_session_test",
+    srcs = ["envoy_quic_client_session_test.cc"],
+    tags = ["nofips"],
+    deps = [
+        ":quic_test_utils_for_envoy_lib",
+        "//include/envoy/stats:stats_macros",
+        "//source/extensions/quic_listeners/quiche:codec_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_client_connection_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_client_session_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib",
+        "//test/mocks/http:http_mocks",
+        "//test/mocks/http:stream_decoder_mock",
+        "//test/mocks/network:network_mocks",
+        "//test/mocks/stats:stats_mocks",
+        "//test/test_common:logging_lib",
+        "//test/test_common:simulated_time_system_lib",
+    ],
+)
+
 envoy_cc_test(
     name = "active_quic_listener_test",
     srcs = ["active_quic_listener_test.cc"],
diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc
index 9ab3753ec855..80578bd22f13 100644
--- a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc
+++ b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc
@@ -189,5 +189,17 @@ TEST_F(EnvoyQuicAlarmTest, CancelActiveAlarm) {
   EXPECT_FALSE(unowned_delegate->fired());
 }
 
+TEST_F(EnvoyQuicAlarmTest, CancelUponDestruction) {
+  auto unowned_delegate = new TestDelegate();
+  quic::QuicAlarm* alarm = alarm_factory_.CreateAlarm(unowned_delegate);
+  // The alarm becomes active upon Set().
+  alarm->Set(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));
+  // The delegate should be destroyed along with the alarm.
+  delete alarm;
+  // The alarm firing callback should have been cancelled; otherwise the delegate
+  // would be used after it was freed.
+  advanceMsAndLoop(10);
+}
+
 } // namespace Quic
 } // namespace Envoy
diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc
new file mode 100644
index 000000000000..ea2c111e0a58
--- /dev/null
+++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc
@@ -0,0 +1,236 @@
+#pragma GCC diagnostic push
+// QUICHE allows unused parameters.
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+// QUICHE uses offsetof().
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+
+#include "quiche/quic/core/crypto/null_encrypter.h"
+#include "quiche/quic/test_tools/crypto_test_utils.h"
+#include "quiche/quic/test_tools/quic_test_utils.h"
+
+#pragma GCC diagnostic pop
+
+#include "extensions/quic_listeners/quiche/envoy_quic_client_session.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h"
+#include "extensions/quic_listeners/quiche/codec_impl.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_utils.h"
+
+#include "envoy/stats/stats_macros.h"
+#include "test/mocks/event/mocks.h"
+#include "test/mocks/http/stream_decoder.h"
+#include "test/mocks/http/mocks.h"
+#include "test/mocks/network/mocks.h"
+#include "test/mocks/stats/mocks.h"
+#include "test/test_common/logging.h"
+#include "test/test_common/simulated_time_system.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using testing::_;
+using testing::Invoke;
+using testing::Return;
+using testing::ReturnRef;
+
+namespace Envoy {
+namespace Quic {
+
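+// Test connection that installs a NullEncrypter at the forward-secure level so packets can be
+// written without a real handshake; connection close and control frame sends are mocked.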
+class TestEnvoyQuicClientConnection : public EnvoyQuicClientConnection {
+public:
+  TestEnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id,
+                                quic::QuicConnectionHelperInterface& helper,
+                                quic::QuicAlarmFactory& alarm_factory,
+                                quic::QuicPacketWriter& writer,
+                                const quic::ParsedQuicVersionVector& supported_versions,
+                                Event::Dispatcher& dispatcher,
+                                Network::ConnectionSocketPtr&& connection_socket)
+      : EnvoyQuicClientConnection(server_connection_id, helper, alarm_factory, &writer, false,
+                                  supported_versions, dispatcher, std::move(connection_socket)) {
+    SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE);
+    SetEncrypter(quic::ENCRYPTION_FORWARD_SECURE,
+                 std::make_unique<quic::NullEncrypter>(quic::Perspective::IS_CLIENT));
+  }
+
+  MOCK_METHOD2(SendConnectionClosePacket, void(quic::QuicErrorCode, const std::string&));
+  MOCK_METHOD1(SendControlFrame, bool(const quic::QuicFrame& frame));
+
+  using EnvoyQuicClientConnection::connectionStats;
+};
+
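+// Crypto stream that pretends encryption is already established, so tests can skip the real
+// handshake.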
+class TestQuicCryptoClientStream : public quic::QuicCryptoClientStream {
+public:
+  TestQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session,
+                             std::unique_ptr<quic::ProofVerifyContext> verify_context,
+                             quic::QuicCryptoClientConfig* crypto_config,
+                             ProofHandler* proof_handler)
+      : quic::QuicCryptoClientStream(server_id, session, std::move(verify_context), crypto_config,
+                                     proof_handler) {}
+
+  bool encryption_established() const override { return true; }
+};
+
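+// Client session that plugs in the fake crypto stream above instead of a real
+// QuicCryptoClientStream.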
+class TestEnvoyQuicClientSession : public EnvoyQuicClientSession {
+public:
+  TestEnvoyQuicClientSession(const quic::QuicConfig& config,
+                             const quic::ParsedQuicVersionVector& supported_versions,
+                             std::unique_ptr<EnvoyQuicClientConnection> connection,
+                             const quic::QuicServerId& server_id,
+                             quic::QuicCryptoClientConfig* crypto_config,
+                             quic::QuicClientPushPromiseIndex* push_promise_index,
+                             Event::Dispatcher& dispatcher, uint32_t send_buffer_limit)
+      : EnvoyQuicClientSession(config, supported_versions, std::move(connection), server_id,
+                               crypto_config, push_promise_index, dispatcher, send_buffer_limit) {}
+
+  std::unique_ptr<quic::QuicCryptoClientStreamBase> CreateQuicCryptoStream() override {
+    return std::make_unique<TestQuicCryptoClientStream>(
+        server_id(), this, crypto_config()->proof_verifier()->CreateDefaultContext(),
+        crypto_config(), this);
+  }
+};
+
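+// Parameterized on whether the IETF QUIC (version 99) reloadable flag is enabled.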
+class EnvoyQuicClientSessionTest : public testing::TestWithParam<bool> {
+public:
+  EnvoyQuicClientSessionTest()
+      : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher()),
+        connection_helper_(*dispatcher_),
+        alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() {
+          SetQuicReloadableFlag(quic_enable_version_99, GetParam());
+          return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0);
+        }()),
+        peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),
+                                                        12345)),
+        self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),
+                                                        54321)),
+        quic_connection_(new TestEnvoyQuicClientConnection(
+            quic::test::TestConnectionId(), connection_helper_, alarm_factory_, writer_,
+            quic_version_, *dispatcher_, createConnectionSocket(peer_addr_, self_addr_, nullptr))),
+        crypto_config_(quic::test::crypto_test_utils::ProofVerifierForTesting()),
+        envoy_quic_session_(quic_config_, quic_version_,
+                            std::unique_ptr<TestEnvoyQuicClientConnection>(quic_connection_),
+                            quic::QuicServerId("example.com", 443, false), &crypto_config_, nullptr,
+                            *dispatcher_,
+                            /*send_buffer_limit*/ 1024 * 1024),
+        http_connection_(envoy_quic_session_, http_connection_callbacks_) {
+    EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime());
+    EXPECT_EQ(EMPTY_STRING, envoy_quic_session_.nextProtocol());
+    EXPECT_EQ(Http::Protocol::Http3, http_connection_.protocol());
+
+    time_system_.sleep(std::chrono::milliseconds(1));
+    ON_CALL(writer_, WritePacket(_, _, _, _, _))
+        .WillByDefault(testing::Return(quic::WriteResult(quic::WRITE_STATUS_OK, 1)));
+  }
+
+  void SetUp() override {
+    envoy_quic_session_.Initialize();
+    envoy_quic_session_.addConnectionCallbacks(network_connection_callbacks_);
+    envoy_quic_session_.setConnectionStats(
+        {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr});
+    EXPECT_EQ(&read_total_, &quic_connection_->connectionStats().read_total_);
+  }
+
+  void TearDown() override {
+    if (quic_connection_->connected()) {
+      EXPECT_CALL(*quic_connection_,
+                  SendConnectionClosePacket(quic::QUIC_NO_ERROR, "Closed by application"));
+      EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));
+      envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);
+    }
+  }
+
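+  // Creates a new stream on the HTTP/3 codec, attaches the given callbacks and sends a
+  // header-only GET request.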
+  EnvoyQuicClientStream& sendGetRequest(Http::StreamDecoder& response_decoder,
+                                        Http::StreamCallbacks& stream_callbacks) {
+    auto& stream =
+        dynamic_cast<EnvoyQuicClientStream&>(http_connection_.newStream(response_decoder));
+    stream.getStream().addCallbacks(stream_callbacks);
+
+    std::string host("www.abc.com");
+    Http::TestHeaderMapImpl request_headers{
+        {":authority", host}, {":method", "GET"}, {":path", "/"}};
+    stream.encodeHeaders(request_headers, true);
+    return stream;
+  }
+
+protected:
+  Event::SimulatedTimeSystemHelper time_system_;
+  Api::ApiPtr api_;
+  Event::DispatcherPtr dispatcher_;
+  EnvoyQuicConnectionHelper connection_helper_;
+  EnvoyQuicAlarmFactory alarm_factory_;
+  quic::ParsedQuicVersionVector quic_version_;
+  testing::NiceMock<quic::test::MockPacketWriter> writer_;
+  Network::Address::InstanceConstSharedPtr peer_addr_;
+  Network::Address::InstanceConstSharedPtr self_addr_;
+  TestEnvoyQuicClientConnection* quic_connection_;
+  quic::QuicConfig quic_config_;
+  quic::QuicCryptoClientConfig crypto_config_;
+  TestEnvoyQuicClientSession envoy_quic_session_;
+  Network::MockConnectionCallbacks network_connection_callbacks_;
+  Http::MockServerConnectionCallbacks http_connection_callbacks_;
+  testing::StrictMock<Stats::MockCounter> read_total_;
+  testing::StrictMock<Stats::MockGauge> read_current_;
+  testing::StrictMock<Stats::MockCounter> write_total_;
+  testing::StrictMock<Stats::MockGauge> write_current_;
+  QuicHttpClientConnectionImpl http_connection_;
+};
+
+INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientSessionTests, EnvoyQuicClientSessionTest,
+                         testing::ValuesIn({true, false}));
+
+TEST_P(EnvoyQuicClientSessionTest, NewStream) {
+  Http::MockStreamDecoder response_decoder;
+  Http::MockStreamCallbacks stream_callbacks;
+  EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks);
+
+  quic::QuicHeaderList headers;
+  headers.OnHeaderBlockStart();
+  headers.OnHeader(":status", "200");
+  headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);
+  // Response headers should be propagated to decoder.
+  EXPECT_CALL(response_decoder, decodeHeaders_(_, /*end_stream=*/true))
+      .WillOnce(Invoke([](const Http::HeaderMapPtr& decoded_headers, bool) {
+        EXPECT_EQ("200", decoded_headers->Status()->value().getStringView());
+      }));
+  stream.OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers);
+}
+
+TEST_P(EnvoyQuicClientSessionTest, OnResetFrame) {
+  Http::MockStreamDecoder response_decoder;
+  Http::MockStreamCallbacks stream_callbacks;
+  EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks);
+
+  // G-QUIC or IETF bi-directional stream.
+  quic::QuicStreamId stream_id = stream.id();
+  quic::QuicRstStreamFrame rst1(/*control_frame_id=*/1u, stream_id,
+                                quic::QUIC_ERROR_PROCESSING_STREAM, /*bytes_written=*/0u);
+  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::RemoteReset, _));
+  stream.OnStreamReset(rst1);
+}
+
+TEST_P(EnvoyQuicClientSessionTest, ConnectionClose) {
+  std::string error_details("dummy details");
+  quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA);
+  quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, error, error_details,
+                                       /* transport_close_frame_type = */ 0);
+  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose));
+  quic_connection_->OnConnectionCloseFrame(frame);
+  EXPECT_EQ(absl::StrCat(quic::QuicErrorCodeToString(error), " with details: ", error_details),
+            envoy_quic_session_.transportFailureReason());
+  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());
+}
+
+TEST_P(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) {
+  Http::MockStreamDecoder response_decoder;
+  Http::MockStreamCallbacks stream_callbacks;
+  EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks);
+  EXPECT_CALL(*quic_connection_,
+              SendConnectionClosePacket(quic::QUIC_NO_ERROR, "Closed by application"));
+  EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose));
+  EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _));
+  envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush);
+  EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state());
+  EXPECT_TRUE(stream.write_side_closed() && stream.reading_stopped());
+}
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc
new file mode 100644
index 000000000000..4a11acde7965
--- /dev/null
+++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc
@@ -0,0 +1,261 @@
+#include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_client_stream.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_utils.h"
+
+#include "test/extensions/quic_listeners/quiche/test_utils.h"
+#include "test/mocks/http/mocks.h"
+#include "test/mocks/http/stream_decoder.h"
+#include "test/mocks/network/mocks.h"
+#include "test/test_common/utility.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace Envoy {
+namespace Quic {
+
+using testing::_;
+using testing::Invoke;
+using testing::Return;
+
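+// Parameterized on whether the IETF QUIC (version 99) reloadable flag is enabled.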
+class EnvoyQuicClientStreamTest : public testing::TestWithParam<bool> {
+public:
+  EnvoyQuicClientStreamTest()
+      : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()),
+        connection_helper_(*dispatcher_),
+        alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() {
+          SetQuicReloadableFlag(quic_enable_version_99, GetParam());
+          return quic::CurrentSupportedVersions()[0];
+        }()),
+        peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),
+                                                        12345)),
+        self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(),
+                                                        54321)),
+        quic_connection_(new EnvoyQuicClientConnection(
+            quic::test::TestConnectionId(), connection_helper_, alarm_factory_, &writer_,
+            /*owns_writer=*/false, {quic_version_}, *dispatcher_,
+            createConnectionSocket(peer_addr_, self_addr_, nullptr))),
+        quic_session_(quic_config_, {quic_version_}, quic_connection_, *dispatcher_,
+                      quic_config_.GetInitialStreamFlowControlWindowToSend() * 2),
+        stream_id_(quic_version_.transport_version == quic::QUIC_VERSION_99 ? 4u : 5u),
+        quic_stream_(new EnvoyQuicClientStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)),
+        request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}} {
+    quic_stream_->setDecoder(stream_decoder_);
+    quic_stream_->addCallbacks(stream_callbacks_);
+    quic_session_.ActivateStream(std::unique_ptr<EnvoyQuicClientStream>(quic_stream_));
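+    // Swallow all writes: the session reports stream data as consumed (FIN included unless
+    // NO_FIN) and the packet writer reports success, so no real I/O happens.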
+    EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _))
+        .WillRepeatedly(Invoke([](quic::QuicStream*, quic::QuicStreamId, size_t write_length,
+                                  quic::QuicStreamOffset, quic::StreamSendingState state) {
+          return quic::QuicConsumedData{write_length, state != quic::NO_FIN};
+        }));
+    EXPECT_CALL(writer_, WritePacket(_, _, _, _, _))
+        .WillRepeatedly(Invoke([](const char*, size_t buf_len, const quic::QuicIpAddress&,
+                                  const quic::QuicSocketAddress&, quic::PerPacketOptions*) {
+          return quic::WriteResult{quic::WRITE_STATUS_OK, static_cast<int>(buf_len)};
+        }));
+  }
+
+  void SetUp() override {
+    quic_session_.Initialize();
+    quic_connection_->setUpConnectionSocket();
+    response_headers_.OnHeaderBlockStart();
+    response_headers_.OnHeader(":status", "200");
+    response_headers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0,
+                                       /*compressed_header_bytes=*/0);
+
+    trailers_.OnHeaderBlockStart();
+    trailers_.OnHeader("key1", "value1");
+    if (quic_version_.transport_version != quic::QUIC_VERSION_99) {
+      // ":final-offset" is required and stripped off by quic.
+      trailers_.OnHeader(":final-offset", absl::StrCat("", response_body_.length()));
+    }
+    trailers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0);
+  }
+
+  void TearDown() override {
+    if (quic_connection_->connected()) {
+      quic_connection_->CloseConnection(
+          quic::QUIC_NO_ERROR, "Closed by application",
+          quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
+    }
+  }
+
+protected:
+  Api::ApiPtr api_;
+  Event::DispatcherPtr dispatcher_;
+  EnvoyQuicConnectionHelper connection_helper_;
+  EnvoyQuicAlarmFactory alarm_factory_;
+  testing::NiceMock<quic::test::MockPacketWriter> writer_;
+  quic::ParsedQuicVersion quic_version_;
+  quic::QuicConfig quic_config_;
+  Network::Address::InstanceConstSharedPtr peer_addr_;
+  Network::Address::InstanceConstSharedPtr self_addr_;
+  EnvoyQuicClientConnection* quic_connection_;
+  MockEnvoyQuicClientSession quic_session_;
+  quic::QuicStreamId stream_id_;
+  EnvoyQuicClientStream* quic_stream_;
+  Http::MockStreamDecoder stream_decoder_;
+  Http::MockStreamCallbacks stream_callbacks_;
+  std::string host_{"www.abc.com"};
+  Http::TestHeaderMapImpl request_headers_;
+  quic::QuicHeaderList response_headers_;
+  quic::QuicHeaderList trailers_;
+  Buffer::OwnedImpl request_body_{"Hello world"};
+  std::string response_body_{"OK\n"};
+};
+
+INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientStreamTests, EnvoyQuicClientStreamTest,
+                         testing::ValuesIn({true, false}));
+
+TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) {
+  quic_stream_->encodeHeaders(request_headers_, false);
+  quic_stream_->encodeData(request_body_, true);
+
+  EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false))
+      .WillOnce(Invoke([](const Http::HeaderMapPtr& headers, bool) {
+        EXPECT_EQ("200", headers->Status()->value().getStringView());
+      }));
+  quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(),
+                                   response_headers_);
+  EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());
+
+  EXPECT_CALL(stream_decoder_, decodeData(_, _))
+      .Times(testing::AtMost(2))
+      .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) {
+        EXPECT_EQ(response_body_, buffer.toString());
+        EXPECT_FALSE(finished_reading);
+      }))
+      // Depending on the QUIC version, there may be an empty STREAM_FRAME with FIN. But since
+      // there are trailers, finished_reading should always be false.
+      .WillOnce(Invoke([](Buffer::Instance& buffer, bool finished_reading) {
+        EXPECT_FALSE(finished_reading);
+        EXPECT_EQ(0, buffer.length());
+      }));
+  std::string data = response_body_;
+  if (quic_version_.transport_version == quic::QUIC_VERSION_99) {
+    std::unique_ptr<char[]> data_buffer;
+    quic::QuicByteCount data_frame_header_length =
+        quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer);
+    quic::QuicStringPiece data_frame_header(data_buffer.get(), data_frame_header_length);
+    data = absl::StrCat(data_frame_header, response_body_);
+  }
+  quic::QuicStreamFrame frame(stream_id_, false, 0, data);
+  quic_stream_->OnStreamFrame(frame);
+
+  EXPECT_CALL(stream_decoder_, decodeTrailers_(_))
+      .WillOnce(Invoke([](const Http::HeaderMapPtr& headers) {
+        Http::LowerCaseString key1("key1");
+        Http::LowerCaseString key2(":final-offset");
+        EXPECT_EQ("value1", headers->get(key1)->value().getStringView());
+        EXPECT_EQ(nullptr, headers->get(key2));
+      }));
+  quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_);
+}
+
+TEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) {
+  if (quic::VersionUsesHttp3(quic_version_.transport_version)) {
+    EXPECT_CALL(stream_callbacks_, onResetStream(_, _));
+    return;
+  }
+  quic_stream_->encodeHeaders(request_headers_, true);
+  EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false))
+      .WillOnce(Invoke([](const Http::HeaderMapPtr& headers, bool) {
+        EXPECT_EQ("200", headers->Status()->value().getStringView());
+      }));
+  quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(),
+                                   response_headers_);
+  EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());
+
+  // Trailers should be delivered to the HCM later, after the body arrives.
+  quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_);
+
+  std::string data = response_body_;
+  if (quic_version_.transport_version == quic::QUIC_VERSION_99) {
+    std::unique_ptr<char[]> data_buffer;
+    quic::QuicByteCount data_frame_header_length =
+        quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer);
+    quic::QuicStringPiece data_frame_header(data_buffer.get(), data_frame_header_length);
+    data = absl::StrCat(data_frame_header, response_body_);
+  }
+  quic::QuicStreamFrame frame(stream_id_, false, 0, data);
+  EXPECT_CALL(stream_decoder_, decodeData(_, _))
+      .Times(testing::AtMost(2))
+      .WillOnce(Invoke([this](Buffer::Instance& buffer, bool finished_reading) {
+        EXPECT_EQ(response_body_, buffer.toString());
+        EXPECT_FALSE(finished_reading);
+      }))
+      // Depending on the QUIC version, there may be an empty STREAM_FRAME with FIN. But since
+      // there are trailers, finished_reading should always be false.
+      .WillOnce(Invoke([](Buffer::Instance& buffer, bool finished_reading) {
+        EXPECT_FALSE(finished_reading);
+        EXPECT_EQ(0, buffer.length());
+      }));
+
+  EXPECT_CALL(stream_decoder_, decodeTrailers_(_))
+      .WillOnce(Invoke([](const Http::HeaderMapPtr& headers) {
+        Http::LowerCaseString key1("key1");
+        Http::LowerCaseString key2(":final-offset");
+        EXPECT_EQ("value1", headers->get(key1)->value().getStringView());
+        EXPECT_EQ(nullptr, headers->get(key2));
+      }));
+  quic_stream_->OnStreamFrame(frame);
+}
+
+TEST_P(EnvoyQuicClientStreamTest, WatermarkSendBuffer) {
+  // Bump the connection flow control window so it is large enough not to trigger connection-level
+  // flow control blocking.
+  quic::QuicWindowUpdateFrame window_update(
+      quic::kInvalidControlFrameId,
+      quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024);
+  quic_session_.OnWindowUpdateFrame(window_update);
+
+  request_headers_.addCopy(":content-length", "32770"); // 32KB + 2 bytes
+  quic_stream_->encodeHeaders(request_headers_, /*end_stream=*/false);
+  // Encode a 32KB request body. The first 16KB should be written out right away; the rest should
+  // be buffered. The high watermark is 16KB, so this call should make the send buffer reach its
+  // high watermark.
+  std::string request(32 * 1024 + 1, 'a');
+  Buffer::OwnedImpl buffer(request);
+  EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark());
+  quic_stream_->encodeData(buffer, false);
+
+  EXPECT_EQ(0u, buffer.length());
+  EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked());
+
+  // Receive a WINDOW_UPDATE frame not large enough to drain half of the send
+  // buffer.
+  quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(),
+                                             16 * 1024 + 8 * 1024);
+  quic_stream_->OnWindowUpdateFrame(window_update1);
+  EXPECT_FALSE(quic_stream_->flow_controller()->IsBlocked());
+  quic_session_.OnCanWrite();
+  EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked());
+
+  // Receive another WINDOW_UPDATE frame to drain the send buffer to below the low
+  // watermark.
+  quic::QuicWindowUpdateFrame window_update2(quic::kInvalidControlFrameId, quic_stream_->id(),
+                                             16 * 1024 + 8 * 1024 + 1024);
+  quic_stream_->OnWindowUpdateFrame(window_update2);
+  EXPECT_FALSE(quic_stream_->flow_controller()->IsBlocked());
+  EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([this]() {
+    std::string rest_request(1, 'a');
+    Buffer::OwnedImpl buffer(rest_request);
+    quic_stream_->encodeData(buffer, true);
+  }));
+  quic_session_.OnCanWrite();
+  EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked());
+
+  quic::QuicWindowUpdateFrame window_update3(quic::kInvalidControlFrameId, quic_stream_->id(),
+                                             32 * 1024 + 1024);
+  quic_stream_->OnWindowUpdateFrame(window_update3);
+  quic_session_.OnCanWrite();
+
+  EXPECT_TRUE(quic_stream_->local_end_stream_);
+  EXPECT_TRUE(quic_stream_->write_side_closed());
+  EXPECT_CALL(stream_callbacks_, onResetStream(_, _));
+}
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc
index e64f4fcf12eb..9fb48a169010 100644
--- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc
+++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc
@@ -7,6 +7,7 @@
 #pragma GCC diagnostic ignored "-Winvalid-offsetof"
 
 #include "quiche/quic/core/quic_dispatcher.h"
+#include "quiche/quic/test_tools/quic_dispatcher_peer.h"
 #include "quiche/quic/test_tools/crypto_test_utils.h"
 #include "quiche/quic/test_tools/quic_test_utils.h"
 #include "quiche/quic/platform/api/quic_text_utils.h"
@@ -36,18 +37,6 @@ using testing::Invoke;
 using testing::Return;
 using testing::ReturnRef;
 
-namespace quic {
-namespace test {
-class QuicDispatcherPeer {
-public:
-  static quic::QuicTimeWaitListManager* time_wait_list_manager(QuicDispatcher* dispatcher) {
-    return dispatcher->time_wait_list_manager_.get();
-  }
-};
-
-} // namespace test
-} // namespace quic
-
 namespace Envoy {
 namespace Quic {
 
@@ -233,7 +222,7 @@ TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToMissingFilterChain) {
       *received_packet);
   EXPECT_EQ(0u, envoy_quic_dispatcher_.session_map().size());
   EXPECT_EQ(0u, connection_handler_.numConnections());
-  EXPECT_TRUE(quic::test::QuicDispatcherPeer::time_wait_list_manager(&envoy_quic_dispatcher_)
+  EXPECT_TRUE(quic::test::QuicDispatcherPeer::GetTimeWaitListManager(&envoy_quic_dispatcher_)
                   ->IsConnectionIdInTimeWait(connection_id));
   EXPECT_EQ(1u, listener_stats_.no_filter_chain_match_.value());
 }
@@ -264,7 +253,7 @@ TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToEmptyFilterChain) {
       *received_packet);
   EXPECT_EQ(0u, envoy_quic_dispatcher_.session_map().size());
   EXPECT_EQ(0u, connection_handler_.numConnections());
-  EXPECT_TRUE(quic::test::QuicDispatcherPeer::time_wait_list_manager(&envoy_quic_dispatcher_)
+  EXPECT_TRUE(quic::test::QuicDispatcherPeer::GetTimeWaitListManager(&envoy_quic_dispatcher_)
                   ->IsConnectionIdInTimeWait(connection_id));
 }
 
diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc
index 4737a532f558..57bdf94e9e1f 100644
--- a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc
+++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc
@@ -62,8 +62,8 @@ TEST_F(EnvoyQuicFakeProofSourceTest, TestGetProof) {
 TEST_F(EnvoyQuicFakeProofSourceTest, TestVerifyProof) {
   EXPECT_EQ(quic::QUIC_SUCCESS,
             proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, chlo_hash_,
-                                        expected_certs_, "Fake timestamp", expected_signature_,
-                                        nullptr, nullptr, nullptr, nullptr));
+                                        expected_certs_, "", expected_signature_, nullptr, nullptr,
+                                        nullptr, nullptr));
   std::vector<std::string> wrong_certs{"wrong cert"};
   EXPECT_EQ(quic::QUIC_FAILURE,
             proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, chlo_hash_,
diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc
index 87c6e320091c..aa9d2e3ff6fd 100644
--- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc
+++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc
@@ -47,8 +47,6 @@ using testing::Invoke;
 using testing::Return;
 using testing::ReturnRef;
 
-#include <iostream>
-
 namespace Envoy {
 namespace Quic {
 
@@ -131,7 +129,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam<bool> {
   bool installReadFilter() {
     // Setup read filter.
     envoy_quic_session_.addReadFilter(read_filter_);
-    EXPECT_EQ(Http::Protocol::Http2,
+    EXPECT_EQ(Http::Protocol::Http3,
               read_filter_->callbacks_->connection().streamInfo().protocol().value());
     EXPECT_EQ(envoy_quic_session_.id(), read_filter_->callbacks_->connection().id());
     EXPECT_EQ(&envoy_quic_session_, &read_filter_->callbacks_->connection());
@@ -143,7 +141,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam<bool> {
       // Create ServerConnection instance and setup callbacks for it.
       http_connection_ = std::make_unique<QuicHttpServerConnectionImpl>(envoy_quic_session_,
                                                                         http_connection_callbacks_);
-      EXPECT_EQ(Http::Protocol::Http2, http_connection_->protocol());
+      EXPECT_EQ(Http::Protocol::Http3, http_connection_->protocol());
       // Stop iteration to avoid calling getRead/WriteBuffer().
       return Network::FilterStatus::StopIteration;
     }));
diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc
index 54304dd722db..532ada283095 100644
--- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc
+++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc
@@ -52,8 +52,8 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam<bool> {
     quic_session_.ActivateStream(std::unique_ptr<EnvoyQuicServerStream>(quic_stream_));
     EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _))
         .WillRepeatedly(Invoke([](quic::QuicStream*, quic::QuicStreamId, size_t write_length,
-                                  quic::QuicStreamOffset, quic::StreamSendingState) {
-          return quic::QuicConsumedData{write_length, true};
+                                  quic::QuicStreamOffset, quic::StreamSendingState state) {
+          return quic::QuicConsumedData{write_length, state != quic::NO_FIN};
         }));
     EXPECT_CALL(writer_, WritePacket(_, _, _, _, _))
         .WillRepeatedly(Invoke([](const char*, size_t buf_len, const quic::QuicIpAddress&,
@@ -106,12 +106,8 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam<bool> {
           EXPECT_EQ(Http::Headers::get().MethodValues.Post,
                     headers->Method()->value().getStringView());
         }));
-    if (quic::VersionUsesHttp3(quic_version_.transport_version)) {
-      quic_stream_->OnHeadersDecoded(request_headers_);
-    } else {
-      quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(),
-                                       request_headers_);
-    }
+    quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(),
+                                     request_headers_);
     EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());
 
     EXPECT_CALL(stream_decoder_, decodeData(_, _))
@@ -247,6 +243,7 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) {
 
   // Re-enable reading just once shouldn't unblock stream.
   quic_stream_->readDisable(false);
+  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);
 
   // This data frame should also be buffered.
   std::string last_part_request = bodyToStreamPayload("ccc");
@@ -264,6 +261,8 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) {
         EXPECT_TRUE(finished_reading);
       }));
   quic_stream_->readDisable(false);
+  dispatcher_->run(Event::Dispatcher::RunType::NonBlock);
+
   EXPECT_CALL(stream_callbacks_, onResetStream(_, _));
 }
 
@@ -276,12 +275,8 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) {
         EXPECT_EQ(Http::Headers::get().MethodValues.Post,
                   headers->Method()->value().getStringView());
       }));
-  if (quic::VersionUsesHttp3(quic_version_.transport_version)) {
-    quic_stream_->OnHeadersDecoded(request_headers_);
-  } else {
-    quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(),
-                                     request_headers_);
-  }
+  quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(),
+                                   request_headers_);
   EXPECT_TRUE(quic_stream_->FinishedReadingHeaders());
 
   std::string payload(1024, 'a');
@@ -314,10 +309,18 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) {
 TEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) {
   sendRequest(request_body_, true, request_body_.size() * 2);
 
+  // Bump the connection flow control window so it is large enough not to trigger connection-level
+  // flow control blocking.
+  quic::QuicWindowUpdateFrame window_update(
+      quic::kInvalidControlFrameId,
+      quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024);
+  quic_session_.OnWindowUpdateFrame(window_update);
+
   // 32KB + 2 byte. The initial stream flow control window is 16k.
   response_headers_.addCopy(":content-length", "32770");
   quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false);
-  // encode 32kB response body. first 16KB should be written out right away. The
+
+  // Encode a 32KB response body. The first 16KB should be written out right away. The
   // rest should be buffered. The high watermark is 16KB, so this call should
   // make the send buffer reach its high watermark.
   std::string response(32 * 1024 + 1, 'a');
@@ -327,12 +330,6 @@ TEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) {
 
   EXPECT_EQ(0u, buffer.length());
   EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked());
-  // Bump connection flow control window large enough not to cause connection
-  // level flow control blocked.
-  quic::QuicWindowUpdateFrame window_update(
-      quic::kInvalidControlFrameId,
-      quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024);
-  quic_session_.OnWindowUpdateFrame(window_update);
 
   // Receive a WINDOW_UPDATE frame not large enough to drain half of the send
   // buffer.
diff --git a/test/extensions/quic_listeners/quiche/integration/BUILD b/test/extensions/quic_listeners/quiche/integration/BUILD
new file mode 100644
index 000000000000..2f2a48b28ab8
--- /dev/null
+++ b/test/extensions/quic_listeners/quiche/integration/BUILD
@@ -0,0 +1,26 @@
+licenses(["notice"])  # Apache 2
+
+load(
+    "//bazel:envoy_build_system.bzl",
+    "envoy_cc_test",
+    "envoy_package",
+)
+
+envoy_package()
+
+envoy_cc_test(
+    name = "quic_http_integration_test",
+    srcs = ["quic_http_integration_test.cc"],
+    data = ["//test/config/integration/certs"],
+    tags = ["nofips"],
+    deps = [
+        "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib",
+        "//source/extensions/quic_listeners/quiche:codec_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_client_connection_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_client_session_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib",
+        "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib",
+        "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib",
+        "//test/integration:http_integration_lib",
+    ],
+)
diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc
new file mode 100644
index 000000000000..d65792479b98
--- /dev/null
+++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc
@@ -0,0 +1,182 @@
+#include "test/config/utility.h"
+#include "test/integration/http_integration.h"
+#include "test/test_common/utility.h"
+
+#pragma GCC diagnostic push
+// QUICHE allows unused parameters.
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+// QUICHE uses offsetof().
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+
+#include "quiche/quic/core/http/quic_client_push_promise_index.h"
+#include "quiche/quic/core/quic_utils.h"
+
+#pragma GCC diagnostic pop
+
+#include "extensions/quic_listeners/quiche/envoy_quic_client_session.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h"
+#include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h"
+
+namespace Envoy {
+namespace Quic {
+
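+// Records the last stream reset reason so tests can assert on how a stream was torn down.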
+class CodecClientCallbacksForTest : public Http::CodecClientCallbacks {
+public:
+  void onStreamDestroy() override {}
+
+  void onStreamReset(Http::StreamResetReason reason) override {
+    last_stream_reset_reason_ = reason;
+  }
+
+  Http::StreamResetReason last_stream_reset_reason_{Http::StreamResetReason::LocalReset};
+};
+
+class QuicHttpIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,
+                                public HttpIntegrationTest {
+public:
+  QuicHttpIntegrationTest()
+      : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam(),
+                            ConfigHelper::QUIC_HTTP_PROXY_CONFIG),
+        supported_versions_(quic::CurrentSupportedVersions()),
+        crypto_config_(std::make_unique<EnvoyQuicFakeProofVerifier>()), conn_helper_(*dispatcher_),
+        alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) {}
+
+  Network::ClientConnectionPtr makeClientConnection(uint32_t port) override {
+    Network::Address::InstanceConstSharedPtr server_addr = Network::Utility::resolveUrl(
+        fmt::format("udp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port));
+    Network::Address::InstanceConstSharedPtr local_addr =
+        Network::Test::getCanonicalLoopbackAddress(version_);
+    // Initiate a QUIC connection with the highest supported version. If the server does not
+    // support it, this connection will fail.
+    // TODO(danzh) Implement retry upon version mismatch and modify the test framework to specify
+    // a different version set on the server side to test that.
+    auto connection = std::make_unique<EnvoyQuicClientConnection>(
+        getNextServerDesignatedConnectionId(), server_addr, conn_helper_, alarm_factory_,
+        quic::ParsedQuicVersionVector{supported_versions_[0]}, local_addr, *dispatcher_, nullptr);
+    auto session = std::make_unique<EnvoyQuicClientSession>(
+        quic_config_, supported_versions_, std::move(connection), server_id_, &crypto_config_,
+        &push_promise_index_, *dispatcher_, 0);
+    session->Initialize();
+    return session;
+  }
+
+  // This call may fail with INVALID_VERSION because a QUIC connection doesn't support
+  // in-connection version negotiation.
+  // TODO(#8479) Propagate the INVALID_VERSION error to the caller and let the caller use the
+  // server-advertised version list to create a new connection with a mutually supported version
+  // and make the client codec again.
+  IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn) override {
+    IntegrationCodecClientPtr codec = HttpIntegrationTest::makeRawHttpConnection(std::move(conn));
+    if (codec->disconnected()) {
+      // The connection may get closed during version negotiation or handshake.
+      ENVOY_LOG(error, "Failed to connect to server with error: {}",
+                codec->connection()->transportFailureReason());
+    } else {
+      codec->setCodecClientCallbacks(client_codec_callback_);
+    }
+    return codec;
+  }
+
+  quic::QuicConnectionId getNextServerDesignatedConnectionId() {
+    quic::QuicCryptoClientConfig::CachedState* cached = crypto_config_.LookupOrCreate(server_id_);
+    // If the cached state indicates that we should use a server-designated
+    // connection ID, then return that connection ID.
+    quic::QuicConnectionId conn_id = cached->has_server_designated_connection_id()
+                                         ? cached->GetNextServerDesignatedConnectionId()
+                                         : quic::EmptyQuicConnectionId();
+    return conn_id.IsEmpty() ? quic::QuicUtils::CreateRandomConnectionId() : conn_id;
+  }
+
+  void initialize() override {
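+    // Configure a DownstreamTlsContext on the listener's filter chain transport socket and verify
+    // that the HCM codec type is HTTP/3.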
+    config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) {
+      envoy::api::v2::auth::DownstreamTlsContext tls_context;
+      ConfigHelper::initializeTls({}, *tls_context.mutable_common_tls_context());
+      auto* filter_chain =
+          bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0);
+      auto* transport_socket = filter_chain->mutable_transport_socket();
+      TestUtility::jsonConvert(tls_context, *transport_socket->mutable_config());
+    });
+    config_helper_.addConfigModifier(
+        [](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager&
+               hcm) {
+          hcm.mutable_delayed_close_timeout()->set_nanos(0);
+          EXPECT_EQ(hcm.codec_type(), envoy::config::filter::network::http_connection_manager::v2::
+                                          HttpConnectionManager::HTTP3);
+        });
+
+    HttpIntegrationTest::initialize();
+    registerTestServerPorts({"http"});
+  }
+
+protected:
+  quic::QuicConfig quic_config_;
+  quic::QuicServerId server_id_{"example.com", 443, false};
+  quic::QuicClientPushPromiseIndex push_promise_index_;
+  quic::ParsedQuicVersionVector supported_versions_;
+  quic::QuicCryptoClientConfig crypto_config_;
+  EnvoyQuicConnectionHelper conn_helper_;
+  EnvoyQuicAlarmFactory alarm_factory_;
+  CodecClientCallbacksForTest client_codec_callback_;
+};
+
+INSTANTIATE_TEST_SUITE_P(IpVersions, QuicHttpIntegrationTest,
+                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),
+                         TestUtility::ipTestParamsToString);
+
+TEST_P(QuicHttpIntegrationTest, GetRequestAndEmptyResponse) {
+  testRouterHeaderOnlyRequestAndResponse();
+}
+
+TEST_P(QuicHttpIntegrationTest, GetRequestAndResponseWithBody) {
+  initialize();
+  sendRequestAndVerifyResponse(default_request_headers_, /*request_size=*/0,
+                               default_response_headers_, /*response_size=*/1024,
+                               /*backend_index*/ 0);
+}
+
+TEST_P(QuicHttpIntegrationTest, PostRequestAndResponseWithBody) {
+  testRouterRequestAndResponseWithBody(1024, 512, false);
+}
+
+TEST_P(QuicHttpIntegrationTest, PostRequestWithBigHeadersAndResponseWithBody) {
+  testRouterRequestAndResponseWithBody(1024, 512, true);
+}
+
+TEST_P(QuicHttpIntegrationTest, RouterUpstreamDisconnectBeforeRequestComplete) {
+  testRouterUpstreamDisconnectBeforeRequestComplete();
+}
+
+TEST_P(QuicHttpIntegrationTest, RouterUpstreamDisconnectBeforeResponseComplete) {
+  testRouterUpstreamDisconnectBeforeResponseComplete();
+  EXPECT_EQ(Http::StreamResetReason::RemoteReset, client_codec_callback_.last_stream_reset_reason_);
+}
+
+TEST_P(QuicHttpIntegrationTest, RouterDownstreamDisconnectBeforeRequestComplete) {
+  testRouterDownstreamDisconnectBeforeRequestComplete();
+}
+
+TEST_P(QuicHttpIntegrationTest, RouterDownstreamDisconnectBeforeResponseComplete) {
+  testRouterDownstreamDisconnectBeforeResponseComplete();
+}
+
+TEST_P(QuicHttpIntegrationTest, RouterUpstreamResponseBeforeRequestComplete) {
+  testRouterUpstreamResponseBeforeRequestComplete();
+}
+
+TEST_P(QuicHttpIntegrationTest, Retry) { testRetry(); }
+
+TEST_P(QuicHttpIntegrationTest, UpstreamReadDisabledOnGiantResponseBody) {
+  config_helper_.setBufferLimits(/*upstream_buffer_limit=*/1024, /*downstream_buffer_limit=*/1024);
+  testRouterRequestAndResponseWithBody(/*request_size=*/512, /*response_size=*/1024 * 1024, false);
+}
+
+TEST_P(QuicHttpIntegrationTest, DownstreamReadDisabledOnGiantPost) {
+  config_helper_.setBufferLimits(/*upstream_buffer_limit=*/1024, /*downstream_buffer_limit=*/1024);
+  testRouterRequestAndResponseWithBody(/*request_size=*/1024 * 1024, /*response_size=*/1024, false);
+}
+
+} // namespace Quic
+} // namespace Envoy
diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc
index b2848e0594db..0dbaa876bd7e 100644
--- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc
+++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc
@@ -27,6 +27,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "quiche/common/platform/api/quiche_endian.h"
 #include "quiche/epoll_server/fake_simple_epoll_server.h"
 #include "quiche/quic/platform/api/quic_aligned.h"
 #include "quiche/quic/platform/api/quic_arraysize.h"
@@ -34,7 +35,6 @@
 #include "quiche/quic/platform/api/quic_cert_utils.h"
 #include "quiche/quic/platform/api/quic_client_stats.h"
 #include "quiche/quic/platform/api/quic_containers.h"
-#include "quiche/quic/platform/api/quic_endian.h"
 #include "quiche/quic/platform/api/quic_estimate_memory_usage.h"
 #include "quiche/quic/platform/api/quic_expect_bug.h"
 #include "quiche/quic/platform/api/quic_exported_stats.h"
@@ -188,9 +188,10 @@ TEST_F(QuicPlatformTest, QuicInlinedVector) {
   EXPECT_EQ(3, vec[0]);
 }
 
-TEST_F(QuicPlatformTest, QuicEndian) {
-  EXPECT_EQ(0x1234, QuicEndian::NetToHost16(QuicEndian::HostToNet16(0x1234)));
-  EXPECT_EQ(0x12345678, QuicEndian::NetToHost32(QuicEndian::HostToNet32(0x12345678)));
+TEST_F(QuicPlatformTest, QuicheEndian) {
+  EXPECT_EQ(0x1234, quiche::QuicheEndian::NetToHost16(quiche::QuicheEndian::HostToNet16(0x1234)));
+  EXPECT_EQ(0x12345678,
+            quiche::QuicheEndian::NetToHost32(quiche::QuicheEndian::HostToNet32(0x12345678)));
 }
 
 TEST_F(QuicPlatformTest, QuicEstimateMemoryUsage) {
diff --git a/test/extensions/quic_listeners/quiche/platform/quiche_test_impl.h b/test/extensions/quic_listeners/quiche/platform/quiche_test_impl.h
index e351735c3ab1..3d9fcf729bce 100644
--- a/test/extensions/quic_listeners/quiche/platform/quiche_test_impl.h
+++ b/test/extensions/quic_listeners/quiche/platform/quiche_test_impl.h
@@ -8,3 +8,11 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+
+namespace quiche {
+namespace test {
+
+using QuicheTest = ::testing::Test;
+
+} // namespace test
+} // namespace quiche
diff --git a/test/extensions/quic_listeners/quiche/test_utils.h b/test/extensions/quic_listeners/quiche/test_utils.h
index 6efb3678a5dc..690191eda8c1 100644
--- a/test/extensions/quic_listeners/quiche/test_utils.h
+++ b/test/extensions/quic_listeners/quiche/test_utils.h
@@ -7,8 +7,10 @@
 #pragma GCC diagnostic ignored "-Winvalid-offsetof"
 
 #include "quiche/quic/core/http/quic_spdy_session.h"
+#include "quiche/quic/core/http/quic_spdy_client_session.h"
 #include "quiche/quic/test_tools/quic_test_utils.h"
 #include "quiche/quic/core/quic_utils.h"
+#include "quiche/quic/test_tools/crypto_test_utils.h"
 
 #pragma GCC diagnostic pop
 
@@ -22,7 +24,7 @@ class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana
                        EnvoyQuicConnection* connection, Event::Dispatcher& dispatcher,
                        uint32_t send_buffer_limit)
       : quic::QuicSpdySession(connection, /*visitor=*/nullptr, config, supported_versions),
-        QuicFilterManagerConnectionImpl(connection, dispatcher, send_buffer_limit) {
+        QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit) {
     crypto_stream_ = std::make_unique<quic::test::MockQuicCryptoStream>(this);
   }
 
@@ -53,5 +55,41 @@ class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana
   std::unique_ptr<quic::QuicCryptoStream> crypto_stream_;
 };
 
+class MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession,
+                                   public QuicFilterManagerConnectionImpl {
+public:
+  MockEnvoyQuicClientSession(const quic::QuicConfig& config,
+                             const quic::ParsedQuicVersionVector& supported_versions,
+                             EnvoyQuicConnection* connection, Event::Dispatcher& dispatcher,
+                             uint32_t send_buffer_limit)
+      : quic::QuicSpdyClientSession(config, supported_versions, connection,
+                                    quic::QuicServerId("example.com", 443, false), &crypto_config_,
+                                    nullptr),
+        QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit),
+        crypto_config_(quic::test::crypto_test_utils::ProofVerifierForTesting()) {}
+
+  // From QuicSession.
+  MOCK_METHOD1(CreateIncomingStream, quic::QuicSpdyClientStream*(quic::QuicStreamId id));
+  MOCK_METHOD1(CreateIncomingStream, quic::QuicSpdyClientStream*(quic::PendingStream* pending));
+  MOCK_METHOD0(CreateOutgoingBidirectionalStream, quic::QuicSpdyClientStream*());
+  MOCK_METHOD0(CreateOutgoingUnidirectionalStream, quic::QuicSpdyClientStream*());
+  MOCK_METHOD1(ShouldCreateIncomingStream, bool(quic::QuicStreamId id));
+  MOCK_METHOD0(ShouldCreateOutgoingBidirectionalStream, bool());
+  MOCK_METHOD0(ShouldCreateOutgoingUnidirectionalStream, bool());
+  MOCK_METHOD5(WritevData,
+               quic::QuicConsumedData(quic::QuicStream* stream, quic::QuicStreamId id,
+                                      size_t write_length, quic::QuicStreamOffset offset,
+                                      quic::StreamSendingState state));
+
+  absl::string_view requestedServerName() const override {
+    return {GetCryptoStream()->crypto_negotiated_params().sni};
+  }
+
+  using quic::QuicSpdySession::ActivateStream;
+
+private:
+  quic::QuicCryptoClientConfig crypto_config_;
+};
+
 } // namespace Quic
 } // namespace Envoy
diff --git a/test/fuzz/BUILD b/test/fuzz/BUILD
index 3763795e5910..44507b1c1a55 100644
--- a/test/fuzz/BUILD
+++ b/test/fuzz/BUILD
@@ -51,6 +51,7 @@ envoy_cc_test_library(
     deps = [
         ":common_proto_cc_proto",
         "//source/common/common:empty_string",
+        "//source/common/network:resolver_lib",
         "//source/common/network:utility_lib",
         "//test/common/stream_info:test_util",
         "//test/mocks/ssl:ssl_mocks",
diff --git a/test/fuzz/common.proto b/test/fuzz/common.proto
index 11cecec91b7c..dcd1ccea1597 100644
--- a/test/fuzz/common.proto
+++ b/test/fuzz/common.proto
@@ -3,9 +3,12 @@ syntax = "proto3";
 package test.fuzz;
 
 import "envoy/api/v2/core/base.proto";
+import "envoy/api/v2/core/address.proto";
 
 import "google/protobuf/wrappers.proto";
 
+import "validate/validate.proto";
+
 // Common fuzzing input types.
 
 message Headers {
@@ -17,4 +20,6 @@ message StreamInfo {
   uint64 start_time = 2;
   google.protobuf.UInt32Value response_code = 3;
   envoy.api.v2.core.Metadata upstream_metadata = 4;
+  string requested_server_name = 5;
+  envoy.api.v2.core.Address address = 6;
 }
diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h
index 421a031aea57..62771bd42b48 100644
--- a/test/fuzz/utility.h
+++ b/test/fuzz/utility.h
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "common/common/empty_string.h"
+#include "common/network/resolver_impl.h"
 #include "common/network/utility.h"
 
 #include "test/common/stream_info/test_util.h"
@@ -112,6 +113,7 @@ const std::string TestSubjectPeer =
 
 inline TestStreamInfo fromStreamInfo(const test::fuzz::StreamInfo& stream_info) {
   // Set mocks' default string return value to be an empty string.
+  // TODO(asraa): Speed up this function, which is slowed because of the use of mocks.
   testing::DefaultValue<const std::string&>::Set(EMPTY_STRING);
   TestStreamInfo test_stream_info;
   test_stream_info.metadata_ = stream_info.dynamic_metadata();
@@ -125,12 +127,15 @@ inline TestStreamInfo fromStreamInfo(const test::fuzz::StreamInfo& stream_info)
   if (stream_info.has_response_code()) {
     test_stream_info.response_code_ = stream_info.response_code().value();
   }
+  test_stream_info.setRequestedServerName(stream_info.requested_server_name());
   auto upstream_host = std::make_shared<NiceMock<Upstream::MockHostDescription>>();
   auto upstream_metadata = std::make_shared<envoy::api::v2::core::Metadata>(
       replaceInvalidStringValues(stream_info.upstream_metadata()));
   ON_CALL(*upstream_host, metadata()).WillByDefault(testing::Return(upstream_metadata));
   test_stream_info.upstream_host_ = upstream_host;
-  auto address = Network::Utility::resolveUrl("tcp://10.0.0.1:443");
+  auto address = stream_info.has_address()
+                     ? Envoy::Network::Address::resolveProtoAddress(stream_info.address())
+                     : Network::Utility::resolveUrl("tcp://10.0.0.1:443");
   test_stream_info.upstream_local_address_ = address;
   test_stream_info.downstream_local_address_ = address;
   test_stream_info.downstream_direct_remote_address_ = address;
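
For reference, a caller-side sketch of the new StreamInfo fields (the values and the Fuzz namespace qualification are illustrative; the field accessors follow the proto additions to test/fuzz/common.proto above):

// Hypothetical fuzz input exercising requested_server_name and address.
test::fuzz::StreamInfo stream_info;
stream_info.set_requested_server_name("www.example.com");
auto* socket_address = stream_info.mutable_address()->mutable_socket_address();
socket_address->set_address("127.0.0.3");
socket_address->set_port_value(8080);
// With an address set, fromStreamInfo() resolves it via resolveProtoAddress();
// otherwise it keeps the tcp://10.0.0.1:443 fallback.
TestStreamInfo test_stream_info = Fuzz::fromStreamInfo(stream_info);
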
diff --git a/test/integration/BUILD b/test/integration/BUILD
index 233eab357812..65f9e6dc1611 100644
--- a/test/integration/BUILD
+++ b/test/integration/BUILD
@@ -799,6 +799,15 @@ envoy_cc_test(
     ],
 )
 
+envoy_cc_test(
+    name = "version_integration_test",
+    srcs = ["version_integration_test.cc"],
+    deps = [
+        ":http_integration_lib",
+        "//source/extensions/filters/http/ip_tagging:config",
+    ],
+)
+
 envoy_cc_test(
     name = "dynamic_validation_integration_test",
     srcs = ["dynamic_validation_integration_test.cc"],
diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc
index e94f96e2a0c2..05d1e3732db5 100644
--- a/test/integration/fake_upstream.cc
+++ b/test/integration/fake_upstream.cc
@@ -580,10 +580,10 @@ void FakeUpstream::onRecvDatagram(Network::UdpRecvData& data) {
 }
 
 void FakeUpstream::sendUdpDatagram(const std::string& buffer,
-                                   const Network::Address::Instance& peer) {
-  dispatcher_->post([this, buffer, &peer] {
+                                   const Network::Address::InstanceConstSharedPtr& peer) {
+  dispatcher_->post([this, buffer, peer] {
     const auto rc = Network::Utility::writeToSocket(socket_->ioHandle(), Buffer::OwnedImpl(buffer),
-                                                    nullptr, peer);
+                                                    nullptr, *peer);
     EXPECT_TRUE(rc.rc_ == buffer.length());
   });
 }
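
Passing the peer as an InstanceConstSharedPtr lets the posted lambda copy the pointer, so the address outlives the cross-thread post. A hypothetical caller, for illustration only:

// Hypothetical caller; address and payload are illustrative.
Network::Address::InstanceConstSharedPtr peer =
    Network::Utility::resolveUrl("udp://127.0.0.1:10000");
// sendUdpDatagram() now captures `peer` by value inside the posted lambda, so the
// caller does not need to keep a referenced address alive until the write happens.
fake_upstreams_[0]->sendUdpDatagram("hello", peer);
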
diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h
index 9a7c0edf340a..0405b820d1af 100644
--- a/test/integration/fake_upstream.h
+++ b/test/integration/fake_upstream.h
@@ -569,7 +569,8 @@ class FakeUpstream : Logger::Loggable<Logger::Id::testing>,
                      std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);
 
   // Send a UDP datagram on the fake upstream thread.
-  void sendUdpDatagram(const std::string& buffer, const Network::Address::Instance& peer);
+  void sendUdpDatagram(const std::string& buffer,
+                       const Network::Address::InstanceConstSharedPtr& peer);
 
   // Network::FilterChainManager
   const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override {
diff --git a/test/integration/filters/call_decodedata_once_filter.cc b/test/integration/filters/call_decodedata_once_filter.cc
index 15abc0658fd0..ff387d974104 100644
--- a/test/integration/filters/call_decodedata_once_filter.cc
+++ b/test/integration/filters/call_decodedata_once_filter.cc
@@ -16,8 +16,10 @@ class CallDecodeDataOnceFilter : public Http::PassThroughFilter {
   constexpr static char name[] = "call-decodedata-once-filter";
 
   Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& header_map, bool) override {
-    Http::HeaderEntry* entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size"));
-    Http::HeaderEntry* entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size"));
+    const Http::HeaderEntry* entry_content =
+        header_map.get(Envoy::Http::LowerCaseString("content_size"));
+    const Http::HeaderEntry* entry_added =
+        header_map.get(Envoy::Http::LowerCaseString("added_size"));
     ASSERT(entry_content != nullptr && entry_added != nullptr);
     content_size_ = std::stoul(std::string(entry_content->value().getStringView()));
     added_size_ = std::stoul(std::string(entry_added->value().getStringView()));
diff --git a/test/integration/filters/decode_headers_return_stop_all_filter.cc b/test/integration/filters/decode_headers_return_stop_all_filter.cc
index e2049155ef98..151a26e0fe03 100644
--- a/test/integration/filters/decode_headers_return_stop_all_filter.cc
+++ b/test/integration/filters/decode_headers_return_stop_all_filter.cc
@@ -27,12 +27,14 @@ class DecodeHeadersReturnStopAllFilter : public Http::PassThroughFilter {
   // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to
   // continue iteration after 5s.
   Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& header_map, bool) override {
-    Http::HeaderEntry* entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size"));
-    Http::HeaderEntry* entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size"));
+    const Http::HeaderEntry* entry_content =
+        header_map.get(Envoy::Http::LowerCaseString("content_size"));
+    const Http::HeaderEntry* entry_added =
+        header_map.get(Envoy::Http::LowerCaseString("added_size"));
     ASSERT(entry_content != nullptr && entry_added != nullptr);
     content_size_ = std::stoul(std::string(entry_content->value().getStringView()));
     added_size_ = std::stoul(std::string(entry_added->value().getStringView()));
-    Http::HeaderEntry* entry_is_first_trigger =
+    const Http::HeaderEntry* entry_is_first_trigger =
         header_map.get(Envoy::Http::LowerCaseString("is_first_trigger"));
     is_first_trigger_ = entry_is_first_trigger != nullptr;
     // Remove "first_trigger" headers so that if the filter is registered twice in a filter chain,
@@ -41,7 +43,8 @@ class DecodeHeadersReturnStopAllFilter : public Http::PassThroughFilter {
 
     createTimerForContinue();
 
-    Http::HeaderEntry* entry_buffer = header_map.get(Envoy::Http::LowerCaseString("buffer_limit"));
+    const Http::HeaderEntry* entry_buffer =
+        header_map.get(Envoy::Http::LowerCaseString("buffer_limit"));
     if (entry_buffer == nullptr || !is_first_trigger_) {
       return Http::FilterHeadersStatus::StopAllIterationAndBuffer;
     } else {
diff --git a/test/integration/filters/encode_headers_return_stop_all_filter.cc b/test/integration/filters/encode_headers_return_stop_all_filter.cc
index 778c4897a15a..768fc1ad343a 100644
--- a/test/integration/filters/encode_headers_return_stop_all_filter.cc
+++ b/test/integration/filters/encode_headers_return_stop_all_filter.cc
@@ -27,8 +27,10 @@ class EncodeHeadersReturnStopAllFilter : public Http::PassThroughFilter {
   // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to
   // continue iteration after 5s.
   Http::FilterHeadersStatus encodeHeaders(Http::HeaderMap& header_map, bool) override {
-    Http::HeaderEntry* entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size"));
-    Http::HeaderEntry* entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size"));
+    const Http::HeaderEntry* entry_content =
+        header_map.get(Envoy::Http::LowerCaseString("content_size"));
+    const Http::HeaderEntry* entry_added =
+        header_map.get(Envoy::Http::LowerCaseString("added_size"));
     ASSERT(entry_content != nullptr && entry_added != nullptr);
     content_size_ = std::stoul(std::string(entry_content->value().getStringView()));
     added_size_ = std::stoul(std::string(entry_added->value().getStringView()));
@@ -39,7 +41,8 @@ class EncodeHeadersReturnStopAllFilter : public Http::PassThroughFilter {
     Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);
     encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr));
 
-    Http::HeaderEntry* entry_buffer = header_map.get(Envoy::Http::LowerCaseString("buffer_limit"));
+    const Http::HeaderEntry* entry_buffer =
+        header_map.get(Envoy::Http::LowerCaseString("buffer_limit"));
     if (entry_buffer == nullptr) {
       return Http::FilterHeadersStatus::StopAllIterationAndBuffer;
     } else {
diff --git a/test/integration/filters/metadata_stop_all_filter.cc b/test/integration/filters/metadata_stop_all_filter.cc
index 3ff6b7983d01..423df634ab5d 100644
--- a/test/integration/filters/metadata_stop_all_filter.cc
+++ b/test/integration/filters/metadata_stop_all_filter.cc
@@ -22,7 +22,8 @@ class MetadataStopAllFilter : public Http::PassThroughFilter {
   constexpr static char name[] = "metadata-stop-all-filter";
 
   Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& header_map, bool) override {
-    Http::HeaderEntry* entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size"));
+    const Http::HeaderEntry* entry_content =
+        header_map.get(Envoy::Http::LowerCaseString("content_size"));
     ASSERT(entry_content != nullptr);
     content_size_ = std::stoul(std::string(entry_content->value().getStringView()));
 
diff --git a/test/integration/filters/pause_filter.cc b/test/integration/filters/pause_filter.cc
index c6463ef27b65..8c4a9b85ba93 100644
--- a/test/integration/filters/pause_filter.cc
+++ b/test/integration/filters/pause_filter.cc
@@ -26,8 +26,9 @@ class TestPauseFilter : public Http::PassThroughFilter {
     if (end_stream) {
       absl::WriterMutexLock m(&encode_lock_);
       number_of_decode_calls_ref_++;
-      if (number_of_decode_calls_ref_ == 2) {
-        // If this is the second stream to decode headers, force low watermark state.
+      // If this is the second stream to decode headers and we're at high watermark, force low
+      // watermark state.
+      if (number_of_decode_calls_ref_ == 2 && connection()->aboveHighWatermark()) {
         connection()->onLowWatermark();
       }
     }
@@ -38,8 +39,9 @@ class TestPauseFilter : public Http::PassThroughFilter {
     if (end_stream) {
       absl::WriterMutexLock m(&encode_lock_);
       number_of_encode_calls_ref_++;
-      if (number_of_encode_calls_ref_ == 1) {
-        // If this is the first stream to encode headers, force high watermark state.
+      // If this is the first stream to encode headers and we're not at high watermark, force high
+      // watermark state.
+      if (number_of_encode_calls_ref_ == 1 && !connection()->aboveHighWatermark()) {
         connection()->onHighWatermark();
       }
     }
diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh
index e9dea29f0eb3..8c6563203ca6 100755
--- a/test/integration/hotrestart_test.sh
+++ b/test/integration/hotrestart_test.sh
@@ -21,6 +21,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al
     sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \
     sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \
     sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \
+    sed -e "s#{{ reuse_port }}#false#" | \
     sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \
     cat > "${HOT_RESTART_JSON_V4}"
   JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_V4}")
@@ -34,6 +35,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al
     sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \
     sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \
     sed -e "s#{{ ip_loopback_address }}#::1#" | \
+    sed -e "s#{{ reuse_port }}#false#" | \
     sed -e "s#{{ dns_lookup_family }}#v6_only#" | \
     cat > "${HOT_RESTART_JSON_V6}"
   JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_V6}")
@@ -49,6 +51,19 @@ cat "${TEST_SRCDIR}/envoy"/test/config/integration/server_unix_listener.yaml |
   cat > "${HOT_RESTART_JSON_UDS}"
 JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_UDS}")
 
+# Test reuse port listener.
+HOT_RESTART_JSON_REUSE_PORT="${TEST_TMPDIR}"/hot_restart_v4_reuse_port.yaml
+echo building ${HOT_RESTART_JSON_REUSE_PORT} ...
+cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml |
+  sed -e "s#{{ upstream_. }}#0#g" | \
+  sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \
+  sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \
+  sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \
+  sed -e "s#{{ reuse_port }}#true#" | \
+  sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \
+  cat > "${HOT_RESTART_JSON_REUSE_PORT}"
+JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_REUSE_PORT}")
+
 # Enable this test to work with --runs_per_test
 if [[ -z "${TEST_RANDOM_SEED}" ]]; then
   BASE_ID=1
diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc
index b2f8714d3443..51996a6bacef 100644
--- a/test/integration/http2_integration_test.cc
+++ b/test/integration/http2_integration_test.cc
@@ -1440,16 +1440,19 @@ const int64_t TransmitThreshold = 100 * 1024 * 1024;
 } // namespace
 
 void Http2FloodMitigationTest::setNetworkConnectionBufferSize() {
-  // nghttp2 library has its own internal mitigation for outbound control frames. The mitigation is
-  // triggered when there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2
-  // internal outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering
-  // Envoy's own flood mitigation. This can happen when a buffer larger enough to contain over 10K
-  // PING or SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening
-  // the network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS
-  // frames). Set it to the arbitrarily chosen value of 32K.
+  // nghttp2 library has its own internal mitigation for outbound control frames (see
+  // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified
+  // to 10K in the ConnectionImpl::Http2Options constructor. The mitigation is triggered when
+  // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal
+  // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's
+  // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or
+  // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the
+  // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames).
+  // Set it to the arbitrarily chosen value of 32K. Note that this buffer has a 16K lower bound.
   config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) -> void {
     RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, "");
     auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);
+
     listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024);
   });
 }
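
To make the 90Kb bound in the comment above concrete, a quick arithmetic check (assuming an empty SETTINGS frame costs only the 9-byte HTTP/2 frame header):

#include <cstdint>

// 10K minimal SETTINGS frames at 9 bytes each occupy 90000 bytes, so a receive
// buffer below ~90Kb cannot deliver enough frames in a single dispatch to trip
// nghttp2's 10K outbound-ACK mitigation before Envoy's own flood checks run.
constexpr uint64_t kHttp2FrameHeaderBytes = 9;
constexpr uint64_t kNghttp2OutboundFloodThreshold = 10000;
static_assert(kHttp2FrameHeaderBytes * kNghttp2OutboundFloodThreshold == 90000,
              "10K empty SETTINGS frames ~= 90Kb of input");
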
diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc
index 14698039c2f2..b75c75bf3c97 100644
--- a/test/integration/http_integration.cc
+++ b/test/integration/http_integration.cc
@@ -46,6 +46,9 @@ typeToCodecType(Http::CodecClient::Type type) {
   case Http::CodecClient::Type::HTTP2:
     return envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager::
         HTTP2;
+  case Http::CodecClient::Type::HTTP3:
+    return envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager::
+        HTTP3;
   default:
     RELEASE_ASSERT(0, "");
   }
@@ -180,6 +183,13 @@ void IntegrationCodecClient::ConnectionCallbacks::onEvent(Network::ConnectionEve
     parent_.disconnected_ = true;
     parent_.connection_->dispatcher().exit();
   } else {
+    if (parent_.type() == CodecClient::Type::HTTP3 && !parent_.connected_) {
+      // Before the handshake is established, any connection failure should exit the event loop.
+      // For example, a QUIC connection may fail with INVALID_VERSION if the client doesn't
+      // support any of the versions the server advertised. In that case the connection is closed
+      // locally while this blocking event loop is running.
+      parent_.connection_->dispatcher().exit();
+    }
     parent_.disconnected_ = true;
   }
 }
@@ -203,7 +213,7 @@ HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn)
 IntegrationCodecClientPtr
 HttpIntegrationTest::makeHttpConnection(Network::ClientConnectionPtr&& conn) {
   auto codec = makeRawHttpConnection(std::move(conn));
-  EXPECT_TRUE(codec->connected());
+  EXPECT_TRUE(codec->connected()) << codec->connection()->transportFailureReason();
   return codec;
 }
 
@@ -230,9 +240,9 @@ HttpIntegrationTest::HttpIntegrationTest(Http::CodecClient::Type downstream_prot
   config_helper_.setClientCodec(typeToCodecType(downstream_protocol_));
 }
 
-void HttpIntegrationTest::useAccessLog() {
+void HttpIntegrationTest::useAccessLog(absl::string_view format) {
   access_log_name_ = TestEnvironment::temporaryPath(TestUtility::uniqueFilename());
-  ASSERT_TRUE(config_helper_.setAccessLog(access_log_name_));
+  ASSERT_TRUE(config_helper_.setAccessLog(access_log_name_, format));
 }
 
 HttpIntegrationTest::~HttpIntegrationTest() {
@@ -903,6 +913,7 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count,
 
   Http::TestHeaderMapImpl big_headers{
       {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}};
+
   // Already added four headers.
   for (unsigned int i = 0; i < count; i++) {
     big_headers.addCopy(std::to_string(i), std::string(size * 1024, 'a'));
@@ -969,6 +980,8 @@ void HttpIntegrationTest::testLargeRequestTrailers(uint32_t size, uint32_t max_s
 }
 
 void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) {
+  // This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid
+  // time-consuming asserts when using a large number of headers.
   max_request_headers_kb_ = 96;
   max_request_headers_count_ = 20005;
 
@@ -980,11 +993,13 @@ void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time)
             max_request_headers_count_);
       });
 
-  Http::TestHeaderMapImpl big_headers{
-      {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}};
+  Http::HeaderMapImpl big_headers{{Http::Headers::get().Method, "GET"},
+                                  {Http::Headers::get().Path, "/test/long/url"},
+                                  {Http::Headers::get().Scheme, "http"},
+                                  {Http::Headers::get().Host, "host"}};
 
   for (int i = 0; i < 20000; i++) {
-    big_headers.addCopy(std::to_string(i), std::string(0, 'a'));
+    big_headers.addCopy(Http::LowerCaseString(std::to_string(i)), std::string(0, 'a'));
   }
   initialize();
 
diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h
index 679c93fbce72..9dce110a222b 100644
--- a/test/integration/http_integration.h
+++ b/test/integration/http_integration.h
@@ -104,11 +104,11 @@ class HttpIntegrationTest : public BaseIntegrationTest {
   std::string waitForAccessLog(const std::string& filename);
 
 protected:
-  void useAccessLog();
+  void useAccessLog(absl::string_view format = "");
 
   IntegrationCodecClientPtr makeHttpConnection(uint32_t port);
   // Makes a http connection object without checking its connected state.
-  IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn);
+  virtual IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn);
   // Makes a http connection object with asserting a connected state.
   IntegrationCodecClientPtr makeHttpConnection(Network::ClientConnectionPtr&& conn);
 
diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc
index b0ec3fb192c4..ad47453cab7f 100644
--- a/test/integration/idle_timeout_integration_test.cc
+++ b/test/integration/idle_timeout_integration_test.cc
@@ -1,12 +1,15 @@
 #include "test/integration/http_protocol_integration.h"
 #include "test/test_common/test_time.h"
 
+using testing::HasSubstr;
+
 namespace Envoy {
 namespace {
 
 class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest {
 public:
   void initialize() override {
+    useAccessLog("%RESPONSE_CODE_DETAILS%");
     config_helper_.addConfigModifier(
         [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm)
             -> void {
@@ -172,6 +175,8 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) {
   EXPECT_TRUE(response->complete());
   EXPECT_EQ("408", response->headers().Status()->value().getStringView());
   EXPECT_EQ("stream timeout", response->body());
+
+  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("stream_idle_timeout"));
 }
 
 // Per-stream idle timeout after having sent downstream head request.
diff --git a/test/integration/integration.h b/test/integration/integration.h
index 66461a88a1c7..2e088e52390e 100644
--- a/test/integration/integration.h
+++ b/test/integration/integration.h
@@ -138,7 +138,7 @@ struct ApiFilesystemConfig {
 /**
  * Test fixture for all integration tests.
  */
-class BaseIntegrationTest : Logger::Loggable<Logger::Id::testing> {
+class BaseIntegrationTest : protected Logger::Loggable<Logger::Id::testing> {
 public:
   using TestTimeSystemPtr = std::unique_ptr<Event::TestTimeSystem>;
   using InstanceConstSharedPtrFn = std::function<Network::Address::InstanceConstSharedPtr(int)>;
@@ -191,7 +191,7 @@ class BaseIntegrationTest : Logger::Loggable<Logger::Id::testing> {
   void setUpstreamAddress(uint32_t upstream_index,
                           envoy::api::v2::endpoint::LbEndpoint& endpoint) const;
 
-  Network::ClientConnectionPtr makeClientConnection(uint32_t port);
+  virtual Network::ClientConnectionPtr makeClientConnection(uint32_t port);
 
   void registerTestServerPorts(const std::vector<std::string>& port_names);
   void createTestServer(const std::string& json_path, const std::vector<std::string>& port_names);
diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc
index 78134daffed4..c65cbabdfd99 100644
--- a/test/integration/integration_test.cc
+++ b/test/integration/integration_test.cc
@@ -490,6 +490,34 @@ TEST_P(IntegrationTest, Pipeline) {
   connection.close();
 }
 
+// Add a pipeline test where complete request headers in the first request merit
+// an inline sendLocalReply to make sure the "kick" works under the call stack
+// of dispatch as well as when a response is proxied from upstream.
+TEST_P(IntegrationTest, PipelineInline) {
+  autonomous_upstream_ = true;
+  initialize();
+  std::string response;
+
+  Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\nGET / HTTP/1.0\r\n\r\n");
+  RawConnectionDriver connection(
+      lookupPort("http"), buffer,
+      [&](Network::ClientConnection&, const Buffer::Instance& data) -> void {
+        response.append(data.toString());
+      },
+      version_);
+
+  while (response.find("400") == std::string::npos) {
+    connection.run(Event::Dispatcher::RunType::NonBlock);
+  }
+  EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n"));
+
+  while (response.find("426") == std::string::npos) {
+    connection.run(Event::Dispatcher::RunType::NonBlock);
+  }
+  EXPECT_THAT(response, HasSubstr("HTTP/1.1 426 Upgrade Required\r\n"));
+  connection.close();
+}
+
 TEST_P(IntegrationTest, NoHost) {
   initialize();
   codec_client_ = makeHttpConnection(lookupPort("http"));
diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc
index afe3b4d81133..e2bc8f8d3cd6 100644
--- a/test/integration/protocol_integration_test.cc
+++ b/test/integration/protocol_integration_test.cc
@@ -1012,8 +1012,10 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersAccepted) {
   EXPECT_EQ("200", response->headers().Status()->value().getStringView());
 }
 
+// This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid
+// time-consuming byte size validations that would cause this test to time out.
 TEST_P(DownstreamProtocolIntegrationTest, ManyRequestHeadersTimeout) {
-  // Set timeout for 5 seconds, and ensure that a request with 20k+ headers can be sent.
+  // Set timeout for 5 seconds, and ensure that a request with 10k+ headers can be sent.
   testManyRequestHeaders(std::chrono::milliseconds(5000));
 }
 
@@ -1025,6 +1027,8 @@ TEST_P(DownstreamProtocolIntegrationTest, LargeRequestTrailersRejected) {
   testLargeRequestTrailers(66, 60);
 }
 
+// This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid
+// time-consuming byte size verification that would cause this test to time out.
 TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) {
   max_request_headers_kb_ = 96;
   max_request_headers_count_ = 20005;
@@ -1037,9 +1041,9 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) {
             max_request_headers_count_);
       });
 
-  Http::TestHeaderMapImpl request_trailers{};
+  Http::HeaderMapImpl request_trailers{};
   for (int i = 0; i < 20000; i++) {
-    request_trailers.addCopy(std::to_string(i), "");
+    request_trailers.addCopy(Http::LowerCaseString(std::to_string(i)), "");
   }
 
   initialize();
diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc
index 7b4a04d72982..8ee9d2d1c2f0 100644
--- a/test/integration/redirect_integration_test.cc
+++ b/test/integration/redirect_integration_test.cc
@@ -82,6 +82,49 @@ TEST_P(RedirectIntegrationTest, BasicInternalRedirect) {
                    ->value());
 }
 
+TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) {
+  // Validate that header sanitization is only called once.
+  config_helper_.addConfigModifier(
+      [](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) {
+        hcm.set_via("via_value");
+      });
+  config_helper_.addFilter(R"EOF(
+  name: pause-filter
+  typed_config:
+    "@type": type.googleapis.com/google.protobuf.Empty
+  )EOF");
+  initialize();
+  fake_upstreams_[0]->set_allow_unexpected_disconnects(true);
+
+  codec_client_ = makeHttpConnection(lookupPort("http"));
+
+  default_request_headers_.setHost("handle.internal.redirect");
+  IntegrationStreamDecoderPtr response =
+      codec_client_->makeHeaderOnlyRequest(default_request_headers_);
+
+  waitForNextUpstreamRequest();
+  upstream_request_->encodeHeaders(redirect_response_, true);
+
+  waitForNextUpstreamRequest();
+  ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr);
+  EXPECT_EQ("http://handle.internal.redirect/test/long/url",
+            upstream_request_->headers().EnvoyOriginalUrl()->value().getStringView());
+  EXPECT_EQ("/new/url", upstream_request_->headers().Path()->value().getStringView());
+  EXPECT_EQ("authority2", upstream_request_->headers().Host()->value().getStringView());
+  EXPECT_EQ("via_value", upstream_request_->headers().Via()->value().getStringView());
+
+  Http::TestHeaderMapImpl response_with_big_body(
+      {{":status", "200"}, {"content-length", "2000000"}});
+  upstream_request_->encodeHeaders(response_with_big_body, false);
+  upstream_request_->encodeData(2000000, true);
+
+  response->waitForEndStream();
+  ASSERT_TRUE(response->complete());
+  EXPECT_EQ("200", response->headers().Status()->value().getStringView());
+  EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total")
+                   ->value());
+}
+
 TEST_P(RedirectIntegrationTest, InvalidRedirect) {
   initialize();
 
diff --git a/test/integration/version_integration_test.cc b/test/integration/version_integration_test.cc
new file mode 100644
index 000000000000..e5d09ff9a41e
--- /dev/null
+++ b/test/integration/version_integration_test.cc
@@ -0,0 +1,86 @@
+#include "test/integration/http_integration.h"
+
+namespace Envoy {
+namespace {
+
+// Integration test for ingestion of configuration across API versions.
+// Currently we only have static tests, but there will also be xDS tests added
+// later.
+class VersionIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,
+                               public HttpIntegrationTest {
+public:
+  VersionIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {}
+};
+
+INSTANTIATE_TEST_SUITE_P(IpVersions, VersionIntegrationTest,
+                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()));
+
+// Just IP tagging for now.
+const char ExampleIpTaggingConfig[] = R"EOF(
+    request_type: both
+    ip_tags:
+      - ip_tag_name: external_request
+        ip_list:
+          - {address_prefix: 1.2.3.4, prefix_len: 32}
+)EOF";
+
+// envoy.ip_tagging from v2 Struct config.
+TEST_P(VersionIntegrationTest, DEPRECATED_FEATURE_TEST(IpTaggingV2StaticStructConfig)) {
+  config_helper_.addFilter(absl::StrCat(R"EOF(
+  name: envoy.ip_tagging
+  config:
+  )EOF",
+                                        ExampleIpTaggingConfig));
+  initialize();
+}
+
+// envoy.ip_tagging from v2 TypedStruct config.
+TEST_P(VersionIntegrationTest, IpTaggingV2StaticTypedStructConfig) {
+  config_helper_.addFilter(absl::StrCat(R"EOF(
+name: envoy.ip_tagging
+typed_config:
+  "@type": type.googleapis.com/udpa.type.v1.TypedStruct
+  type_url: type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging
+  value:
+  )EOF",
+                                        ExampleIpTaggingConfig));
+  initialize();
+}
+
+// envoy.ip_tagging from v3alpha TypedStruct config.
+TEST_P(VersionIntegrationTest, IpTaggingV3AlphaStaticTypedStructConfig) {
+  config_helper_.addFilter(absl::StrCat(R"EOF(
+name: envoy.ip_tagging
+typed_config:
+  "@type": type.googleapis.com/udpa.type.v1.TypedStruct
+  type_url: type.googleapis.com/envoy.config.filter.http.ip_tagging.v3alpha.IPTagging
+  value:
+  )EOF",
+                                        ExampleIpTaggingConfig));
+  initialize();
+}
+
+// envoy.ip_tagging from v2 typed Any config.
+TEST_P(VersionIntegrationTest, IpTaggingV2StaticTypedConfig) {
+  config_helper_.addFilter(absl::StrCat(R"EOF(
+  name: envoy.ip_tagging
+  typed_config:
+    "@type": type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging
+  )EOF",
+                                        ExampleIpTaggingConfig));
+  initialize();
+}
+
+// envoy.ip_tagging from v3alpha typed Any config.
+TEST_P(VersionIntegrationTest, IpTaggingV3AlphaStaticTypedConfig) {
+  config_helper_.addFilter(absl::StrCat(R"EOF(
+  name: envoy.ip_tagging
+  typed_config:
+    "@type": type.googleapis.com/envoy.config.filter.http.ip_tagging.v3alpha.IPTagging
+  )EOF",
+                                        ExampleIpTaggingConfig));
+  initialize();
+}
+
+} // namespace
+} // namespace Envoy
diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc
index 561cd99917d3..1e54a38c20a4 100644
--- a/test/integration/vhds_integration_test.cc
+++ b/test/integration/vhds_integration_test.cc
@@ -72,6 +72,17 @@ const char Config[] = R"EOF(
                     cluster_name: xds_cluster
 )EOF";
 
+// TODO (dmitri-d) move config yaml into ConfigHelper
+const char RdsWithoutVhdsConfig[] = R"EOF(
+name: my_route
+virtual_hosts:
+- name: vhost_rds1
+  domains: ["vhost.rds.first"]
+  routes:
+  - match: { prefix: "/rdsone" }
+    route: { cluster: my_service }
+)EOF";
+
 const char RdsConfig[] = R"EOF(
 name: my_route
 vhds:
@@ -100,6 +111,113 @@ name: my_route
           cluster_name: xds_cluster
 )EOF";
 
+const char VhostTemplate[] = R"EOF(
+name: {}
+domains: [{}]
+routes:
+- match: {{ prefix: "/" }}
+  route: {{ cluster: "my_service" }}
+)EOF";
+
+class VhdsInitializationTest : public HttpIntegrationTest,
+                               public Grpc::GrpcClientIntegrationParamTest {
+public:
+  VhdsInitializationTest()
+      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), realTime(), Config) {
+    use_lds_ = false;
+  }
+
+  void TearDown() override {
+    cleanUpXdsConnection();
+    test_server_.reset();
+    fake_upstreams_.clear();
+  }
+
+  // Overridden to insert this stuff into the initialize() at the very beginning of
+  // HttpIntegrationTest::testRouterRequestAndResponseWithBody().
+  void initialize() override {
+    // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in
+    // BaseIntegrationTest::createUpstreams() (which is part of initialize()).
+    // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap
+    // config that you use!
+    setUpstreamCount(2);                                  // the xDS cluster and the data cluster
+    setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // xDS uses gRPC, which uses HTTP2.
+
+    // BaseIntegrationTest::initialize() does many things:
+    // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount().
+    // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening
+    //    on (since you're supposed to leave them as 0).
+    // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual
+    //    Envoy used in the tests.
+    // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in
+    //    the bootstrap config have come up, and registering them in a port map (see lookupPort()).
+    //    However, this test needs to defer all of that to later.
+    defer_listener_finalization_ = true;
+    HttpIntegrationTest::initialize();
+
+    // Now that the upstream has been created, process Envoy's request to discover it.
+    // (First, we have to let Envoy establish its connection to the RDS server.)
+    AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection.
+        fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_);
+    RELEASE_ASSERT(result, result.message());
+    result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_);
+    RELEASE_ASSERT(result, result.message());
+    xds_stream_->startGrpcStream();
+    fake_upstreams_[0]->set_allow_unexpected_disconnects(true);
+
+    EXPECT_TRUE(compareSotwDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "",
+                                            {"my_route"}, true));
+    sendSotwDiscoveryResponse<envoy::api::v2::RouteConfiguration>(
+        Config::TypeUrl::get().RouteConfiguration,
+        {TestUtility::parseYaml<envoy::api::v2::RouteConfiguration>(RdsWithoutVhdsConfig)}, "1");
+
+    // Wait for our statically specified listener to become ready, and register its port in the
+    // test framework's downstream listener port map.
+    test_server_->waitUntilListenersReady();
+    registerTestServerPorts({"http"});
+  }
+
+  FakeStreamPtr vhds_stream_;
+};
+
+INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsInitializationTest,
+                         GRPC_CLIENT_INTEGRATION_PARAMS);
+
+// tests a scenario when:
+//  - RouteConfiguration without VHDS is received
+//  - RouteConfiguration update with VHDS configuration in it is received
+//  - Upstream makes a request to a VirtualHost in the VHDS update
+TEST_P(VhdsInitializationTest, InitializeVhdsAfterRdsHasBeenInitialized) {
+  // Calls our initialize(), which includes establishing a listener, route, and cluster.
+  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first");
+  cleanupUpstreamAndDownstream();
+  codec_client_->waitForDisconnect();
+
+  // Update RouteConfig, this time include VHDS config
+  sendSotwDiscoveryResponse<envoy::api::v2::RouteConfiguration>(
+      Config::TypeUrl::get().RouteConfiguration,
+      {TestUtility::parseYaml<envoy::api::v2::RouteConfiguration>(RdsConfigWithVhosts)}, "2");
+
+  auto result = xds_connection_->waitForNewStream(*dispatcher_, vhds_stream_, true);
+  RELEASE_ASSERT(result, result.message());
+  vhds_stream_->startGrpcStream();
+
+  EXPECT_TRUE(
+      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));
+  sendDeltaDiscoveryResponse<envoy::api::v2::route::VirtualHost>(
+      Config::TypeUrl::get().VirtualHost,
+      {TestUtility::parseYaml<envoy::api::v2::route::VirtualHost>(
+          fmt::format(VhostTemplate, "vhost_0", "vhost.first"))},
+      {}, "1", vhds_stream_);
+  EXPECT_TRUE(
+      compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, {}, {}, vhds_stream_));
+
+  // Confirm vhost.first that was configured via VHDS is reachable
+  testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "vhost.first");
+  cleanupUpstreamAndDownstream();
+  codec_client_->waitForDisconnect();
+}
+
 class VhdsIntegrationTest : public HttpIntegrationTest,
                             public Grpc::GrpcClientIntegrationParamTest {
 public:
@@ -115,14 +233,7 @@ class VhdsIntegrationTest : public HttpIntegrationTest,
   }
 
   std::string virtualHostYaml(const std::string& name, const std::string& domain) {
-    return fmt::format(R"EOF(
-      name: {}
-      domains: [{}]
-      routes:
-      - match: {{ prefix: "/" }}
-        route: {{ cluster: "my_service" }}
-    )EOF",
-                       name, domain);
+    return fmt::format(VhostTemplate, name, domain);
   }
 
   envoy::api::v2::route::VirtualHost buildVirtualHost() {
@@ -206,8 +317,6 @@ class VhdsIntegrationTest : public HttpIntegrationTest,
   bool use_rds_with_vhosts{false};
 };
 
-INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS);
-
 // tests a scenario when:
 //  - a spontaneous VHDS DiscoveryResponse adds two virtual hosts
 //  - the next spontaneous VHDS DiscoveryResponse removes newly added virtual hosts
diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc
index 90ead02098e5..618e874071e3 100644
--- a/test/integration/websocket_integration_test.cc
+++ b/test/integration/websocket_integration_test.cc
@@ -100,7 +100,7 @@ void WebsocketIntegrationTest::commonValidate(Http::HeaderMap& proxied_headers,
       original_headers.Connection()->value() == "keep-alive, upgrade") {
     // The keep-alive is implicit for HTTP/1.1, so Envoy only sets the upgrade
     // header when converting from HTTP/1.1 to H2
-    proxied_headers.Connection()->value().setCopy("keep-alive, upgrade", 19);
+    proxied_headers.setConnection("keep-alive, upgrade");
   }
 }
 
diff --git a/test/integration/xfcc_integration_test.cc b/test/integration/xfcc_integration_test.cc
index e8f023302a26..56f4545c6f56 100644
--- a/test/integration/xfcc_integration_test.cc
+++ b/test/integration/xfcc_integration_test.cc
@@ -92,7 +92,7 @@ Network::TransportSocketFactoryPtr XfccIntegrationTest::createUpstreamSslContext
       std::move(cfg), *context_manager_, *upstream_stats_store, std::vector<std::string>{});
 }
 
-Network::ClientConnectionPtr XfccIntegrationTest::makeClientConnection() {
+Network::ClientConnectionPtr XfccIntegrationTest::makeTcpClientConnection() {
   Network::Address::InstanceConstSharedPtr address =
       Network::Utility::resolveUrl("tcp://" + Network::Test::getLoopbackAddressUrlString(version_) +
                                    ":" + std::to_string(lookupPort("http")));
@@ -147,7 +147,7 @@ void XfccIntegrationTest::initialize() {
 
 void XfccIntegrationTest::testRequestAndResponseWithXfccHeader(std::string previous_xfcc,
                                                                std::string expected_xfcc) {
-  Network::ClientConnectionPtr conn = tls_ ? makeMtlsClientConnection() : makeClientConnection();
+  Network::ClientConnectionPtr conn = tls_ ? makeMtlsClientConnection() : makeTcpClientConnection();
   Http::TestHeaderMapImpl header_map;
   if (previous_xfcc.empty()) {
     header_map = Http::TestHeaderMapImpl{{":method", "GET"},
diff --git a/test/integration/xfcc_integration_test.h b/test/integration/xfcc_integration_test.h
index 488ff98aad65..5e1ad2082890 100644
--- a/test/integration/xfcc_integration_test.h
+++ b/test/integration/xfcc_integration_test.h
@@ -46,7 +46,7 @@ class XfccIntegrationTest : public testing::TestWithParam<Network::Address::IpVe
 
   Network::TransportSocketFactoryPtr createUpstreamSslContext();
   Network::TransportSocketFactoryPtr createClientSslContext(bool mtls);
-  Network::ClientConnectionPtr makeClientConnection();
+  Network::ClientConnectionPtr makeTcpClientConnection();
   Network::ClientConnectionPtr makeTlsClientConnection();
   Network::ClientConnectionPtr makeMtlsClientConnection();
   void testRequestAndResponseWithXfccHeader(std::string privous_xfcc, std::string expected_xfcc);
diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h
index a5ab4479e4a5..c4d51c5ccf9d 100644
--- a/test/mocks/event/mocks.h
+++ b/test/mocks/event/mocks.h
@@ -96,9 +96,10 @@ class MockDispatcher : public Dispatcher {
                                  Network::Address::InstanceConstSharedPtr source_address,
                                  Network::TransportSocketPtr& transport_socket,
                                  const Network::ConnectionSocket::OptionsSharedPtr& options));
-  MOCK_METHOD1(createDnsResolver,
+  MOCK_METHOD2(createDnsResolver,
                Network::DnsResolverSharedPtr(
-                   const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers));
+                   const std::vector<Network::Address::InstanceConstSharedPtr>& resolvers,
+                   const bool use_tcp_for_dns_lookups));
   MOCK_METHOD4(createFileEvent_,
                FileEvent*(int fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events));
   MOCK_METHOD0(createFilesystemWatcher_, Filesystem::Watcher*());
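
Existing expectations against this mock now need a matcher for the added use_tcp_for_dns_lookups argument. A minimal sketch (it assumes the usual gmock usings and that Network::MockDnsResolver from the network mocks is available):

// Hypothetical expectation against the updated two-argument signature.
NiceMock<Event::MockDispatcher> dispatcher;
auto dns_resolver = std::make_shared<NiceMock<Network::MockDnsResolver>>();
EXPECT_CALL(dispatcher, createDnsResolver(_, /*use_tcp_for_dns_lookups=*/false))
    .WillOnce(Return(dns_resolver));
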
diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h
index 91c49470539b..6e9f703dfec7 100644
--- a/test/mocks/router/mocks.h
+++ b/test/mocks/router/mocks.h
@@ -372,6 +372,7 @@ class MockRouteTracing : public RouteTracing {
   MOCK_CONST_METHOD0(getClientSampling, const envoy::type::FractionalPercent&());
   MOCK_CONST_METHOD0(getRandomSampling, const envoy::type::FractionalPercent&());
   MOCK_CONST_METHOD0(getOverallSampling, const envoy::type::FractionalPercent&());
+  MOCK_CONST_METHOD0(getCustomTags, const Tracing::CustomTagMap&());
 };
 
 class MockRoute : public Route {
diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc
index c68029233dfb..16ea2134de64 100644
--- a/test/mocks/server/mocks.cc
+++ b/test/mocks/server/mocks.cc
@@ -91,9 +91,10 @@ MockOverloadManager::~MockOverloadManager() = default;
 MockListenerComponentFactory::MockListenerComponentFactory()
     : socket_(std::make_shared<NiceMock<Network::MockListenSocket>>()) {
   ON_CALL(*this, createListenSocket(_, _, _, _))
-      .WillByDefault(Invoke(
-          [&](Network::Address::InstanceConstSharedPtr, Network::Address::SocketType,
-              const Network::Socket::OptionsSharedPtr& options, bool) -> Network::SocketSharedPtr {
+      .WillByDefault(
+          Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Address::SocketType,
+                     const Network::Socket::OptionsSharedPtr& options,
+                     const ListenSocketCreationParams&) -> Network::SocketSharedPtr {
             if (!Network::Socket::applyOptions(options, *socket_,
                                                envoy::api::v2::core::SocketOption::STATE_PREBIND)) {
               throw EnvoyException("MockListenerComponentFactory: Setting socket options failed");
diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h
index 50853ff411cb..097e2382e17a 100644
--- a/test/mocks/server/mocks.h
+++ b/test/mocks/server/mocks.h
@@ -259,7 +259,7 @@ class MockListenerComponentFactory : public ListenerComponentFactory {
                Network::SocketSharedPtr(Network::Address::InstanceConstSharedPtr address,
                                         Network::Address::SocketType socket_type,
                                         const Network::Socket::OptionsSharedPtr& options,
-                                        bool bind_to_port));
+                                        const ListenSocketCreationParams& params));
   MOCK_METHOD1(createDrainManager_, DrainManager*(envoy::api::v2::Listener::DrainType drain_type));
   MOCK_METHOD0(nextListenerTag, uint64_t());
 
diff --git a/test/mocks/tracing/mocks.cc b/test/mocks/tracing/mocks.cc
index 3a688957f0af..bf81e65ac7d3 100644
--- a/test/mocks/tracing/mocks.cc
+++ b/test/mocks/tracing/mocks.cc
@@ -14,7 +14,7 @@ MockSpan::~MockSpan() = default;
 
 MockConfig::MockConfig() {
   ON_CALL(*this, operationName()).WillByDefault(Return(operation_name_));
-  ON_CALL(*this, requestHeadersForTags()).WillByDefault(ReturnRef(headers_));
+  ON_CALL(*this, customTags()).WillByDefault(Return(&custom_tags_));
   ON_CALL(*this, verbose()).WillByDefault(Return(verbose_));
   ON_CALL(*this, maxPathTagLength()).WillByDefault(Return(uint32_t(256)));
 }
diff --git a/test/mocks/tracing/mocks.h b/test/mocks/tracing/mocks.h
index 1cd5b3a0c9c0..ae1b6ca17cdc 100644
--- a/test/mocks/tracing/mocks.h
+++ b/test/mocks/tracing/mocks.h
@@ -16,12 +16,12 @@ class MockConfig : public Config {
   ~MockConfig() override;
 
   MOCK_CONST_METHOD0(operationName, OperationName());
-  MOCK_CONST_METHOD0(requestHeadersForTags, const std::vector<Http::LowerCaseString>&());
+  MOCK_CONST_METHOD0(customTags, const CustomTagMap*());
   MOCK_CONST_METHOD0(verbose, bool());
   MOCK_CONST_METHOD0(maxPathTagLength, uint32_t());
 
   OperationName operation_name_{OperationName::Ingress};
-  std::vector<Http::LowerCaseString> headers_;
+  CustomTagMap custom_tags_;
   bool verbose_{false};
 };
 
diff --git a/test/server/config_validation/dispatcher_test.cc b/test/server/config_validation/dispatcher_test.cc
index 7dcf46e89b3a..38f2ddede328 100644
--- a/test/server/config_validation/dispatcher_test.cc
+++ b/test/server/config_validation/dispatcher_test.cc
@@ -52,9 +52,9 @@ TEST_P(ConfigValidation, createConnection) {
 TEST_F(ConfigValidation, SharedDnsResolver) {
   std::vector<Network::Address::InstanceConstSharedPtr> resolvers;
 
-  Network::DnsResolverSharedPtr dns1 = dispatcher_->createDnsResolver(resolvers);
+  Network::DnsResolverSharedPtr dns1 = dispatcher_->createDnsResolver(resolvers, false);
   long use_count = dns1.use_count();
-  Network::DnsResolverSharedPtr dns2 = dispatcher_->createDnsResolver(resolvers);
+  Network::DnsResolverSharedPtr dns2 = dispatcher_->createDnsResolver(resolvers, false);
 
   EXPECT_EQ(dns1.get(), dns2.get());          // Both point to the same instance.
   EXPECT_EQ(use_count + 1, dns2.use_count()); // Each call causes ++ in use_count.
diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc
index 9f1030026bce..38f077650228 100644
--- a/test/server/connection_handler_test.cc
+++ b/test/server/connection_handler_test.cc
@@ -25,6 +25,7 @@ using testing::HasSubstr;
 using testing::InSequence;
 using testing::Invoke;
 using testing::NiceMock;
+using testing::Ref;
 using testing::Return;
 using testing::ReturnRef;
 using testing::SaveArg;
diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc
index c65862f5f58f..9fab43a8ff35 100644
--- a/test/server/http/admin_test.cc
+++ b/test/server/http/admin_test.cc
@@ -839,7 +839,7 @@ TEST_P(AdminInstanceTest, EscapeHelpTextWithPunctuation) {
   Http::HeaderMapImpl header_map;
   Buffer::OwnedImpl response;
   EXPECT_EQ(Http::Code::OK, getCallback("/", header_map, response));
-  Http::HeaderString& content_type = header_map.ContentType()->value();
+  const Http::HeaderString& content_type = header_map.ContentType()->value();
   EXPECT_THAT(std::string(content_type.getStringView()), testing::HasSubstr("text/html"));
   EXPECT_EQ(-1, response.search(planets.data(), planets.size(), 0));
   const std::string escaped_planets = "jupiter&gt;saturn&gt;mars";
diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc
index 010c2f71a32f..0946bb5eeb82 100644
--- a/test/server/listener_manager_impl_quic_only_test.cc
+++ b/test/server/listener_manager_impl_quic_only_test.cc
@@ -45,10 +45,12 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) {
   EXPECT_CALL(server_.random_, uuid());
   expectCreateListenSocket(envoy::api::v2::core::SocketOption::STATE_PREBIND,
 #ifdef SO_RXQ_OVFL
-                           /* expected_num_options */ 2);
+                           /* expected_num_options */ 3, // SO_REUSEPORT is forcibly set for UDP
 #else
-                           /* expected_num_options */ 1);
+                           /* expected_num_options */ 2,
 #endif
+                           /* expected_creation_params */ {true, false});
+
   expectSetsockopt(os_sys_calls_,
                    /* expected_sockopt_level */ IPPROTO_IP,
                    /* expected_sockopt_name */ ENVOY_IP_PKTINFO,
@@ -62,6 +64,12 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) {
                    /* expected_num_calls */ 1);
 #endif
 
+  expectSetsockopt(os_sys_calls_,
+                   /* expected_sockopt_level */ SOL_SOCKET,
+                   /* expected_sockopt_name */ SO_REUSEPORT,
+                   /* expected_value */ 1,
+                   /* expected_num_calls */ 1);
+
   manager_->addOrUpdateListener(listener_proto, "", true);
   EXPECT_EQ(1u, manager_->listeners().size());
   EXPECT_FALSE(manager_->listeners()[0].get().udpListenerFactory()->isTransportConnectionless());
diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc
index 6bdf788c7d93..977a757d8f80 100644
--- a/test/server/listener_manager_impl_test.cc
+++ b/test/server/listener_manager_impl_test.cc
@@ -60,9 +60,10 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest {
   void testSocketOption(const envoy::api::v2::Listener& listener,
                         const envoy::api::v2::core::SocketOption::SocketState& expected_state,
                         const Network::SocketOptionName& expected_option, int expected_value,
-                        uint32_t expected_num_options = 1) {
+                        uint32_t expected_num_options = 1,
+                        ListenSocketCreationParams expected_creation_params = {true, true}) {
     if (expected_option.has_value()) {
-      expectCreateListenSocket(expected_state, expected_num_options);
+      expectCreateListenSocket(expected_state, expected_num_options, expected_creation_params);
       expectSetsockopt(os_sys_calls_, expected_option.level(), expected_option.option(),
                        expected_value, expected_num_options);
       manager_->addOrUpdateListener(listener, "", true);
@@ -91,7 +92,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EmptyFilter) {
   )EOF";
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
   EXPECT_EQ(std::chrono::milliseconds(15000),
@@ -108,7 +109,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, DefaultListenerPerConnectionBuffe
 - filters: []
   )EOF";
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1024 * 1024U, manager_->listeners().back().get().perConnectionBufferLimitBytes());
 }
@@ -124,7 +125,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SetListenerPerConnectionBufferLim
 per_connection_buffer_limit_bytes: 8192
   )EOF";
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(8192U, manager_->listeners().back().get().perConnectionBufferLimitBytes());
 }
@@ -156,7 +157,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsTransportSocket) {
   )EOF",
                                                        Network::Address::IpVersion::v4);
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -189,7 +190,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, DEPRECATED_FEATURE_TEST(TlsContex
   )EOF",
                                                        Network::Address::IpVersion::v4);
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -218,7 +219,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) {
   EXPECT_CALL(server_.random_, uuid());
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_CALL(listener_factory_,
-              createListenSocket(_, Network::Address::SocketType::Datagram, _, true));
+              createListenSocket(_, Network::Address::SocketType::Datagram, _, {{true, false}}));
   EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)).Times(testing::AtLeast(1));
   EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno}));
   manager_->addOrUpdateListener(listener_proto, "", true);
@@ -396,7 +397,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, StatsScopeTest) {
     config: {}
   )EOF";
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, false));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {false}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   manager_->listeners().front().get().listenerScope().counter("foo").inc();
 
@@ -414,7 +415,7 @@ TEST_F(ListenerManagerImplTest, NotDefaultListenerFiltersTimeout) {
     listener_filters_timeout: 0s
   )EOF";
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true));
   EXPECT_EQ(std::chrono::milliseconds(),
             manager_->listeners().front().get().listenerFiltersTimeout());
@@ -435,7 +436,7 @@ TEST_F(ListenerManagerImplTest, ModifyOnlyDrainType) {
 
   ListenerHandle* listener_foo =
       expectListenerCreate(false, true, envoy::api::v2::Listener_DrainType_MODIFY_ONLY);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(1, 0, 0, 0, 1, 0);
 
@@ -459,7 +460,7 @@ drain_type: default
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(1, 0, 0, 0, 1, 0);
 
@@ -514,7 +515,7 @@ drain_type: default
   ON_CALL(os_sys_calls, socket(AF_INET6, _, 0)).WillByDefault(Return(Api::SysCallIntResult{-1, 0}));
   ON_CALL(os_sys_calls, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
 
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(1, 0, 0, 0, 1, 0);
@@ -547,7 +548,7 @@ drain_type: default
   ON_CALL(os_sys_calls, socket(AF_INET6, _, 0)).WillByDefault(Return(Api::SysCallIntResult{5, 0}));
   ON_CALL(os_sys_calls, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0}));
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
 
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(1, 0, 0, 0, 1, 0);
@@ -572,7 +573,7 @@ name: foo
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(false, false);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", false));
   checkStats(1, 0, 0, 0, 1, 0);
   checkConfigDump(R"EOF(
@@ -639,7 +640,7 @@ filter_chains: {}
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_TRUE(
       manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true));
   checkStats(1, 0, 0, 0, 1, 0);
@@ -782,7 +783,7 @@ filter_chains: {}
   )EOF";
 
   ListenerHandle* listener_bar = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_TRUE(
       manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "version4", true));
@@ -803,7 +804,7 @@ filter_chains: {}
   )EOF";
 
   ListenerHandle* listener_baz = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(listener_baz->target_, initialize());
   EXPECT_TRUE(
       manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "version5", true));
@@ -916,7 +917,7 @@ name: foo
   ON_CALL(*listener_factory_.socket_, localAddress()).WillByDefault(ReturnRef(local_address));
 
   ListenerHandle* listener_foo = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   worker_->callAddCompletion(true);
@@ -976,7 +977,7 @@ name: foo
   ON_CALL(*listener_factory_.socket_, localAddress()).WillByDefault(ReturnRef(local_address));
 
   ListenerHandle* listener_foo = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   worker_->callAddCompletion(true);
@@ -994,7 +995,7 @@ name: foo
 
   // Add foo again. We should use the socket from draining.
   ListenerHandle* listener_foo2 = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   worker_->callAddCompletion(true);
@@ -1027,12 +1028,12 @@ name: foo
   auto syscall_result = os_syscall.socket(AF_INET, SOCK_STREAM, 0);
   ASSERT_GE(syscall_result.rc_, 0);
   ListenerHandle* listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, false))
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {false}))
       .WillOnce(Invoke([this, &syscall_result, &real_listener_factory](
                            const Network::Address::InstanceConstSharedPtr& address,
                            Network::Address::SocketType socket_type,
                            const Network::Socket::OptionsSharedPtr& options,
-                           bool bind_to_port) -> Network::SocketSharedPtr {
+                           const ListenSocketCreationParams& params) -> Network::SocketSharedPtr {
         EXPECT_CALL(server_, hotRestart).Times(0);
         // When bind_to_port is equal to false, create socket fd directly, and do not get socket
         // fd through hot restart.
@@ -1040,8 +1041,46 @@ name: foo
         TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);
         ON_CALL(os_sys_calls, socket(AF_INET, _, 0))
             .WillByDefault(Return(Api::SysCallIntResult{syscall_result.rc_, 0}));
-        return real_listener_factory.createListenSocket(address, socket_type, options,
-                                                        bind_to_port);
+        return real_listener_factory.createListenSocket(address, socket_type, options, params);
+      }));
+  EXPECT_CALL(listener_foo->target_, initialize());
+  EXPECT_CALL(*listener_foo, onDestroy());
+  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
+}
+
+TEST_F(ListenerManagerImplTest, ReusePortEqualToTrue) {
+  InSequence s;
+  ProdListenerComponentFactory real_listener_factory(server_);
+  EXPECT_CALL(*worker_, start(_));
+  manager_->startWorkers(guard_dog_);
+  const std::string listener_foo_yaml = R"EOF(
+name: foo
+address:
+  socket_address:
+    address: 127.0.0.1
+    port_value: 0
+reuse_port: true
+filter_chains:
+- filters: []
+  )EOF";
+
+  Api::OsSysCallsImpl os_syscall;
+  auto syscall_result = os_syscall.socket(AF_INET, SOCK_STREAM, 0);
+  ASSERT_GE(syscall_result.rc_, 0);
+
+  ListenerHandle* listener_foo = expectListenerCreate(true, true);
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {{true, false}}))
+      .WillOnce(Invoke([this, &syscall_result, &real_listener_factory](
+                           const Network::Address::InstanceConstSharedPtr& address,
+                           Network::Address::SocketType socket_type,
+                           const Network::Socket::OptionsSharedPtr& options,
+                           const ListenSocketCreationParams& params) -> Network::SocketSharedPtr {
+        EXPECT_CALL(server_, hotRestart).Times(0);
+        NiceMock<Api::MockOsSysCalls> os_sys_calls;
+        TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls);
+        ON_CALL(os_sys_calls, socket(AF_INET, _, 0))
+            .WillByDefault(Return(Api::SysCallIntResult{syscall_result.rc_, 0}));
+        return real_listener_factory.createListenSocket(address, socket_type, options, params);
       }));
   EXPECT_CALL(listener_foo->target_, initialize());
   EXPECT_CALL(*listener_foo, onDestroy());
@@ -1052,7 +1091,7 @@ TEST_F(ListenerManagerImplTest, NotSupportedDatagramUds) {
   ProdListenerComponentFactory real_listener_factory(server_);
   EXPECT_THROW_WITH_MESSAGE(real_listener_factory.createListenSocket(
                                 std::make_shared<Network::Address::PipeInstance>("/foo"),
-                                Network::Address::SocketType::Datagram, nullptr, true),
+                                Network::Address::SocketType::Datagram, nullptr, {true}),
                             EnvoyException,
                             "socket type SocketType::Datagram not supported for pipes");
 }
@@ -1075,7 +1114,7 @@ name: foo
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true))
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}))
       .WillOnce(Throw(EnvoyException("can't bind")));
   EXPECT_CALL(*listener_foo, onDestroy());
   EXPECT_THROW(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true),
@@ -1164,7 +1203,7 @@ name: foo
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   worker_->callAddCompletion(true);
@@ -1218,7 +1257,7 @@ name: foo
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(listener_foo->target_, initialize());
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   EXPECT_EQ(0UL, manager_->listeners().size());
@@ -1232,7 +1271,7 @@ name: foo
 
   // Add foo again and initialize it.
   listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(listener_foo->target_, initialize());
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(2, 0, 1, 1, 0, 0);
@@ -1299,7 +1338,7 @@ traffic_direction: INBOUND
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(listener_foo->target_, initialize());
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(1, 0, 0, 1, 0, 0);
@@ -1322,7 +1361,7 @@ traffic_direction: OUTBOUND
   )EOF";
 
   ListenerHandle* listener_foo_outbound = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(listener_foo_outbound->target_, initialize());
   EXPECT_TRUE(
       manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_outbound_yaml), "", true));
@@ -1350,7 +1389,7 @@ traffic_direction: OUTBOUND
   )EOF";
 
   ListenerHandle* listener_bar_outbound = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_TRUE(
       manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_outbound_yaml), "", true));
@@ -1393,7 +1432,7 @@ name: foo
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(listener_foo->target_, initialize());
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(1, 0, 0, 1, 0, 0);
@@ -1442,7 +1481,7 @@ traffic_direction: INBOUND
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(listener_foo->target_, initialize());
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
   checkStats(1, 0, 0, 1, 0, 0);
@@ -1499,7 +1538,7 @@ name: foo
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(false, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   EXPECT_CALL(*worker_, addListener(_, _));
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
 
@@ -1552,7 +1591,7 @@ name: foo
   )EOF";
 
   ListenerHandle* listener_foo = expectListenerCreate(true, true);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, false));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {false}));
   EXPECT_CALL(listener_foo->target_, initialize());
   EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
 
@@ -1621,7 +1660,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1667,7 +1706,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1713,7 +1752,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1760,7 +1799,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1803,7 +1842,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1846,7 +1885,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1903,7 +1942,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1959,7 +1998,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -1998,7 +2037,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2062,7 +2101,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2144,7 +2183,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2230,7 +2269,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2325,7 +2364,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2397,7 +2436,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2442,7 +2481,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2490,7 +2529,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2561,7 +2600,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 }
@@ -2602,7 +2641,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest,
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 }
@@ -2721,7 +2760,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsFilterChainWithoutTlsInspector
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2748,7 +2787,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SniFilterChainWithoutTlsInspector
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2775,7 +2814,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AlpnFilterChainWithoutTlsInspecto
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2803,7 +2842,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CustomTransportProtocolWithSniWit
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -2842,7 +2881,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInline) {
   )EOF");
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 }
@@ -2867,7 +2906,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateChainInlinePrivateK
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 }
@@ -3025,7 +3064,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) {
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -3097,7 +3136,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) {
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -3171,7 +3210,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) {
                                                        Network::Address::IpVersion::v6);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 
@@ -3213,10 +3252,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentFreebindListenerDisabl
     - filters:
   )EOF",
                                                        Network::Address::IpVersion::v4);
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true))
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}))
       .WillOnce(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Address::SocketType,
                            const Network::Socket::OptionsSharedPtr& options,
-                           bool) -> Network::SocketSharedPtr {
+                           const ListenSocketCreationParams&) -> Network::SocketSharedPtr {
         EXPECT_EQ(options, nullptr);
         return listener_factory_.socket_;
       }));
@@ -3266,6 +3305,64 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, FastOpenListenerEnabled) {
                    ENVOY_SOCKET_TCP_FASTOPEN, /* expected_value */ 1);
 }
 
+// Validate that when reuse_port is set in the Listener, we see the socket option
+// propagated to setsockopt().
+TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerEnabledForTcp) {
+  auto listener = createIPv4Listener("ReusePortListener");
+  listener.set_reuse_port(true);
+  // When reuse_port is true, the port should be 0 so that the shared socket is created here;
+  // otherwise socket creation is done on the worker threads.
+  listener.mutable_address()->mutable_socket_address()->set_port_value(0);
+  testSocketOption(listener, envoy::api::v2::core::SocketOption::STATE_PREBIND,
+                   ENVOY_SOCKET_SO_REUSEPORT, /* expected_value */ 1,
+                   /* expected_num_options */ 1,
+                   /* expected_creation_params */ {true, false});
+}
+
+TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerEnabledForUdp) {
+
+  auto listener = createIPv4Listener("UdpListener");
+  listener.mutable_address()->mutable_socket_address()->set_protocol(
+      envoy::api::v2::core::SocketAddress::UDP);
+  // For UDP, reuse_port is forced to true, even if it is explicitly set to false in the config.
+  listener.set_reuse_port(false);
+
+  // The port should be 0 so that the shared socket is created here; otherwise socket
+  // creation will happen on the worker threads.
+  listener.mutable_address()->mutable_socket_address()->set_port_value(0);
+
+  // IpPacketInfo and RxQueueOverFlow are always set if supported.
+  expectCreateListenSocket(envoy::api::v2::core::SocketOption::STATE_PREBIND,
+#ifdef SO_RXQ_OVFL
+                           /* expected_num_options */ 3,
+#else
+                           /* expected_num_options */ 2,
+#endif
+                           /* expected_creation_params */ {true, false});
+
+  expectSetsockopt(os_sys_calls_,
+                   /* expected_sockopt_level */ IPPROTO_IP,
+                   /* expected_sockopt_name */ ENVOY_IP_PKTINFO,
+                   /* expected_value */ 1,
+                   /* expected_num_calls */ 1);
+#ifdef SO_RXQ_OVFL
+  expectSetsockopt(os_sys_calls_,
+                   /* expected_sockopt_level */ SOL_SOCKET,
+                   /* expected_sockopt_name */ SO_RXQ_OVFL,
+                   /* expected_value */ 1,
+                   /* expected_num_calls */ 1);
+#endif
+
+  expectSetsockopt(os_sys_calls_,
+                   /* expected_sockopt_level */ SOL_SOCKET,
+                   /* expected_sockopt_name */ SO_REUSEPORT,
+                   /* expected_value */ 1,
+                   /* expected_num_calls */ 1);
+
+  manager_->addOrUpdateListener(listener, "", true);
+  EXPECT_EQ(1U, manager_->listeners().size());
+}
+
 TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) {
   const envoy::api::v2::Listener listener = parseListenerFromV2Yaml(R"EOF(
     name: SockoptsListener
@@ -3314,7 +3411,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AddressResolver) {
 
   Registry::InjectFactory<Network::Address::Resolver> register_resolver(mock_resolver);
 
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 }
@@ -3339,7 +3436,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLFilename) {
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 }
@@ -3367,7 +3464,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLInline) {
                                                        Network::Address::IpVersion::v4);
 
   EXPECT_CALL(server_.random_, uuid());
-  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true));
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
   manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true);
   EXPECT_EQ(1U, manager_->listeners().size());
 }
diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h
index 883b1baa35bb..2acc73e11440 100644
--- a/test/server/listener_manager_impl_test.h
+++ b/test/server/listener_manager_impl_test.h
@@ -147,13 +147,14 @@ class ListenerManagerImplTest : public testing::Test {
    */
   void
   expectCreateListenSocket(const envoy::api::v2::core::SocketOption::SocketState& expected_state,
-                           Network::Socket::Options::size_type expected_num_options) {
-    EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true))
-        .WillOnce(Invoke([this, expected_num_options,
-                          &expected_state](const Network::Address::InstanceConstSharedPtr&,
-                                           Network::Address::SocketType,
-                                           const Network::Socket::OptionsSharedPtr& options,
-                                           bool) -> Network::SocketSharedPtr {
+                           Network::Socket::Options::size_type expected_num_options,
+                           ListenSocketCreationParams expected_creation_params = {true, true}) {
+    EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, expected_creation_params))
+        .WillOnce(Invoke([this, expected_num_options, &expected_state](
+                             const Network::Address::InstanceConstSharedPtr&,
+                             Network::Address::SocketType,
+                             const Network::Socket::OptionsSharedPtr& options,
+                             const ListenSocketCreationParams&) -> Network::SocketSharedPtr {
           EXPECT_NE(options.get(), nullptr);
           EXPECT_EQ(options->size(), expected_num_options);
           EXPECT_TRUE(
diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc
index e4b01924c021..1f2dc3c87cce 100644
--- a/test/server/options_impl_test.cc
+++ b/test/server/options_impl_test.cc
@@ -32,15 +32,12 @@ namespace {
 class OptionsImplTest : public testing::Test {
 
 public:
-  // Do the ugly work of turning a std::string into a char** and create an OptionsImpl. Args are
+  // Do the ugly work of turning a std::string into a vector and create an OptionsImpl. Args are
   // separated by a single space: no fancy quoting or escaping.
   std::unique_ptr<OptionsImpl> createOptionsImpl(const std::string& args) {
     std::vector<std::string> words = TestUtility::split(args, ' ');
-    std::vector<const char*> argv;
-    std::transform(words.cbegin(), words.cend(), std::back_inserter(argv),
-                   [](const std::string& arg) { return arg.c_str(); });
     return std::make_unique<OptionsImpl>(
-        argv.size(), argv.data(), [](bool) { return "1"; }, spdlog::level::warn);
+        std::move(words), [](bool) { return "1"; }, spdlog::level::warn);
   }
 };
 
@@ -252,6 +249,25 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) {
   EXPECT_EQ(options->count() - 6, command_line_options->GetDescriptor()->field_count());
 }
 
+TEST_F(OptionsImplTest, OptionsFromArgv) {
+  const std::array<const char*, 3> args{"envoy", "-c", "hello"};
+  std::unique_ptr<OptionsImpl> options = std::make_unique<OptionsImpl>(
+      args.size(), args.data(), [](bool) { return "1"; }, spdlog::level::warn);
+  // Spot check that the arguments were parsed.
+  EXPECT_EQ("hello", options->configPath());
+}
+
+TEST_F(OptionsImplTest, OptionsFromArgvPrefix) {
+  const std::array<const char*, 5> args{"envoy", "-c", "hello", "--admin-address-path", "goodbye"};
+  std::unique_ptr<OptionsImpl> options = std::make_unique<OptionsImpl>(
+      args.size() - 2, // Pass in only a prefix of the args
+      args.data(), [](bool) { return "1"; }, spdlog::level::warn);
+  EXPECT_EQ("hello", options->configPath());
+  // This should still have the default value since the extra arguments are
+  // ignored.
+  EXPECT_EQ("", options->adminAddressPath());
+}
+
 TEST_F(OptionsImplTest, BadCliOption) {
   EXPECT_THROW_WITH_REGEX(createOptionsImpl("envoy -c hello --local-address-ip-version foo"),
                           MalformedArgvException, "error: unknown IP address version 'foo'");
diff --git a/test/test_common/utility.h b/test/test_common/utility.h
index 62f6c2276a2f..ec1f0c32245e 100644
--- a/test/test_common/utility.h
+++ b/test/test_common/utility.h
@@ -644,6 +644,8 @@ class TestHeaderMapImpl : public HeaderMapImpl {
   std::string get_(const LowerCaseString& key) const;
   bool has(const std::string& key) const;
   bool has(const LowerCaseString& key) const;
+
+  void verifyByteSize() override { ASSERT(cached_byte_size_ == byteSizeInternal()); }
 };
 
 // Helper method to create a header map from an initializer list. Useful due to make_unique's
diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc
index 47482a3c1fae..936589c8ca46 100644
--- a/test/tools/router_check/router.cc
+++ b/test/tools/router_check/router.cc
@@ -73,7 +73,7 @@ RouterCheckTool RouterCheckTool::create(const std::string& router_config_file,
   auto api = Api::createApiForTest(*stats);
   TestUtility::loadFromFile(router_config_file, route_config, *api);
   assignUniqueRouteNames(route_config);
-
+  assignRuntimeFraction(route_config);
   auto factory_context =
       std::make_unique<NiceMock<Server::Configuration::MockServerFactoryContext>>();
   auto config = std::make_unique<Router::ConfigImpl>(
@@ -97,6 +97,18 @@ void RouterCheckTool::assignUniqueRouteNames(envoy::api::v2::RouteConfiguration&
   }
 }
 
+void RouterCheckTool::assignRuntimeFraction(envoy::api::v2::RouteConfiguration& route_config) {
+  for (auto& host : *route_config.mutable_virtual_hosts()) {
+    for (auto& route : *host.mutable_routes()) {
+      if (route.match().has_runtime_fraction() &&
+          route.match().runtime_fraction().default_value().numerator() == 0) {
+        route.mutable_match()->mutable_runtime_fraction()->mutable_default_value()->set_numerator(
+            1);
+      }
+    }
+  }
+}
+
 RouterCheckTool::RouterCheckTool(
     std::unique_ptr<NiceMock<Server::Configuration::MockServerFactoryContext>> factory_context,
     std::unique_ptr<Router::ConfigImpl> config, std::unique_ptr<Stats::IsolatedStoreImpl> stats,
diff --git a/test/tools/router_check/router.h b/test/tools/router_check/router.h
index fe8d20c421b8..8083e356c994 100644
--- a/test/tools/router_check/router.h
+++ b/test/tools/router_check/router.h
@@ -110,6 +110,11 @@ class RouterCheckTool : Logger::Loggable<Logger::Id::testing> {
    * Set UUID as the name for each route for detecting missing tests during the coverage check.
    */
   static void assignUniqueRouteNames(envoy::api::v2::RouteConfiguration& route_config);
+  /**
+   * For each route with a runtime fraction of 0%, set the numerator to a nonzero value so that
+   * the route can be tested as either enabled or disabled.
+   */
+  static void assignRuntimeFraction(envoy::api::v2::RouteConfiguration& route_config);
 
   bool compareCluster(ToolConfig& tool_config, const std::string& expected);
   bool compareCluster(ToolConfig& tool_config,
diff --git a/test/tools/router_check/test/config/Runtime.golden.proto.json b/test/tools/router_check/test/config/Runtime.golden.proto.json
index e981f82c8a4f..63a6481418a4 100644
--- a/test/tools/router_check/test/config/Runtime.golden.proto.json
+++ b/test/tools/router_check/test/config/Runtime.golden.proto.json
@@ -10,7 +10,12 @@
           "internal": true
         },
         "validate": {
-          "cluster_name": "www3"
+          "cluster_name": "www3",
+          "virtual_cluster_name": "",
+          "virtual_host_name": "www2",
+          "path_rewrite": "/",
+          "host_rewrite": "www.lyft.com",
+          "path_redirect": ""
         }
       },
       {
@@ -25,7 +30,12 @@
           "random_value": 70
         },
         "validate": {
-          "cluster_name": "www3"
+          "cluster_name": "www3",
+          "virtual_cluster_name": "",
+          "virtual_host_name": "www2",
+          "path_rewrite": "/",
+          "host_rewrite": "www.lyft.com",
+          "path_redirect": ""
         }
       },
       {
@@ -40,8 +50,90 @@
           "random_value": 20
         },
         "validate": {
-          "cluster_name": "www2"
+          "cluster_name": "www2",
+          "virtual_cluster_name": "",
+          "virtual_host_name": "www2",
+          "path_rewrite": "/",
+          "host_rewrite": "www.lyft.com",
+          "path_redirect": ""
+        }
+      },
+      {
+        "test_name": "Test_4",
+        "input": {
+          "authority": "www.lyft.com",
+          "path": "/disabled",
+          "method": "GET",
+          "ssl": true,
+          "internal": true,
+          "runtime": "runtime.key",
+          "random_value": 0
+        },
+        "validate": {
+          "cluster_name": "www4",
+          "virtual_cluster_name": "",
+          "virtual_host_name": "www2",
+          "path_rewrite": "/disabled",
+          "host_rewrite": "www.lyft.com",
+          "path_redirect": ""
+        }
+      },
+      {
+        "test_name": "Test_5",
+        "input": {
+          "authority": "www.lyft.com",
+          "path": "/disabled",
+          "method": "GET",
+          "ssl": true,
+          "internal": true,
+          "runtime": "runtime.key",
+          "random_value": 2
+        },
+        "validate": {
+          "cluster_name": "www2",
+          "virtual_cluster_name": "",
+          "virtual_host_name": "www2",
+          "path_rewrite": "/disabled",
+          "host_rewrite": "www.lyft.com",
+          "path_redirect": ""
+        }
+      },
+      {
+        "test_name": "Test_6",
+        "input": {
+          "authority": "www.lyft.com",
+          "path": "/disabled",
+          "method": "GET",
+          "ssl": true,
+          "internal": true,
+          "random_value": 2
+        },
+        "validate": {
+          "cluster_name": "www3",
+          "virtual_cluster_name": "",
+          "virtual_host_name": "www2",
+          "path_rewrite": "/disabled",
+          "host_rewrite": "www.lyft.com",
+          "path_redirect": ""
+        }
+      },
+      {
+        "test_name": "Test_7",
+        "input": {
+          "authority": "www.lyft.com",
+          "path": "/disabled",
+          "method": "GET",
+          "ssl": true,
+          "internal": true
+        },
+        "validate": {
+          "cluster_name": "www3",
+          "virtual_cluster_name": "",
+          "virtual_host_name": "www2",
+          "path_rewrite": "/disabled",
+          "host_rewrite": "www.lyft.com",
+          "path_redirect": ""
         }
       }
     ]
-  }
+}
diff --git a/test/tools/router_check/test/config/Runtime.yaml b/test/tools/router_check/test/config/Runtime.yaml
index 2e91810912e3..a35c9165a079 100644
--- a/test/tools/router_check/test/config/Runtime.yaml
+++ b/test/tools/router_check/test/config/Runtime.yaml
@@ -3,6 +3,15 @@ virtual_hosts:
   domains:
   - www.lyft.com
   routes:
+    - match:
+        prefix: /disabled
+        runtime_fraction:
+          runtime_key: runtime.key
+          default_value:
+            numerator: 0
+            denominator: HUNDRED
+      route:
+        cluster: www4
     - match:
         prefix: /
         runtime_fraction:
diff --git a/test/tools/router_check/test/route_tests.sh b/test/tools/router_check/test/route_tests.sh
index 8c867036dff3..edf5a35e0046 100755
--- a/test/tools/router_check/test/route_tests.sh
+++ b/test/tools/router_check/test/route_tests.sh
@@ -41,6 +41,12 @@ if [[ "${COVERAGE_OUTPUT}" != *"Current route coverage: 100%"* ]] ; then
   exit 1
 fi
 
+RUNTIME_COVERAGE_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/Runtime.yaml" "-t" "${PATH_CONFIG}/Runtime.golden.proto.json" "--details" "--useproto" "--covall" 2>&1) ||
+  echo "${RUNTIME_COVERAGE_OUTPUT:-no-output}"
+if [[ "${RUNTIME_COVERAGE_OUTPUT}" != *"Current route coverage: 100%"* ]] ; then
+  exit 1
+fi
+
 # Testing coverage flag fails
 COVERAGE_OUTPUT=$($COVERAGE_CMD "100" 2>&1) || echo "${COVERAGE_OUTPUT:-no-output}"
 if [[ "${COVERAGE_OUTPUT}" != *"Failed to meet coverage requirement: 100%"* ]] ; then
diff --git a/tools/api_proto_plugin/BUILD b/tools/api_proto_plugin/BUILD
index d74743ea8689..4eb5ddaa7832 100644
--- a/tools/api_proto_plugin/BUILD
+++ b/tools/api_proto_plugin/BUILD
@@ -1,5 +1,8 @@
 licenses(["notice"])  # Apache 2
 
+load("@rules_python//python:defs.bzl", "py_library")
+load("//tools/type_whisperer:type_database.bzl", "type_database")
+
 py_library(
     name = "api_proto_plugin",
     srcs = [
@@ -22,3 +25,16 @@ py_library(
     srcs_version = "PY3",
     visibility = ["//visibility:public"],
 )
+
+label_flag(
+    name = "default_type_db_target",
+    # This target is not completely empty, but type_db_gen generates nothing from it.
+    build_setting_default = "@com_google_protobuf//:empty_proto",
+    visibility = ["//visibility:public"],
+)
+
+type_database(
+    name = "default_type_db",
+    targets = [":default_type_db_target"],
+    visibility = ["//visibility:public"],
+)
diff --git a/tools/api_proto_plugin/plugin.bzl b/tools/api_proto_plugin/plugin.bzl
index 357a91ef00ca..5bc0ef7f7112 100644
--- a/tools/api_proto_plugin/plugin.bzl
+++ b/tools/api_proto_plugin/plugin.bzl
@@ -1,3 +1,5 @@
+load("@rules_proto//proto:defs.bzl", "ProtoInfo")
+
 # Borrowed from https://github.com/grpc/grpc-java/blob/v1.24.1/java_grpc_library.bzl#L61
 def _path_ignoring_repository(f):
     # Bazel creates a _virtual_imports directory in case the .proto source files
@@ -43,16 +45,24 @@ def api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes):
                                              output_suffix) for f in proto_sources]
 
     # Create the protoc command-line args.
+    inputs = target[ProtoInfo].transitive_sources
     ctx_path = ctx.label.package + "/" + ctx.label.name
     output_path = outputs[0].root.path + "/" + outputs[0].owner.workspace_root + "/" + ctx_path
     args = ["-I./" + ctx.label.workspace_root]
     args += ["-I" + import_path for import_path in import_paths]
     args += ["--plugin=protoc-gen-api_proto_plugin=" + ctx.executable._api_proto_plugin.path, "--api_proto_plugin_out=" + output_path]
+    if hasattr(ctx.attr, "_type_db"):
+        inputs = depset(transitive = [inputs] + [ctx.attr._type_db.files])
+        if len(ctx.attr._type_db.files.to_list()) != 1:
+            fail("{} must have one type database file".format(ctx.attr._type_db))
+        args += ["--api_proto_plugin_opt=type_db_path=" + ctx.attr._type_db.files.to_list()[0].path]
     args += [src.path for src in target[ProtoInfo].direct_sources]
+    env = {}
+
     ctx.actions.run(
         executable = ctx.executable._protoc,
         arguments = args,
-        inputs = target[ProtoInfo].transitive_sources,
+        inputs = inputs,
         tools = [ctx.executable._api_proto_plugin],
         outputs = outputs,
         mnemonic = mnemonic,
@@ -62,20 +72,25 @@ def api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes):
     transitive_outputs = depset(outputs, transitive = [transitive_outputs])
     return [OutputGroupInfo(**{output_group: transitive_outputs})]
 
-def api_proto_plugin_aspect(tool_label, aspect_impl):
+def api_proto_plugin_aspect(tool_label, aspect_impl, use_type_db = False):
+    _attrs = {
+        "_protoc": attr.label(
+            default = Label("@com_google_protobuf//:protoc"),
+            executable = True,
+            cfg = "exec",
+        ),
+        "_api_proto_plugin": attr.label(
+            default = Label(tool_label),
+            executable = True,
+            cfg = "exec",
+        ),
+    }
+    if use_type_db:
+        _attrs["_type_db"] = attr.label(
+            default = Label("@envoy//tools/api_proto_plugin:default_type_db"),
+        )
     return aspect(
         attr_aspects = ["deps"],
-        attrs = {
-            "_protoc": attr.label(
-                default = Label("@com_google_protobuf//:protoc"),
-                executable = True,
-                cfg = "exec",
-            ),
-            "_api_proto_plugin": attr.label(
-                default = Label(tool_label),
-                executable = True,
-                cfg = "exec",
-            ),
-        },
+        attrs = _attrs,
         implementation = aspect_impl,
     )
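The new `use_type_db` parameter lets an aspect opt into the type database. A rough sketch of how a
downstream plugin might wire this up (the tool label, mnemonic, and output suffix below are
placeholders, not targets defined by this patch):

```starlark
load("//tools/api_proto_plugin:plugin.bzl", "api_proto_plugin_aspect", "api_proto_plugin_impl")

def _my_plugin_impl(target, ctx):
    # Delegate to the shared implementation; the output group, mnemonic and
    # suffix are placeholder values for illustration.
    return api_proto_plugin_impl(target, ctx, "proto", "MyPlugin", [".my.out"])

# use_type_db = True adds the hidden _type_db attribute (defaulting to
# @envoy//tools/api_proto_plugin:default_type_db) and passes its path to the
# plugin binary via --api_proto_plugin_opt=type_db_path=<path>.
my_plugin_aspect = api_proto_plugin_aspect(
    "//tools/my_plugin:my_plugin",  # placeholder protoc plugin binary
    _my_plugin_impl,
    use_type_db = True,
)
```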
diff --git a/tools/api_proto_plugin/plugin.py b/tools/api_proto_plugin/plugin.py
index d568afd0a9ee..31ac2c0dbb94 100644
--- a/tools/api_proto_plugin/plugin.py
+++ b/tools/api_proto_plugin/plugin.py
@@ -17,9 +17,9 @@
         # Output files are generated alongside their corresponding input .proto,
         # with the output_suffix appended.
         'output_suffix',
-        # The visitor is a visitor.Visitor defining the business logic of the plugin
-        # for the specific output descriptor.
-        'visitor',
+        # The visitor factory is a function to create a visitor.Visitor defining
+        # the business logic of the plugin for the specific output descriptor.
+        'visitor_factory',
         # FileDescriptorProto transformer; this is applied to the input
         # before any output generation.
         'xform',
@@ -30,7 +30,7 @@ def DirectOutputDescriptor(output_suffix, visitor):
   return OutputDescriptor(output_suffix, visitor, lambda x: x)
 
 
-def Plugin(output_descriptors):
+def Plugin(output_descriptors, parameter_callback=None):
   """Protoc plugin entry point.
 
   This defines protoc plugin and manages the stdin -> stdout flow. An
@@ -48,6 +48,9 @@ def Plugin(output_descriptors):
   response = plugin_pb2.CodeGeneratorResponse()
   cprofile_enabled = os.getenv('CPROFILE_ENABLED')
 
+  if request.HasField("parameter") and parameter_callback:
+    parameter_callback(request.parameter)
+
   # We use request.file_to_generate rather than request.file_proto here since we
   # are invoked inside a Bazel aspect, each node in the DAG will be visited once
   # by the aspect and we only want to generate docs for the current node.
@@ -61,7 +64,8 @@ def Plugin(output_descriptors):
       f = response.file.add()
       f.name = file_proto.name + od.output_suffix
       xformed_proto = od.xform(file_proto)
-      f.content = traverse.TraverseFile(od.xform(file_proto), od.visitor) if xformed_proto else ''
+      f.content = traverse.TraverseFile(xformed_proto,
+                                        od.visitor_factory()) if xformed_proto else ''
     if cprofile_enabled:
       pr.disable()
       stats_stream = io.StringIO()
diff --git a/tools/check_format.py b/tools/check_format.py
index 90260a1d23dd..f555b74e2406 100755
--- a/tools/check_format.py
+++ b/tools/check_format.py
@@ -307,6 +307,12 @@ def whitelistedForGrpcInit(file_path):
   return file_path in GRPC_INIT_WHITELIST
 
 
+def whitelistedForUnpackTo(file_path):
+  return file_path.startswith("./test") or file_path in [
+      "./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h"
+  ]
+
+
 def findSubstringAndReturnError(pattern, file_path, error_message):
   text = readFile(file_path)
   if pattern in text:
@@ -499,6 +505,9 @@ def checkSourceLine(line, file_path, reportError):
        "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \
        "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line):
       reportError("Don't reference real-world time sources from production code; use injection")
+  if not whitelistedForUnpackTo(file_path):
+    if "UnpackTo" in line:
+      reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
   # Check that we use the absl::Time library
   if "std::get_time" in line:
     if "test/" in file_path:
@@ -851,7 +860,10 @@ def checkErrorMessages(error_messages):
   namespace_check = args.namespace_check
   namespace_check_excluded_paths = args.namespace_check_excluded_paths
   build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [
-      "./bazel/external/", "./bazel/toolchains/", "./bazel/BUILD"
+      "./bazel/external/",
+      "./bazel/toolchains/",
+      "./bazel/BUILD",
+      "./tools/clang_tools",
   ]
   include_dir_order = args.include_dir_order
   if args.add_excluded_prefixes:
diff --git a/tools/check_format_test_helper.py b/tools/check_format_test_helper.py
index 74bb3c7ccddb..dbe943dad7ca 100755
--- a/tools/check_format_test_helper.py
+++ b/tools/check_format_test_helper.py
@@ -180,6 +180,8 @@ def runChecks():
   errors += checkUnfixableError("real_time_system.cc", real_time_inject_error)
   errors += checkUnfixableError("system_clock.cc", real_time_inject_error)
   errors += checkUnfixableError("steady_clock.cc", real_time_inject_error)
+  errors += checkUnfixableError(
+      "unpack_to.cc", "Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
   errors += checkUnfixableError("condvar_wait_for.cc", real_time_inject_error)
   errors += checkUnfixableError("sleep.cc", real_time_inject_error)
   errors += checkUnfixableError("std_atomic_free_functions.cc", "std::atomic_*")
diff --git a/tools/clang_tools/README.md b/tools/clang_tools/README.md
new file mode 100644
index 000000000000..2c4738d1eea5
--- /dev/null
+++ b/tools/clang_tools/README.md
@@ -0,0 +1,55 @@
+# Envoy Clang libtooling developer tools
+
+## Overview
+
+This directory contains a number of tools intended for use by Envoy developers
+(and potentially CI). These are host tools and should not be linked into the
+Envoy target. They are based on Clang's
+[libtooling](https://clang.llvm.org/docs/LibTooling.html) libraries, a C++
+framework for writing Clang tools in the style of `clang-format` and
+`clang-check`.
+
+## Building and running
+
+To build tools in this tree, a Clang binary install must be available. If you
+are building Envoy with `clang`, this should already be true of your system. You
+can find prebuilt binary releases of Clang at https://releases.llvm.org. You
+will need the Clang version used by Envoy in CI (currently clang-9.0).
+
+To build a tool, set the following environment variable:
+
+```console
+export LLVM_CONFIG=<path to clang installation>/bin/llvm-config
+```
+
+Assuming that `CC` and `CXX` already point at Clang, you should be able to build
+with:
+
+```console
+bazel build @envoy_dev//clang_tools/syntax_only
+```
+
+To run `libtooling` based tools against Envoy, you will first need to generate a
+compilation database, which tells the tool how to take a source file and locate
+its various dependencies. The `tools/gen_compilation_database.py` script
+generates this and also sets up the Bazel cache paths so that external
+dependencies can be located:
+
+```console
+tools/gen_compilation_database.py --run_bazel_build --include_headers
+```
+
+Finally, the tool can be run against source files in the Envoy tree:
+
+```console
+bazel-bin/external/envoy_dev/clang_tools/syntax_only/syntax_only \
+  source/common/common/logger.cc
+```
+
+## Adding a new Envoy libtooling based tool
+
+Follow the example at `tools/clang_tools/syntax_only`, based on the tutorial
+example at https://clang.llvm.org/docs/LibTooling.html. Please use the
+`envoy_clang_tools_cc_binary` Bazel macro for the tool; this disables use of
+RTTI/exceptions and allows developer tools to be structurally excluded from the
+build as needed.
diff --git a/tools/clang_tools/support/BUILD b/tools/clang_tools/support/BUILD
new file mode 100644
index 000000000000..779d1695d3b7
--- /dev/null
+++ b/tools/clang_tools/support/BUILD
@@ -0,0 +1 @@
+licenses(["notice"])  # Apache 2
diff --git a/tools/clang_tools/support/BUILD.prebuilt b/tools/clang_tools/support/BUILD.prebuilt
new file mode 100644
index 000000000000..3785175e4908
--- /dev/null
+++ b/tools/clang_tools/support/BUILD.prebuilt
@@ -0,0 +1,398 @@
+# Bazel BUILD definitions for the pre-built Clang 9.0 libraries.
+#
+# This file was mostly manually assembled (with some hacky Python scripts) from
+# clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz and corresponding
+# https://github.com/llvm/llvm-project.git source. It needs Clang 9.0 to work.
+#
+# The BUILD file declares sufficient dependency relationships
+# between the prebuilt libraries in a clang+llvm distribution to support building libtooling
+# based binaries in the Envoy repository. We're chasing a moving target, and as new libraries are
+# depended upon by new Clang versions, it will be necessary to augment these definitions.
+#
+# The key to understanding llvm-project layout is that there are a collection of libraries in
+# {clang,llvm}/lib. For the clang libraries, the CMakeLists.txt supplies the Clang library deps in
+# LINK_LIBS inside add_clang_library() and the llvm deps in LLVM_LINK_COMPONENTS. For the llvm
+# libraries, LLVMBuild.txt provides llvm deps (it does not reference any of the clang libs).
+#
+# It's kind of terrible that we need to do this by hand, but llvm-project is CMake canonical, and we
+# don't want to use rules_foreign_cc to build the libraries from source just to access some
+# developer libs which will exist on the filesystem of most devs who are using Clang.
+
+package(default_visibility = ["//visibility:public"])
+
+# We should use cc_import below, but it doesn't like .def files in Clang. See
+# https://github.com/bazelbuild/bazel/issues/6767.
+#
+
+cc_library(
+    name = "clang_analysis",
+    srcs = ["lib/libclangAnalysis.a"],
+    hdrs = glob(["clang/Analysis/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_astmatchers",
+        ":clang_basic",
+        ":clang_lex",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_basic",
+    srcs = ["lib/libclangBasic.a"],
+    hdrs = glob([
+        "clang/Basic/**",
+        "clang-c/**",
+    ]),
+    deps = [
+        ":llvm_core",
+        ":llvm_mc",
+        ":llvm_support",
+        ":llvm_target",
+    ],
+)
+
+cc_library(
+    name = "clang_ast",
+    srcs = ["lib/libclangAST.a"],
+    hdrs = glob(["clang/AST/**"]),
+    deps = [
+        ":clang_basic",
+        ":clang_lex",
+        ":llvm_binaryformat",
+        ":llvm_core",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_astmatchers",
+    srcs = ["lib/libclangASTMatchers.a"],
+    hdrs = glob(["clang/ASTMatchers/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_basic",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_driver",
+    srcs = ["lib/libclangDriver.a"],
+    hdrs = glob(["clang/Driver/**"]),
+    deps = [
+        ":clang_basic",
+        ":llvm_binaryformat",
+        ":llvm_option",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_edit",
+    srcs = ["lib/libclangEdit.a"],
+    hdrs = glob(["clang/Edit/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_basic",
+        ":clang_lex",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_format",
+    srcs = ["lib/libclangFormat.a"],
+    hdrs = glob(["clang/Format/**"]),
+    deps = [
+        ":clang_basic",
+        ":clang_lex",
+        ":clang_toolingcore",
+        ":clang_toolinginclusions",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_frontend",
+    srcs = ["lib/libclangFrontend.a"],
+    hdrs = glob(["clang/Frontend/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_basic",
+        ":clang_driver",
+        ":clang_edit",
+        ":clang_lex",
+        ":clang_parse",
+        ":clang_sema",
+        ":clang_serialization",
+        ":llvm_bitreader",
+        ":llvm_option",
+        ":llvm_profiledata",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_lex",
+    srcs = ["lib/libclangLex.a"],
+    hdrs = glob(["clang/Lex/**"]),
+    deps = [
+        ":clang_basic",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_parse",
+    srcs = ["lib/libclangParse.a"],
+    hdrs = glob(["clang/Parse/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_basic",
+        ":clang_lex",
+        ":clang_sema",
+        ":llvm_mc",
+        ":llvm_mcparser",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_rewrite",
+    srcs = ["lib/libclangRewrite.a"],
+    hdrs = glob(["clang/Rewrite/**"]),
+    deps = [
+        ":clang_basic",
+        ":clang_lex",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_sema",
+    srcs = ["lib/libclangSema.a"],
+    hdrs = glob(["clang/Sema/**"]),
+    deps = [
+        ":clang_analysis",
+        ":clang_ast",
+        ":clang_basic",
+        ":clang_edit",
+        ":clang_lex",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_serialization",
+    srcs = ["lib/libclangSerialization.a"],
+    hdrs = glob(["clang/Serialization/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_basic",
+        ":clang_lex",
+        ":clang_sema",
+        ":llvm_bitreader",
+        ":llvm_bitstreamreader",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_tooling",
+    srcs = ["lib/libclangTooling.a"],
+    hdrs = glob(["clang/Tooling/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_astmatchers",
+        ":clang_basic",
+        ":clang_driver",
+        ":clang_format",
+        ":clang_frontend",
+        ":clang_lex",
+        ":clang_rewrite",
+        ":clang_serialization",
+        ":clang_toolingcore",
+        ":llvm_option",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_toolingcore",
+    srcs = ["lib/libclangToolingCore.a"],
+    hdrs = glob(["clang/Tooling/Core/**"]),
+    deps = [
+        ":clang_ast",
+        ":clang_basic",
+        ":clang_lex",
+        ":clang_rewrite",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "clang_toolinginclusions",
+    srcs = ["lib/libclangToolingInclusions.a"],
+    hdrs = glob(["clang/Tooling/Inclusions/**"]),
+    deps = [
+        ":clang_basic",
+        ":clang_lex",
+        ":clang_rewrite",
+        ":clang_toolingcore",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_analysis",
+    srcs = ["lib/libLLVMAnalysis.a"],
+    hdrs = glob(["llvm/Analysis/**"]),
+    deps = [
+        ":llvm_binaryformat",
+        ":llvm_core",
+        ":llvm_object",
+        ":llvm_profiledata",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_binaryformat",
+    srcs = ["lib/libLLVMBinaryFormat.a"],
+    hdrs = glob(["llvm/BinaryFormat/**"]),
+    deps = [":llvm_support"],
+)
+
+cc_library(
+    name = "llvm_bitreader",
+    srcs = ["lib/libLLVMBitReader.a"],
+    hdrs = glob(["llvm/Bitcode/**"]),
+    deps = [
+        ":llvm_bitstreamreader",
+        ":llvm_core",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_bitstreamreader",
+    srcs = ["lib/libLLVMBitstreamReader.a"],
+    hdrs = glob(["llvm/Bitstream/**"]),
+    deps = [
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_core",
+    srcs = ["lib/libLLVMCore.a"],
+    hdrs = glob([
+        "llvm/ADT/**",
+        "llvm/IR/**",
+        "llvm/*",
+        "llvm-c/**",
+    ]),
+    deps = [
+        ":llvm_binaryformat",
+        ":llvm_remarks",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_demangle",
+    srcs = ["lib/libLLVMDemangle.a"],
+    hdrs = glob(["llvm/Demangle/**"]),
+)
+
+cc_library(
+    name = "llvm_mc",
+    srcs = ["lib/libLLVMMC.a"],
+    hdrs = glob(["llvm/MC/**"]),
+    deps = [
+        ":llvm_binaryformat",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_mcparser",
+    srcs = ["lib/libLLVMMCParser.a"],
+    hdrs = glob(["llvm/MC/MCParser/**"]),
+    deps = [
+        ":llvm_mc",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_object",
+    srcs = ["lib/libLLVMObject.a"],
+    hdrs = glob(["llvm/Object/**"]),
+    deps = [
+        ":llvm_binaryformat",
+        ":llvm_bitreader",
+        ":llvm_core",
+        ":llvm_mc",
+        ":llvm_mcparser",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_option",
+    srcs = ["lib/libLLVMOption.a"],
+    hdrs = glob(["llvm/Option/**"]),
+    deps = [
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_profiledata",
+    srcs = ["lib/libLLVMProfileData.a"],
+    hdrs = glob(["llvm/ProfileData/**"]),
+    deps = [
+        ":llvm_core",
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_remarks",
+    srcs = ["lib/libLLVMRemarks.a"],
+    hdrs = glob(["llvm/Remarks/**"]),
+    deps = [
+        ":llvm_support",
+    ],
+)
+
+cc_library(
+    name = "llvm_support",
+    srcs = ["lib/libLLVMSupport.a"],
+    hdrs = glob([
+        "llvm/Config/**",
+        "llvm/Support/**",
+    ]),
+    linkopts = [
+        "-lcurses",
+        "-lpthread",
+        "-lz",
+    ],
+    deps = [
+        ":llvm_demangle",
+    ],
+)
+
+cc_library(
+    name = "llvm_target",
+    srcs = ["lib/libLLVMTarget.a"],
+    hdrs = glob(["llvm/Target/**"]),
+    deps = [
+        ":llvm_analysis",
+        ":llvm_core",
+        ":llvm_mc",
+        ":llvm_support",
+    ],
+)
diff --git a/tools/clang_tools/support/clang_tools.bzl b/tools/clang_tools/support/clang_tools.bzl
new file mode 100644
index 000000000000..b8c2cd41b8ab
--- /dev/null
+++ b/tools/clang_tools/support/clang_tools.bzl
@@ -0,0 +1,10 @@
+def envoy_clang_tools_cc_binary(name, copts = [], tags = [], **kwargs):
+    native.cc_binary(
+        name = name,
+        copts = copts + [
+            "-fno-exceptions",
+            "-fno-rtti",
+        ],
+        tags = tags + ["manual"],
+        **kwargs
+    )
diff --git a/tools/clang_tools/syntax_only/BUILD b/tools/clang_tools/syntax_only/BUILD
new file mode 100644
index 000000000000..2f8f2b2ab37f
--- /dev/null
+++ b/tools/clang_tools/syntax_only/BUILD
@@ -0,0 +1,13 @@
+load("//clang_tools/support:clang_tools.bzl", "envoy_clang_tools_cc_binary")
+
+licenses(["notice"])  # Apache 2
+
+envoy_clang_tools_cc_binary(
+    name = "syntax_only",
+    srcs = ["main.cc"],
+    deps = [
+        "@clang_tools//:clang_astmatchers",
+        "@clang_tools//:clang_basic",
+        "@clang_tools//:clang_tooling",
+    ],
+)
diff --git a/tools/clang_tools/syntax_only/main.cc b/tools/clang_tools/syntax_only/main.cc
new file mode 100644
index 000000000000..0b239580b679
--- /dev/null
+++ b/tools/clang_tools/syntax_only/main.cc
@@ -0,0 +1,35 @@
+// This is a copy of the Hello World-style syntax check tool described in
+// https://clang.llvm.org/docs/LibTooling.html. Its purpose is to provide an
+// example of how to build and run libtooling based Envoy developer tools.
+//
+// NOLINT(namespace-envoy)
+
+// Declares clang::SyntaxOnlyAction.
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Tooling/CommonOptionsParser.h"
+#include "clang/Tooling/Tooling.h"
+
+// Declares llvm::cl::extrahelp.
+#include "llvm/Support/CommandLine.h"
+
+using namespace clang::tooling;
+using namespace llvm;
+
+int main(int argc, const char** argv) {
+
+  // Apply a custom category to all command-line options so that they are the
+  // only ones displayed.
+  llvm::cl::OptionCategory MyToolCategory("my-tool options");
+
+  // CommonOptionsParser declares HelpMessage with a description of the common
+  // command-line options related to the compilation database and input files.
+  // It's nice to have this help message in all tools.
+  cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage);
+
+  // A help message for this specific tool can be added afterwards.
+  cl::extrahelp MoreHelp("\nMore help text...\n");
+
+  CommonOptionsParser OptionsParser(argc, argv, MyToolCategory);
+  ClangTool Tool(OptionsParser.getCompilations(), OptionsParser.getSourcePathList());
+  return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>().get());
+}
diff --git a/tools/deprecate_features/deprecate_features.py b/tools/deprecate_features/deprecate_features.py
index 203d4d2a99db..3da00632ba9f 100644
--- a/tools/deprecate_features/deprecate_features.py
+++ b/tools/deprecate_features/deprecate_features.py
@@ -1,4 +1,4 @@
-# A simple script to snag deprecated proto fields and add them to runtime_features.h
+# A simple script to snag deprecated proto fields and add them to runtime_features.cc
 
 from __future__ import print_function
 import re
@@ -16,7 +16,8 @@ def deprecate_proto():
 
   # Compile the set of deprecated fields and the files they're in, deduping via set.
   deprecated_regex = re.compile(r'.*\/([^\/]*.proto):[^=]* ([^= ]+) =.*')
-  for line in grep_output.splitlines():
+  for byte_line in grep_output.splitlines():
+    line = str(byte_line)
     match = deprecated_regex.match(line)
     if match:
       filenames_and_fields.add(tuple([match.group(1), match.group(2)]))
@@ -46,46 +47,11 @@ def deprecate_proto():
   return email, code
 
 
-# Sorts out the list of features which should be default enabled and returns a tuple of
-# email and code changes.
-def flip_runtime_features():
-  grep_output = subprocess.check_output('grep -r "envoy.reloadable_features\." source/*',
-                                        shell=True)
-
-  features_to_flip = set()
-
-  # Compile the set of features to flip, deduping via set.
-  deprecated_regex = re.compile(r'.*"(envoy.reloadable_features\.[^"]+)".*')
-  for line in grep_output.splitlines():
-    match = deprecated_regex.match(line)
-    if match:
-      features_to_flip.add(match.group(1))
-    else:
-      print('no match in ' + line + ' please address manually!')
-
-  # Exempt the two test flags.
-  features_to_flip.remove('envoy.reloadable_features.my_feature_name')
-  features_to_flip.remove('envoy.reloadable_features.test_feature_true')
-
-  code_snippets = []
-  email_snippets = []
-  for (feature) in features_to_flip:
-    code_snippets.append('    "' + feature + '",\n')
-    email_snippets.append(feature + '\n')
-  code = ''.join(code_snippets)
-  email = ''
-  if email_snippets:
-    email = 'the following features will be defaulted to true:\n' + ''.join(email_snippets)
-
-  return email, code
-
-
 # Gather code and suggested email changes.
-runtime_email, runtime_features_code = flip_runtime_features()
 deprecate_email, deprecate_code = deprecate_proto()
 
 email = ('The Envoy maintainer team is cutting the next Envoy release.  In the new release ' +
-         runtime_email + deprecate_email)
+         deprecate_email)
 
 print('\n\nSuggested envoy-announce email: \n')
 print(email)
@@ -94,8 +60,6 @@ def flip_runtime_features():
   exit(1)
 
 for line in fileinput.FileInput('source/common/runtime/runtime_features.cc', inplace=1):
-  if 'envoy.reloadable_features.test_feature_true' in line:
-    line = line.replace(line, line + runtime_features_code)
   if 'envoy.deprecated_features.deprecated.proto:is_deprecated_fatal' in line:
     line = line.replace(line, line + deprecate_code)
   print(line, end='')
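One subtlety in the new splitlines loop: on Python 3, `str(byte_line)` yields strings such as `"b'api/foo.proto: ...'"`, and the regex only matches because its leading and trailing `.*` absorb the wrapper. A decode-based sketch avoids the wrapper entirely; the shortened grep command below is a hypothetical stand-in for the one in `deprecate_proto()`.

```python
import re
import subprocess

deprecated_regex = re.compile(r'.*\/([^\/]*.proto):[^=]* ([^= ]+) =.*')

# Shortened stand-in for the grep in deprecate_proto(); '|| true' keeps the call
# from raising when there are no matches.
grep_output = subprocess.check_output('grep -r "deprecated = true" api/ || true', shell=True)
for line in grep_output.decode('utf-8').splitlines():
  match = deprecated_regex.match(line)
  if match:
    print(match.group(1), match.group(2))
```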
diff --git a/tools/deprecate_features/requirements.txt b/tools/deprecate_features/requirements.txt
index dc2a917a768e..b1013c19345c 100644
--- a/tools/deprecate_features/requirements.txt
+++ b/tools/deprecate_features/requirements.txt
@@ -1,2 +1 @@
-GitPython==3.0.0
-PyGithub==1.43.8
+six==1.12.0
diff --git a/tools/envoy_build_fixer.py b/tools/envoy_build_fixer.py
index 1a18778d39c6..0b63d9fa2d48 100755
--- a/tools/envoy_build_fixer.py
+++ b/tools/envoy_build_fixer.py
@@ -150,8 +150,10 @@ def FixApiDeps(path, contents):
     actual_api_deps = set(['@envoy_api//%s:pkg_cc_proto' % h for h in api_hdrs])
     existing_api_deps = set([])
     if deps != 'missing':
-      existing_api_deps = set(
-          [d for d in deps.split() if d.startswith('@envoy_api//') and d.endswith('pkg_cc_proto')])
+      existing_api_deps = set([
+          d for d in deps.split() if d.startswith('@envoy_api//') and d.endswith('pkg_cc_proto') and
+          d != '@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto'
+      ])
     deps_to_remove = existing_api_deps.difference(actual_api_deps)
     if deps_to_remove:
       deps_mutation_cmds.append(('remove deps %s' % ' '.join(deps_to_remove), name))
diff --git a/tools/proto_format.sh b/tools/proto_format.sh
index 96e9de54d158..4ee93b75eabf 100755
--- a/tools/proto_format.sh
+++ b/tools/proto_format.sh
@@ -14,21 +14,15 @@ rm -rf bazel-bin/external/envoy_api
 declare -r PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, @envoy_api//docs:protos))")
 
 # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files.
-BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all --strategy=protoxform=sandboxed,local"
+BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all"
 
 # TODO(htuch): This script started life by cloning docs/build.sh. It depends on
 # the @envoy_api//docs:protos target in a few places as a result. This is not
 # guaranteed to be the precise set of protos we want to format, but as a
 # starting place it seems reasonable. In the future, we should change the logic
 # here.
-bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \
-  tools/type_whisperer/type_whisperer.bzl%type_whisperer_aspect --output_groups=types_pb_text \
-  --host_force_python=PY3
-declare -x -r TYPE_DB_PATH="${PWD}"/source/common/config/api_type_db.generated.pb_text
-bazel run ${BAZEL_BUILD_OPTIONS} //tools/type_whisperer:typedb_gen -- \
-  ${PWD} ${TYPE_DB_PATH} ${PROTO_TARGETS}
-bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \
-  tools/protoxform/protoxform.bzl%protoxform_aspect --output_groups=proto --action_env=CPROFILE_ENABLED=1 \
-  --action_env=TYPE_DB_PATH --host_force_python=PY3
+bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=@envoy_api//docs:protos \
+  @envoy_api//docs:protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto \
+  --action_env=CPROFILE_ENABLED=1 --host_force_python=PY3
 
 ./tools/proto_sync.py "$1" ${PROTO_TARGETS}
diff --git a/tools/proto_sync.py b/tools/proto_sync.py
index 5aaa0c8c0b97..92b7a2cad7b8 100755
--- a/tools/proto_sync.py
+++ b/tools/proto_sync.py
@@ -3,6 +3,7 @@
 # Diff or copy protoxform artifacts from Bazel cache back to the source tree.
 
 import os
+import pathlib
 import re
 import shutil
 import string
@@ -43,6 +44,7 @@
 
 IMPORT_REGEX = re.compile('import "(.*)";')
 SERVICE_REGEX = re.compile('service \w+ {')
+PREVIOUS_MESSAGE_TYPE_REGEX = re.compile(r'previous_message_type\s+=\s+"([^"]*)";')
 
 
 class ProtoSyncError(Exception):
@@ -147,6 +149,10 @@ def GetImportDeps(proto_path):
         # We can ignore imports provided implicitly by api_proto_package().
         if any(import_path.startswith(p) for p in API_BUILD_SYSTEM_IMPORT_PREFIXES):
           continue
+        # Special case handling for in-built versioning annotations.
+        if import_path == 'udpa/annotations/versioning.proto':
+          imports.append('@com_github_cncf_udpa//udpa/annotations:pkg')
+          continue
         # Explicit remapping for external deps, compute paths for envoy/*.
         if import_path in external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP:
           imports.append(external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP[import_path])
@@ -163,6 +169,26 @@ def GetImportDeps(proto_path):
   return imports
 
 
+def GetPreviousMessageTypeDeps(proto_path):
+  """Obtain the Bazel dependencies for the previous version of messages in a .proto file.
+
+  We need to link in earlier proto descriptors to support Envoy reflection upgrades.
+
+  Args:
+    proto_path: path to .proto.
+
+  Returns:
+    A list of Bazel targets reflecting the previous message types in the .proto at proto_path.
+  """
+  contents = pathlib.Path(proto_path).read_text(encoding='utf8')
+  matches = re.findall(PREVIOUS_MESSAGE_TYPE_REGEX, contents)
+  deps = []
+  for m in matches:
+    target = '//%s:pkg' % '/'.join(s for s in m.split('.') if s and s[0].islower())
+    deps.append(target)
+  return deps
+
+
 def HasServices(proto_path):
   """Does a .proto file have any service definitions?
 
@@ -190,17 +216,19 @@ def BuildFileContents(root, files):
     A string containing the canonical BUILD file content for root.
   """
   import_deps = set(sum([GetImportDeps(os.path.join(root, f)) for f in files], []))
+  history_deps = set(sum([GetPreviousMessageTypeDeps(os.path.join(root, f)) for f in files], []))
+  deps = import_deps.union(history_deps)
   has_services = any(HasServices(os.path.join(root, f)) for f in files)
   fields = []
   if has_services:
     fields.append('    has_services = True,')
-  if import_deps:
-    if len(import_deps) == 1:
-      formatted_deps = '"%s"' % list(import_deps)[0]
+  if deps:
+    if len(deps) == 1:
+      formatted_deps = '"%s"' % list(deps)[0]
     else:
       formatted_deps = '\n' + '\n'.join(
           '        "%s",' % dep
-          for dep in sorted(import_deps, key=lambda s: s.replace(':', '!'))) + '\n    '
+          for dep in sorted(deps, key=lambda s: s.replace(':', '!'))) + '\n    '
     fields.append('    deps = [%s],' % formatted_deps)
   formatted_fields = '\n' + '\n'.join(fields) + '\n' if fields else ''
   return BUILD_FILE_TEMPLATE.substitute(fields=formatted_fields)
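A worked example of the target derivation in `GetPreviousMessageTypeDeps()`: the lower-cased segments of the annotated type name become the package path, the message name itself is dropped, and the package's `:pkg` target is emitted. The .proto snippet below is hypothetical.

```python
import re

PREVIOUS_MESSAGE_TYPE_REGEX = re.compile(r'previous_message_type\s+=\s+"([^"]*)";')

# Hypothetical protoxform output carrying the versioning annotation.
contents = '''
message Cluster {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster";
}
'''

for m in re.findall(PREVIOUS_MESSAGE_TYPE_REGEX, contents):
  # Keep only the lower-cased (package) segments of the type name.
  target = '//%s:pkg' % '/'.join(s for s in m.split('.') if s and s[0].islower())
  print(target)  # //envoy/api/v2:pkg
```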
diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py
index eb9dbda8ceed..4d183c1ecd4e 100755
--- a/tools/protodoc/protodoc.py
+++ b/tools/protodoc/protodoc.py
@@ -562,7 +562,7 @@ def VisitFile(self, file_proto, type_context, services, msgs, enums):
 
 
 def Main():
-  plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor())])
+  plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)])
 
 
 if __name__ == '__main__':
diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD
index 9d64587e02b7..b69b436d2b81 100644
--- a/tools/protoxform/BUILD
+++ b/tools/protoxform/BUILD
@@ -15,6 +15,7 @@ py_binary(
         "//tools/type_whisperer",
         "//tools/type_whisperer:api_type_db_proto_py_proto",
         "@com_envoyproxy_protoc_gen_validate//validate:validate_py",
+        "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto",
         "@com_google_googleapis//google/api:annotations_py_proto",
         "@com_google_protobuf//:protobuf_python",
     ],
diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py
index 3fd184ab7dcf..6713f969248a 100644
--- a/tools/protoxform/migrate.py
+++ b/tools/protoxform/migrate.py
@@ -88,6 +88,7 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
     upgraded_proto = copy.deepcopy(msg_proto)
     if upgraded_proto.options.deprecated:
       options.AddHideOption(upgraded_proto.options)
+    options.SetVersioningAnnotation(upgraded_proto.options, type_context.name)
     # Mark deprecated fields as ready for deletion by protoxform.
     for f in upgraded_proto.field:
       if f.options.deprecated:
@@ -152,7 +153,7 @@ def V3MigrationXform(file_proto):
     v3 FileDescriptorProto message.
   """
   # Load type database.
-  typedb = utils.LoadTypeDb()
+  typedb = utils.GetTypeDb()
   # If this isn't a proto in an upgraded package, return None.
   if file_proto.package not in typedb.next_version_packages or not typedb.next_version_packages[
       file_proto.package]:
diff --git a/tools/protoxform/options.py b/tools/protoxform/options.py
index d0a3c5fa827c..1be994f0c1e4 100644
--- a/tools/protoxform/options.py
+++ b/tools/protoxform/options.py
@@ -1,5 +1,7 @@
 # Manage internal options on messages/enums/fields/enum values.
 
+from udpa.annotations import versioning_pb2
+
 
 def AddHideOption(options):
   """Mark message/enum/field/enum value as hidden.
@@ -25,3 +27,30 @@ def HasHideOption(options):
   """
   return any(
       option.name[0].name_part == 'protoxform_hide' for option in options.uninterpreted_option)
+
+
+def SetVersioningAnnotation(options, previous_message_type):
+  """Set the udpa.annotations.versioning option.
+
+  Used by Envoy to chain back through the message type history.
+
+  Args:
+    options: MessageOptions message.
+    previous_message_type: string with earlier API type name for the message.
+  """
+  options.Extensions[versioning_pb2.versioning].previous_message_type = previous_message_type
+
+
+def GetVersioningAnnotation(options):
+  """Get the udpa.annotations.versioning option.
+
+  Used by Envoy to chain back through the message type history.
+
+  Args:
+    options: MessageOptions message.
+  Returns:
+    versioning.Annotation if set otherwise None.
+  """
+  if not options.HasExtension(versioning_pb2.versioning):
+    return None
+  return options.Extensions[versioning_pb2.versioning]
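A minimal round-trip sketch of the two helpers above. It assumes the udpa `versioning_pb2` Python bindings and the Envoy repo layout are importable, as they are when run under the protoxform Bazel Python environment; the type name used is just an example.

```python
from google.protobuf import descriptor_pb2

from tools.protoxform import options as protoxform_options

# Attach the versioning annotation to an otherwise empty MessageOptions.
msg_options = descriptor_pb2.MessageOptions()
protoxform_options.SetVersioningAnnotation(msg_options, 'envoy.api.v2.Cluster')

annotation = protoxform_options.GetVersioningAnnotation(msg_options)
print(annotation.previous_message_type)  # envoy.api.v2.Cluster

# Options without the extension return None.
print(protoxform_options.GetVersioningAnnotation(descriptor_pb2.MessageOptions()))  # None
```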
diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl
index d8df4c439705..09e8eb434d71 100644
--- a/tools/protoxform/protoxform.bzl
+++ b/tools/protoxform/protoxform.bzl
@@ -9,4 +9,4 @@ def _protoxform_impl(target, ctx):
 #
 #   bazel build //api --aspects tools/protoxform/protoxform.bzl%protoxform_aspect \
 #       --output_groups=proto
-protoxform_aspect = api_proto_plugin_aspect("//tools/protoxform", _protoxform_impl)
+protoxform_aspect = api_proto_plugin_aspect("//tools/protoxform", _protoxform_impl, use_type_db = True)
diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py
index 06ceb701940a..8d489e3ea287 100755
--- a/tools/protoxform/protoxform.py
+++ b/tools/protoxform/protoxform.py
@@ -11,6 +11,7 @@
 
 from collections import deque
 import functools
+import io
 import os
 import re
 import subprocess
@@ -20,14 +21,18 @@
 from tools.api_proto_plugin import traverse
 from tools.api_proto_plugin import visitor
 from tools.protoxform import migrate
-from tools.protoxform import options
+from tools.protoxform import options as protoxform_options
 from tools.protoxform import utils
 from tools.type_whisperer import type_whisperer
 from tools.type_whisperer.types_pb2 import Types
 
-from google.api import annotations_pb2
+from google.protobuf import descriptor_pb2
 from google.protobuf import text_format
-from validate import validate_pb2
+
+# Note: we have to include these proto definitions to make FormatOptions work;
+# they also serve as the whitelist of extension options we format.
+from google.api import annotations_pb2 as _
+from validate import validate_pb2 as _
 
 CLANG_FORMAT_STYLE = ('{ColumnLimit: 100, SpacesInContainerLiterals: false, '
                       'AllowShortFunctionsOnASingleLine: false}')
@@ -143,7 +148,7 @@ def FormatHeaderFromFile(source_code_info, file_proto):
     Formatted proto header as a string.
   """
   # Load the type database.
-  typedb = utils.LoadTypeDb()
+  typedb = utils.GetTypeDb()
   # Figure out type dependencies in this .proto.
   types = Types()
   text_format.Merge(traverse.TraverseFile(file_proto, type_whisperer.TypeWhispererVisitor()), types)
@@ -163,24 +168,26 @@ def CamelCase(s):
   package_line = 'package %s;\n' % file_proto.package
   file_block = '\n'.join(['syntax = "proto3";\n', package_line])
 
-  options = [
-      'option java_outer_classname = "%s";' % CamelCase(os.path.basename(file_proto.name)),
-      'option java_multiple_files = true;',
-      'option java_package = "io.envoyproxy.%s";' % file_proto.package,
-  ]
+  options = descriptor_pb2.FileOptions()
+  options.java_outer_classname = CamelCase(os.path.basename(file_proto.name))
+  options.java_multiple_files = True
+  options.java_package = "io.envoyproxy." + file_proto.package
+
   # This is a workaround for C#/Ruby namespace conflicts between packages and
   # objects, see https://github.com/envoyproxy/envoy/pull/3854.
   # TODO(htuch): remove once v3 fixes this naming issue in
   # https://github.com/envoyproxy/envoy/issues/8120.
   if file_proto.package in ['envoy.api.v2.listener', 'envoy.api.v2.cluster']:
     qualified_package = '.'.join(s.capitalize() for s in file_proto.package.split('.')) + 'NS'
-    options += [
-        'option csharp_namespace = "%s";' % qualified_package,
-        'option ruby_package = "%s";' % qualified_package,
-    ]
+    options.csharp_namespace = qualified_package
+    options.ruby_package = qualified_package
+
   if file_proto.service:
-    options += ['option java_generic_services = true;']
-  options_block = FormatBlock('\n'.join(options))
+    options.java_generic_services = True
+  options_block = FormatOptions(options)
+
+  requires_versioning_import = any(
+      protoxform_options.GetVersioningAnnotation(m.options) for m in file_proto.message_type)
 
   envoy_imports = list(envoy_proto_paths)
   google_imports = []
@@ -196,9 +203,15 @@ def CamelCase(s):
       google_imports.append(d)
     elif d.startswith('validate/'):
       infra_imports.append(d)
+    elif d in ['udpa/annotations/versioning.proto']:
+      # Skip; whether to add this import is decided below based on requires_versioning_import.
+      pass
     else:
       misc_imports.append(d)
 
+  if requires_versioning_import:
+    misc_imports.append('udpa/annotations/versioning.proto')
+
   def FormatImportBlock(xs):
     if not xs:
       return ''
@@ -324,32 +337,12 @@ def FormatServiceMethod(type_context, method):
   def FormatStreaming(s):
     return 'stream ' if s else ''
 
-  def FormatHttpOptions(options):
-    if options.HasExtension(annotations_pb2.http):
-      http_options = options.Extensions[annotations_pb2.http]
-      return 'option (google.api.http) = { %s };' % str(http_options)
-    return ''
-
   leading_comment, trailing_comment = FormatTypeContextComments(type_context)
   return '%srpc %s(%s%s%s) returns (%s%s) {%s}\n' % (
       leading_comment, method.name, trailing_comment, FormatStreaming(
           method.client_streaming), NormalizeFieldTypeName(
               type_context, method.input_type), FormatStreaming(method.server_streaming),
-      NormalizeFieldTypeName(type_context, method.output_type), FormatHttpOptions(method.options))
-
-
-def FormatValidateFieldRules(rules):
-  """Format validate_pb2 rules.
-
-  Args:
-    rules: validate_pb2 rules proto.
-
-  Returns:
-    Formatted validation rules as string, suitable for proto field annotation.
-  """
-  return ' '.join('.%s = { %s }' %
-                  (field.name, text_format.MessageToString(value, as_one_line=True))
-                  for field, value in rules.ListFields())
+      NormalizeFieldTypeName(type_context, method.output_type), FormatOptions(method.options))
 
 
 def FormatField(type_context, field):
@@ -362,18 +355,12 @@ def FormatField(type_context, field):
   Returns:
     Formatted proto field as a string.
   """
-  if options.HasHideOption(field.options):
+  if protoxform_options.HasHideOption(field.options):
     return ''
   leading_comment, trailing_comment = FormatTypeContextComments(type_context)
-  annotations = []
-  if field.options.HasExtension(validate_pb2.rules):
-    rules = field.options.Extensions[validate_pb2.rules]
-    annotations.append('(validate.rules) %s' % FormatValidateFieldRules(rules))
-  if field.options.deprecated:
-    annotations.append('deprecated = true')
-  formatted_annotations = '[ %s]' % ','.join(annotations) if annotations else ''
-  return '%s%s %s = %d%s;\n%s' % (leading_comment, FormatFieldType(
-      type_context, field), field.name, field.number, formatted_annotations, trailing_comment)
+
+  return '%s%s %s = %d%s;\n%s' % (leading_comment, FormatFieldType(type_context, field), field.name,
+                                  field.number, FormatOptions(field.options), trailing_comment)
 
 
 def FormatEnumValue(type_context, value):
@@ -386,28 +373,58 @@ def FormatEnumValue(type_context, value):
   Returns:
     Formatted proto enum value as a string.
   """
-  if options.HasHideOption(value.options):
+  if protoxform_options.HasHideOption(value.options):
     return ''
   leading_comment, trailing_comment = FormatTypeContextComments(type_context)
-  annotations = []
-  if value.options.deprecated:
-    annotations.append('deprecated = true')
-  formatted_annotations = '[ %s]' % ','.join(annotations) if annotations else ''
+  formatted_annotations = FormatOptions(value.options)
   return '%s%s = %d%s;\n%s' % (leading_comment, value.name, value.number, formatted_annotations,
                                trailing_comment)
 
 
+def TextFormatValue(field, value):
+  """Format the value as protobuf text format
+
+  Args:
+    field: a FieldDescriptor that describes the field
+    value: the value stored in the field
+
+  Returns:
+    value in protobuf text format
+  """
+  out = io.StringIO()
+  text_format.PrintFieldValue(field, value, out)
+  return out.getvalue()
+
+
 def FormatOptions(options):
-  """Format MessageOptions/EnumOptions message.
+  """Format *Options (e.g. MessageOptions, FieldOptions) message.
 
   Args:
-    options: A MessageOptions/EnumOptions message.
+    options: A *Options (e.g. MessageOptions, FieldOptions) message.
 
   Returns:
     Formatted options as a string.
   """
-  if options.deprecated:
-    return FormatBlock('option deprecated = true;\n')
+
+  formatted_options = []
+  for option_descriptor, option_value in sorted(options.ListFields(), key=lambda x: x[0].number):
+    option_name = "({})".format(
+        option_descriptor.full_name) if option_descriptor.is_extension else option_descriptor.name
+    if option_descriptor.message_type and option_descriptor.label != option_descriptor.LABEL_REPEATED:
+      formatted_options.extend([
+          "{}.{} = {}".format(option_name, subfield.name, TextFormatValue(subfield, value))
+          for subfield, value in option_value.ListFields()
+      ])
+    else:
+      formatted_options.append("{} = {}".format(option_name,
+                                                TextFormatValue(option_descriptor, option_value)))
+
+  if formatted_options:
+    if options.DESCRIPTOR.name in ('EnumValueOptions', 'FieldOptions'):
+      return '[{}]'.format(','.join(formatted_options))
+    else:
+      return FormatBlock(''.join(
+          "option {};\n".format(formatted_option) for formatted_option in formatted_options))
   return ''
 
 
@@ -459,7 +476,7 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
     # Skip messages synthesized to represent map types.
     if msg_proto.options.map_entry:
       return ''
-    if options.HasHideOption(msg_proto.options):
+    if protoxform_options.HasHideOption(msg_proto.options):
       return ''
     annotation_xforms = {
         annotations.NEXT_FREE_FIELD_ANNOTATION: CreateNextFreeFieldXform(msg_proto)
@@ -482,14 +499,11 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
       if oneof_index is None and field.HasField('oneof_index'):
         oneof_index = field.oneof_index
         oneof_proto = msg_proto.oneof_decl[oneof_index]
-        if oneof_proto.options.HasExtension(validate_pb2.required):
-          oneof_options = 'option (validate.required) = true;\n\n'
-        else:
-          oneof_options = ''
         oneof_leading_comment, oneof_trailing_comment = FormatTypeContextComments(
             type_context.ExtendOneof(oneof_index, field.name))
         fields += '%soneof %s {\n%s%s' % (oneof_leading_comment, oneof_proto.name,
-                                          oneof_trailing_comment, oneof_options)
+                                          oneof_trailing_comment, FormatOptions(
+                                              oneof_proto.options))
       fields += FormatBlock(FormatField(type_context.ExtendField(index, field.name), field))
     if oneof_index is not None:
       fields += '}\n\n'
@@ -505,11 +519,17 @@ def VisitFile(self, file_proto, type_context, services, msgs, enums):
     return ClangFormat(header + formatted_services + formatted_enums + formatted_msgs)
 
 
+def ParameterCallback(parameter):
+  params = dict(param.split('=') for param in parameter.split(','))
+  if params["type_db_path"]:
+    utils.LoadTypeDb(params["type_db_path"])
+
+
 def Main():
   plugin.Plugin([
-      plugin.DirectOutputDescriptor('.v2.proto', ProtoFormatVisitor()),
-      plugin.OutputDescriptor('.v3alpha.proto', ProtoFormatVisitor(), migrate.V3MigrationXform)
-  ])
+      plugin.DirectOutputDescriptor('.v2.proto', ProtoFormatVisitor),
+      plugin.OutputDescriptor('.v3alpha.proto', ProtoFormatVisitor, migrate.V3MigrationXform)
+  ], ParameterCallback)
 
 
 if __name__ == '__main__':
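To illustrate the generalized `FormatOptions()` above: scalar options on `FieldOptions`/`EnumValueOptions` render in the inline bracketed form, while everything else becomes one `option ...;` statement per set field, ordered by field number. A sketch, assuming the protoxform module and its proto dependencies are importable in the Bazel Python environment.

```python
from google.protobuf import descriptor_pb2

# Assumes the protoxform module and its proto deps are importable (Bazel py env).
from tools.protoxform import protoxform

# FieldOptions render in the inline bracketed form used at field declarations.
field_options = descriptor_pb2.FieldOptions()
field_options.deprecated = True
print(protoxform.FormatOptions(field_options))  # [deprecated = true]

# FileOptions (and MessageOptions, etc.) render as standalone option statements.
file_options = descriptor_pb2.FileOptions()
file_options.java_package = 'io.envoyproxy.envoy.api.v2'
file_options.java_multiple_files = True
print(protoxform.FormatOptions(file_options))
# option java_package = "io.envoyproxy.envoy.api.v2";
# option java_multiple_files = true;
```

Separately, the new `ParameterCallback()` expects the protoc plugin parameter as comma-separated `key=value` pairs, e.g. `type_db_path=<path to the generated type database>`, and loads the type database from it.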
diff --git a/tools/protoxform/utils.py b/tools/protoxform/utils.py
index ca822b88d5a6..0413248baac3 100644
--- a/tools/protoxform/utils.py
+++ b/tools/protoxform/utils.py
@@ -4,9 +4,16 @@
 
 from google.protobuf import text_format
 
+_typedb = None
 
-def LoadTypeDb():
-  typedb = TypeDb()
-  with open(os.getenv('TYPE_DB_PATH'), 'r') as f:
-    text_format.Merge(f.read(), typedb)
-  return typedb
+
+def GetTypeDb():
+  assert _typedb is not None
+  return _typedb
+
+
+def LoadTypeDb(type_db_path):
+  global _typedb
+  _typedb = TypeDb()
+  with open(type_db_path, 'r') as f:
+    text_format.Merge(f.read(), _typedb)
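The module now caches the type database at module scope: `LoadTypeDb()` is called once by the plugin (via `ParameterCallback()` with the `type_db_path` parameter) and later callers use `GetTypeDb()`. A small usage sketch; the path below is a hypothetical placeholder.

```python
from tools.protoxform import utils

# Load once up front (the plugin does this in ParameterCallback), then reuse.
utils.LoadTypeDb('/tmp/api_type_db.pb_text')  # hypothetical path to a generated type database
typedb = utils.GetTypeDb()
print(typedb.DESCRIPTOR.full_name)  # the TypeDb message type name
```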
diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt
index 170b47e32f9b..f5b1dd134e81 100644
--- a/tools/spelling_dictionary.txt
+++ b/tools/spelling_dictionary.txt
@@ -19,6 +19,7 @@ BSON
 CAS
 CB
 CDS
+CEL
 CHACHA
 CHLO
 CHMOD
@@ -84,6 +85,7 @@ EXPECTs
 EXPR
 FAQ
 FASTOPEN
+FB
 FCDS
 FFFF
 FIN
@@ -284,6 +286,7 @@ UDP
 UDS
 URI
 URL
+USEVC
 UTC
 UTF
 UUID
@@ -967,6 +970,7 @@ superset
 symlink
 symlinked
 symlinks
+syncookie
 sys
 syscall
 syscalls
@@ -1059,6 +1063,7 @@ unreferenced
 unregister
 unregisters
 unresolvable
+unserializable
 unsetenv
 unsubscription
 unterminated
@@ -1124,3 +1129,4 @@ zag
 zig
 zipkin
 zlib
+OBQ
diff --git a/tools/testdata/check_format/unpack_to.cc b/tools/testdata/check_format/unpack_to.cc
new file mode 100644
index 000000000000..82c9745f057c
--- /dev/null
+++ b/tools/testdata/check_format/unpack_to.cc
@@ -0,0 +1,9 @@
+namespace Envoy {
+
+int foo() {
+  ProtobufWky::Any bar;
+  Protobuf::Message baz;
+  bar.UnpackTo(baz);
+}
+
+} // namespace Envoy
diff --git a/tools/type_whisperer/type_database.bzl b/tools/type_whisperer/type_database.bzl
new file mode 100644
index 000000000000..6311d27afc8e
--- /dev/null
+++ b/tools/type_whisperer/type_database.bzl
@@ -0,0 +1,44 @@
+load(":type_whisperer.bzl", "type_whisperer_aspect")
+
+def _type_database_impl(ctx):
+    type_db_deps = []
+    for target in ctx.attr.targets:
+        type_db_deps.append(target[OutputGroupInfo].types_pb_text)
+
+    type_db_deps = depset(transitive = type_db_deps)
+
+    args = [ctx.outputs.pb_text.path]
+    for dep in type_db_deps.to_list():
+        if dep.owner.workspace_name in ctx.attr.proto_repositories:
+            args.append(dep.path)
+
+    ctx.actions.run(
+        executable = ctx.executable._type_db_gen,
+        arguments = args,
+        inputs = type_db_deps,
+        outputs = [ctx.outputs.pb_text],
+        mnemonic = "TypeDbGen",
+        use_default_shell_env = True,
+    )
+
+type_database = rule(
+    attrs = {
+        "targets": attr.label_list(
+            aspects = [type_whisperer_aspect],
+            doc = "List of all proto_library target to be included.",
+        ),
+        "proto_repositories": attr.string_list(
+            default = ["envoy_api"],
+            allow_empty = False,
+        ),
+        "_type_db_gen": attr.label(
+            default = Label("//tools/type_whisperer:typedb_gen"),
+            executable = True,
+            cfg = "exec",
+        ),
+    },
+    outputs = {
+        "pb_text": "%{name}.pb_text",
+    },
+    implementation = _type_database_impl,
+)
diff --git a/tools/type_whisperer/type_whisperer.py b/tools/type_whisperer/type_whisperer.py
index 7fa90ce69dbe..8f5ab3a34883 100755
--- a/tools/type_whisperer/type_whisperer.py
+++ b/tools/type_whisperer/type_whisperer.py
@@ -46,7 +46,7 @@ def VisitFile(self, file_proto, type_context, services, msgs, enums):
 
 def Main():
   plugin.Plugin([
-      plugin.DirectOutputDescriptor('.types.pb_text', TypeWhispererVisitor()),
+      plugin.DirectOutputDescriptor('.types.pb_text', TypeWhispererVisitor),
   ])
 
 
diff --git a/tools/type_whisperer/typedb_gen.py b/tools/type_whisperer/typedb_gen.py
index 8d43d06a8c89..0c3aee88aaa2 100644
--- a/tools/type_whisperer/typedb_gen.py
+++ b/tools/type_whisperer/typedb_gen.py
@@ -7,7 +7,6 @@
 
 from google.protobuf import text_format
 
-from tools.api_proto_plugin.utils import BazelBinPathForOutputArtifact
 from tools.type_whisperer.api_type_db_pb2 import TypeDb
 from tools.type_whisperer.types_pb2 import Types, TypeDescription
 
@@ -118,18 +117,13 @@ def NextVersionUpgrade(type_name, type_map, next_version_upgrade_memo, visited=N
 
 
 if __name__ == '__main__':
-  # Root of source tree.
-  src_root = sys.argv[1]
   # Output path for type database.
-  out_path = sys.argv[2]
-  # Bazel labels for source .proto.
-  src_labels = sys.argv[3:]
-
-  # Load type descriptors for each .proto.
-  type_desc_paths = [
-      BazelBinPathForOutputArtifact(label, '.types.pb_text', root=src_root) for label in src_labels
-  ]
+  out_path = sys.argv[1]
+
+  # Load type descriptors for each type whisperer output.
+  type_desc_paths = sys.argv[2:]
   type_whispers = map(LoadTypes, type_desc_paths)
+
   # Aggregate type descriptors to a single type map.
   type_map = dict(sum([list(t.types.items()) for t in type_whispers], []))
   all_pkgs = set([type_desc.qualified_package for type_desc in type_map.values()])