diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml
index 02b31478f4ce..4efd687e546c 100644
--- a/.azure-pipelines/pipelines.yml
+++ b/.azure-pipelines/pipelines.yml
@@ -10,217 +10,389 @@ trigger:
# PR build config is manually overridden in Azure pipelines UI with different secrets
pr: none
-jobs:
- - job: format
- dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel.
- pool:
- vmImage: "ubuntu-18.04"
- steps:
- - task: Cache@2
- inputs:
- key: "format | ./WORKSPACE | **/*.bzl"
- path: $(Build.StagingDirectory)/repository_cache
- continueOnError: true
-
- - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh'
- workingDirectory: $(Build.SourcesDirectory)
- env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
- displayName: "Run check format scripts"
-
- - task: PublishBuildArtifacts@1
- inputs:
- pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff"
- artifactName: format
- condition: failed()
-
- - job: release
- displayName: "Linux-x64 release"
- dependsOn: ["format"]
- # For master builds, continue even if format fails
- condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest')))
- timeoutInMinutes: 360
- pool:
- vmImage: "ubuntu-18.04"
- steps:
- - template: bazel.yml
- parameters:
- ciTarget: bazel.release
-
- - job: release_arm64
- displayName: "Linux-arm64 release"
- dependsOn: ["format"]
- # For master builds, continue even if format fails
+stages:
+ - stage: precheck
+ jobs:
+ - job: format
+ dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel.
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - task: Cache@2
+ inputs:
+ key: "format | ./WORKSPACE | **/*.bzl"
+ path: $(Build.StagingDirectory)/repository_cache
+ continueOnError: true
+
+ - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh'
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
+ BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ displayName: "Run check format scripts"
+
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff"
+ artifactName: format
+ condition: failed()
+
+ - job: docs
+ dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel.
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - task: Cache@2
+ inputs:
+ key: "docs | ./WORKSPACE | **/*.bzl"
+ path: $(Build.StagingDirectory)/repository_cache
+ continueOnError: true
+
+ - script: ci/run_envoy_docker.sh 'ci/do_ci.sh docs'
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
+ BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ displayName: "Generate docs"
+
+ - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/docs docs'
+ displayName: "Upload Docs to GCS"
+ env:
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket)
+
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: "$(Build.SourcesDirectory)/generated/docs"
+ artifactName: docs
+ condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest'))
+
+ - task: InstallSSHKey@0
+ inputs:
+ hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=="
+ sshPublicKey: "$(DocsPublicKey)"
+ sshPassphrase: "$(SshDeployKeyPassphrase)"
+ sshKeySecureFile: "$(DocsPrivateKey)"
+ condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true))
+
+ - script: docs/publish.sh
+ displayName: "Publish to GitHub"
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ AZP_BRANCH: $(Build.SourceBranch)
+ AZP_SHA1: $(Build.SourceVersion)
+ condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true))
+
+ - stage: sync
+ condition: and(succeeded(), eq(variables['PostSubmit'], true))
+ dependsOn: []
+ jobs:
+ - job: filter_example
+ dependsOn: []
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - task: InstallSSHKey@0
+ inputs:
+ hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=="
+ sshPublicKey: "$(FilterExamplePublicKey)"
+ sshPassphrase: "$(SshDeployKeyPassphrase)"
+ sshKeySecureFile: "$(FilterExamplePrivateKey)"
+
+ - bash: ci/filter_example_mirror.sh
+ displayName: "Sync envoy-filter-example"
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ AZP_BRANCH: $(Build.SourceBranch)
+
+ - job: data_plane_api
+ dependsOn: []
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - task: InstallSSHKey@0
+ inputs:
+ hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=="
+ sshPublicKey: "$(DataPlaneApiPublicKey)"
+ sshPassphrase: "$(SshDeployKeyPassphrase)"
+ sshKeySecureFile: "$(DataPlaneApiPrivateKey)"
+
+ - bash: ci/api_mirror.sh
+ displayName: "Sync data-plane-api"
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ AZP_BRANCH: $(Build.SourceBranch)
+
+ - job: go_control_plane
+ dependsOn: []
+ steps:
+ - task: InstallSSHKey@0
+ inputs:
+ hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=="
+ sshPublicKey: "$(GoControlPlanePublicKey)"
+ sshPassphrase: "$(SshDeployKeyPassphrase)"
+ sshKeySecureFile: "$(GoControlPlanePrivateKey)"
+
+ - bash: |
+ cp -a ~/.ssh $(Build.StagingDirectory)/
+ ci/run_envoy_docker.sh 'ci/go_mirror.sh'
+ displayName: "Sync go-control-plane"
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
+ BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ AZP_BRANCH: $(Build.SourceBranch)
+
+ - stage: linux_x64
+ dependsOn: ["precheck"]
+ # For master builds, continue even if precheck fails
condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest')))
- timeoutInMinutes: 360
- pool: "arm-large"
- steps:
- - template: bazel.yml
- parameters:
- managedAgent: false
- ciTarget: bazel.release
- rbe: false
- artifactSuffix: ".arm64"
- bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base"
-
- - job: bazel
- displayName: "Linux-x64"
- dependsOn: ["release"]
- # For master builds, continue even if format fails
+ jobs:
+ - job: release
+ timeoutInMinutes: 360
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - template: bazel.yml
+ parameters:
+ ciTarget: bazel.release
+
+ - stage: linux_arm64
+ dependsOn: ["precheck"]
+ # For master builds, continue even if precheck fails
condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest')))
- strategy:
- maxParallel: 3
- matrix:
- gcc:
- CI_TARGET: "bazel.gcc"
- clang_tidy:
- CI_TARGET: "bazel.clang_tidy"
- asan:
- CI_TARGET: "bazel.asan"
- tsan:
- CI_TARGET: "bazel.tsan"
- compile_time_options:
- CI_TARGET: "bazel.compile_time_options"
- timeoutInMinutes: 360
- pool:
- vmImage: "ubuntu-18.04"
- steps:
- - template: bazel.yml
- parameters:
- ciTarget: $(CI_TARGET)
-
- - job: coverage
- displayName: "Linux-x64"
- dependsOn: ["release"]
- timeoutInMinutes: 360
- pool: "x64-large"
- strategy:
- maxParallel: 2
- matrix:
- coverage:
- CI_TARGET: "coverage"
- fuzz_coverage:
- CI_TARGET: "fuzz_coverage"
- steps:
- - template: bazel.yml
- parameters:
- managedAgent: false
- ciTarget: bazel.$(CI_TARGET)
- rbe: false
- # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces
- bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base"
-
- - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)'
- displayName: "Upload $(CI_TARGET) Report to GCS"
- env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
- GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket)
- condition: always()
-
- - job: docker
- displayName: "Linux multi-arch docker"
- dependsOn: ["release", "release_arm64"]
- pool:
- vmImage: "ubuntu-18.04"
- steps:
- - bash: .azure-pipelines/cleanup.sh
- displayName: "Removing tools from agent"
- - task: DownloadBuildArtifacts@0
- inputs:
- buildType: current
- artifactName: "bazel.release"
- itemPattern: "bazel.release/envoy_binary.tar.gz"
- downloadType: single
- targetPath: $(Build.StagingDirectory)
- - task: DownloadBuildArtifacts@0
- inputs:
- buildType: current
- artifactName: "bazel.release.arm64"
- itemPattern: "bazel.release.arm64/envoy_binary.tar.gz"
- downloadType: single
- targetPath: $(Build.StagingDirectory)
- - bash: |
- set -e
- mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64
- mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64
- ci/docker_ci.sh
- workingDirectory: $(Build.SourcesDirectory)
- env:
- AZP_BRANCH: $(Build.SourceBranch)
- AZP_SHA1: $(Build.SourceVersion)
- DOCKERHUB_USERNAME: $(DockerUsername)
- DOCKERHUB_PASSWORD: $(DockerPassword)
- - task: PublishBuildArtifacts@1
- inputs:
- pathtoPublish: "$(Build.StagingDirectory)/build_images"
- artifactName: docker
- condition: always()
-
- - job: examples
+ jobs:
+ - job: release
+ timeoutInMinutes: 360
+ pool: "arm-large"
+ steps:
+ - template: bazel.yml
+ parameters:
+ managedAgent: false
+ ciTarget: bazel.release
+ rbe: false
+ artifactSuffix: ".arm64"
+ bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base"
+
+ - stage: check
+ dependsOn: ["linux_x64"]
+ jobs:
+ - job: bazel
+ displayName: "linux_x64"
+ dependsOn: []
+ strategy:
+ maxParallel: 3
+ matrix:
+ api:
+ CI_TARGET: "bazel.api"
+ gcc:
+ CI_TARGET: "bazel.gcc"
+ clang_tidy:
+ CI_TARGET: "bazel.clang_tidy"
+ asan:
+ CI_TARGET: "bazel.asan"
+ tsan:
+ CI_TARGET: "bazel.tsan"
+ compile_time_options:
+ CI_TARGET: "bazel.compile_time_options"
+ timeoutInMinutes: 360
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - template: bazel.yml
+ parameters:
+ ciTarget: $(CI_TARGET)
+
+ - job: coverage
+ displayName: "linux_x64"
+ dependsOn: []
+ timeoutInMinutes: 360
+ pool: "x64-large"
+ strategy:
+ maxParallel: 2
+ matrix:
+ coverage:
+ CI_TARGET: "coverage"
+ fuzz_coverage:
+ CI_TARGET: "fuzz_coverage"
+ steps:
+ - template: bazel.yml
+ parameters:
+ managedAgent: false
+ ciTarget: bazel.$(CI_TARGET)
+ rbe: false
+ # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces
+ bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base"
+
+ - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)'
+ displayName: "Upload $(CI_TARGET) Report to GCS"
+ env:
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket)
+ condition: always()
+
+ - stage: docker
+ dependsOn: ["linux_x64", "linux_arm64"]
+ jobs:
+ - job: docker
+ displayName: "linux multiarch"
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - bash: .azure-pipelines/cleanup.sh
+ displayName: "Removing tools from agent"
+ - bash: |
+ echo "disk space at beginning of build:"
+ df -h
+ displayName: "Check disk space at beginning"
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ buildType: current
+ artifactName: "bazel.release"
+ itemPattern: "bazel.release/envoy_binary.tar.gz"
+ downloadType: single
+ targetPath: $(Build.StagingDirectory)
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ buildType: current
+ artifactName: "bazel.release.arm64"
+ itemPattern: "bazel.release.arm64/envoy_binary.tar.gz"
+ downloadType: single
+ targetPath: $(Build.StagingDirectory)
+ - bash: |
+ set -e
+ mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64
+ mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64
+ ci/docker_ci.sh
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ AZP_BRANCH: $(Build.SourceBranch)
+ AZP_SHA1: $(Build.SourceVersion)
+ DOCKERHUB_USERNAME: $(DockerUsername)
+ DOCKERHUB_PASSWORD: $(DockerPassword)
+ - bash: |
+ echo "disk space at end of build:"
+ df -h
+ displayName: "Check disk space at end"
+ condition: always()
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: "$(Build.StagingDirectory)/build_images"
+ artifactName: docker
+ condition: always()
+
+ - stage: verify
dependsOn: ["docker"]
- displayName: "Verify examples run as documented"
- pool:
- vmImage: "ubuntu-18.04"
- steps:
- - task: DownloadBuildArtifacts@0
- inputs:
- buildType: current
- artifactName: "docker"
- itemPattern: "docker/envoy-docker-images.tar.xz"
- downloadType: single
- targetPath: $(Build.StagingDirectory)
- - bash: ./ci/do_ci.sh verify_examples
- env:
- ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
- NO_BUILD_SETUP: 1
-
- - job: macOS
- dependsOn: ["format"]
- timeoutInMinutes: 360
- pool:
- vmImage: "macos-latest"
- steps:
- - script: ./ci/mac_ci_setup.sh
- displayName: "Install dependencies"
-
- - script: ./ci/mac_ci_steps.sh
- displayName: "Run Mac CI"
- env:
- BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
-
- - task: PublishTestResults@2
- inputs:
- testResultsFiles: "**/bazel-testlogs/**/test.xml"
- testRunTitle: "macOS"
- condition: always()
-
- - script: ./ci/flaky_test/run_process_xml_mac.sh
- displayName: "Process Test Results"
- env:
- TEST_TMPDIR: $(Build.SourcesDirectory)
- SLACK_TOKEN: $(SLACK_TOKEN)
- CI_TARGET: "MacOS"
- REPO_URI: $(Build.Repository.Uri)
- BUILD_URI: $(Build.BuildUri)
-
- - job: Windows
- dependsOn: ["format"]
- timeoutInMinutes: 360
- pool:
- vmImage: "windows-latest"
- steps:
- - bash: ci/run_envoy_docker_windows.sh ci/windows_ci_steps.sh
- displayName: "Run Windows CI"
- env:
- ENVOY_RBE: "true"
- BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)"
- BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
- BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
- GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ jobs:
+ - job: examples
+ pool:
+ vmImage: "ubuntu-18.04"
+ steps:
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ buildType: current
+ artifactName: "docker"
+ itemPattern: "docker/envoy-docker-images.tar.xz"
+ downloadType: single
+ targetPath: $(Build.StagingDirectory)
+ - bash: ./ci/do_ci.sh verify_examples
+ env:
+ ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory)
+ NO_BUILD_SETUP: 1
+
+ - stage: macos
+ dependsOn: ["precheck"]
+ jobs:
+ - job: test
+ timeoutInMinutes: 360
+ pool:
+ vmImage: "macos-latest"
+ steps:
+ - script: ./ci/mac_ci_setup.sh
+ displayName: "Install dependencies"
+
+ - script: ./ci/mac_ci_steps.sh
+ displayName: "Run Mac CI"
+ env:
+ BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2"
+ BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
+ BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFiles: "**/bazel-testlogs/**/test.xml"
+ testRunTitle: "macOS"
+ condition: always()
+
+ - script: ./ci/flaky_test/run_process_xml.sh
+ displayName: "Process Test Results"
+ env:
+ TEST_TMPDIR: $(Build.SourcesDirectory)
+ SLACK_TOKEN: $(SLACK_TOKEN)
+ CI_TARGET: "MacOS"
+ REPO_URI: $(Build.Repository.Uri)
+ BUILD_URI: $(Build.BuildUri)
+
+ - stage: windows
+ dependsOn: ["precheck"]
+ jobs:
+ - job: release
+ timeoutInMinutes: 360
+ pool:
+ vmImage: "windows-latest"
+ steps:
+ - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh
+ displayName: "Run Windows CI"
+ env:
+ ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)"
+ ENVOY_RBE: "true"
+ BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)"
+ BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com
+ BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance
+ GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey)
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: "$(Build.StagingDirectory)/envoy"
+ artifactName: windows.release
+ condition: always()
+
+ - job: docker
+ dependsOn: ["release"]
+ timeoutInMinutes: 360
+ pool:
+ vmImage: "windows-latest"
+ steps:
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ buildType: current
+ artifactName: "windows.release"
+ itemPattern: "windows.release/envoy_binary.tar.gz"
+ downloadType: single
+ targetPath: $(Build.StagingDirectory)
+ - bash: |
+ set -e
+ # Convert to Unix-style path so tar doesn't think drive letter is a hostname
+ STAGING_DIR="/$(echo '$(Build.StagingDirectory)' | tr -d ':' | tr '\\' '/')"
+ mkdir -p windows/amd64 && tar zxf "${STAGING_DIR}/windows.release/envoy_binary.tar.gz" -C ./windows/amd64
+ ci/docker_ci.sh
+ workingDirectory: $(Build.SourcesDirectory)
+ env:
+ AZP_BRANCH: $(Build.SourceBranch)
+ AZP_SHA1: $(Build.SourceVersion)
+ DOCKERHUB_USERNAME: $(DockerUsername)
+ DOCKERHUB_PASSWORD: $(DockerPassword)
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: "$(Build.StagingDirectory)/build_images"
+ artifactName: docker_windows
+ condition: always()
diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml
index ab83156fbc47..dcef9acbef46 100644
--- a/.bazelci/presubmit.yml
+++ b/.bazelci/presubmit.yml
@@ -1,10 +1,5 @@
---
tasks:
- gcc:
- name: "GCC"
- platform: ubuntu1804
- build_targets:
- - "//source/exe:envoy-static"
rbe:
name: "RBE"
platform: ubuntu1804
diff --git a/.bazelrc b/.bazelrc
index d3326e1de006..16d8843d6a88 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -3,10 +3,10 @@
# Bazel doesn't need more than 200MB of memory for local build based on memory profiling:
# https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling
# The default JVM max heapsize is 1/4 of physical memory up to 32GB which could be large
-# enough to consume all memory constrained by cgroup in large host, which is the case in CircleCI.
+# enough to consume all memory constrained by cgroup on a large host.
# Limiting JVM heapsize here to let it do GC more when approaching the limit to
# leave room for compiler/linker.
-# The number 2G is choosed heuristically to both support in CircleCI and large enough for RBE.
+# The number 2G is chosen heuristically to support both large and small VMs with RBE.
# Startup options cannot be selected via config.
startup --host_jvm_args=-Xmx2g
@@ -19,7 +19,8 @@ build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11
build --javabase=@bazel_tools//tools/jdk:remote_jdk11
build --enable_platform_specific_config
-# Enable position independent code, this option is not supported on Windows and default on on macOS.
+# Enable position independent code (this is the default on macOS and Windows)
+# (Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/421)
build:linux --copt=-fPIC
build:linux --cxxopt=-std=c++17
build:linux --conlyopt=-fexceptions
@@ -35,9 +36,6 @@ build --action_env=CXX
build --action_env=LLVM_CONFIG
build --action_env=PATH
-# Skip system ICU linking.
-build --@com_googlesource_googleurl//build_config:system_icu=0
-
# Common flags for sanitizers
build:sanitizer --define tcmalloc=disabled
build:sanitizer --linkopt -ldl
@@ -112,7 +110,8 @@ build:libc++ --config=clang
build:libc++ --action_env=CXXFLAGS=-stdlib=libc++
build:libc++ --action_env=LDFLAGS=-stdlib=libc++
build:libc++ --action_env=BAZEL_CXXOPTS=-stdlib=libc++
-build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a:-lm
+build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a
+build:libc++ --action_env=BAZEL_LINKOPTS=-lm:-pthread
build:libc++ --define force_libcpp=enabled
# Optimize build for binary size reduction.
@@ -141,7 +140,7 @@ build:coverage --strategy=CoverageReport=sandboxed,local
build:coverage --experimental_use_llvm_covmap
build:coverage --collect_code_coverage
build:coverage --test_tag_filters=-nocoverage
-build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]"
+build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]"
coverage:test-coverage --test_arg="-l trace"
coverage:fuzz-coverage --config=plain-fuzzer
coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh
@@ -230,7 +229,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl
# Docker sandbox
# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8
-build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0
+build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a
build:docker-sandbox --spawn_strategy=docker
build:docker-sandbox --strategy=Javac=docker
build:docker-sandbox --strategy=Closure=docker
@@ -275,6 +274,8 @@ build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1
# Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts.
build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer
build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1
+build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link
+build:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link
# Compile database generation config
build:compdb --build_tag_filters=-nocompdb
@@ -285,27 +286,27 @@ build:windows --define signal_trace=disabled
build:windows --define hot_restart=disabled
build:windows --define tcmalloc=disabled
build:windows --define manual_stamp=manual_stamp
+build:windows --cxxopt="/std:c++17"
-# Should not be required after upstream fix to bazel,
-# and already a no-op to linux/macos builds
-# see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301
+# TODO(wrowe,sunjayBhatia): Resolve bugs upstream in curl and rules_foreign_cc
+# See issue https://github.com/bazelbuild/rules_foreign_cc/issues/301
build:windows --copt="-DCARES_STATICLIB"
build:windows --copt="-DNGHTTP2_STATICLIB"
build:windows --copt="-DCURL_STATICLIB"
-build:windows --cxxopt="/std:c++17"
-# Required to work around build defects on Windows MSVC cl
-# Unguarded gcc pragmas in quiche are not recognized by MSVC
-build:msvc-cl --copt="/wd4068"
-# Allows 'nodiscard' function return values to be discarded
-build:msvc-cl --copt="/wd4834"
-# Allows inline functions to be undefined
-build:msvc-cl --copt="/wd4506"
-build:msvc-cl --copt="-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING"
+# Override any clang preference if building msvc-cl
+# Drop the determinism feature (-DDATE etc are a no-op in msvc-cl)
+build:msvc-cl --action_env=USE_CLANG_CL=""
+build:msvc-cl --define clang_cl=0
+build:msvc-cl --features=-determinism
+
+# Windows build behaviors when using clang-cl
+build:clang-cl --action_env=USE_CLANG_CL=1
+build:clang-cl --define clang_cl=1
# Required to work around Windows clang-cl build defects
# Ignore conflicting definitions of _WIN32_WINNT
-# Overriding __TIME__ etc is problematic (and is actually an invalid no-op)
+# Overriding determinism flags (DATE etc.) is valid on the clang-cl compiler
build:clang-cl --copt="-Wno-macro-redefined"
build:clang-cl --copt="-Wno-builtin-macro-redefined"
build:clang-cl --action_env=USE_CLANG_CL=1
diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 2ee082e1f343..000000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-version: 2.1
-
-executors:
- ubuntu-build:
- description: "A regular build executor based on ubuntu image"
- docker:
- # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8
- - image: envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0
- resource_class: xlarge
- working_directory: /source
-
-jobs:
- api:
- executor: ubuntu-build
- steps:
- - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken
- - checkout
- - run: ci/do_circle_ci.sh bazel.api
- - add_ssh_keys:
- fingerprints:
- - "fb:f3:fe:be:1c:b2:ec:b6:25:f9:7b:a6:87:54:02:8c"
- - run: ci/api_mirror.sh
- - store_artifacts:
- path: /build/envoy/generated
- destination: /
-
- go_control_plane_mirror:
- executor: ubuntu-build
- steps:
- - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken
- - checkout
- - run: ci/do_circle_ci.sh bazel.api
- - add_ssh_keys:
- fingerprints:
- - "9d:3b:fe:7c:09:3b:ce:a9:6a:de:de:41:fb:6b:52:62"
- - run: ci/go_mirror.sh
-
- filter_example_mirror:
- executor: ubuntu-build
- steps:
- - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken
- - checkout
- - add_ssh_keys:
- fingerprints:
- - "f6:f9:df:90:9c:4b:5f:9c:f4:69:fd:42:94:ff:88:24"
- - run: ci/filter_example_mirror.sh
-
- docs:
- executor: ubuntu-build
- steps:
- - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken
- - checkout
- - run: ci/do_circle_ci.sh docs
- - add_ssh_keys:
- fingerprints:
- - "44:c7:a1:9e:f4:9e:a5:33:11:f1:0e:79:e1:55:c9:04"
- - run: docs/publish.sh
- - store_artifacts:
- path: generated/docs
-
-workflows:
- version: 2
- all:
- jobs:
- - api
- - go_control_plane_mirror
- - filter_example_mirror
- - docs:
- filters:
- tags:
- only: /^v.*/
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index bd2530543f4c..21f934c44944 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,4 +1,4 @@
-FROM gcr.io/envoy-ci/envoy-build:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0
+FROM gcr.io/envoy-ci/envoy-build:b480535e8423b5fd7c102fd30c92f4785519e33a
ARG USERNAME=vscode
ARG USER_UID=501
diff --git a/.devcontainer/README.md b/.devcontainer/README.md
index 1cd314d2e4e0..f8119f9c5f6a 100644
--- a/.devcontainer/README.md
+++ b/.devcontainer/README.md
@@ -13,6 +13,8 @@ This task is needed to run everytime after:
- Changing a BUILD file that add/remove files from a target, changes dependencies
- Changing API proto files
+There are additional tools for VS Code located in the [`tools/vscode`](../tools/vscode) directory.
+
## Advanced Usages
### Using Remote Build Execution
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 462b00ee78d0..97c37be6a676 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -38,6 +38,7 @@
"zxh404.vscode-proto3",
"bazelbuild.vscode-bazel",
"llvm-vs-code-extensions.vscode-clangd",
+ "vadimcn.vscode-lldb",
"webfreak.debug",
"ms-python.python"
]
diff --git a/.gitignore b/.gitignore
index a030c858c372..ae1f29656b59 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,3 +36,4 @@ CMakeLists.txt
cmake-build-debug
/linux
bazel.output.txt
+*~
diff --git a/CODEOWNERS b/CODEOWNERS
index 1edcf7c68c1e..9696b370c4c7 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -23,6 +23,8 @@ extensions/filters/common/original_src @snowp @klarose
/*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan
# thrift_proxy extension
/*/extensions/filters/network/thrift_proxy @zuercher @rgs1
+# cdn_loop extension
+/*/extensions/filters/http/cdn_loop @justin-mp @penguingao @alyssawilk
# compressor used by http compression filters
/*/extensions/filters/http/common/compressor @gsagula @rojkov @dio
/*/extensions/filters/http/compressor @rojkov @dio
@@ -78,8 +80,16 @@ extensions/filters/common/original_src @snowp @klarose
/*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan
# attribute context
/*/extensions/filters/common/expr @kyessenov @yangminzhu @lizan
+# webassembly access logger extensions
+/*/extensions/access_loggers/wasm @PiotrSikora @lizan
+# webassembly bootstrap extensions
+/*/extensions/bootstrap/wasm @PiotrSikora @lizan
+# webassembly http extensions
+/*/extensions/filters/http/wasm @PiotrSikora @lizan
+# webassembly network extensions
+/*/extensions/filters/network/wasm @PiotrSikora @lizan
# webassembly common extension
-/*/extensions/common/wasm @jplevyak @PiotrSikora @lizan
+/*/extensions/common/wasm @PiotrSikora @lizan
# common matcher
/*/extensions/common/matcher @mattklein123 @yangminzhu
# common crypto extension
@@ -105,6 +115,8 @@ extensions/filters/common/original_src @snowp @klarose
/*/extensions/stat_sinks/dog_statsd @taiki45 @jmarantz
/*/extensions/stat_sinks/hystrix @trabetti @jmarantz
/*/extensions/stat_sinks/metrics_service @ramaraochavali @jmarantz
+# webassembly stat-sink extensions
+/*/extensions/stat_sinks/wasm @PiotrSikora @lizan
/*/extensions/resource_monitors/injected_resource @eziskind @htuch
/*/extensions/resource_monitors/common @eziskind @htuch
/*/extensions/resource_monitors/fixed_heap @eziskind @htuch
@@ -129,7 +141,7 @@ extensions/filters/common/original_src @snowp @klarose
/*/extensions/compression/gzip @junr03 @rojkov
/*/extensions/filters/http/decompressor @rojkov @dio
# Watchdog Extensions
-/*/extensions/watchdog/profile_action @kbaichoo @htuch
+/*/extensions/watchdog/profile_action @kbaichoo @antoniovicente
# Core upstream code
extensions/upstreams/http @alyssawilk @snowp @mattklein123
extensions/upstreams/http/http @alyssawilk @snowp @mattklein123
@@ -137,3 +149,6 @@ extensions/upstreams/http/tcp @alyssawilk @mattklein123
extensions/upstreams/http/default @alyssawilk @snowp @mattklein123
# OAuth2
extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp
+# HTTP Local Rate Limit
+/*/extensions/filters/http/local_ratelimit @rgs1 @mattklein123
+/*/extensions/filters/common/local_ratelimit @mattklein123 @rgs1
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1bef2955c288..bafe92bb2d8a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -69,7 +69,7 @@ versioning guidelines:
cause a configuration load failure, unless the feature in question is
explicitly overridden in
[runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features)
- config ([example](configs/using_deprecated_config.v2.yaml)). Finally, following the deprecation
+ config ([example](configs/using_deprecated_config.yaml)). Finally, following the deprecation
of the API major version where the field was first
marked deprecated, the entire implementation code will be removed from the Envoy implementation.
* This policy means that organizations deploying master should have some time to get ready for
diff --git a/DEPENDENCY_POLICY.md b/DEPENDENCY_POLICY.md
index 50aad88708aa..0944ad59030b 100644
--- a/DEPENDENCY_POLICY.md
+++ b/DEPENDENCY_POLICY.md
@@ -21,12 +21,14 @@ An example entry for the `nghttp2` dependency is:
```python
com_github_nghttp2_nghttp2 = dict(
project_name = "Nghttp2",
+ project_desc = "Implementation of HTTP/2 and its header compression ...",
project_url = "https://nghttp2.org",
version = "1.41.0",
sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8",
strip_prefix = "nghttp2-{version}",
urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"],
use_category = ["dataplane"],
+ last_updated = "2020-06-02",
cpe = "cpe:2.3:a:nghttp2:nghttp2:*",
),
```
@@ -40,14 +42,16 @@ Dependency declarations must:
`{dash_version}`.
* Versions should prefer release versions over master branch GitHub SHA tarballs. A comment is
necessary if the latter is used. This comment should contain the reason that a non-release
- version is being used and the YYYY-MM-DD when the last update was performed.
+ version is being used.
* Provide accurate entries for `use_category`. Please think carefully about whether there are data
or control plane implications of the dependency.
+* Reflect the date (YYYY-MM-DD) at which they were last updated in the `last_updated` field. This
+ date is preferably the date at which the PR is created.
* CPEs are compulsory for all dependencies that are not purely build/test.
[CPEs](https://en.wikipedia.org/wiki/Common_Platform_Enumeration) provide metadata that allow us
to correlate with related CVEs in dashboards and other tooling, and also provide a machine
- consumable join key. You can consult the latest [CPE
- dictionary](https://nvd.nist.gov/products/cpe) to find a CPE for a dependency.`"N/A"` should only
+ consumable join key. You can consult [CPE
+  search](https://nvd.nist.gov/products/cpe/search) to find a CPE for a dependency. `"N/A"` should only
be used if no CPE for the project is available in the CPE database. CPEs should be _versionless_
with a `:*` suffix, since the version can be computed from `version`.
@@ -93,6 +97,33 @@ basis:
Where possible, we prefer the latest release version for external dependencies, rather than master
branch GitHub SHA tarballs.
+## Dependency shepherds
+
+Sign-off from the [dependency
+shepherds](https://github.com/orgs/envoyproxy/teams/dependency-shepherds) is
+required for every PR that modifies external dependencies. The shepherds will
+look to see that the policy in this document is enforced and that metadata is
+kept up-to-date.
+
+## Dependency patches
+
+Occasionally it is necessary to introduce an Envoy-side patch to a dependency in a `.patch` file.
+These are typically applied in [bazel/repositories.bzl](bazel/repositories.bzl). Our policy on this
+is as follows:
+
+* Patch files impede dependency updates. They are expedient at creation time but are a maintenance
+ penalty. They reduce the velocity and increase the effort of upgrades in response to security
+ vulnerabilities in external dependencies.
+
+* No patch will be accepted without a sincere and sustained effort to upstream the patch to the
+ dependency's canonical repository.
+
+* There should exist a plan-of-record, filed as an issue in the Envoy or upstream GitHub tracker,
+  that tracks elimination of the patch.
+
+* Every patch must have comments at its point-of-use in [bazel/repositories.bzl](bazel/repositories.bzl)
+ providing a rationale and detailing the tracking issue.
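+
+For illustration only (the dependency name, patch file, and issue link below are hypothetical,
+and the real file uses Envoy's repository helper macros rather than a bare `http_archive`),
+such an annotated patch might look like:
+
+```python
+# Patch pending upstreaming to the canonical repository; the rationale and the
+# removal plan are detailed in the (hypothetical) tracking issue:
+# https://github.com/envoyproxy/envoy/issues/<tracking issue>
+http_archive(
+    name = "com_github_example_dependency",
+    patch_args = ["-p1"],
+    patches = ["@envoy//bazel:example_dependency.patch"],
+    # sha256, urls, etc. omitted for brevity.
+)
+```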
+
## Policy exceptions
The following dependencies are exempt from the policy:
diff --git a/DEVELOPER.md b/DEVELOPER.md
index 465644c0e02c..6786925fa7e8 100644
--- a/DEVELOPER.md
+++ b/DEVELOPER.md
@@ -1,6 +1,7 @@
# Developer documentation
-Envoy is built using the Bazel build system. CircleCI builds, tests, and runs coverage against all pull requests and the master branch.
+Envoy is built using the Bazel build system. Our CI on Azure Pipelines builds, tests, and runs coverage against
+all pull requests and the master branch.
To get started building Envoy locally, see the [Bazel quick start](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers).
To run tests, there are Bazel [targets](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#testing-envoy-with-bazel) for Google Test.
@@ -10,7 +11,7 @@ If you plan to contribute to Envoy, you may find it useful to install the Envoy
Below is a list of additional documentation to aid the development process:
-- [General build and installation documentation](https://www.envoyproxy.io/docs/envoy/latest/install/install)
+- [General build and installation documentation](https://www.envoyproxy.io/docs/envoy/latest/start/start)
- [Building and testing Envoy with Bazel](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md)
@@ -35,4 +36,3 @@ And some documents on components of Envoy architecture:
- [Envoy flow control](https://github.com/envoyproxy/envoy/blob/master/source/docs/flow_control.md)
- [Envoy's subset load balancer](https://github.com/envoyproxy/envoy/blob/master/source/docs/subset_load_balancer.md)
-
diff --git a/EXTENSION_POLICY.md b/EXTENSION_POLICY.md
index 0063a2a2139c..1c4a059b748c 100644
--- a/EXTENSION_POLICY.md
+++ b/EXTENSION_POLICY.md
@@ -59,6 +59,19 @@ In the event that the Extension PR author is a sponsoring maintainer and no othe
is available, another maintainer may be enlisted to perform a minimal review for style and common C++
anti-patterns. The Extension PR must still be approved by a non-maintainer reviewer.
+## Wasm extensions
+
+Wasm extensions are not allowed in the main envoyproxy/envoy repository unless
+part of the Wasm implementation validation. The rationale for this policy:
+* Wasm extensions should not depend upon Envoy implementation specifics as
+ they exist behind a version independent ABI. Hence, there is little value in
+ qualifying Wasm extensions in the main repository.
+* Wasm extensions introduce extensive dependencies via crates, etc. We would
+ prefer to keep the envoyproxy/envoy repository dependencies minimal, easy
+ to reason about and maintain.
+* We do not implement any core extensions in Wasm and do not plan to in the
+ medium term.
+
## Extension stability and security posture
Every extension is expected to be tagged with a `status` and `security_posture` in its
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index 923559eea7ad..767605eda220 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -87,6 +87,7 @@ or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.co
* Remove the "Pending" tags and add dates to the top of the [release notes for this version](docs/root/version_history/current.rst).
* Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to
"1.6.0".
+ * Update the [RELEASES](RELEASES.md) doc with the relevant dates.
* Get a review and merge.
* Wait for tests to pass on [master](https://dev.azure.com/cncf/envoy/_build).
* Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should
@@ -137,10 +138,7 @@ Deprecated
----------
```
* Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh`)
- to file tracking issues for code which can be removed.
-* Run the deprecate_features.py script (e.g. `sh tools/deprecate_features/deprecate_features.sh`)
- to make the last release's deprecated features fatal-by-default. Submit the resultant PR and send
- an email to envoy-announce.
+ to file tracking issues for runtime guarded code which can be removed.
* Check source/common/runtime/runtime_features.cc and see if any runtime guards in
disabled_runtime_features should be reassessed, and ping on the relevant issues.
@@ -186,7 +184,7 @@ build confidence in consistent application of the API guidelines to PRs.
Adding new [extensions](REPO_LAYOUT.md#sourceextensions-layout) has a dedicated policy. Please
see [this](./EXTENSION_POLICY.md) document for more information.
-# Exernal dependency policy
+# External dependency policy
Adding new external dependencies has a dedicated policy. Please see [this](DEPENDENCY_POLICY.md)
document for more information.
diff --git a/OWNERS.md b/OWNERS.md
index 6c3c85d64fa7..4adc81048c59 100644
--- a/OWNERS.md
+++ b/OWNERS.md
@@ -37,12 +37,13 @@ routing PRs, questions, etc. to the right place.
* Lua, access logging, and general miscellany.
* Joshua Marantz ([jmarantz](https://github.com/jmarantz)) (jmarantz@google.com)
* Stats, abseil, scalability, and performance.
+* Antonio Vicente ([antoniovicente](https://github.com/antoniovicente)) (avd@google.com)
+ * Event management, security, performance, data plane.
# Envoy security team
* All maintainers
* Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com)
-* Antonio Vicente ([antoniovicente](https://github.com/antoniovicente)) (avd@google.com)
* Tony Allen ([tonya11en](https://github.com/tonya11en)) (tallen@lyft.com)
# Emeritus maintainers
@@ -67,3 +68,5 @@ matter expert reviews. Feel free to loop them in as needed.
* Redis, Python, configuration/operational questions.
* Yuchen Dai ([lambdai](https://github.com/lambdai)) (lambdai@google.com)
* v2 xDS, listeners, filter chain discovery service.
+* Michael Payne ([moderation](https://github.com/moderation)) (m@m17e.org)
+ * External dependencies, Envoy's supply chain and documentation.
diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md
index 9b3d5cd043ba..aa8c5e750247 100644
--- a/PULL_REQUESTS.md
+++ b/PULL_REQUESTS.md
@@ -70,6 +70,16 @@ current version. Please include any relevant links. Each release note should be
relevant subsystem in **alphabetical order** (see existing examples as a guide) and include links
to relevant parts of the documentation. Thank you! Please write in N/A if there are no release notes.
+### Platform Specific Features
+
+If this change involves any platform specific features (e.g. utilizing OS-specific socket options)
+or only implements new features for a limited set of platforms (e.g. Linux amd64 only), please
+include an explanation that addresses the reasoning behind this. Please also open a new tracking
+issue for each platform this change is not implemented on (and link them in the PR) to enable
+maintainers and contributors to triage. Reviewers will expect the change to avoid
+`#ifdef`s, preferring build system feature guards that leave the change disabled on
+platforms that do not support it, as sketched below.
+
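+As a purely illustrative sketch (the target, files, and `:linux` config_setting below are
+hypothetical, not actual Envoy targets), such a build system feature guard might look like:
+
+```python
+# BUILD file sketch: compile the Linux-only source on Linux and fall back to a
+# generic implementation elsewhere, instead of an #ifdef in shared code.
+envoy_cc_library(
+    name = "socket_option_lib",
+    srcs = select({
+        ":linux": ["socket_option_linux.cc"],
+        "//conditions:default": ["socket_option_generic.cc"],
+    }),
+)
+```
+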
### Runtime guard
If this PR has a user-visible behavioral change, or otherwise falls under the
diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md
index 5a1545aacd7a..366209eed929 100644
--- a/PULL_REQUEST_TEMPLATE.md
+++ b/PULL_REQUEST_TEMPLATE.md
@@ -18,6 +18,7 @@ Risk Level:
Testing:
Docs Changes:
Release Notes:
+Platform Specific Features:
[Optional Runtime guard:]
[Optional Fixes #Issue]
[Optional Deprecated:]
diff --git a/README.md b/README.md
index 290119f82e23..03c0aa0432d6 100644
--- a/README.md
+++ b/README.md
@@ -10,15 +10,14 @@ involved and how Envoy plays a role, read the CNCF
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266)
[![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/11?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=11&branchName=master)
-[![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/envoy.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:envoy)
-[![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/)
+[![Jenkins](https://powerci.osuosl.org/buildStatus/icon?job=build-envoy-static-master&subject=ppc64le%20build)](https://powerci.osuosl.org/job/build-envoy-static-master/)
## Documentation
* [Official documentation](https://www.envoyproxy.io/)
* [FAQ](https://www.envoyproxy.io/docs/envoy/latest/faq/overview)
-* [Unofficial Chinese documentation](https://github.com/servicemesher/envoy/)
+* [Unofficial Chinese documentation](https://www.servicemesher.com/envoy/)
* Watch [a video overview of Envoy](https://www.youtube.com/watch?v=RVZX4CwKhGE)
([transcript](https://www.microservices.com/talks/lyfts-envoy-monolith-service-mesh-matt-klein/))
to find out more about the origin story and design philosophy of Envoy
diff --git a/RELEASES.md b/RELEASES.md
index 3ca3f28c376c..0a58aa22c4c2 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -64,7 +64,7 @@ deadline of 3 weeks.
| 1.13.0 | 2019/12/31 | 2020/01/20 | +20 days | 2021/01/20 |
| 1.14.0 | 2020/03/31 | 2020/04/08 | +8 days | 2021/04/08 |
| 1.15.0 | 2020/06/30 | 2020/07/07 | +7 days | 2021/07/07 |
-| 1.16.0 | 2020/09/30 | | | |
+| 1.16.0 | 2020/09/30 | 2020/10/08 | +8 days | 2021/10/08 |
| 1.17.0 | 2020/12/31 | | | |
diff --git a/REPO_LAYOUT.md b/REPO_LAYOUT.md
index cd87e015ac5b..e4f2452a1417 100644
--- a/REPO_LAYOUT.md
+++ b/REPO_LAYOUT.md
@@ -4,7 +4,8 @@ This is a high level overview of how the repository is laid out to both aid in c
as well as to clearly specify how extensions are added to the repository. The top level directories
are:
-* [.circleci/](.circleci/): Configuration for [CircleCI](https://circleci.com/gh/envoyproxy).
+* [.azure-pipelines/](.azure-pipelines/): Configuration for
+[Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/).
* [api/](api/): Envoy data plane API.
* [bazel/](bazel/): Configuration for Envoy's use of [Bazel](https://bazel.build/).
* [ci/](ci/): Scripts used both during CI as well as to build Docker containers.
diff --git a/STYLE.md b/STYLE.md
index 7965f90f7236..ee2deadf170b 100644
--- a/STYLE.md
+++ b/STYLE.md
@@ -1,7 +1,7 @@
# C++ coding style
* The Envoy source code is formatted using clang-format. Thus all white spaces, etc.
- issues are taken care of automatically. The CircleCI tests will automatically check
+  issues are taken care of automatically. Azure Pipelines will automatically check
the code format and fail. There are make targets that can both check the format
(check_format) as well as fix the code format for you (fix_format). Errors in
.clang-tidy are enforced while other warnings are suggestions. Note that code and
@@ -105,17 +105,18 @@
A few general notes on our error handling philosophy:
* All error code returns should be checked.
-* At a very high level, our philosophy is that errors that are *likely* to happen should be
- gracefully handled. Examples of likely errors include any type of network error, disk IO error,
- bad data returned by an API call, bad data read from runtime files, etc. Errors that are
- *unlikely* to happen should lead to process death, under the assumption that the additional burden
- of defensive coding and testing is not an effective use of time for an error that should not happen
- given proper system setup. Examples of these types of errors include not being able to open the shared
- memory region, an invalid initial JSON config read from disk, system calls that should not fail
- assuming correct parameters (which should be validated via tests), etc. Examples of system calls
- that should not fail when passed valid parameters include most usages of `setsockopt()`,
- `getsockopt()`, the kernel returning a valid `sockaddr` after a successful call to `accept()`,
- `pthread_create()`, `pthread_join()`, etc.
+* At a very high level, our philosophy is that errors should be handled gracefully when caused by:
+ - Untrusted network traffic OR
+  - Conditions raised by the Envoy process environment that are *likely* to happen
+* Examples of likely environmental errors include any type of network error, disk IO error, bad
+  data returned by an API call, bad data read from runtime files, etc. Errors in the Envoy
+  environment that are *unlikely* to happen after process initialization should lead to process
+ death, under the assumption that the additional burden of defensive coding and testing is not an
+ effective use of time for an error that should not happen given proper system setup. Examples of
+ these types of errors include not being able to open the shared memory region, system calls that
+ should not fail assuming correct parameters (which should be validated via tests), etc. Examples
+ of system calls that should not fail when passed valid parameters include the kernel returning a
+ valid `sockaddr` after a successful call to `accept()`, `pthread_create()`, `pthread_join()`, etc.
* OOM events (both memory and FDs) are considered fatal crashing errors. An OOM error should never
silently be ignored and should crash the process either via the C++ allocation error exception, an
explicit `RELEASE_ASSERT` following a third party library call, or an obvious crash on a subsequent
diff --git a/VERSION b/VERSION
index 1f0d2f335194..ee8855caa4a7 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.16.0-dev
+1.17.0-dev
diff --git a/api/BUILD b/api/BUILD
index 4aad4899a847..345732128a0d 100644
--- a/api/BUILD
+++ b/api/BUILD
@@ -171,6 +171,7 @@ proto_library(
"//envoy/extensions/filters/http/aws_request_signing/v3:pkg",
"//envoy/extensions/filters/http/buffer/v3:pkg",
"//envoy/extensions/filters/http/cache/v3alpha:pkg",
+ "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg",
"//envoy/extensions/filters/http/compressor/v3:pkg",
"//envoy/extensions/filters/http/cors/v3:pkg",
"//envoy/extensions/filters/http/csrf/v3:pkg",
@@ -189,6 +190,7 @@ proto_library(
"//envoy/extensions/filters/http/health_check/v3:pkg",
"//envoy/extensions/filters/http/ip_tagging/v3:pkg",
"//envoy/extensions/filters/http/jwt_authn/v3:pkg",
+ "//envoy/extensions/filters/http/local_ratelimit/v3:pkg",
"//envoy/extensions/filters/http/lua/v3:pkg",
"//envoy/extensions/filters/http/oauth2/v3alpha:pkg",
"//envoy/extensions/filters/http/on_demand/v3:pkg",
@@ -235,6 +237,7 @@ proto_library(
"//envoy/extensions/network/socket_interface/v3:pkg",
"//envoy/extensions/retry/host/omit_host_metadata/v3:pkg",
"//envoy/extensions/retry/priority/previous_priorities/v3:pkg",
+ "//envoy/extensions/stat_sinks/wasm/v3:pkg",
"//envoy/extensions/transport_sockets/alts/v3:pkg",
"//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg",
"//envoy/extensions/transport_sockets/quic/v3:pkg",
@@ -268,6 +271,7 @@ proto_library(
"//envoy/type/metadata/v3:pkg",
"//envoy/type/tracing/v3:pkg",
"//envoy/type/v3:pkg",
+ "//envoy/watchdog/v3alpha:pkg",
],
)
diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md
index 773248f2e2ea..01ba39b500b8 100644
--- a/api/CONTRIBUTING.md
+++ b/api/CONTRIBUTING.md
@@ -50,11 +50,11 @@ generated RST files are also viewable in `generated/rst`.
Note also that the generated documentation can be viewed in CI:
-1. Open docs job in CircleCI.
-2. Navigate to "artifacts" tab.
-3. Expand files and click on `index.html`.
+1. Open docs job in Azure Pipelines.
+2. Navigate to "Upload Docs to GCS" log.
+3. Click on the link there.
-If you do not see an artifacts tab this is a bug in CircleCI. Try logging out and logging back in.
+If you do not see "Upload Docs to GCS" or it is failing, that means the docs are not built correctly.
### Documentation guidelines
diff --git a/api/bazel/envoy_http_archive.bzl b/api/bazel/envoy_http_archive.bzl
index 13b98f770619..15fd65b2af27 100644
--- a/api/bazel/envoy_http_archive.bzl
+++ b/api/bazel/envoy_http_archive.bzl
@@ -10,8 +10,7 @@ def envoy_http_archive(name, locations, **kwargs):
# This repository has already been defined, probably because the user
# wants to override the version. Do nothing.
return
- loc_key = kwargs.pop("repository_key", name)
- location = locations[loc_key]
+ location = locations[name]
# HTTP tarball at a given URL. Add a BUILD file if requested.
http_archive(
diff --git a/api/bazel/external_deps.bzl b/api/bazel/external_deps.bzl
new file mode 100644
index 000000000000..588879c4bd0a
--- /dev/null
+++ b/api/bazel/external_deps.bzl
@@ -0,0 +1,140 @@
+load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locations_spec")
+
+# Envoy dependencies may be annotated with the following attributes:
+DEPENDENCY_ANNOTATIONS = [
+ # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID
+    # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is preferred. See
+ # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements
+    # e.g. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned.
+ # This attribute is optional for components with use categories listed in the
+ # USE_CATEGORIES_WITH_CPE_OPTIONAL
+ "cpe",
+
+ # A list of extensions when 'use_category' contains 'dataplane_ext' or 'observability_ext'.
+ "extensions",
+
+ # Additional dependencies loaded transitively via this dependency that are not tracked in
+ # Envoy (see the external dependency at the given version for information).
+ "implied_untracked_deps",
+
+ # When the dependency was last updated in Envoy.
+ "last_updated",
+
+ # Project metadata.
+ "project_desc",
+ "project_name",
+ "project_url",
+
+ # List of the categories describing how the dependency is being used. This attribute is used
+ # for automatic tracking of security posture of Envoy's dependencies.
+ # Possible values are documented in the USE_CATEGORIES list below.
+    # This attribute is mandatory for each dependency.
+ "use_category",
+
+ # The dependency version. This may be either a tagged release (preferred)
+ # or git SHA (as an exception when no release tagged version is suitable).
+ "version",
+]
+
+# NOTE: If a dependency use case is either dataplane or controlplane, the other use categories
+# do not need to be declared.
+USE_CATEGORIES = [
+ # This dependency is used in API protos.
+ "api",
+ # This dependency is used in build process.
+ "build",
+ # This dependency is used to process xDS requests.
+ "controlplane",
+ # This dependency is used in processing downstream or upstream requests (core).
+ "dataplane_core",
+ # This dependency is used in processing downstream or upstream requests (extensions).
+ "dataplane_ext",
+    # This dependency is used for logging, metrics or tracing (core). It may process untrusted input.
+ "observability_core",
+    # This dependency is used for logging, metrics or tracing (extensions). It may process untrusted input.
+ "observability_ext",
+ # This dependency does not handle untrusted data and is used for various utility purposes.
+ "other",
+ # This dependency is used only in tests.
+ "test_only",
+ # Documentation generation
+ "docs",
+ # Developer tools (not used in build or docs)
+ "devtools",
+]
+
+# Components with these use categories are not required to specify the 'cpe'
+# and 'last_updated' annotation.
+USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"]
+
+def _fail_missing_attribute(attr, key):
+    fail("The '%s' attribute must be defined for external dependency " % attr + key)
+
+# Method for verifying content of the repository location specifications.
+#
+# We also remove repository metadata attributes so that further consumers, e.g.
+# http_archive, are not confused by them.
+def load_repository_locations(repository_locations_spec):
+ locations = {}
+ for key, location in load_repository_locations_spec(repository_locations_spec).items():
+ mutable_location = dict(location)
+ locations[key] = mutable_location
+
+ if "sha256" not in location or len(location["sha256"]) == 0:
+ _fail_missing_attribute("sha256", key)
+
+ if "project_name" not in location:
+ _fail_missing_attribute("project_name", key)
+
+ if "project_desc" not in location:
+ _fail_missing_attribute("project_desc", key)
+
+ if "project_url" not in location:
+ _fail_missing_attribute("project_url", key)
+ project_url = location["project_url"]
+ if not project_url.startswith("https://") and not project_url.startswith("http://"):
+ fail("project_url must start with https:// or http://: " + project_url)
+
+ if "version" not in location:
+ _fail_missing_attribute("version", key)
+
+ if "use_category" not in location:
+ _fail_missing_attribute("use_category", key)
+ use_category = location["use_category"]
+
+ if "dataplane_ext" in use_category or "observability_ext" in use_category:
+ if "extensions" not in location:
+ _fail_missing_attribute("extensions", key)
+
+ if "last_updated" not in location:
+ _fail_missing_attribute("last_updated", key)
+ last_updated = location["last_updated"]
+
+ # Starlark doesn't have regexes.
+ if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-":
+ fail("last_updated must match YYYY-DD-MM: " + last_updated)
+
+ if "cpe" in location:
+ cpe = location["cpe"]
+
+ # Starlark doesn't have regexes.
+ cpe_components = len(cpe.split(":"))
+
+ # We allow cpe:2.3:a:foo:*:* and cpe:2.3:a:foo:bar:* only.
+ cpe_components_valid = (cpe_components == 6)
+ cpe_matches = (cpe == "N/A" or (cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and cpe_components_valid))
+ if not cpe_matches:
+ fail("CPE must match cpe:2.3:a:::*: " + cpe)
+ elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]:
+ _fail_missing_attribute("cpe", key)
+
+ for category in location["use_category"]:
+ if category not in USE_CATEGORIES:
+ fail("Unknown use_category value '" + category + "' for dependecy " + key)
+
+ # Remove any extra annotations that we add, so that we don't confuse http_archive etc.
+ for annotation in DEPENDENCY_ANNOTATIONS:
+ if annotation in mutable_location:
+ mutable_location.pop(annotation)
+
+ return locations
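+
+# For example (illustrative, using the hypothetical entry sketched above): the
+# returned locations["example_dep"] keeps the attributes http_archive consumes
+# ('sha256', 'strip_prefix', 'urls', ...), while pure metadata annotations such
+# as 'project_name', 'last_updated' or 'cpe' are removed by the loop above.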
diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl
index a64e733cf74a..a12a0ea98b3a 100644
--- a/api/bazel/repositories.bzl
+++ b/api/bazel/repositories.bzl
@@ -1,40 +1,43 @@
load(":envoy_http_archive.bzl", "envoy_http_archive")
-load(":repository_locations.bzl", "REPOSITORY_LOCATIONS")
+load(":external_deps.bzl", "load_repository_locations")
+load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC")
-def api_dependencies():
+REPOSITORY_LOCATIONS = load_repository_locations(REPOSITORY_LOCATIONS_SPEC)
+
+# Use this macro to reference any HTTP archive from bazel/repository_locations.bzl.
+def external_http_archive(name, **kwargs):
envoy_http_archive(
- "bazel_skylib",
+ name,
locations = REPOSITORY_LOCATIONS,
+ **kwargs
)
- envoy_http_archive(
- "com_envoyproxy_protoc_gen_validate",
- locations = REPOSITORY_LOCATIONS,
+
+def api_dependencies():
+ external_http_archive(
+ name = "bazel_skylib",
)
- envoy_http_archive(
+ external_http_archive(
+ name = "com_envoyproxy_protoc_gen_validate",
+ )
+ external_http_archive(
name = "com_google_googleapis",
- locations = REPOSITORY_LOCATIONS,
)
- envoy_http_archive(
+ external_http_archive(
name = "com_github_cncf_udpa",
- locations = REPOSITORY_LOCATIONS,
)
- envoy_http_archive(
+ external_http_archive(
name = "prometheus_metrics_model",
- locations = REPOSITORY_LOCATIONS,
build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT,
)
- envoy_http_archive(
+ external_http_archive(
name = "opencensus_proto",
- locations = REPOSITORY_LOCATIONS,
)
- envoy_http_archive(
+ external_http_archive(
name = "rules_proto",
- locations = REPOSITORY_LOCATIONS,
)
- envoy_http_archive(
+ external_http_archive(
name = "com_github_openzipkin_zipkinapi",
- locations = REPOSITORY_LOCATIONS,
build_file_content = ZIPKINAPI_BUILD_CONTENT,
)
diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl
index 2f0fdc723dbb..bdcf31e867d2 100644
--- a/api/bazel/repository_locations.bzl
+++ b/api/bazel/repository_locations.bzl
@@ -1,66 +1,91 @@
-BAZEL_SKYLIB_RELEASE = "1.0.3"
-BAZEL_SKYLIB_SHA256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c"
-
-OPENCENSUS_PROTO_RELEASE = "0.3.0"
-OPENCENSUS_PROTO_SHA256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0"
-
-PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020
-PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8"
-
-GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019
-GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405"
-
-PROMETHEUS_GIT_SHA = "60555c9708c786597e6b07bf846d0dc5c2a46f54" # Jun 23, 2020
-PROMETHEUS_SHA = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e"
-
-UDPA_RELEASE = "0.0.1"
-UDPA_SHA256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8"
-
-ZIPKINAPI_RELEASE = "0.2.2"
-ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b"
-
-RULES_PROTO_GIT_SHA = "40298556293ae502c66579620a7ce867d5f57311" # Aug 17, 2020
-RULES_PROTO_SHA256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5"
-
-REPOSITORY_LOCATIONS = dict(
+# This should match the schema defined in external_deps.bzl.
+REPOSITORY_LOCATIONS_SPEC = dict(
bazel_skylib = dict(
- sha256 = BAZEL_SKYLIB_SHA256,
- urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib-" + BAZEL_SKYLIB_RELEASE + ".tar.gz"],
+ project_name = "bazel-skylib",
+ project_desc = "Common useful functions and rules for Bazel",
+ project_url = "https://github.com/bazelbuild/bazel-skylib",
+ version = "1.0.3",
+ sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
+ urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"],
+ last_updated = "2020-08-27",
+ use_category = ["api"],
),
com_envoyproxy_protoc_gen_validate = dict(
- sha256 = PGV_SHA256,
- strip_prefix = "protoc-gen-validate-" + PGV_GIT_SHA,
- urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"],
+ project_name = "protoc-gen-validate (PGV)",
+ project_desc = "protoc plugin to generate polyglot message validators",
+ project_url = "https://github.com/envoyproxy/protoc-gen-validate",
+ version = "278964a8052f96a2f514add0298098f63fb7f47f",
+ sha256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8",
+ strip_prefix = "protoc-gen-validate-{version}",
+ urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz"],
+ last_updated = "2020-06-09",
+ use_category = ["api"],
+ ),
+ com_github_cncf_udpa = dict(
+ project_name = "Universal Data Plane API",
+ project_desc = "Universal Data Plane API Working Group (UDPA-WG)",
+ project_url = "https://github.com/cncf/udpa",
+ version = "0.0.1",
+ sha256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8",
+ strip_prefix = "udpa-{version}",
+ urls = ["https://github.com/cncf/udpa/archive/v{version}.tar.gz"],
+ last_updated = "2020-09-23",
+ use_category = ["api"],
+ ),
+ com_github_openzipkin_zipkinapi = dict(
+ project_name = "Zipkin API",
+ project_desc = "Zipkin's language independent model and HTTP Api Definitions",
+ project_url = "https://github.com/openzipkin/zipkin-api",
+ version = "0.2.2",
+ sha256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b",
+ strip_prefix = "zipkin-api-{version}",
+ urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"],
+ last_updated = "2020-09-23",
+ use_category = ["api"],
),
com_google_googleapis = dict(
# TODO(dio): Consider writing a Starlark macro for importing Google API proto.
- sha256 = GOOGLEAPIS_SHA,
- strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA,
- urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"],
+ project_name = "Google APIs",
+ project_desc = "Public interface definitions of Google APIs",
+ project_url = "https://github.com/googleapis/googleapis",
+ version = "82944da21578a53b74e547774cf62ed31a05b841",
+ sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405",
+ strip_prefix = "googleapis-{version}",
+ urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"],
+ last_updated = "2019-12-02",
+ use_category = ["api"],
),
- com_github_cncf_udpa = dict(
- sha256 = UDPA_SHA256,
- strip_prefix = "udpa-" + UDPA_RELEASE,
- urls = ["https://github.com/cncf/udpa/archive/v" + UDPA_RELEASE + ".tar.gz"],
+ opencensus_proto = dict(
+ project_name = "OpenCensus Proto",
+ project_desc = "Language Independent Interface Types For OpenCensus",
+ project_url = "https://github.com/census-instrumentation/opencensus-proto",
+ version = "0.3.0",
+ sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0",
+ strip_prefix = "opencensus-proto-{version}/src",
+ urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"],
+ last_updated = "2020-06-20",
+ use_category = ["api"],
),
prometheus_metrics_model = dict(
- sha256 = PROMETHEUS_SHA,
- strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA,
- urls = ["https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz"],
- ),
- opencensus_proto = dict(
- sha256 = OPENCENSUS_PROTO_SHA256,
- strip_prefix = "opencensus-proto-" + OPENCENSUS_PROTO_RELEASE + "/src",
- urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v" + OPENCENSUS_PROTO_RELEASE + ".tar.gz"],
+ project_name = "Prometheus client model",
+ project_desc = "Data model artifacts for Prometheus",
+ project_url = "https://github.com/prometheus/client_model",
+ version = "60555c9708c786597e6b07bf846d0dc5c2a46f54",
+ sha256 = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e",
+ strip_prefix = "client_model-{version}",
+ urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"],
+ last_updated = "2020-06-23",
+ use_category = ["api"],
),
rules_proto = dict(
- sha256 = RULES_PROTO_SHA256,
- strip_prefix = "rules_proto-" + RULES_PROTO_GIT_SHA + "",
- urls = ["https://github.com/bazelbuild/rules_proto/archive/" + RULES_PROTO_GIT_SHA + ".tar.gz"],
- ),
- com_github_openzipkin_zipkinapi = dict(
- sha256 = ZIPKINAPI_SHA256,
- strip_prefix = "zipkin-api-" + ZIPKINAPI_RELEASE,
- urls = ["https://github.com/openzipkin/zipkin-api/archive/" + ZIPKINAPI_RELEASE + ".tar.gz"],
+ project_name = "Protobuf Rules for Bazel",
+ project_desc = "Protocol buffer rules for Bazel",
+ project_url = "https://github.com/bazelbuild/rules_proto",
+ version = "40298556293ae502c66579620a7ce867d5f57311",
+ sha256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5",
+ strip_prefix = "rules_proto-{version}",
+ urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"],
+ last_updated = "2020-08-17",
+ use_category = ["api"],
),
)
diff --git a/api/bazel/repository_locations_utils.bzl b/api/bazel/repository_locations_utils.bzl
new file mode 100644
index 000000000000..3b984e1bc580
--- /dev/null
+++ b/api/bazel/repository_locations_utils.bzl
@@ -0,0 +1,20 @@
+def _format_version(s, version):
+ return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_"))
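+
+# For example (illustrative values): _format_version("foo-{version}", "1.2.3")
+# returns "foo-1.2.3", while "{dash_version}" and "{underscore_version}" expand
+# to "1-2-3" and "1_2_3" respectively.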
+
+# Generate a "repository location specification" from raw repository
+# specification. The information should match the format required by
+# external_deps.bzl. This function mostly does interpolation of {version} in
+# the repository info fields. This code should be capable of running in both
+# Python and Starlark.
+def load_repository_locations_spec(repository_locations_spec):
+ locations = {}
+ for key, location in repository_locations_spec.items():
+ mutable_location = dict(location)
+ locations[key] = mutable_location
+
+ # Fixup with version information.
+ if "version" in location:
+ if "strip_prefix" in location:
+ mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"])
+ mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]]
+ return locations
diff --git a/api/envoy/admin/v3/BUILD b/api/envoy/admin/v3/BUILD
index 4163de8e0aba..38eadcb09fea 100644
--- a/api/envoy/admin/v3/BUILD
+++ b/api/envoy/admin/v3/BUILD
@@ -9,6 +9,7 @@ api_proto_package(
"//envoy/admin/v2alpha:pkg",
"//envoy/annotations:pkg",
"//envoy/config/bootstrap/v3:pkg",
+ "//envoy/config/cluster/v3:pkg",
"//envoy/config/core/v3:pkg",
"//envoy/config/tap/v3:pkg",
"//envoy/type/v3:pkg",
diff --git a/api/envoy/admin/v3/certs.proto b/api/envoy/admin/v3/certs.proto
index 158c8aead28f..5580bb5ef17d 100644
--- a/api/envoy/admin/v3/certs.proto
+++ b/api/envoy/admin/v3/certs.proto
@@ -34,11 +34,19 @@ message Certificate {
repeated CertificateDetails cert_chain = 2;
}
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message CertificateDetails {
option (udpa.annotations.versioning).previous_message_type =
"envoy.admin.v2alpha.CertificateDetails";
+ message OcspDetails {
+ // Indicates the time from which the OCSP response is valid.
+ google.protobuf.Timestamp valid_from = 1;
+
+ // Indicates the time at which the OCSP response expires.
+ google.protobuf.Timestamp expiration = 2;
+ }
+
// Path of the certificate.
string path = 1;
@@ -56,6 +64,9 @@ message CertificateDetails {
// Indicates the time at which the certificate expires.
google.protobuf.Timestamp expiration_time = 6;
+
+ // Details related to the OCSP response associated with this certificate, if any.
+ OcspDetails ocsp_details = 7;
}
message SubjectAlternateName {
diff --git a/api/envoy/admin/v3/clusters.proto b/api/envoy/admin/v3/clusters.proto
index fc05c8a10de2..8eeaec20becc 100644
--- a/api/envoy/admin/v3/clusters.proto
+++ b/api/envoy/admin/v3/clusters.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
package envoy.admin.v3;
import "envoy/admin/v3/metrics.proto";
+import "envoy/config/cluster/v3/circuit_breaker.proto";
import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/health_check.proto";
@@ -28,7 +29,7 @@ message Clusters {
}
// Details an individual cluster's current status.
-// [#next-free-field: 6]
+// [#next-free-field: 7]
message ClusterStatus {
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus";
@@ -76,6 +77,9 @@ message ClusterStatus {
// threshold for that interval.
// 3. Outlier detection is not enabled for this cluster.
type.v3.Percent local_origin_success_rate_ejection_threshold = 5;
+
+ // :ref:`Circuit breaking ` settings of the cluster.
+ config.cluster.v3.CircuitBreakers circuit_breakers = 6;
}
// Current state of a particular host.
diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto
index b91303f3d8fe..5e3765a8586f 100644
--- a/api/envoy/admin/v3/server_info.proto
+++ b/api/envoy/admin/v3/server_info.proto
@@ -2,6 +2,8 @@ syntax = "proto3";
package envoy.admin.v3;
+import "envoy/config/core/v3/base.proto";
+
import "google/protobuf/duration.proto";
import "envoy/annotations/deprecation.proto";
@@ -17,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// Proto representation of the value returned by /server_info, containing
// server version/server status information.
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message ServerInfo {
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ServerInfo";
@@ -52,9 +54,12 @@ message ServerInfo {
// Command line options the server is currently running with.
CommandLineOptions command_line_options = 6;
+
+ // Populated node identity of this server.
+ config.core.v3.Node node = 7;
}
-// [#next-free-field: 35]
+// [#next-free-field: 37]
message CommandLineOptions {
option (udpa.annotations.versioning).previous_message_type =
"envoy.admin.v2alpha.CommandLineOptions";
@@ -179,4 +184,10 @@ message CommandLineOptions {
// See :option:`--enable-fine-grain-logging` for details.
bool enable_fine_grain_logging = 34;
+
+ // See :option:`--socket-path` for details.
+ string socket_path = 35;
+
+ // See :option:`--socket-mode` for details.
+ uint32 socket_mode = 36;
}
diff --git a/api/envoy/admin/v3/tap.proto b/api/envoy/admin/v3/tap.proto
index ca7ab4405a9b..934170b2deea 100644
--- a/api/envoy/admin/v3/tap.proto
+++ b/api/envoy/admin/v3/tap.proto
@@ -21,7 +21,7 @@ message TapRequest {
// The opaque configuration ID used to match the configuration to a loaded extension.
// A tap extension configures a similar opaque ID that is used to match.
- string config_id = 1 [(validate.rules).string = {min_bytes: 1}];
+ string config_id = 1 [(validate.rules).string = {min_len: 1}];
// The tap configuration to load.
config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];
diff --git a/api/envoy/admin/v4alpha/BUILD b/api/envoy/admin/v4alpha/BUILD
index f2cb1a2a70c0..28f1e7d8c821 100644
--- a/api/envoy/admin/v4alpha/BUILD
+++ b/api/envoy/admin/v4alpha/BUILD
@@ -9,6 +9,7 @@ api_proto_package(
"//envoy/admin/v3:pkg",
"//envoy/annotations:pkg",
"//envoy/config/bootstrap/v4alpha:pkg",
+ "//envoy/config/cluster/v4alpha:pkg",
"//envoy/config/core/v4alpha:pkg",
"//envoy/config/tap/v4alpha:pkg",
"//envoy/type/v3:pkg",
diff --git a/api/envoy/admin/v4alpha/certs.proto b/api/envoy/admin/v4alpha/certs.proto
index 585b09bccf4c..0dd868f71fa6 100644
--- a/api/envoy/admin/v4alpha/certs.proto
+++ b/api/envoy/admin/v4alpha/certs.proto
@@ -34,10 +34,21 @@ message Certificate {
repeated CertificateDetails cert_chain = 2;
}
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message CertificateDetails {
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails";
+ message OcspDetails {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.admin.v3.CertificateDetails.OcspDetails";
+
+ // Indicates the time from which the OCSP response is valid.
+ google.protobuf.Timestamp valid_from = 1;
+
+ // Indicates the time at which the OCSP response expires.
+ google.protobuf.Timestamp expiration = 2;
+ }
+
// Path of the certificate.
string path = 1;
@@ -55,6 +66,9 @@ message CertificateDetails {
// Indicates the time at which the certificate expires.
google.protobuf.Timestamp expiration_time = 6;
+
+ // Details related to the OCSP response associated with this certificate, if any.
+ OcspDetails ocsp_details = 7;
}
message SubjectAlternateName {
diff --git a/api/envoy/admin/v4alpha/clusters.proto b/api/envoy/admin/v4alpha/clusters.proto
index 9056262cae86..10d920976930 100644
--- a/api/envoy/admin/v4alpha/clusters.proto
+++ b/api/envoy/admin/v4alpha/clusters.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
package envoy.admin.v4alpha;
import "envoy/admin/v4alpha/metrics.proto";
+import "envoy/config/cluster/v4alpha/circuit_breaker.proto";
import "envoy/config/core/v4alpha/address.proto";
import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/health_check.proto";
@@ -28,7 +29,7 @@ message Clusters {
}
// Details an individual cluster's current status.
-// [#next-free-field: 6]
+// [#next-free-field: 7]
message ClusterStatus {
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus";
@@ -76,6 +77,9 @@ message ClusterStatus {
// threshold for that interval.
// 3. Outlier detection is not enabled for this cluster.
type.v3.Percent local_origin_success_rate_ejection_threshold = 5;
+
+ // :ref:`Circuit breaking ` settings of the cluster.
+ config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6;
}
// Current state of a particular host.
diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto
index 3f3570af0111..6f56978d49fe 100644
--- a/api/envoy/admin/v4alpha/server_info.proto
+++ b/api/envoy/admin/v4alpha/server_info.proto
@@ -2,6 +2,8 @@ syntax = "proto3";
package envoy.admin.v4alpha;
+import "envoy/config/core/v4alpha/base.proto";
+
import "google/protobuf/duration.proto";
import "envoy/annotations/deprecation.proto";
@@ -17,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO
// Proto representation of the value returned by /server_info, containing
// server version/server status information.
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message ServerInfo {
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo";
@@ -52,9 +54,12 @@ message ServerInfo {
// Command line options the server is currently running with.
CommandLineOptions command_line_options = 6;
+
+ // Populated node identity of this server.
+ config.core.v4alpha.Node node = 7;
}
-// [#next-free-field: 35]
+// [#next-free-field: 37]
message CommandLineOptions {
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions";
@@ -178,4 +183,10 @@ message CommandLineOptions {
// See :option:`--enable-fine-grain-logging` for details.
bool enable_fine_grain_logging = 34;
+
+ // See :option:`--socket-path` for details.
+ string socket_path = 35;
+
+ // See :option:`--socket-mode` for details.
+ uint32 socket_mode = 36;
}
diff --git a/api/envoy/admin/v4alpha/tap.proto b/api/envoy/admin/v4alpha/tap.proto
index 039dfcfeb812..e89259380418 100644
--- a/api/envoy/admin/v4alpha/tap.proto
+++ b/api/envoy/admin/v4alpha/tap.proto
@@ -21,7 +21,7 @@ message TapRequest {
// The opaque configuration ID used to match the configuration to a loaded extension.
// A tap extension configures a similar opaque ID that is used to match.
- string config_id = 1 [(validate.rules).string = {min_bytes: 1}];
+ string config_id = 1 [(validate.rules).string = {min_len: 1}];
// The tap configuration to load.
config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];
diff --git a/api/envoy/api/v2/cluster.proto b/api/envoy/api/v2/cluster.proto
index d1a50fbdb91e..fab95f71b763 100644
--- a/api/envoy/api/v2/cluster.proto
+++ b/api/envoy/api/v2/cluster.proto
@@ -352,6 +352,10 @@ message Cluster {
// This header isn't sanitized by default, so enabling this feature allows HTTP clients to
// route traffic to arbitrary hosts and/or ports, which may have serious security
// consequences.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
bool use_http_header = 1;
}
@@ -677,10 +681,16 @@ message Cluster {
// :ref:`STRICT_DNS`
// and :ref:`LOGICAL_DNS`
// this setting is ignored.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only allows overriding DNS resolvers via system settings.
repeated core.Address dns_resolvers = 18;
// [#next-major-version: Reconcile DNS options in a single message.]
// Always use TCP queries instead of UDP queries for DNS lookups.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 45;
// If specified, outlier detection will be enabled for this upstream cluster.
diff --git a/api/envoy/api/v2/route/route_components.proto b/api/envoy/api/v2/route/route_components.proto
index 339c7bcbc53a..c1e84a5618a7 100644
--- a/api/envoy/api/v2/route/route_components.proto
+++ b/api/envoy/api/v2/route/route_components.proto
@@ -756,6 +756,10 @@ message RouteAction {
//
// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1
// *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
string cluster_header = 2
[(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
@@ -866,6 +870,10 @@ message RouteAction {
//
// Pay attention to the potential security implications of using this option. Provided header
// must come from trusted source.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
string auto_host_rewrite_header = 29 [
(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},
(udpa.annotations.field_migrate).rename = "host_rewrite_header"
diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto
index e9d815aafcea..d85c6af8294e 100644
--- a/api/envoy/config/accesslog/v3/accesslog.proto
+++ b/api/envoy/config/accesslog/v3/accesslog.proto
@@ -164,7 +164,7 @@ message RuntimeFilter {
// Runtime key to get an optional overridden numerator for use in the
// *percent_sampled* field. If found in runtime, this value will replace the
// default numerator.
- string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 1 [(validate.rules).string = {min_len: 1}];
// The default sampling percentage. If not specified, defaults to 0% with
// denominator of 100.
@@ -254,6 +254,7 @@ message ResponseFlagFilter {
in: "UMSDR"
in: "RFCF"
in: "NFCF"
+ in: "DT"
}
}
}];
diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto
index bd4bcd48c4b4..0714b614c41d 100644
--- a/api/envoy/config/accesslog/v4alpha/accesslog.proto
+++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto
@@ -164,7 +164,7 @@ message RuntimeFilter {
// Runtime key to get an optional overridden numerator for use in the
// *percent_sampled* field. If found in runtime, this value will replace the
// default numerator.
- string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 1 [(validate.rules).string = {min_len: 1}];
// The default sampling percentage. If not specified, defaults to 0% with
// denominator of 100.
@@ -253,6 +253,7 @@ message ResponseFlagFilter {
in: "UMSDR"
in: "RFCF"
in: "NFCF"
+ in: "DT"
}
}
}];
diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto
index da88dce786ae..30c276f24276 100644
--- a/api/envoy/config/bootstrap/v2/bootstrap.proto
+++ b/api/envoy/config/bootstrap/v2/bootstrap.proto
@@ -169,6 +169,9 @@ message Bootstrap {
// when :ref:`dns_resolvers ` and
// :ref:`use_tcp_for_dns_lookups ` are
// specified.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 20;
}
diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto
index a1e981fcbdda..a9a0290b297c 100644
--- a/api/envoy/config/bootstrap/v3/bootstrap.proto
+++ b/api/envoy/config/bootstrap/v3/bootstrap.proto
@@ -40,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// ` for more detail.
// Bootstrap :ref:`configuration overview `.
-// [#next-free-field: 27]
+// [#next-free-field: 28]
message Bootstrap {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.bootstrap.v2.Bootstrap";
@@ -176,7 +176,13 @@ message Bootstrap {
}];
// Optional watchdog configuration.
- Watchdog watchdog = 8;
+ // This is for a single watchdog configuration for the entire system.
+ // Deprecated in favor of *watchdogs*, which has finer granularity.
+ Watchdog watchdog = 8 [deprecated = true];
+
+ // Optional watchdogs configuration.
+ // This is used for specifying different watchdogs for the different subsystems.
+ Watchdogs watchdogs = 27;
// Configuration for an external tracing provider.
//
@@ -228,6 +234,9 @@ message Bootstrap {
// when :ref:`dns_resolvers ` and
// :ref:`use_tcp_for_dns_lookups ` are
// specified.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 20;
// Specifies optional bootstrap extensions to be instantiated at startup time.
@@ -336,6 +345,17 @@ message ClusterManager {
core.v3.ApiConfigSource load_stats_config = 4;
}
+// Allows you to specify different watchdog configs for different subsystems.
+// This allows finer-tuned policies for the watchdog. If a subsystem is omitted,
+// the default values for that subsystem will be used.
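+//
+// A minimal sketch (a hypothetical bootstrap fragment; the timeout values are
+// illustrative only):
+//
+// .. code-block:: yaml
+//
+//   watchdogs:
+//     main_thread_watchdog:
+//       miss_timeout: 0.2s
+//     worker_watchdog:
+//       miss_timeout: 0.2s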
+message Watchdogs {
+ // Watchdog for the main thread.
+ Watchdog main_thread_watchdog = 1;
+
+ // Watchdog for the worker threads.
+ Watchdog worker_watchdog = 2;
+}
+
// Envoy process watchdog configuration. When configured, this monitors for
// nonresponsive threads and kills the process after the configured thresholds.
// See the :ref:`watchdog documentation ` for more information.
@@ -486,7 +506,7 @@ message RuntimeLayer {
// Descriptive name for the runtime layer. This is only used for the runtime
// :http:get:`/runtime` output.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
oneof layer_specifier {
option (validate.required) = true;
diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto
index 989ecd30ddc4..ef10dead9706 100644
--- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto
+++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto
@@ -38,7 +38,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO
// ` for more detail.
// Bootstrap :ref:`configuration overview `.
-// [#next-free-field: 27]
+// [#next-free-field: 28]
message Bootstrap {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.bootstrap.v3.Bootstrap";
@@ -98,9 +98,9 @@ message Bootstrap {
core.v4alpha.ApiConfigSource ads_config = 3;
}
- reserved 10, 11, 9;
+ reserved 10, 11, 8, 9;
- reserved "runtime", "tracing";
+ reserved "runtime", "watchdog", "tracing";
// Node identity to present to the management server and for instance
// identification purposes (e.g. in generated headers).
@@ -173,8 +173,9 @@ message Bootstrap {
gte {nanos: 1000000}
}];
- // Optional watchdog configuration.
- Watchdog watchdog = 8;
+ // Optional watchdogs configuration.
+ // This is used for specifying different watchdogs for the different subsystems.
+ Watchdogs watchdogs = 27;
// Configuration for the runtime configuration provider. If not
// specified, a “null” provider will be used which will result in all defaults
@@ -219,6 +220,9 @@ message Bootstrap {
// when :ref:`dns_resolvers ` and
// :ref:`use_tcp_for_dns_lookups ` are
// specified.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 20;
// Specifies optional bootstrap extensions to be instantiated at startup time.
@@ -327,6 +331,20 @@ message ClusterManager {
core.v4alpha.ApiConfigSource load_stats_config = 4;
}
+// Allows you to specify different watchdog configs for different subsystems.
+// This allows finer-tuned policies for the watchdog. If a subsystem is omitted,
+// the default values for that subsystem will be used.
+message Watchdogs {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.bootstrap.v3.Watchdogs";
+
+ // Watchdog for the main thread.
+ Watchdog main_thread_watchdog = 1;
+
+ // Watchdog for the worker threads.
+ Watchdog worker_watchdog = 2;
+}
+
// Envoy process watchdog configuration. When configured, this monitors for
// nonresponsive threads and kills the process after the configured thresholds.
// See the :ref:`watchdog documentation ` for more information.
@@ -481,7 +499,7 @@ message RuntimeLayer {
// Descriptive name for the runtime layer. This is only used for the runtime
// :http:get:`/runtime` output.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
oneof layer_specifier {
option (validate.required) = true;
diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto
index 3571ccf9abbd..8e039a1f16fe 100644
--- a/api/envoy/config/cluster/v3/cluster.proto
+++ b/api/envoy/config/cluster/v3/cluster.proto
@@ -170,7 +170,7 @@ message Cluster {
"envoy.api.v2.Cluster.CustomClusterType";
// The type of the cluster to instantiate. The name must match a supported cluster type.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Cluster specific configuration which depends on the cluster being instantiated.
// See the supported cluster for further documentation.
@@ -436,6 +436,10 @@ message Cluster {
// This header isn't sanitized by default, so enabling this feature allows HTTP clients to
// route traffic to arbitrary hosts and/or ports, which may have serious security
// consequences.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
bool use_http_header = 1;
}
@@ -612,7 +616,32 @@ message Cluster {
//
// This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can
// harm latency more than the prefetching helps.
- google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}];
+ google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1
+ [(validate.rules).double = {lte: 3.0 gte: 1.0}];
+
+ // Indicates how many streams (rounded up) can be anticipated across a cluster for each
+ // stream, useful for low QPS services. This is currently supported for a subset of
+ // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
+ // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a
+ // cluster, doing best effort predictions of what upstream would be picked next and
+ // pre-establishing a connection.
+ //
+ // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first
+ // incoming stream, 2 connections will be prefetched - one to the first upstream for this
+ // cluster, one to the second on the assumption there will be a follow-up stream.
+ //
+ // Prefetching will be limited to one prefetch per configured upstream in the cluster.
+ //
+ // If this value is not set, or set explicitly to one, Envoy will fetch as many connections
+ // as needed to serve streams in flight, so during warm up and in steady state if a connection
+ // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for
+ // connection establishment.
+ //
+ // If both this and per_upstream_prefetch_ratio are set, Envoy will make sure both predicted needs are met,
+ // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.
+ // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.
+ google.protobuf.DoubleValue predictive_prefetch_ratio = 2
+ [(validate.rules).double = {lte: 3.0 gte: 1.0}];
}
reserved 12, 15, 7, 11, 35;
@@ -675,7 +704,7 @@ message Cluster {
// :ref:`statistics ` if :ref:`alt_stat_name
// ` is not provided.
// Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// An optional alternative to the cluster name to be used while emitting stats.
// Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be
@@ -804,10 +833,16 @@ message Cluster {
// :ref:`STRICT_DNS`
// and :ref:`LOGICAL_DNS`
// this setting is ignored.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only allows overriding DNS resolvers via system settings.
repeated core.v3.Address dns_resolvers = 18;
// [#next-major-version: Reconcile DNS options in a single message.]
// Always use TCP queries instead of UDP queries for DNS lookups.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 45;
// If specified, outlier detection will be enabled for this upstream cluster.
diff --git a/api/envoy/config/cluster/v3/filter.proto b/api/envoy/config/cluster/v3/filter.proto
index af3116ec26eb..74f4a1137dab 100644
--- a/api/envoy/config/cluster/v3/filter.proto
+++ b/api/envoy/config/cluster/v3/filter.proto
@@ -21,7 +21,7 @@ message Filter {
// The name of the filter to instantiate. The name must match a
// :ref:`supported filter `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being
// instantiated. See the supported filters for further documentation.
diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto
index 9b7536836365..0ad15668e6cf 100644
--- a/api/envoy/config/cluster/v4alpha/cluster.proto
+++ b/api/envoy/config/cluster/v4alpha/cluster.proto
@@ -172,7 +172,7 @@ message Cluster {
"envoy.config.cluster.v3.Cluster.CustomClusterType";
// The type of the cluster to instantiate. The name must match a supported cluster type.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Cluster specific configuration which depends on the cluster being instantiated.
// See the supported cluster for further documentation.
@@ -442,6 +442,10 @@ message Cluster {
// This header isn't sanitized by default, so enabling this feature allows HTTP clients to
// route traffic to arbitrary hosts and/or ports, which may have serious security
// consequences.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
bool use_http_header = 1;
}
@@ -622,7 +626,32 @@ message Cluster {
//
// This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can
// harm latency more than the prefetching helps.
- google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}];
+ google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1
+ [(validate.rules).double = {lte: 3.0 gte: 1.0}];
+
+ // Indicates how many streams (rounded up) can be anticipated across a cluster for each
+ // stream, useful for low QPS services. This is currently supported for a subset of
+ // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
+ // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a
+ // cluster, doing best effort predictions of what upstream would be picked next and
+ // pre-establishing a connection.
+ //
+ // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first
+ // incoming stream, 2 connections will be prefetched - one to the first upstream for this
+ // cluster, one to the second on the assumption there will be a follow-up stream.
+ //
+ // Prefetching will be limited to one prefetch per configured upstream in the cluster.
+ //
+ // If this value is not set, or set explicitly to one, Envoy will fetch as many connections
+ // as needed to serve streams in flight, so during warm up and in steady state if a connection
+ // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for
+ // connection establishment.
+ //
+ // If both this and per_upstream_prefetch_ratio are set, Envoy will make sure both predicted needs are met,
+ // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.
+ // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.
+ google.protobuf.DoubleValue predictive_prefetch_ratio = 2
+ [(validate.rules).double = {lte: 3.0 gte: 1.0}];
}
reserved 12, 15, 7, 11, 35, 47;
@@ -685,7 +714,7 @@ message Cluster {
// :ref:`statistics ` if :ref:`alt_stat_name
// ` is not provided.
// Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// An optional alternative to the cluster name to be used while emitting stats.
// Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be
@@ -814,10 +843,16 @@ message Cluster {
// :ref:`STRICT_DNS`
// and :ref:`LOGICAL_DNS`
// this setting is ignored.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only allows overriding DNS resolvers via system settings.
repeated core.v4alpha.Address dns_resolvers = 18;
// [#next-major-version: Reconcile DNS options in a single message.]
// Always use TCP queries instead of UDP queries for DNS lookups.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 45;
// If specified, outlier detection will be enabled for this upstream cluster.
diff --git a/api/envoy/config/cluster/v4alpha/filter.proto b/api/envoy/config/cluster/v4alpha/filter.proto
index eb825fdeb6d5..5a4a4facbd81 100644
--- a/api/envoy/config/cluster/v4alpha/filter.proto
+++ b/api/envoy/config/cluster/v4alpha/filter.proto
@@ -21,7 +21,7 @@ message Filter {
// The name of the filter to instantiate. The name must match a
// :ref:`supported filter `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being
// instantiated. See the supported filters for further documentation.
diff --git a/api/envoy/config/core/v3/address.proto b/api/envoy/config/core/v3/address.proto
index 5102c2d57591..8228450eb93c 100644
--- a/api/envoy/config/core/v3/address.proto
+++ b/api/envoy/config/core/v3/address.proto
@@ -24,12 +24,24 @@ message Pipe {
// abstract namespace. The starting '@' is replaced by a null byte by Envoy.
// Paths starting with '@' will result in an error in environments other than
// Linux.
- string path = 1 [(validate.rules).string = {min_bytes: 1}];
+ string path = 1 [(validate.rules).string = {min_len: 1}];
// The mode for the Pipe. Not applicable for abstract sockets.
uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];
}
+// [#not-implemented-hide:] The address represents an Envoy internal listener.
+// TODO(lambdai): Make this address available for listener and endpoint.
+// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.
+message EnvoyInternalAddress {
+ oneof address_name_specifier {
+ option (validate.required) = true;
+
+ // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener.
+ string server_listener_name = 1;
+ }
+}
+
// [#next-free-field: 7]
message SocketAddress {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress";
@@ -52,7 +64,7 @@ message SocketAddress {
// address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS
// (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized
// via :ref:`resolver_name `.
- string address = 2 [(validate.rules).string = {min_bytes: 1}];
+ string address = 2 [(validate.rules).string = {min_len: 1}];
oneof port_specifier {
option (validate.required) = true;
@@ -129,6 +141,9 @@ message Address {
SocketAddress socket_address = 1;
Pipe pipe = 2;
+
+ // [#not-implemented-hide:]
+ EnvoyInternalAddress envoy_internal_address = 3;
}
}
@@ -138,7 +153,7 @@ message CidrRange {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange";
// IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.
- string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string address_prefix = 1 [(validate.rules).string = {min_len: 1}];
// Length of prefix, e.g. 0, 32.
google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];
diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto
index 15a17b49384d..4d7d69fae70b 100644
--- a/api/envoy/config/core/v3/base.proto
+++ b/api/envoy/config/core/v3/base.proto
@@ -237,7 +237,16 @@ message RuntimeUInt32 {
uint32 default_value = 2;
// Runtime key to get value for comparison. This value is used if defined.
- string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 3 [(validate.rules).string = {min_len: 1}];
+}
+
+// Runtime derived percentage with a default when not specified.
+message RuntimePercent {
+ // Default value if runtime value is not available.
+ type.v3.Percent default_value = 1;
+
+ // Runtime key to get value for comparison. This value is used if defined.
+ string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}
// Runtime derived double with a default when not specified.
@@ -248,7 +257,7 @@ message RuntimeDouble {
double default_value = 1;
// Runtime key to get value for comparison. This value is used if defined.
- string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}
// Runtime derived bool with a default when not specified.
@@ -262,7 +271,7 @@ message RuntimeFeatureFlag {
// Runtime key to get value for comparison. This value is used if defined. The boolean value must
// be represented via its
// `canonical JSON encoding `_.
- string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}
// Header name/value pair.
@@ -272,7 +281,7 @@ message HeaderValue {
// Header name.
string key = 1
[(validate.rules).string =
- {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];
// Header value.
//
@@ -312,13 +321,13 @@ message DataSource {
option (validate.required) = true;
// Local filesystem data source.
- string filename = 1 [(validate.rules).string = {min_bytes: 1}];
+ string filename = 1 [(validate.rules).string = {min_len: 1}];
// Bytes inlined in the configuration.
bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];
// String inlined in the configuration.
- string inline_string = 3 [(validate.rules).string = {min_bytes: 1}];
+ string inline_string = 3 [(validate.rules).string = {min_len: 1}];
}
}
@@ -345,7 +354,7 @@ message RemoteDataSource {
HttpUri http_uri = 1 [(validate.rules).message = {required: true}];
// SHA256 string for verifying data.
- string sha256 = 2 [(validate.rules).string = {min_bytes: 1}];
+ string sha256 = 2 [(validate.rules).string = {min_len: 1}];
// Retry policy for fetching remote data.
RetryPolicy retry_policy = 3;
@@ -379,7 +388,7 @@ message TransportSocket {
// The name of the transport socket to instantiate. The name must match a supported transport
// socket implementation.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Implementation specific configuration which depends on the implementation being instantiated.
// See the supported transport socket implementations for further documentation.
diff --git a/api/envoy/config/core/v3/grpc_method_list.proto b/api/envoy/config/core/v3/grpc_method_list.proto
index 800d7b5332a0..e79ec24e0201 100644
--- a/api/envoy/config/core/v3/grpc_method_list.proto
+++ b/api/envoy/config/core/v3/grpc_method_list.proto
@@ -22,7 +22,7 @@ message GrpcMethodList {
"envoy.api.v2.core.GrpcMethodList.Service";
// The name of the gRPC service.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The names of the gRPC methods in this service.
repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];
diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto
index 967c694d2bc4..e3730d017410 100644
--- a/api/envoy/config/core/v3/grpc_service.proto
+++ b/api/envoy/config/core/v3/grpc_service.proto
@@ -35,13 +35,13 @@ message GrpcService {
// The name of the upstream gRPC cluster. SSL credentials will be supplied
// in the :ref:`Cluster ` :ref:`transport_socket
// `.
- string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster_name = 1 [(validate.rules).string = {min_len: 1}];
// The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.
// Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster.
string authority = 2
[(validate.rules).string =
- {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
+ {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
}
// [#next-free-field: 9]
@@ -160,10 +160,10 @@ message GrpcService {
// The path of subject token, a security token that represents the
// identity of the party on behalf of whom the request is being made.
- string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}];
+ string subject_token_path = 6 [(validate.rules).string = {min_len: 1}];
// Type of the subject token.
- string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}];
+ string subject_token_type = 7 [(validate.rules).string = {min_len: 1}];
// The path of actor token, a security token that represents the identity
// of the acting party. The acting party is authorized to use the
@@ -230,7 +230,7 @@ message GrpcService {
// The target URI when using the `Google C++ gRPC client
// `_. SSL credentials will be supplied in
// :ref:`channel_credentials `.
- string target_uri = 1 [(validate.rules).string = {min_bytes: 1}];
+ string target_uri = 1 [(validate.rules).string = {min_len: 1}];
ChannelCredentials channel_credentials = 2;
@@ -247,7 +247,7 @@ message GrpcService {
//
// streams_total, Counter, Total number of streams opened
// streams_closed_, Counter, Total streams closed with
- string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 4 [(validate.rules).string = {min_len: 1}];
// The name of the Google gRPC credentials factory to use. This must have been registered with
// Envoy. If this is empty, a default credentials factory will be used that sets up channel
@@ -286,8 +286,10 @@ message GrpcService {
// request.
google.protobuf.Duration timeout = 3;
- // Additional metadata to include in streams initiated to the GrpcService.
- // This can be used for scenarios in which additional ad hoc authorization
- // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.
+ // Additional metadata to include in streams initiated to the GrpcService. This can be used for
+ // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to
+ // be injected. For more information, including details on header value syntax, see the
+ // documentation on :ref:`custom request headers
+ // `.
repeated HeaderValue initial_metadata = 5;
}
diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto
index c6b4acfa937a..ccd473969846 100644
--- a/api/envoy/config/core/v3/health_check.proto
+++ b/api/envoy/config/core/v3/health_check.proto
@@ -54,7 +54,7 @@ enum HealthStatus {
DEGRADED = 5;
}
-// [#next-free-field: 24]
+// [#next-free-field: 25]
message HealthCheck {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck";
@@ -67,7 +67,7 @@ message HealthCheck {
option (validate.required) = true;
// Hex encoded payload. E.g., "000000FF".
- string text = 1 [(validate.rules).string = {min_bytes: 1}];
+ string text = 1 [(validate.rules).string = {min_len: 1}];
// [#not-implemented-hide:] Binary payload.
bytes binary = 2;
@@ -91,9 +91,8 @@ message HealthCheck {
// Specifies the HTTP path that will be requested during health checking. For example
// */healthcheck*.
- string path = 2 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}
- ];
+ string path = 2
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
// [#not-implemented-hide:] HTTP specific payload.
Payload send = 3;
@@ -187,7 +186,7 @@ message HealthCheck {
reserved "config";
// The registered name of the custom health checker.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// A custom health checker specific configuration which depends on the custom health checker
// being instantiated. See :api:`envoy/config/health_checker` for reference.
@@ -285,6 +284,21 @@ message HealthCheck {
// The default value for "no traffic interval" is 60 seconds.
google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];
+ // The "no traffic healthy interval" is a special health check interval that
+ // is used for hosts that are currently passing active health checking
+ // (including new hosts) when the cluster has received no traffic.
+ //
+ // This is useful when we want to send frequent health checks with
+ // `no_traffic_interval` but then revert to the lower frequency `no_traffic_healthy_interval` once
+ // a host in the cluster is marked as healthy.
+ //
+ // Once a cluster has been used for traffic routing, Envoy will shift back to using the
+ // standard health check interval that is defined.
+ //
+ // If no_traffic_healthy_interval is not set, it will default to the
+ // "no traffic interval", and that interval will be used regardless of health state.
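+ //
+ // A minimal sketch (durations are illustrative only): send health checks every
+ // 5 seconds to hosts in a cluster with no traffic, but only every 60 seconds
+ // once a host is passing active health checking:
+ //
+ // .. code-block:: yaml
+ //
+ //   no_traffic_interval: 5s
+ //   no_traffic_healthy_interval: 60s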
+ google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}];
+
// The "unhealthy interval" is a health check interval that is used for hosts that are marked as
// unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the
// standard health check interval that is defined.
diff --git a/api/envoy/config/core/v3/http_uri.proto b/api/envoy/config/core/v3/http_uri.proto
index 42bcd4f61572..5d1fc239e07e 100644
--- a/api/envoy/config/core/v3/http_uri.proto
+++ b/api/envoy/config/core/v3/http_uri.proto
@@ -27,7 +27,7 @@ message HttpUri {
//
// uri: https://www.googleapis.com/oauth2/v1/certs
//
- string uri = 1 [(validate.rules).string = {min_bytes: 1}];
+ string uri = 1 [(validate.rules).string = {min_len: 1}];
// Specify how `uri` is to be fetched. Today, this requires an explicit
// cluster, but in the future we may support dynamic cluster creation or
@@ -45,7 +45,7 @@ message HttpUri {
//
// cluster: jwks_cluster
//
- string cluster = 2 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 2 [(validate.rules).string = {min_len: 1}];
}
// Sets the maximum duration in milliseconds that a response can take to arrive upon request.
diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto
index 3e20f3b449ae..17a6955d6851 100644
--- a/api/envoy/config/core/v3/protocol.proto
+++ b/api/envoy/config/core/v3/protocol.proto
@@ -2,6 +2,8 @@ syntax = "proto3";
package envoy.config.core.v3;
+import "envoy/type/v3/percent.proto";
+
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
@@ -177,7 +179,27 @@ message Http1ProtocolOptions {
google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7;
}
-// [#next-free-field: 15]
+message KeepaliveSettings {
+ // Send HTTP/2 PING frames at this interval to test that the connection is still alive.
+ google.protobuf.Duration interval = 1 [(validate.rules).duration = {
+ required: true
+ gte {nanos: 1000000}
+ }];
+
+ // How long to wait for a response to a keepalive PING. If a response is not received within this
+ // time period, the connection will be aborted.
+ google.protobuf.Duration timeout = 2 [(validate.rules).duration = {
+ required: true
+ gte {nanos: 1000000}
+ }];
+
+ // A random jitter amount as a percentage of interval that will be added to each interval.
+ // A value of zero means there will be no jitter.
+ // The default value is 15%.
+ type.v3.Percent interval_jitter = 3;
+}
+
+// [#next-free-field: 16]
message Http2ProtocolOptions {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.core.Http2ProtocolOptions";
@@ -345,6 +367,10 @@ message Http2ProtocolOptions {
// `_ for
// standardized identifiers.
repeated SettingsParameter custom_settings_parameters = 13;
+
+ // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
+ // does not respond within the configured timeout, the connection will be aborted.
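+ //
+ // A minimal sketch (a hypothetical configuration; the durations are
+ // illustrative, not defaults):
+ //
+ // .. code-block:: yaml
+ //
+ //   connection_keepalive:
+ //     interval: 30s
+ //     timeout: 5s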
+ KeepaliveSettings connection_keepalive = 15;
}
// [#not-implemented-hide:]
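A minimal sketch of the new keepalive settings, assuming the fields above (the durations are illustrative; only the 15% jitter mirrors the documented default):

.. code-block:: yaml

    http2_protocol_options:
      connection_keepalive:
        interval: 30s
        timeout: 5s
        interval_jitter:
          value: 15.0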
diff --git a/api/envoy/config/core/v3/substitution_format_string.proto b/api/envoy/config/core/v3/substitution_format_string.proto
index 6c129707b2e2..10d99b878bdd 100644
--- a/api/envoy/config/core/v3/substitution_format_string.proto
+++ b/api/envoy/config/core/v3/substitution_format_string.proto
@@ -23,17 +23,20 @@ message SubstitutionFormatString {
// Specify a format with command operators to form a text string.
// Its details are described in :ref:`format string`.
//
- // .. code-block::
+ // For example, setting ``text_format`` as follows,
//
- // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)%
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // The following plain text will be created:
+ // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
//
- // .. code-block::
+ // generates plain text similar to:
//
- // upstream connect error:204:path=/foo
+ // .. code-block:: text
//
- string text_format = 1 [(validate.rules).string = {min_bytes: 1}];
+ // upstream connect error:503:path=/foo
+ //
+ string text_format = 1 [(validate.rules).string = {min_len: 1}];
// Specify a format with command operators to form a JSON string.
// Its details are described in :ref:`format dictionary`.
@@ -41,11 +44,12 @@ message SubstitutionFormatString {
// Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).
// See the documentation for a specific command operator for details.
//
- // .. code-block::
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // json_format:
- // status: %RESPONSE_CODE%
- // message: %LOCAL_REPLY_BODY%
+ // json_format:
+ // status: "%RESPONSE_CODE%"
+ // message: "%LOCAL_REPLY_BODY%"
//
// The following JSON object would be created:
//
@@ -65,4 +69,15 @@ message SubstitutionFormatString {
// empty string, so that empty values are omitted entirely.
// * for ``json_format`` the keys with null values are omitted in the output structure.
bool omit_empty_values = 3;
+
+ // Specify a *content_type* field.
+ // If this field is not set then ``text/plain`` is used for *text_format* and
+ // ``application/json`` is used for *json_format*.
+ //
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // content_type: "text/html; charset=UTF-8"
+ //
+ string content_type = 4;
}
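A sketch combining ``text_format`` with the new ``content_type`` field (the HTML template is illustrative):

.. code-block:: yaml

    text_format: "<h1>%LOCAL_REPLY_BODY%</h1>"
    content_type: "text/html; charset=UTF-8"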
diff --git a/api/envoy/config/core/v4alpha/address.proto b/api/envoy/config/core/v4alpha/address.proto
index ffade4bed75b..6ae82359504e 100644
--- a/api/envoy/config/core/v4alpha/address.proto
+++ b/api/envoy/config/core/v4alpha/address.proto
@@ -24,12 +24,27 @@ message Pipe {
// abstract namespace. The starting '@' is replaced by a null byte by Envoy.
// Paths starting with '@' will result in an error in environments other than
// Linux.
- string path = 1 [(validate.rules).string = {min_bytes: 1}];
+ string path = 1 [(validate.rules).string = {min_len: 1}];
// The mode for the Pipe. Not applicable for abstract sockets.
uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];
}
+// [#not-implemented-hide:] The address represents an Envoy internal listener.
+// TODO(lambdai): Make this address available for listener and endpoint.
+// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.
+message EnvoyInternalAddress {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.core.v3.EnvoyInternalAddress";
+
+ oneof address_name_specifier {
+ option (validate.required) = true;
+
+ // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener.
+ string server_listener_name = 1;
+ }
+}
+
// [#next-free-field: 7]
message SocketAddress {
option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress";
@@ -52,7 +67,7 @@ message SocketAddress {
// address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS
// (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized
// via :ref:`resolver_name `.
- string address = 2 [(validate.rules).string = {min_bytes: 1}];
+ string address = 2 [(validate.rules).string = {min_len: 1}];
oneof port_specifier {
option (validate.required) = true;
@@ -129,6 +144,9 @@ message Address {
SocketAddress socket_address = 1;
Pipe pipe = 2;
+
+ // [#not-implemented-hide:]
+ EnvoyInternalAddress envoy_internal_address = 3;
}
}
@@ -138,7 +156,7 @@ message CidrRange {
option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange";
// IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.
- string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string address_prefix = 1 [(validate.rules).string = {min_len: 1}];
// Length of prefix, e.g. 0, 32.
google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];
diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto
index b13b4e89bfd1..dc1104a219b7 100644
--- a/api/envoy/config/core/v4alpha/base.proto
+++ b/api/envoy/config/core/v4alpha/base.proto
@@ -229,7 +229,19 @@ message RuntimeUInt32 {
uint32 default_value = 2;
// Runtime key to get value for comparison. This value is used if defined.
- string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 3 [(validate.rules).string = {min_len: 1}];
+}
+
+// Runtime derived percentage with a default when not specified.
+message RuntimePercent {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.core.v3.RuntimePercent";
+
+ // Default value if runtime value is not available.
+ type.v3.Percent default_value = 1;
+
+ // Runtime key to get value for comparison. This value is used if defined.
+ string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}
// Runtime derived double with a default when not specified.
@@ -240,7 +252,7 @@ message RuntimeDouble {
double default_value = 1;
// Runtime key to get value for comparison. This value is used if defined.
- string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}
// Runtime derived bool with a default when not specified.
@@ -254,7 +266,7 @@ message RuntimeFeatureFlag {
// Runtime key to get value for comparison. This value is used if defined. The boolean value must
// be represented via its
// `canonical JSON encoding `_.
- string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}
// Header name/value pair.
@@ -264,7 +276,7 @@ message HeaderValue {
// Header name.
string key = 1
[(validate.rules).string =
- {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];
// Header value.
//
@@ -304,13 +316,13 @@ message DataSource {
option (validate.required) = true;
// Local filesystem data source.
- string filename = 1 [(validate.rules).string = {min_bytes: 1}];
+ string filename = 1 [(validate.rules).string = {min_len: 1}];
// Bytes inlined in the configuration.
bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];
// String inlined in the configuration.
- string inline_string = 3 [(validate.rules).string = {min_bytes: 1}];
+ string inline_string = 3 [(validate.rules).string = {min_len: 1}];
}
}
@@ -337,7 +349,7 @@ message RemoteDataSource {
HttpUri http_uri = 1 [(validate.rules).message = {required: true}];
// SHA256 string for verifying data.
- string sha256 = 2 [(validate.rules).string = {min_bytes: 1}];
+ string sha256 = 2 [(validate.rules).string = {min_len: 1}];
// Retry policy for fetching remote data.
RetryPolicy retry_policy = 3;
@@ -373,7 +385,7 @@ message TransportSocket {
// The name of the transport socket to instantiate. The name must match a supported transport
// socket implementation.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Implementation specific configuration which depends on the implementation being instantiated.
// See the supported transport socket implementations for further documentation.
diff --git a/api/envoy/config/core/v4alpha/grpc_method_list.proto b/api/envoy/config/core/v4alpha/grpc_method_list.proto
index a4a7be077b27..371ea32c10f3 100644
--- a/api/envoy/config/core/v4alpha/grpc_method_list.proto
+++ b/api/envoy/config/core/v4alpha/grpc_method_list.proto
@@ -23,7 +23,7 @@ message GrpcMethodList {
"envoy.config.core.v3.GrpcMethodList.Service";
// The name of the gRPC service.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The names of the gRPC methods in this service.
repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];
diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto
index 51f11fa1f346..9ea35b456470 100644
--- a/api/envoy/config/core/v4alpha/grpc_service.proto
+++ b/api/envoy/config/core/v4alpha/grpc_service.proto
@@ -35,13 +35,13 @@ message GrpcService {
// The name of the upstream gRPC cluster. SSL credentials will be supplied
// in the :ref:`Cluster ` :ref:`transport_socket
// `.
- string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster_name = 1 [(validate.rules).string = {min_len: 1}];
// The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.
// Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster.
string authority = 2
[(validate.rules).string =
- {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
+ {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
}
// [#next-free-field: 9]
@@ -160,10 +160,10 @@ message GrpcService {
// The path of subject token, a security token that represents the
// identity of the party on behalf of whom the request is being made.
- string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}];
+ string subject_token_path = 6 [(validate.rules).string = {min_len: 1}];
// Type of the subject token.
- string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}];
+ string subject_token_type = 7 [(validate.rules).string = {min_len: 1}];
// The path of actor token, a security token that represents the identity
// of the acting party. The acting party is authorized to use the
@@ -236,7 +236,7 @@ message GrpcService {
// The target URI when using the `Google C++ gRPC client
// `_. SSL credentials will be supplied in
// :ref:`channel_credentials `.
- string target_uri = 1 [(validate.rules).string = {min_bytes: 1}];
+ string target_uri = 1 [(validate.rules).string = {min_len: 1}];
ChannelCredentials channel_credentials = 2;
@@ -253,7 +253,7 @@ message GrpcService {
//
// streams_total, Counter, Total number of streams opened
// streams_closed_, Counter, Total streams closed with
- string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 4 [(validate.rules).string = {min_len: 1}];
// The name of the Google gRPC credentials factory to use. This must have been registered with
// Envoy. If this is empty, a default credentials factory will be used that sets up channel
@@ -292,8 +292,10 @@ message GrpcService {
// request.
google.protobuf.Duration timeout = 3;
- // Additional metadata to include in streams initiated to the GrpcService.
- // This can be used for scenarios in which additional ad hoc authorization
- // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.
+ // Additional metadata to include in streams initiated to the GrpcService. This can be used for
+ // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to
+ // be injected. For more information, including details on header value syntax, see the
+ // documentation on :ref:`custom request headers
+ // `.
repeated HeaderValue initial_metadata = 5;
}
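For example, injecting an ad hoc authorization header into every stream to a gRPC service might look like this (the cluster name and header are hypothetical):

.. code-block:: yaml

    grpc_service:
      envoy_grpc:
        cluster_name: ext_authz
      initial_metadata:
      - key: x-foo-bar
        value: baz-key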
diff --git a/api/envoy/config/core/v4alpha/health_check.proto b/api/envoy/config/core/v4alpha/health_check.proto
index 39badc334b01..2761b856a3d7 100644
--- a/api/envoy/config/core/v4alpha/health_check.proto
+++ b/api/envoy/config/core/v4alpha/health_check.proto
@@ -54,7 +54,7 @@ enum HealthStatus {
DEGRADED = 5;
}
-// [#next-free-field: 24]
+// [#next-free-field: 25]
message HealthCheck {
option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck";
@@ -67,7 +67,7 @@ message HealthCheck {
option (validate.required) = true;
// Hex encoded payload. E.g., "000000FF".
- string text = 1 [(validate.rules).string = {min_bytes: 1}];
+ string text = 1 [(validate.rules).string = {min_len: 1}];
// [#not-implemented-hide:] Binary payload.
bytes binary = 2;
@@ -91,9 +91,8 @@ message HealthCheck {
// Specifies the HTTP path that will be requested during health checking. For example
// */healthcheck*.
- string path = 2 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}
- ];
+ string path = 2
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
// [#not-implemented-hide:] HTTP specific payload.
Payload send = 3;
@@ -187,7 +186,7 @@ message HealthCheck {
reserved "config";
// The registered name of the custom health checker.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// A custom health checker specific configuration which depends on the custom health checker
// being instantiated. See :api:`envoy/config/health_checker` for reference.
@@ -285,6 +284,21 @@ message HealthCheck {
// The default value for "no traffic interval" is 60 seconds.
google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}];
+ // The "no traffic healthy interval" is a special health check interval that
+ // is used for hosts that are currently passing active health checking
+ // (including new hosts) when the cluster has received no traffic.
+ //
+ // This is useful when we want to send frequent health checks with
+ // `no_traffic_interval` but then revert to the lower frequency `no_traffic_healthy_interval`
+ // once a host in the cluster is marked as healthy.
+ //
+ // Once a cluster has been used for traffic routing, Envoy will shift back to using the
+ // standard health check interval that is defined.
+ //
+ // If no_traffic_healthy_interval is not set, it will default to the
+ // no traffic interval, and that interval will be used regardless of health state.
+ google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}];
+
// The "unhealthy interval" is a health check interval that is used for hosts that are marked as
// unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the
// standard health check interval that is defined.
diff --git a/api/envoy/config/core/v4alpha/http_uri.proto b/api/envoy/config/core/v4alpha/http_uri.proto
index e88a9aa7d7df..ae1c0c9a3d4e 100644
--- a/api/envoy/config/core/v4alpha/http_uri.proto
+++ b/api/envoy/config/core/v4alpha/http_uri.proto
@@ -27,7 +27,7 @@ message HttpUri {
//
// uri: https://www.googleapis.com/oauth2/v1/certs
//
- string uri = 1 [(validate.rules).string = {min_bytes: 1}];
+ string uri = 1 [(validate.rules).string = {min_len: 1}];
// Specify how `uri` is to be fetched. Today, this requires an explicit
// cluster, but in the future we may support dynamic cluster creation or
@@ -45,7 +45,7 @@ message HttpUri {
//
// cluster: jwks_cluster
//
- string cluster = 2 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 2 [(validate.rules).string = {min_len: 1}];
}
// Sets the maximum duration in milliseconds that a response can take to arrive upon request.
diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto
index 19e5de6d8b1a..807488cef49d 100644
--- a/api/envoy/config/core/v4alpha/protocol.proto
+++ b/api/envoy/config/core/v4alpha/protocol.proto
@@ -2,6 +2,8 @@ syntax = "proto3";
package envoy.config.core.v4alpha;
+import "envoy/type/v3/percent.proto";
+
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
@@ -177,7 +179,30 @@ message Http1ProtocolOptions {
google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7;
}
-// [#next-free-field: 15]
+message KeepaliveSettings {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.core.v3.KeepaliveSettings";
+
+ // Send HTTP/2 PING frames at this interval to test that the connection is still alive.
+ google.protobuf.Duration interval = 1 [(validate.rules).duration = {
+ required: true
+ gte {nanos: 1000000}
+ }];
+
+ // How long to wait for a response to a keepalive PING. If a response is not received within this
+ // time period, the connection will be aborted.
+ google.protobuf.Duration timeout = 2 [(validate.rules).duration = {
+ required: true
+ gte {nanos: 1000000}
+ }];
+
+ // A random jitter amount, as a percentage of the interval, that will be added to each interval.
+ // A value of zero means there will be no jitter.
+ // The default value is 15%.
+ type.v3.Percent interval_jitter = 3;
+}
+
+// [#next-free-field: 16]
message Http2ProtocolOptions {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.core.v3.Http2ProtocolOptions";
@@ -335,6 +360,10 @@ message Http2ProtocolOptions {
// `_ for
// standardized identifiers.
repeated SettingsParameter custom_settings_parameters = 13;
+
+ // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
+ // does not respond within the configured timeout, the connection will be aborted.
+ KeepaliveSettings connection_keepalive = 15;
}
// [#not-implemented-hide:]
diff --git a/api/envoy/config/core/v4alpha/substitution_format_string.proto b/api/envoy/config/core/v4alpha/substitution_format_string.proto
index ffff2fe3e754..e996bcbc0cf6 100644
--- a/api/envoy/config/core/v4alpha/substitution_format_string.proto
+++ b/api/envoy/config/core/v4alpha/substitution_format_string.proto
@@ -27,17 +27,20 @@ message SubstitutionFormatString {
// Specify a format with command operators to form a text string.
// Its details are described in :ref:`format string`.
//
- // .. code-block::
+ // For example, setting ``text_format`` as follows,
//
- // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)%
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // The following plain text will be created:
+ // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
//
- // .. code-block::
+ // generates plain text similar to:
//
- // upstream connect error:204:path=/foo
+ // .. code-block:: text
//
- string text_format = 1 [(validate.rules).string = {min_bytes: 1}];
+ // upstream connect error:503:path=/foo
+ //
+ string text_format = 1 [(validate.rules).string = {min_len: 1}];
// Specify a format with command operators to form a JSON string.
// Its details are described in :ref:`format dictionary`.
@@ -45,11 +48,12 @@ message SubstitutionFormatString {
// Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).
// See the documentation for a specific command operator for details.
//
- // .. code-block::
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // json_format:
- // status: %RESPONSE_CODE%
- // message: %LOCAL_REPLY_BODY%
+ // json_format:
+ // status: "%RESPONSE_CODE%"
+ // message: "%LOCAL_REPLY_BODY%"
//
// The following JSON object would be created:
//
@@ -69,4 +73,15 @@ message SubstitutionFormatString {
// empty string, so that empty values are omitted entirely.
// * for ``json_format`` the keys with null values are omitted in the output structure.
bool omit_empty_values = 3;
+
+ // Specify a *content_type* field.
+ // If this field is not set then ``text/plain`` is used for *text_format* and
+ // ``application/json`` is used for *json_format*.
+ //
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // content_type: "text/html; charset=UTF-8"
+ //
+ string content_type = 4;
}
diff --git a/api/envoy/config/endpoint/v3/endpoint.proto b/api/envoy/config/endpoint/v3/endpoint.proto
index e58c327156cf..214ce6c20883 100644
--- a/api/envoy/config/endpoint/v3/endpoint.proto
+++ b/api/envoy/config/endpoint/v3/endpoint.proto
@@ -46,7 +46,7 @@ message ClusterLoadAssignment {
"envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload";
// Identifier for the policy specifying the drop.
- string category = 1 [(validate.rules).string = {min_bytes: 1}];
+ string category = 1 [(validate.rules).string = {min_len: 1}];
// Percentage of traffic that should be dropped for the category.
type.v3.FractionalPercent drop_percentage = 2;
@@ -105,7 +105,7 @@ message ClusterLoadAssignment {
// ` value if specified
// in the cluster :ref:`EdsClusterConfig
// `.
- string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster_name = 1 [(validate.rules).string = {min_len: 1}];
// List of endpoints to load balance to.
repeated LocalityLbEndpoints endpoints = 2;
diff --git a/api/envoy/config/endpoint/v3/load_report.proto b/api/envoy/config/endpoint/v3/load_report.proto
index 3f067737ec25..7140ca05afc7 100644
--- a/api/envoy/config/endpoint/v3/load_report.proto
+++ b/api/envoy/config/endpoint/v3/load_report.proto
@@ -129,14 +129,14 @@ message ClusterStats {
"envoy.api.v2.endpoint.ClusterStats.DroppedRequests";
// Identifier for the policy specifying the drop.
- string category = 1 [(validate.rules).string = {min_bytes: 1}];
+ string category = 1 [(validate.rules).string = {min_len: 1}];
// Total number of deliberately dropped requests for the category.
uint64 dropped_count = 2;
}
// The name of the cluster.
- string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster_name = 1 [(validate.rules).string = {min_len: 1}];
// The eds_cluster_config service_name of the cluster.
// It's possible that two clusters send the same service_name to EDS,
diff --git a/api/envoy/config/filter/http/cache/v2alpha/cache.proto b/api/envoy/config/filter/http/cache/v2alpha/cache.proto
index d08b5462fd88..98035c05d45a 100644
--- a/api/envoy/config/filter/http/cache/v2alpha/cache.proto
+++ b/api/envoy/config/filter/http/cache/v2alpha/cache.proto
@@ -48,17 +48,14 @@ message CacheConfig {
// Config specific to the cache storage implementation.
google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];
- // [#not-implemented-hide:]
- //
- //
- // List of allowed *Vary* headers.
+ // List of matching rules that define allowed *Vary* headers.
//
// The *vary* response header holds a list of header names that affect the
// contents of a response, as described by
// https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.
//
// During insertion, *allowed_vary_headers* acts as an allowlist: if a
- // response's *vary* header mentions any header names that aren't in
+ // response's *vary* header mentions any header names that aren't matched by any rules in
// *allowed_vary_headers*, that response will not be cached.
//
// During lookup, *allowed_vary_headers* controls what request headers will be
diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto
index 29aa8380191b..436bb6bf4616 100644
--- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto
+++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto
@@ -51,6 +51,10 @@ message PerRouteConfig {
// :ref:`HCM host rewrite header `
// given that the value set here would be used for DNS lookups whereas the value set in the HCM
// would be used for host header forwarding which is not the desired outcome.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times, only the first value is used.
string auto_host_rewrite_header = 2
[(udpa.annotations.field_migrate).rename = "host_rewrite_header"];
}
diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
index 06b13acb2f63..c05032df21a4 100644
--- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
+++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
@@ -586,6 +586,10 @@ message ScopedRoutes {
}
// The name of the header field to extract the value from.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times, only the first value is used.
string name = 1 [(validate.rules).string = {min_bytes: 1}];
// The element separator (e.g., ';' separates 'a;b;c;d').
diff --git a/api/envoy/config/grpc_credential/v3/aws_iam.proto b/api/envoy/config/grpc_credential/v3/aws_iam.proto
index eeb5d93ec689..e2e9c7da4833 100644
--- a/api/envoy/config/grpc_credential/v3/aws_iam.proto
+++ b/api/envoy/config/grpc_credential/v3/aws_iam.proto
@@ -24,7 +24,7 @@ message AwsIamConfig {
// of the Grpc endpoint.
//
// Example: appmesh
- string service_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string service_name = 1 [(validate.rules).string = {min_len: 1}];
// The `region `_ hosting the Grpc
// endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment
diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto
index 88e8ae4ad5b1..aac1166f49fd 100644
--- a/api/envoy/config/listener/v3/listener.proto
+++ b/api/envoy/config/listener/v3/listener.proto
@@ -33,10 +33,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// Listener list collections. Entries are *Listener* resources or references.
// [#not-implemented-hide:]
message ListenerCollection {
- udpa.core.v1.CollectionEntry entries = 1;
+ repeated udpa.core.v1.CollectionEntry entries = 1;
}
-// [#next-free-field: 25]
+// [#next-free-field: 26]
message Listener {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener";
@@ -116,6 +116,10 @@ message Listener {
// :ref:`FAQ entry `.
repeated FilterChain filter_chains = 3;
+ // The default filter chain to use if none of the filter chains matches. If no default
+ // filter chain is supplied, the connection will be closed. Any filter chain match
+ // criteria on this filter chain are ignored.
+ FilterChain default_filter_chain = 25;
+
// Soft limit on size of the listener’s new connection read and write buffers.
// If unspecified, an implementation defined default is applied (1MiB).
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5
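A rough sketch of the new fallback, with tcp_proxy standing in for real filters and all names hypothetical:

.. code-block:: yaml

    filter_chains:
    - filter_chain_match:
        server_names: ["www.example.com"]
      filters:
      - name: envoy.filters.network.tcp_proxy
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
          stat_prefix: matched
          cluster: example_cluster
    # Connections that match no filter chain land here instead of being closed.
    default_filter_chain:
      filters:
      - name: envoy.filters.network.tcp_proxy
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
          stat_prefix: fallback
          cluster: fallback_cluster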
diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto
index 8a22fbc97f5f..3ecfc7932b56 100644
--- a/api/envoy/config/listener/v3/listener_components.proto
+++ b/api/envoy/config/listener/v3/listener_components.proto
@@ -32,7 +32,7 @@ message Filter {
// The name of the filter to instantiate. The name must match a
// :ref:`supported filter `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being
// instantiated. See the supported filters for further documentation.
@@ -65,6 +65,18 @@ message Filter {
// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter
// chain without ``server_names`` requirements).
//
+// A different way to reason about filter chain matching:
+// Suppose there are N filter chains. Prune the filter chain set using the above 8 steps.
+// In each step, the filter chains which most specifically match the attributes continue to the next step.
+// The listener guarantees that at most one filter chain is left after all of the steps.
+//
+// Example:
+//
+// For destination port, filter chains specifying the destination port of incoming traffic are the
+// most specific match. If none of the filter chains specifies the exact destination port, the filter
+// chains which do not specify ports are the most specific match. Filter chains specifying the
+// wrong port can never be the most specific match.
+//
// [#comment: Implemented rules are kept in the preference order, with deprecated fields
// listed at the end, because that's how we want to list them in the docs.
//
@@ -303,7 +315,7 @@ message ListenerFilter {
// The name of the filter to instantiate. The name must match a
// :ref:`supported filter `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being instantiated.
// See the supported filters for further documentation.
diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto
index 753f6d733cc0..fbc65d0880f3 100644
--- a/api/envoy/config/listener/v4alpha/listener.proto
+++ b/api/envoy/config/listener/v4alpha/listener.proto
@@ -36,10 +36,10 @@ message ListenerCollection {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.listener.v3.ListenerCollection";
- udpa.core.v1.CollectionEntry entries = 1;
+ repeated udpa.core.v1.CollectionEntry entries = 1;
}
-// [#next-free-field: 25]
+// [#next-free-field: 26]
message Listener {
option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener";
@@ -119,6 +119,10 @@ message Listener {
// :ref:`FAQ entry `.
repeated FilterChain filter_chains = 3;
+ // The default filter chain to use if none of the filter chains matches. If no default
+ // filter chain is supplied, the connection will be closed. Any filter chain match
+ // criteria on this filter chain are ignored.
+ FilterChain default_filter_chain = 25;
+
// Soft limit on size of the listener’s new connection read and write buffers.
// If unspecified, an implementation defined default is applied (1MiB).
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5
diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto
index 61babe8e622f..0c75f92b4027 100644
--- a/api/envoy/config/listener/v4alpha/listener_components.proto
+++ b/api/envoy/config/listener/v4alpha/listener_components.proto
@@ -32,7 +32,7 @@ message Filter {
// The name of the filter to instantiate. The name must match a
// :ref:`supported filter `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being
// instantiated. See the supported filters for further documentation.
@@ -65,6 +65,18 @@ message Filter {
// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter
// chain without ``server_names`` requirements).
//
+// A different way to reason about filter chain matching:
+// Suppose there are N filter chains. Prune the filter chain set using the above 8 steps.
+// In each step, the filter chains which most specifically match the attributes continue to the next step.
+// The listener guarantees that at most one filter chain is left after all of the steps.
+//
+// Example:
+//
+// For destination port, filter chains specifying the destination port of incoming traffic are the
+// most specific match. If none of the filter chains specifies the exact destination port, the filter
+// chains which do not specify ports are the most specific match. Filter chains specifying the
+// wrong port can never be the most specific match.
+//
// [#comment: Implemented rules are kept in the preference order, with deprecated fields
// listed at the end, because that's how we want to list them in the docs.
//
@@ -307,7 +319,7 @@ message ListenerFilter {
// The name of the filter to instantiate. The name must match a
// :ref:`supported filter `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being instantiated.
// See the supported filters for further documentation.
diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto
index c6113bf5a5d3..62afcf56e4e7 100644
--- a/api/envoy/config/metrics/v2/stats.proto
+++ b/api/envoy/config/metrics/v2/stats.proto
@@ -201,7 +201,7 @@ message TagSpecifier {
//
// {
// "tag_name": "envoy.cluster_name",
- // "regex": "^cluster\.((.+?)\.)"
+ // "regex": "^cluster\\.((.+?)\\.)"
// }
//
// Note that the regex will remove ``foo_cluster.`` making the tag extracted
@@ -218,11 +218,11 @@ message TagSpecifier {
// [
// {
// "tag_name": "envoy.http_user_agent",
- // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$"
+ // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$"
// },
// {
// "tag_name": "envoy.http_conn_manager_prefix",
- // "regex": "^http\.((.*?)\.)"
+ // "regex": "^http\\.((.*?)\\.)"
// }
// ]
//
diff --git a/api/envoy/config/metrics/v3/stats.proto b/api/envoy/config/metrics/v3/stats.proto
index 275db1f6457a..be0cbb9dab92 100644
--- a/api/envoy/config/metrics/v3/stats.proto
+++ b/api/envoy/config/metrics/v3/stats.proto
@@ -244,7 +244,7 @@ message TagSpecifier {
//
// {
// "tag_name": "envoy.cluster_name",
- // "regex": "^cluster\.((.+?)\.)"
+ // "regex": "^cluster\\.((.+?)\\.)"
// }
//
// Note that the regex will remove ``foo_cluster.`` making the tag extracted
@@ -261,11 +261,11 @@ message TagSpecifier {
// [
// {
// "tag_name": "envoy.http_user_agent",
- // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$"
+ // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$"
// },
// {
// "tag_name": "envoy.http_conn_manager_prefix",
- // "regex": "^http\.((.*?)\.)"
+ // "regex": "^http\\.((.*?)\\.)"
// }
// ]
//
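The doubled backslashes above are JSON string escaping: the regex itself is ``^cluster\.((.+?)\.)``, and each ``\`` must be written as ``\\`` inside a JSON string. In YAML the same tag extractor can use a single-quoted scalar, which does not process backslash escapes:

.. code-block:: yaml

    stats_config:
      stats_tags:
      - tag_name: envoy.cluster_name
        regex: '^cluster\.((.+?)\.)'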
diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto
index 6265118cf9b8..bd37875c0bf9 100644
--- a/api/envoy/config/metrics/v4alpha/stats.proto
+++ b/api/envoy/config/metrics/v4alpha/stats.proto
@@ -244,7 +244,7 @@ message TagSpecifier {
//
// {
// "tag_name": "envoy.cluster_name",
- // "regex": "^cluster\.((.+?)\.)"
+ // "regex": "^cluster\\.((.+?)\\.)"
// }
//
// Note that the regex will remove ``foo_cluster.`` making the tag extracted
@@ -261,11 +261,11 @@ message TagSpecifier {
// [
// {
// "tag_name": "envoy.http_user_agent",
- // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$"
+ // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$"
// },
// {
// "tag_name": "envoy.http_conn_manager_prefix",
- // "regex": "^http\.((.*?)\.)"
+ // "regex": "^http\\.((.*?)\\.)"
// }
// ]
//
diff --git a/api/envoy/config/overload/v3/overload.proto b/api/envoy/config/overload/v3/overload.proto
index 061783a04b77..ac1d444b629a 100644
--- a/api/envoy/config/overload/v3/overload.proto
+++ b/api/envoy/config/overload/v3/overload.proto
@@ -37,7 +37,7 @@ message ResourceMonitor {
// `
// * :ref:`envoy.resource_monitors.injected_resource
// `
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Configuration for the resource monitor being instantiated.
oneof config_type {
@@ -69,7 +69,7 @@ message Trigger {
"envoy.config.overload.v2alpha.Trigger";
// The name of the resource this is a trigger for.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
oneof trigger_oneof {
option (validate.required) = true;
@@ -87,7 +87,7 @@ message OverloadAction {
// The name of the overload action. This is just a well-known string that listeners can
// use for registering callbacks. Custom overload actions should be named using reverse
// DNS to ensure uniqueness.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// A set of triggers for this action. The state of the action is the maximum
// state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners
diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto
index 93fb6b05911d..595fde141e6c 100644
--- a/api/envoy/config/route/v3/route_components.proto
+++ b/api/envoy/config/route/v3/route_components.proto
@@ -60,7 +60,7 @@ message VirtualHost {
// The logical name of the virtual host. This is used when emitting certain
// statistics but is not relevant for routing.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// A list of domains (host/authority header) that will be matched to this
// virtual host. Wildcard hosts are supported in the suffix or prefix form.
@@ -113,7 +113,7 @@ message VirtualHost {
// Specifies a list of HTTP headers that should be removed from each request
// handled by this virtual host.
repeated string request_headers_to_remove = 13 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Specifies a list of HTTP headers that should be added to each response
@@ -128,7 +128,7 @@ message VirtualHost {
// Specifies a list of HTTP headers that should be removed from each response
// handled by this virtual host.
repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Indicates that the virtual host has a CORS policy.
@@ -263,7 +263,7 @@ message Route {
// Specifies a list of HTTP headers that should be removed from each request
// matching this route.
repeated string request_headers_to_remove = 12 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Specifies a set of headers that will be added to responses to requests
@@ -278,7 +278,7 @@ message Route {
// Specifies a list of HTTP headers that should be removed from each response
// to requests matching this route.
repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Presence of the object defines whether the connection manager's tracing configuration
@@ -311,7 +311,7 @@ message WeightedCluster {
// Name of the upstream cluster. The cluster must exist in the
// :ref:`cluster manager configuration `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// An integer between 0 and :ref:`total_weight
// `. When a request matches the route,
@@ -338,7 +338,9 @@ message WeightedCluster {
// Specifies a list of HTTP headers that should be removed from each request when
// this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.
- repeated string request_headers_to_remove = 9;
+ repeated string request_headers_to_remove = 9 [(validate.rules).repeated = {
+ items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
+ }];
// Specifies a list of headers to be added to responses when this cluster is selected
// through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.
@@ -352,7 +354,9 @@ message WeightedCluster {
// Specifies a list of headers to be removed from responses when this cluster is selected
// through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.
- repeated string response_headers_to_remove = 6;
+ repeated string response_headers_to_remove = 6 [(validate.rules).repeated = {
+ items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
+ }];
// The per_filter_config field can be used to provide weighted cluster-specific
// configurations for filters. The key should match the filter name, such as
@@ -440,7 +444,7 @@ message RouteMatch {
// (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style
// upgrades.
// This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,
- // where CONNECT requests may have a path, the path matchers will work if
+ // where Extended CONNECT requests may have a path, the path matchers will work if
// there is a path present.
// Note that CONNECT support is currently considered alpha in Envoy.
// [#comment:TODO(htuch): Replace the above comment with an alpha tag.
@@ -545,7 +549,7 @@ message CorsPolicy {
core.v3.RuntimeFractionalPercent shadow_enabled = 10;
}
-// [#next-free-field: 36]
+// [#next-free-field: 37]
message RouteAction {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction";
@@ -587,7 +591,7 @@ message RouteAction {
// Specifies the cluster that requests will be mirrored to. The cluster must
// exist in the cluster manager configuration.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// If not specified, all requests to the target cluster will be mirrored.
//
@@ -616,9 +620,8 @@ message RouteAction {
// The name of the request header that will be used to obtain the hash
// key. If the request header is not present, no hash will be produced.
- string header_name = 1 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}
- ];
+ string header_name = 1
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// If specified, the request header value will be rewritten and used
// to produce the hash key.
@@ -646,7 +649,7 @@ message RouteAction {
// The name of the cookie that will be used to obtain the hash key. If the
// cookie is not present and ttl below is not set, no hash will be
// produced.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// If specified, a cookie with the TTL will be generated if the cookie is
// not present. If the TTL is present and zero, the generated cookie will
@@ -673,7 +676,7 @@ message RouteAction {
// The name of the URL query parameter that will be used to obtain the hash
// key. If the parameter is not present, no hash will be produced. Query
// parameter names are case-sensitive.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
}
message FilterState {
@@ -683,7 +686,7 @@ message RouteAction {
// The name of the Object in the per-request filterState, which is an
// Envoy::Http::Hashable object. If there is no data associated with the key,
// or the stored object is not Envoy::Http::Hashable, no hash will be produced.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
oneof policy_specifier {
@@ -760,6 +763,32 @@ message RouteAction {
ConnectConfig connect_config = 3;
}
+ message MaxStreamDuration {
+ // Specifies the maximum duration allowed for streams on the route. If not specified, the value
+ // from the :ref:`max_stream_duration
+ // ` field in
+ // :ref:`HttpConnectionManager.common_http_protocol_options
+ // `
+ // is used. If this field is set explicitly to zero, any
+ // HttpConnectionManager max_stream_duration timeout will be disabled for
+ // this route.
+ google.protobuf.Duration max_stream_duration = 1;
+
+ // If present, and the request contains a `grpc-timeout header
+ // `_, use that value as the
+ // *max_stream_duration*, but limit the applied timeout to the maximum value specified here.
+ // If set to 0, the `grpc-timeout` header is used without modification.
+ google.protobuf.Duration grpc_timeout_header_max = 2;
+
+ // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by
+ // subtracting the provided duration from the header. This is useful for allowing Envoy to set
+ // its global timeout to be less than the deadline imposed by the calling client, which
+ // makes it more likely that Envoy will handle the timeout instead of having the call canceled
+ // by the client. If, after applying the offset, the resulting timeout is zero or negative,
+ // the stream will timeout immediately.
+ google.protobuf.Duration grpc_timeout_header_offset = 3;
+ }
+
reserved 12, 18, 19, 16, 22, 21, 10;
reserved "request_mirror_policy";
@@ -769,7 +798,7 @@ message RouteAction {
// Indicates the upstream cluster to which the request should be routed
// to.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// Envoy will determine the cluster to route to by reading the value of the
// HTTP header named by cluster_header from the request headers. If the
@@ -780,8 +809,12 @@ message RouteAction {
//
// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1
// *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times, only the first value is used.
string cluster_header = 2
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// Multiple upstream clusters can be specified for a given route. The
// request is routed to one of the upstream clusters based on weights
@@ -888,6 +921,10 @@ message RouteAction {
//
// Pay attention to the potential security implications of using this option. Provided header
// must come from trusted source.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times, only the first value is used.
string host_rewrite_header = 29
[(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];
@@ -968,7 +1005,9 @@ message RouteAction {
// limits. By default, if the route configured rate limits, the virtual host
// :ref:`rate_limits ` are not applied to the
// request.
- google.protobuf.BoolValue include_vh_rate_limits = 14;
+ //
+ // This field is deprecated. Please use :ref:`vh_rate_limits `.
+ google.protobuf.BoolValue include_vh_rate_limits = 14 [deprecated = true];
// Specifies a list of hash policies to use for ring hash load balancing. Each
// hash policy is evaluated individually and the combined result is used to
@@ -987,6 +1026,7 @@ message RouteAction {
// Indicates that the route has a CORS policy.
CorsPolicy cors = 17;
+ // Deprecated by :ref:`grpc_timeout_header_max `.
// If present, and the request is a gRPC request, use the
// `grpc-timeout header `_,
// or its default value (infinity) instead of
@@ -1006,8 +1046,9 @@ message RouteAction {
// :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,
// :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the
// :ref:`retry overview `.
- google.protobuf.Duration max_grpc_timeout = 23;
+ google.protobuf.Duration max_grpc_timeout = 23 [deprecated = true];
+ // Deprecated by :ref:`grpc_timeout_header_offset `.
// If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting
// the provided duration from the header. This is useful in allowing Envoy to set its global
// timeout to be less than that of the deadline imposed by the calling client, which makes it more
@@ -1015,7 +1056,7 @@ message RouteAction {
// The offset will only be applied if the provided grpc_timeout is greater than the offset. This
// ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning
// infinity).
- google.protobuf.Duration grpc_timeout_offset = 28;
+ google.protobuf.Duration grpc_timeout_offset = 28 [deprecated = true];
repeated UpgradeConfig upgrade_configs = 25;
@@ -1047,6 +1088,9 @@ message RouteAction {
// it'll take precedence over the virtual host level hedge policy entirely
// (e.g.: policies are not merged, most internal one becomes the enforced policy).
HedgePolicy hedge_policy = 27;
+
+ // Specifies the maximum stream duration for this route.
+ MaxStreamDuration max_stream_duration = 36;
}
// HTTP retry :ref:`architecture overview `.
@@ -1067,7 +1111,7 @@ message RetryPolicy {
reserved "config";
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
oneof config_type {
google.protobuf.Any typed_config = 3;
@@ -1082,7 +1126,7 @@ message RetryPolicy {
reserved "config";
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
oneof config_type {
google.protobuf.Any typed_config = 3;
@@ -1110,9 +1154,15 @@ message RetryPolicy {
}
message ResetHeader {
+ // The name of the reset header.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times, only the first value is used.
string name = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ // The format of the reset header.
ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];
}
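For example, a retry policy that derives its back-off from a rate limit response header might look like the sketch below; it assumes the surrounding *rate_limited_retry_back_off* message, which is not shown in this hunk:

.. code-block:: yaml

    retry_policy:
      retry_on: retriable-status-codes
      retriable_status_codes: [429]
      rate_limited_retry_back_off:
        reset_headers:
        - name: retry-after
          format: SECONDS
        max_interval: 300s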
@@ -1378,7 +1428,7 @@ message Decorator {
// For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden
// by the :ref:`x-envoy-decorator-operation
// ` header.
- string operation = 1 [(validate.rules).string = {min_bytes: 1}];
+ string operation = 1 [(validate.rules).string = {min_len: 1}];
// Whether the decorated details should be propagated to the other party. The default is true.
google.protobuf.BoolValue propagate = 2;
@@ -1453,14 +1503,14 @@ message VirtualCluster {
// Specifies the name of the virtual cluster. The virtual cluster name as well
// as the virtual host name are used when emitting statistics. The statistics are emitted by the
// router filter and are documented :ref:`here `.
- string name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string name = 2 [(validate.rules).string = {min_len: 1}];
}
// Global rate limiting :ref:`architecture overview `.
message RateLimit {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit";
- // [#next-free-field: 8]
+ // [#next-free-field: 9]
message Action {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.route.RateLimit.Action";
@@ -1511,12 +1561,11 @@ message RateLimit {
// The header name to be queried from the request headers. The header’s
// value is used to populate the value of the descriptor entry for the
// descriptor_key.
- string header_name = 1 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}
- ];
+ string header_name = 1
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// The key to use in the descriptor entry.
- string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];
// If set to true, Envoy skips the descriptor while calling rate limiting service
// when header is not present in the request. By default it skips calling the
@@ -1545,7 +1594,7 @@ message RateLimit {
"envoy.api.v2.route.RateLimit.Action.GenericKey";
// The value to use in the descriptor entry.
- string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];
// An optional key to use in the descriptor entry. If not set it defaults
// to 'generic_key' as the descriptor key.
@@ -1562,7 +1611,7 @@ message RateLimit {
"envoy.api.v2.route.RateLimit.Action.HeaderValueMatch";
// The value to use in the descriptor entry.
- string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];
// If set to true, the action will append a descriptor entry when the
// request matches the headers. If set to false, the action will append a
@@ -1578,14 +1627,18 @@ message RateLimit {
repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];
}
- // The following descriptor entry is appended when the dynamic metadata contains a key value:
+ // The following descriptor entry is appended when the
+ // :ref:`dynamic metadata ` contains a key value:
//
// .. code-block:: cpp
//
- // ("", "")
+ // ("", "")
+ //
+ // .. attention::
+ // This action has been deprecated in favor of the :ref:`metadata ` action.
message DynamicMetaData {
// The key to use in the descriptor entry.
- string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];
// Metadata struct that defines the key and path to retrieve the string value. A match will
// only happen if the value in the dynamic metadata is of type string.
@@ -1596,6 +1649,35 @@ message RateLimit {
string default_value = 3;
}
+ // The following descriptor entry is appended when the metadata contains a key value:
+ //
+ // .. code-block:: cpp
+ //
+ // ("", "")
+ message MetaData {
+ enum Source {
+ // Query :ref:`dynamic metadata `
+ DYNAMIC = 0;
+
+ // Query :ref:`route entry metadata `
+ ROUTE_ENTRY = 1;
+ }
+
+ // The key to use in the descriptor entry.
+ string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];
+
+ // Metadata struct that defines the key and path to retrieve the string value. A match will
+ // only happen if the value in the metadata is of type string.
+ type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];
+
+ // An optional value to use if *metadata_key* is empty. If not set and
+ // no value is present under the metadata_key then no descriptor is generated.
+ string default_value = 3;
+
+ // Source of metadata.
+ Source source = 4 [(validate.rules).enum = {defined_only: true}];
+ }
+
oneof action_specifier {
option (validate.required) = true;
@@ -1618,7 +1700,14 @@ message RateLimit {
HeaderValueMatch header_value_match = 6;
// Rate limit on dynamic metadata.
- DynamicMetaData dynamic_metadata = 7;
+ //
+ // .. attention::
+ // This field has been deprecated in favor of the :ref:`metadata ` field.
+ DynamicMetaData dynamic_metadata = 7
+ [deprecated = true, (envoy.annotations.disallowed_by_default) = true];
+
+ // Rate limit on metadata.
+ MetaData metadata = 8;
}
}
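A sketch of the replacement action, assuming the *MetaData* message above (the descriptor key and metadata path are hypothetical):

.. code-block:: yaml

    rate_limits:
    - actions:
      - metadata:
          descriptor_key: tenant
          metadata_key:
            key: envoy.lb
            path:
            - key: tenant_id
          source: DYNAMIC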
@@ -1701,7 +1790,7 @@ message HeaderMatcher {
// Specifies the name of the header in the request.
string name = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// Specifies how the header match will be performed to route the request.
oneof header_match_specifier {
@@ -1736,7 +1825,7 @@ message HeaderMatcher {
// Examples:
//
// * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.
- string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}];
+ string prefix_match = 9 [(validate.rules).string = {min_len: 1}];
// If specified, header match will be performed based on the suffix of the header value.
// Note: empty suffix is not allowed, please use present_match instead.
@@ -1744,7 +1833,7 @@ message HeaderMatcher {
// Examples:
//
// * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.
- string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}];
+ string suffix_match = 10 [(validate.rules).string = {min_len: 1}];
// If specified, header match will be performed based on whether the header value contains
// the given value or not.
@@ -1753,7 +1842,7 @@ message HeaderMatcher {
// Examples:
//
// * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.
- string contains_match = 12 [(validate.rules).string = {min_bytes: 1}];
+ string contains_match = 12 [(validate.rules).string = {min_len: 1}];
}
// If specified, the match result will be inverted before checking. Defaults to false.
@@ -1778,7 +1867,7 @@ message QueryParameterMatcher {
// Specifies the name of a key that must be present in the requested
// *path*'s query string.
- string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}];
+ string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];
oneof query_parameter_match_specifier {
// Specifies whether a query parameter value should match against a string.
diff --git a/api/envoy/config/route/v3/scoped_route.proto b/api/envoy/config/route/v3/scoped_route.proto
index d6611b0b1d06..b7e3aa66e07f 100644
--- a/api/envoy/config/route/v3/scoped_route.proto
+++ b/api/envoy/config/route/v3/scoped_route.proto
@@ -108,12 +108,12 @@ message ScopedRouteConfiguration {
bool on_demand = 4;
// The name assigned to the routing scope.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an
// RDS server to fetch the :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` associated
// with this scope.
- string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}];
// The key to match against.
Key key = 3 [(validate.rules).message = {required: true}];
diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto
index 1c510a4be725..0bf0b493e956 100644
--- a/api/envoy/config/route/v4alpha/route_components.proto
+++ b/api/envoy/config/route/v4alpha/route_components.proto
@@ -59,7 +59,7 @@ message VirtualHost {
// The logical name of the virtual host. This is used when emitting certain
// statistics but is not relevant for routing.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// A list of domains (host/authority header) that will be matched to this
// virtual host. Wildcard hosts are supported in the suffix or prefix form.
@@ -112,7 +112,7 @@ message VirtualHost {
// Specifies a list of HTTP headers that should be removed from each request
// handled by this virtual host.
repeated string request_headers_to_remove = 13 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Specifies a list of HTTP headers that should be added to each response
@@ -127,7 +127,7 @@ message VirtualHost {
// Specifies a list of HTTP headers that should be removed from each response
// handled by this virtual host.
repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Indicates that the virtual host has a CORS policy.
@@ -262,7 +262,7 @@ message Route {
// Specifies a list of HTTP headers that should be removed from each request
// matching this route.
repeated string request_headers_to_remove = 12 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Specifies a set of headers that will be added to responses to requests
@@ -277,7 +277,7 @@ message Route {
// Specifies a list of HTTP headers that should be removed from each response
// to requests matching this route.
repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {
- items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
+ items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
}];
// Presence of the object defines whether the connection manager's tracing configuration
@@ -311,7 +311,7 @@ message WeightedCluster {
// Name of the upstream cluster. The cluster must exist in the
// :ref:`cluster manager configuration `.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// An integer between 0 and :ref:`total_weight
// `. When a request matches the route,
@@ -338,7 +338,9 @@ message WeightedCluster {
// Specifies a list of HTTP headers that should be removed from each request when
// this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.
- repeated string request_headers_to_remove = 9;
+ repeated string request_headers_to_remove = 9 [(validate.rules).repeated = {
+ items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
+ }];
// Specifies a list of headers to be added to responses when this cluster is selected
// through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.
@@ -352,7 +354,9 @@ message WeightedCluster {
// Specifies a list of headers to be removed from responses when this cluster is selected
// through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`.
- repeated string response_headers_to_remove = 6;
+ repeated string response_headers_to_remove = 6 [(validate.rules).repeated = {
+ items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
+ }];
// The per_filter_config field can be used to provide weighted cluster-specific
// configurations for filters. The key should match the filter name, such as
@@ -442,7 +446,7 @@ message RouteMatch {
// (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style
// upgrades.
// This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,
- // where CONNECT requests may have a path, the path matchers will work if
+ // where Extended CONNECT requests may have a path, the path matchers will work if
// there is a path present.
// Note that CONNECT support is currently considered alpha in Envoy.
// [#comment:TODO(htuch): Replace the above comment with an alpha tag.
@@ -547,7 +551,7 @@ message CorsPolicy {
core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10;
}
-// [#next-free-field: 36]
+// [#next-free-field: 37]
message RouteAction {
option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction";
@@ -580,7 +584,7 @@ message RouteAction {
// Specifies the cluster that requests will be mirrored to. The cluster must
// exist in the cluster manager configuration.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// If not specified, all requests to the target cluster will be mirrored.
//
@@ -609,9 +613,8 @@ message RouteAction {
// The name of the request header that will be used to obtain the hash
// key. If the request header is not present, no hash will be produced.
- string header_name = 1 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}
- ];
+ string header_name = 1
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// If specified, the request header value will be rewritten and used
// to produce the hash key.
@@ -639,7 +642,7 @@ message RouteAction {
// The name of the cookie that will be used to obtain the hash key. If the
// cookie is not present and ttl below is not set, no hash will be
// produced.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// If specified, a cookie with the TTL will be generated if the cookie is
// not present. If the TTL is present and zero, the generated cookie will
@@ -666,7 +669,7 @@ message RouteAction {
// The name of the URL query parameter that will be used to obtain the hash
// key. If the parameter is not present, no hash will be produced. Query
// parameter names are case-sensitive.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
}
message FilterState {
@@ -676,7 +679,7 @@ message RouteAction {
// The name of the Object in the per-request filterState, which is an
// Envoy::Http::Hashable object. If there is no data associated with the key,
// or the stored object is not Envoy::Http::Hashable, no hash will be produced.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
oneof policy_specifier {
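For illustration, a hash policy combining a header source and a cookie source; the names and TTL are assumed values:

.. code-block:: yaml

  route:
    cluster: backend
    hash_policy:
    - header:
        header_name: x-user-id
    - cookie:
        name: session-id
        ttl: 1800s

Each policy is evaluated in order and the results are combined deterministically, so requests carrying the same header and cookie values hash to the same upstream host.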
@@ -756,16 +759,46 @@ message RouteAction {
ConnectConfig connect_config = 3;
}
- reserved 12, 18, 19, 16, 22, 21, 10, 26, 31;
+ message MaxStreamDuration {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.route.v3.RouteAction.MaxStreamDuration";
+
+ // Specifies the maximum duration allowed for streams on the route. If not specified, the value
+ // from the :ref:`max_stream_duration
+ // ` field in
+ // :ref:`HttpConnectionManager.common_http_protocol_options
+ // `
+ // is used. If this field is set explicitly to zero, any
+ // HttpConnectionManager max_stream_duration timeout will be disabled for
+ // this route.
+ google.protobuf.Duration max_stream_duration = 1;
+
+ // If present, and the request contains a `grpc-timeout header
+ // <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, use that value as the
+ // *max_stream_duration*, but limit the applied timeout to the maximum value specified here.
+ // If set to 0, the `grpc-timeout` header is used without modification.
+ google.protobuf.Duration grpc_timeout_header_max = 2;
+
+ // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by
+ // subtracting the provided duration from the header. This is useful for allowing Envoy to set
+ // its global timeout to be less than the deadline imposed by the calling client, which
+ // makes it more likely that Envoy will handle the timeout instead of having the call canceled
+ // by the client. If, after applying the offset, the resulting timeout is zero or negative,
+ // the stream will time out immediately.
+ google.protobuf.Duration grpc_timeout_header_offset = 3;
+ }
+
+ reserved 12, 18, 19, 16, 22, 21, 10, 14, 23, 28, 26, 31;
- reserved "request_mirror_policy", "internal_redirect_action", "max_internal_redirects";
+ reserved "request_mirror_policy", "include_vh_rate_limits", "max_grpc_timeout",
+ "grpc_timeout_offset", "internal_redirect_action", "max_internal_redirects";
oneof cluster_specifier {
option (validate.required) = true;
// Indicates the upstream cluster to which the request should be routed
// to.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// Envoy will determine the cluster to route to by reading the value of the
// HTTP header named by cluster_header from the request headers. If the
@@ -776,8 +809,12 @@ message RouteAction {
//
// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1
// *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
string cluster_header = 2
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// Multiple upstream clusters can be specified for a given route. The
// request is routed to one of the upstream clusters based on weights
@@ -884,6 +921,10 @@ message RouteAction {
//
// Pay attention to the potential security implications of using this option. Provided header
// must come from trusted source.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
string host_rewrite_header = 29
[(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];
@@ -960,12 +1001,6 @@ message RouteAction {
// route.
repeated RateLimit rate_limits = 13;
- // Specifies if the rate limit filter should include the virtual host rate
- // limits. By default, if the route configured rate limits, the virtual host
- // :ref:`rate_limits ` are not applied to the
- // request.
- google.protobuf.BoolValue include_vh_rate_limits = 14;
-
// Specifies a list of hash policies to use for ring hash load balancing. Each
// hash policy is evaluated individually and the combined result is used to
// route the request. The method of combination is deterministic such that
@@ -983,36 +1018,6 @@ message RouteAction {
// Indicates that the route has a CORS policy.
CorsPolicy cors = 17;
- // If present, and the request is a gRPC request, use the
- // `grpc-timeout header `_,
- // or its default value (infinity) instead of
- // :ref:`timeout `, but limit the applied timeout
- // to the maximum value specified here. If configured as 0, the maximum allowed timeout for
- // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used
- // and gRPC requests time out like any other requests using
- // :ref:`timeout ` or its default.
- // This can be used to prevent unexpected upstream request timeouts due to potentially long
- // time gaps between gRPC request and response in gRPC streaming mode.
- //
- // .. note::
- //
- // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes
- // precedence over `grpc-timeout header `_, when
- // both are present. See also
- // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,
- // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the
- // :ref:`retry overview `.
- google.protobuf.Duration max_grpc_timeout = 23;
-
- // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting
- // the provided duration from the header. This is useful in allowing Envoy to set its global
- // timeout to be less than that of the deadline imposed by the calling client, which makes it more
- // likely that Envoy will handle the timeout instead of having the call canceled by the client.
- // The offset will only be applied if the provided grpc_timeout is greater than the offset. This
- // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning
- // infinity).
- google.protobuf.Duration grpc_timeout_offset = 28;
-
repeated UpgradeConfig upgrade_configs = 25;
// If present, Envoy will try to follow an upstream redirect response instead of proxying the
@@ -1025,6 +1030,9 @@ message RouteAction {
// it'll take precedence over the virtual host level hedge policy entirely
// (e.g.: policies are not merged, most internal one becomes the enforced policy).
HedgePolicy hedge_policy = 27;
+
+ // Specifies the maximum stream duration for this route.
+ MaxStreamDuration max_stream_duration = 36;
}
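A sketch of the new *max_stream_duration* message applied on a route; the durations and cluster name are assumed values:

.. code-block:: yaml

  route:
    cluster: grpc_backend
    max_stream_duration:
      max_stream_duration: 300s
      grpc_timeout_header_max: 60s
      grpc_timeout_header_offset: 1s

Under this sketch, a client-supplied ``grpc-timeout`` is honored but capped at 60s, the 1s offset nudges Envoy's deadline below the client's, and streams without a gRPC timeout are bounded at 300s.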
// HTTP retry :ref:`architecture overview `.
@@ -1045,7 +1053,7 @@ message RetryPolicy {
reserved "config";
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
oneof config_type {
google.protobuf.Any typed_config = 3;
@@ -1060,7 +1068,7 @@ message RetryPolicy {
reserved "config";
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
oneof config_type {
google.protobuf.Any typed_config = 3;
@@ -1091,9 +1099,15 @@ message RetryPolicy {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.route.v3.RetryPolicy.ResetHeader";
+ // The name of the reset header.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
string name = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ // The format of the reset header.
ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];
}
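For example, a retry policy can derive its back-off from a rate-limit response header. A minimal sketch, assuming the surrounding *rate_limited_retry_back_off* message from this file; the header name is illustrative:

.. code-block:: yaml

  retry_policy:
    retry_on: retriable-status-codes
    retriable_status_codes: [429]
    num_retries: 3
    rate_limited_retry_back_off:
      reset_headers:
      - name: Retry-After
        format: SECONDS
      max_interval: 300s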
@@ -1362,7 +1376,7 @@ message Decorator {
// For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden
// by the :ref:`x-envoy-decorator-operation
// ` header.
- string operation = 1 [(validate.rules).string = {min_bytes: 1}];
+ string operation = 1 [(validate.rules).string = {min_len: 1}];
// Whether the decorated details should be propagated to the other party. The default is true.
google.protobuf.BoolValue propagate = 2;
@@ -1438,14 +1452,14 @@ message VirtualCluster {
// Specifies the name of the virtual cluster. The virtual cluster name as well
// as the virtual host name are used when emitting statistics. The statistics are emitted by the
// router filter and are documented :ref:`here `.
- string name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string name = 2 [(validate.rules).string = {min_len: 1}];
}
// Global rate limiting :ref:`architecture overview `.
message RateLimit {
option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit";
- // [#next-free-field: 8]
+ // [#next-free-field: 9]
message Action {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.route.v3.RateLimit.Action";
@@ -1496,12 +1510,11 @@ message RateLimit {
// The header name to be queried from the request headers. The header’s
// value is used to populate the value of the descriptor entry for the
// descriptor_key.
- string header_name = 1 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}
- ];
+ string header_name = 1
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// The key to use in the descriptor entry.
- string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];
// If set to true, Envoy skips the descriptor while calling rate limiting service
// when header is not present in the request. By default it skips calling the
@@ -1530,7 +1543,7 @@ message RateLimit {
"envoy.config.route.v3.RateLimit.Action.GenericKey";
// The value to use in the descriptor entry.
- string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];
// An optional key to use in the descriptor entry. If not set it defaults
// to 'generic_key' as the descriptor key.
@@ -1547,7 +1560,7 @@ message RateLimit {
"envoy.config.route.v3.RateLimit.Action.HeaderValueMatch";
// The value to use in the descriptor entry.
- string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];
// If set to true, the action will append a descriptor entry when the
// request matches the headers. If set to false, the action will append a
@@ -1563,17 +1576,21 @@ message RateLimit {
repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];
}
- // The following descriptor entry is appended when the dynamic metadata contains a key value:
+ // The following descriptor entry is appended when the
+ // :ref:`dynamic metadata ` contains a key value:
//
// .. code-block:: cpp
//
- // ("", "")
+ // ("", "")
+ //
+ // .. attention::
+ // This action has been deprecated in favor of the :ref:`metadata ` action
message DynamicMetaData {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.route.v3.RateLimit.Action.DynamicMetaData";
// The key to use in the descriptor entry.
- string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];
// Metadata struct that defines the key and path to retrieve the string value. A match will
// only happen if the value in the dynamic metadata is of type string.
@@ -1584,6 +1601,42 @@ message RateLimit {
string default_value = 3;
}
+ // The following descriptor entry is appended when the metadata contains a key value:
+ //
+ // .. code-block:: cpp
+ //
+ // ("", "")
+ message MetaData {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.config.route.v3.RateLimit.Action.MetaData";
+
+ enum Source {
+ // Query :ref:`dynamic metadata `
+ DYNAMIC = 0;
+
+ // Query :ref:`route entry metadata `
+ ROUTE_ENTRY = 1;
+ }
+
+ // The key to use in the descriptor entry.
+ string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];
+
+ // Metadata struct that defines the key and path to retrieve the string value. A match will
+ // only happen if the value in the metadata is of type string.
+ type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];
+
+ // An optional value to use if *metadata_key* is empty. If not set and
+ // no value is present under the metadata_key then no descriptor is generated.
+ string default_value = 3;
+
+ // Source of metadata
+ Source source = 4 [(validate.rules).enum = {defined_only: true}];
+ }
+
+ reserved 7;
+
+ reserved "dynamic_metadata";
+
oneof action_specifier {
option (validate.required) = true;
@@ -1605,8 +1658,8 @@ message RateLimit {
// Rate limit on the existence of request headers.
HeaderValueMatch header_value_match = 6;
- // Rate limit on dynamic metadata.
- DynamicMetaData dynamic_metadata = 7;
+ // Rate limit on metadata.
+ MetaData metadata = 8;
}
}
@@ -1696,7 +1749,7 @@ message HeaderMatcher {
// Specifies the name of the header in the request.
string name = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// Specifies how the header match will be performed to route the request.
oneof header_match_specifier {
@@ -1731,7 +1784,7 @@ message HeaderMatcher {
// Examples:
//
// * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.
- string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}];
+ string prefix_match = 9 [(validate.rules).string = {min_len: 1}];
// If specified, header match will be performed based on the suffix of the header value.
// Note: empty suffix is not allowed, please use present_match instead.
@@ -1739,7 +1792,7 @@ message HeaderMatcher {
// Examples:
//
// * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.
- string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}];
+ string suffix_match = 10 [(validate.rules).string = {min_len: 1}];
// If specified, header match will be performed based on whether the header value contains
// the given value or not.
@@ -1748,7 +1801,7 @@ message HeaderMatcher {
// Examples:
//
// * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.
- string contains_match = 12 [(validate.rules).string = {min_bytes: 1}];
+ string contains_match = 12 [(validate.rules).string = {min_len: 1}];
}
// If specified, the match result will be inverted before checking. Defaults to false.
@@ -1773,7 +1826,7 @@ message QueryParameterMatcher {
// Specifies the name of a key that must be present in the requested
// *path*'s query string.
- string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}];
+ string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];
oneof query_parameter_match_specifier {
// Specifies whether a query parameter value should match against a string.
diff --git a/api/envoy/config/route/v4alpha/scoped_route.proto b/api/envoy/config/route/v4alpha/scoped_route.proto
index 33fc756a60a4..0704ceacbbac 100644
--- a/api/envoy/config/route/v4alpha/scoped_route.proto
+++ b/api/envoy/config/route/v4alpha/scoped_route.proto
@@ -108,12 +108,12 @@ message ScopedRouteConfiguration {
bool on_demand = 4;
// The name assigned to the routing scope.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The resource name to use for a :ref:`envoy_api_msg_service.discovery.v4alpha.DiscoveryRequest` to an
// RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated
// with this scope.
- string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}];
// The key to match against.
Key key = 3 [(validate.rules).message = {required: true}];
diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto
index 42783115f871..a8324a6ebc1a 100644
--- a/api/envoy/config/tap/v3/common.proto
+++ b/api/envoy/config/tap/v3/common.proto
@@ -261,7 +261,7 @@ message FilePerTapSink {
// Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an
// identifier distinguishing the recorded trace for stream instances (the Envoy
// connection ID, HTTP stream ID, etc.).
- string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string path_prefix = 1 [(validate.rules).string = {min_len: 1}];
}
// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC
diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto
index 8366187fd1bf..fbee12d7f99d 100644
--- a/api/envoy/config/tap/v4alpha/common.proto
+++ b/api/envoy/config/tap/v4alpha/common.proto
@@ -259,7 +259,7 @@ message FilePerTapSink {
// Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an
// identifier distinguishing the recorded trace for stream instances (the Envoy
// connection ID, HTTP stream ID, etc.).
- string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string path_prefix = 1 [(validate.rules).string = {min_len: 1}];
}
// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC
diff --git a/api/envoy/config/trace/v3/datadog.proto b/api/envoy/config/trace/v3/datadog.proto
index f1fe3e666125..c101ab2f03c9 100644
--- a/api/envoy/config/trace/v3/datadog.proto
+++ b/api/envoy/config/trace/v3/datadog.proto
@@ -22,8 +22,8 @@ message DatadogConfig {
"envoy.config.trace.v2.DatadogConfig";
// The cluster to use for submitting traces to the Datadog agent.
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];
// The name used for the service when traces are generated by envoy.
- string service_name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string service_name = 2 [(validate.rules).string = {min_len: 1}];
}
diff --git a/api/envoy/config/trace/v3/dynamic_ot.proto b/api/envoy/config/trace/v3/dynamic_ot.proto
index fb372da8c52a..c28106871542 100644
--- a/api/envoy/config/trace/v3/dynamic_ot.proto
+++ b/api/envoy/config/trace/v3/dynamic_ot.proto
@@ -28,7 +28,7 @@ message DynamicOtConfig {
// Dynamic library implementing the `OpenTracing API
// `_.
- string library = 1 [(validate.rules).string = {min_bytes: 1}];
+ string library = 1 [(validate.rules).string = {min_len: 1}];
// The configuration to use when creating a tracer from the given dynamic
// library.
diff --git a/api/envoy/config/trace/v3/http_tracer.proto b/api/envoy/config/trace/v3/http_tracer.proto
index 2a87a28db25e..33adea18a4d6 100644
--- a/api/envoy/config/trace/v3/http_tracer.proto
+++ b/api/envoy/config/trace/v3/http_tracer.proto
@@ -52,7 +52,7 @@ message Tracing {
// - *envoy.tracers.datadog*
// - *envoy.tracers.opencensus*
// - *envoy.tracers.xray*
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Trace driver specific configuration which depends on the driver being instantiated.
// See the trace drivers for examples:
diff --git a/api/envoy/config/trace/v3/lightstep.proto b/api/envoy/config/trace/v3/lightstep.proto
index 0e0b60b5bddb..0b7be7c4e609 100644
--- a/api/envoy/config/trace/v3/lightstep.proto
+++ b/api/envoy/config/trace/v3/lightstep.proto
@@ -38,11 +38,11 @@ message LightstepConfig {
}
// The cluster manager cluster that hosts the LightStep collectors.
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];
// File containing the access token to the `LightStep
// `_ API.
- string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}];
+ string access_token_file = 2 [(validate.rules).string = {min_len: 1}];
// Propagation modes to use by LightStep's tracer.
repeated PropagationMode propagation_modes = 3
diff --git a/api/envoy/config/trace/v3/zipkin.proto b/api/envoy/config/trace/v3/zipkin.proto
index 5c5349cdf155..ee4e4d9b7898 100644
--- a/api/envoy/config/trace/v3/zipkin.proto
+++ b/api/envoy/config/trace/v3/zipkin.proto
@@ -49,12 +49,12 @@ message ZipkinConfig {
// The cluster manager cluster that hosts the Zipkin collectors. Note that the
// Zipkin cluster must be defined in the :ref:`Bootstrap static cluster
// resources `.
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];
// The API endpoint of the Zipkin service where the spans will be sent. When
// using a standard Zipkin installation, the API endpoint is typically
// /api/v1/spans, which is the default value.
- string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}];
+ string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}];
// Determines whether a 128bit trace id will be used when creating a new
// trace instance. The default value is false, which will result in a 64 bit trace id being used.
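A minimal bootstrap-level sketch wiring these fields together; the cluster name is an assumption:

.. code-block:: yaml

  tracing:
    http:
      name: envoy.tracers.zipkin
      typed_config:
        "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig
        collector_cluster: zipkin
        collector_endpoint: /api/v1/spans
        trace_id_128bit: true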
diff --git a/api/envoy/config/trace/v4alpha/http_tracer.proto b/api/envoy/config/trace/v4alpha/http_tracer.proto
index 663886a97bb4..ea918ec2bff5 100644
--- a/api/envoy/config/trace/v4alpha/http_tracer.proto
+++ b/api/envoy/config/trace/v4alpha/http_tracer.proto
@@ -52,7 +52,7 @@ message Tracing {
// - *envoy.tracers.datadog*
// - *envoy.tracers.opencensus*
// - *envoy.tracers.xray*
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Trace driver specific configuration which depends on the driver being instantiated.
// See the trace drivers for examples:
diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto
index c16b5be1ff0e..af7edab5836a 100644
--- a/api/envoy/data/accesslog/v3/accesslog.proto
+++ b/api/envoy/data/accesslog/v3/accesslog.proto
@@ -186,7 +186,7 @@ message AccessLogCommon {
}
// Flags indicating occurrences during request/response processing.
-// [#next-free-field: 23]
+// [#next-free-field: 24]
message ResponseFlags {
option (udpa.annotations.versioning).previous_message_type =
"envoy.data.accesslog.v2.ResponseFlags";
@@ -272,6 +272,9 @@ message ResponseFlags {
// Indicates that a filter configuration is not available.
bool no_filter_config_found = 22;
+
+ // Indicates that request or connection exceeded the downstream connection duration.
+ bool duration_timeout = 23;
}
// Properties of a negotiated TLS connection.
diff --git a/api/envoy/data/cluster/v3/outlier_detection_event.proto b/api/envoy/data/cluster/v3/outlier_detection_event.proto
index ae1ad4c94d17..f87cd1582b09 100644
--- a/api/envoy/data/cluster/v3/outlier_detection_event.proto
+++ b/api/envoy/data/cluster/v3/outlier_detection_event.proto
@@ -88,10 +88,10 @@ message OutlierDetectionEvent {
google.protobuf.UInt64Value secs_since_last_action = 3;
// The :ref:`cluster ` that owns the ejected host.
- string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}];
+ string cluster_name = 4 [(validate.rules).string = {min_len: 1}];
// The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``.
- string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}];
+ string upstream_url = 5 [(validate.rules).string = {min_len: 1}];
// The action that took place.
Action action = 6 [(validate.rules).enum = {defined_only: true}];
diff --git a/api/envoy/data/core/v3/health_check_event.proto b/api/envoy/data/core/v3/health_check_event.proto
index 88b195b92b3d..2b0f9d888f46 100644
--- a/api/envoy/data/core/v3/health_check_event.proto
+++ b/api/envoy/data/core/v3/health_check_event.proto
@@ -40,7 +40,7 @@ message HealthCheckEvent {
config.core.v3.Address host = 2;
- string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}];
+ string cluster_name = 3 [(validate.rules).string = {min_len: 1}];
oneof event {
option (validate.required) = true;
diff --git a/api/envoy/data/dns/v3/dns_table.proto b/api/envoy/data/dns/v3/dns_table.proto
index 354ad69fca66..4398403b7ed0 100644
--- a/api/envoy/data/dns/v3/dns_table.proto
+++ b/api/envoy/data/dns/v3/dns_table.proto
@@ -86,7 +86,8 @@ message DnsTable {
// This message defines a service selection record returned for a service query in a domain
message DnsService {
// The name of the service without the protocol or domain name
- string service_name = 1;
+ string service_name = 1
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];
// The service protocol. This can be specified as a string or the numeric value of the protocol
DnsServiceProtocol protocol = 2;
diff --git a/api/envoy/data/dns/v4alpha/dns_table.proto b/api/envoy/data/dns/v4alpha/dns_table.proto
index 140ca4489c20..f142cfa7bf8c 100644
--- a/api/envoy/data/dns/v4alpha/dns_table.proto
+++ b/api/envoy/data/dns/v4alpha/dns_table.proto
@@ -95,7 +95,8 @@ message DnsTable {
"envoy.data.dns.v3.DnsTable.DnsService";
// The name of the service without the protocol or domain name
- string service_name = 1;
+ string service_name = 1
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];
// The service protocol. This can be specified as a string or the numeric value of the protocol
DnsServiceProtocol protocol = 2;
diff --git a/api/envoy/extensions/access_loggers/file/v3/file.proto b/api/envoy/extensions/access_loggers/file/v3/file.proto
index de33623c207f..f17a2e7f4ca9 100644
--- a/api/envoy/extensions/access_loggers/file/v3/file.proto
+++ b/api/envoy/extensions/access_loggers/file/v3/file.proto
@@ -27,7 +27,7 @@ message FileAccessLog {
"envoy.config.accesslog.v2.FileAccessLog";
// A path to a local file to which to write the access log entries.
- string path = 1 [(validate.rules).string = {min_bytes: 1}];
+ string path = 1 [(validate.rules).string = {min_len: 1}];
oneof access_log_format {
// Access log :ref:`format string`.
diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto
index fa2ec9a50495..03d138585d23 100644
--- a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto
+++ b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto
@@ -31,7 +31,7 @@ message FileAccessLog {
reserved "format", "json_format", "typed_json_format";
// A path to a local file to which to write the access log entries.
- string path = 1 [(validate.rules).string = {min_bytes: 1}];
+ string path = 1 [(validate.rules).string = {min_len: 1}];
oneof access_log_format {
// Configuration to form access log data and format.
diff --git a/api/envoy/extensions/access_loggers/grpc/v3/als.proto b/api/envoy/extensions/access_loggers/grpc/v3/als.proto
index 4996a877a9c6..968dfbeec016 100644
--- a/api/envoy/extensions/access_loggers/grpc/v3/als.proto
+++ b/api/envoy/extensions/access_loggers/grpc/v3/als.proto
@@ -62,7 +62,7 @@ message CommonGrpcAccessLogConfig {
// The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier
// `. This allows the
// access log server to differentiate between different access logs coming from the same Envoy.
- string log_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string log_name = 1 [(validate.rules).string = {min_len: 1}];
// The gRPC service for the access log service.
config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];
diff --git a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto
index cd9db5906436..413743a203f0 100644
--- a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto
+++ b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto
@@ -12,9 +12,12 @@ option java_outer_classname = "WasmProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
-// [[#not-implemented-hide:]
+// [#protodoc-title: Wasm access log]
+// [#extension: envoy.access_loggers.wasm]
+
// Custom configuration for an :ref:`AccessLog `
-// that calls into a WASM VM.
+// that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm*
+// AccessLog.
message WasmAccessLog {
envoy.extensions.wasm.v3.PluginConfig config = 1;
}
diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto
index 79cd583486ac..5579cc16bd97 100644
--- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto
+++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto
@@ -37,7 +37,7 @@ message DnsCacheConfig {
// configurations with the same name *must* otherwise have the same settings when referenced
// from different configuration components. Configuration will fail to load if this is not
// the case.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The DNS lookup family to use during resolution.
//
@@ -98,5 +98,8 @@ message DnsCacheConfig {
// [#next-major-version: Reconcile DNS options in a single message.]
// Always use TCP queries instead of UDP queries for DNS lookups.
+ // Setting this value causes failure if the
+ // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
+ // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 8;
}
diff --git a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto
index 9255deb4b64d..30efa6026218 100644
--- a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto
+++ b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto
@@ -69,10 +69,10 @@ message RateLimitDescriptor {
"envoy.api.v2.ratelimit.RateLimitDescriptor.Entry";
// Descriptor key.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
// Descriptor value.
- string value = 2 [(validate.rules).string = {min_bytes: 1}];
+ string value = 2 [(validate.rules).string = {min_len: 1}];
}
// Override rate limit to apply to this descriptor instead of the limit
diff --git a/api/envoy/extensions/common/tap/v3/common.proto b/api/envoy/extensions/common/tap/v3/common.proto
index 68e80dad76b4..aa7ae8264757 100644
--- a/api/envoy/extensions/common/tap/v3/common.proto
+++ b/api/envoy/extensions/common/tap/v3/common.proto
@@ -64,5 +64,5 @@ message AdminConfig {
// Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is
// matched to the configured filter opaque ID to determine which filter to configure.
- string config_id = 1 [(validate.rules).string = {min_bytes: 1}];
+ string config_id = 1 [(validate.rules).string = {min_len: 1}];
}
diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto
index 536f13d049c3..efa7744e357f 100644
--- a/api/envoy/extensions/common/tap/v4alpha/common.proto
+++ b/api/envoy/extensions/common/tap/v4alpha/common.proto
@@ -65,5 +65,5 @@ message AdminConfig {
// Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is
// matched to the configured filter opaque ID to determine which filter to configure.
- string config_id = 1 [(validate.rules).string = {min_bytes: 1}];
+ string config_id = 1 [(validate.rules).string = {min_len: 1}];
}
diff --git a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto
index 8dd851f4020a..c524e022e859 100644
--- a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto
+++ b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto
@@ -51,10 +51,11 @@ message GradientControllerConfig {
"envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig."
"MinimumRTTCalculationParams";
- // The time interval between recalculating the minimum request round-trip time.
+ // The time interval between recalculating the minimum request round-trip time. Has to be
+ // positive.
google.protobuf.Duration interval = 1 [(validate.rules).duration = {
required: true
- gt {}
+ gte {nanos: 1000000}
}];
// The number of requests to aggregate/sample during the minRTT recalculation window before
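For context, a sketch of a gradient controller configuration that satisfies the new lower bound on *interval*; all field values are illustrative:

.. code-block:: yaml

  gradient_controller_config:
    sample_aggregate_percentile:
      value: 90
    concurrency_limit_params:
      concurrency_update_interval: 0.1s
    min_rtt_calc_params:
      interval: 60s
      request_count: 50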
diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto
index 6f01c88885f4..c77d93762099 100644
--- a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto
+++ b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto
@@ -23,6 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Admission Control]
// [#extension: envoy.filters.http.admission_control]
+// [#next-free-field: 6]
message AdmissionControl {
// Default method of specifying what constitutes a successful request. All status codes that
// indicate a successful request must be explicitly specified if not relying on the default
@@ -75,16 +76,23 @@ message AdmissionControl {
}
// The sliding time window over which the success rate is calculated. The window is rounded to the
- // nearest second. Defaults to 120s.
+ // nearest second. Defaults to 30s.
google.protobuf.Duration sampling_window = 3;
// Rejection probability is defined by the formula::
//
- // max(0, (rq_count - aggression_coefficient * rq_success_count) / (rq_count + 1))
+ // max(0, (rq_count - rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression)
//
- // The coefficient dictates how aggressively the admission controller will throttle requests as
- // the success rate drops. Lower values will cause throttling to kick in at higher success rates
- // and result in more aggressive throttling. Any values less than 1.0, will be set to 1.0. If the
- // message is unspecified, the coefficient is 2.0.
- config.core.v3.RuntimeDouble aggression_coefficient = 4;
+ // The aggression dictates how heavily the admission controller will throttle requests upon SR
+ // dropping at or below the threshold. A value of 1 will result in a linear increase in
+ // rejection probability as SR drops. Any value less than 1.0 will be set to 1.0. If the
+ // message is unspecified, the aggression is 1.0. See `the admission control documentation
+ // `_
+ // for a diagram illustrating this.
+ config.core.v3.RuntimeDouble aggression = 4;
+
+ // Dictates the success rate at which the rejection probability is non-zero. As success rate drops
+ // below this threshold, rejection probability will increase. Any success rate above the threshold
+ // results in a rejection probability of 0. Defaults to 95%.
+ config.core.v3.RuntimePercent sr_threshold = 5;
}
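To make the formula concrete, a worked example under assumed values: with *sr_threshold* at 95%, *aggression* at 1.0, and a sampling window containing 100 requests of which 90 succeeded::

  max(0, (100 - 90 / 0.95) / (100 + 1)) ^ (1 / 1.0)
    = max(0, 5.26 / 101)
    ≈ 0.052

Roughly 1 in 20 new requests would be rejected; an aggression above 1.0 raises this probability for the same success rate.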
diff --git a/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto
index b80bc1b82108..6a516b430028 100644
--- a/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto
+++ b/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto
@@ -25,13 +25,13 @@ message AwsRequestSigning {
// of the HTTP endpoint.
//
// Example: s3
- string service_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string service_name = 1 [(validate.rules).string = {min_len: 1}];
// The `region `_ hosting the HTTP
// endpoint.
//
// Example: us-west-2
- string region = 2 [(validate.rules).string = {min_bytes: 1}];
+ string region = 2 [(validate.rules).string = {min_len: 1}];
// Indicates that before signing headers, the host header will be swapped with
// this value. If not set or empty, the original host header value
diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto
index f78b1d24ac2c..9260abe94a96 100644
--- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto
+++ b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto
@@ -52,17 +52,14 @@ message CacheConfig {
// Config specific to the cache storage implementation.
google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];
- // [#not-implemented-hide:]
- //
- //
- // List of allowed *Vary* headers.
+ // List of matching rules that define the allowed *Vary* headers.
//
// The *vary* response header holds a list of header names that affect the
// contents of a response, as described by
// https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.
//
// During insertion, *allowed_vary_headers* acts as an allowlist: if a
- // response's *vary* header mentions any header names that aren't in
+ // response's *vary* header mentions any header names that aren't matched by any rules in
// *allowed_vary_headers*, that response will not be cached.
//
// During lookup, *allowed_vary_headers* controls what request headers will be
diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto
index 19921edb0310..ad9bb4c639a4 100644
--- a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto
+++ b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto
@@ -52,17 +52,14 @@ message CacheConfig {
// Config specific to the cache storage implementation.
google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];
- // [#not-implemented-hide:]
- //
- //
- // List of allowed *Vary* headers.
+ // List of matching rules that define the allowed *Vary* headers.
//
// The *vary* response header holds a list of header names that affect the
// contents of a response, as described by
// https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.
//
// During insertion, *allowed_vary_headers* acts as an allowlist: if a
- // response's *vary* header mentions any header names that aren't in
+ // response's *vary* header mentions any header names that aren't matched by any rules in
// *allowed_vary_headers*, that response will not be cached.
//
// During lookup, *allowed_vary_headers* controls what request headers will be
diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD
new file mode 100644
index 000000000000..ee92fb652582
--- /dev/null
+++ b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+)
diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto
new file mode 100644
index 000000000000..7952f9b3d448
--- /dev/null
+++ b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.http.cdn_loop.v3alpha;
+
+import "udpa/annotations/status.proto";
+import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha";
+option java_outer_classname = "CdnLoopProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).work_in_progress = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: HTTP CDN-Loop Filter]
+// [#extension: envoy.filters.http.cdn_loop]
+
+// CDN-Loop Header filter config. See the :ref:`configuration overview
+// ` for more information.
+message CdnLoopConfig {
+ // The CDN identifier to use for loop checks and to append to the
+ // CDN-Loop header.
+ //
+ // RFC 8586 calls this the cdn-id. The cdn-id can either be a
+ // pseudonym or hostname the CDN is in control of.
+ //
+ // cdn_id must not be empty.
+ string cdn_id = 1 [(validate.rules).string = {min_len: 1}];
+
+ // The maximum allowed count of cdn_id in the downstream CDN-Loop
+ // request header.
+ //
+ // The default of 0 means a request can transit the CdnLoopFilter
+ // once. A value of 1 means that a request can transit the
+ // CdnLoopFilter twice and so on.
+ uint32 max_allowed_occurrences = 2;
+}
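A minimal sketch of enabling the filter; the cdn-id shown is an example value:

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.cdn_loop
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig
      cdn_id: cdn.example.com
      max_allowed_occurrences: 0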
diff --git a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto
index b8a2525dbf54..70dd21a324b3 100644
--- a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto
+++ b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto
@@ -55,6 +55,10 @@ message PerRouteConfig {
// :ref:`HCM host rewrite header `
// given that the value set here would be used for DNS lookups whereas the value set in the HCM
// would be used for host header forwarding which is not the desired outcome.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
string host_rewrite_header = 2;
}
}
diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto
index d9264ca66b66..395258802f56 100644
--- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto
+++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto
@@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/config_source.proto";
import "envoy/config/core/v3/grpc_service.proto";
import "envoy/config/core/v3/http_uri.proto";
+import "envoy/type/matcher/v3/metadata.proto";
import "envoy/type/matcher/v3/string.proto";
import "envoy/type/v3/http_status.proto";
@@ -23,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// External Authorization :ref:`configuration overview `.
// [#extension: envoy.filters.http.ext_authz]
-// [#next-free-field: 13]
+// [#next-free-field: 15]
message ExtAuthz {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.http.ext_authz.v2.ExtAuthz";
@@ -103,6 +104,10 @@ message ExtAuthz {
// If this field is not specified, the filter will be enabled for all requests.
config.core.v3.RuntimeFractionalPercent filter_enabled = 9;
+ // Specifies if the filter is enabled with metadata matcher.
+ // If this field is not specified, the filter will be enabled for all requests.
+ type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14;
+
// Specifies whether to deny the requests, when the filter is disabled.
// If :ref:`runtime_key ` is specified,
// Envoy will lookup the runtime key to determine whether to deny request for
@@ -117,6 +122,23 @@ message ExtAuthz {
// When this field is true, Envoy will include the peer X.509 certificate, if available, in the
// :ref:`certificate`.
bool include_peer_certificate = 10;
+
+ // Optional additional prefix to use when emitting statistics. This allows distinguishing the
+ // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example:
+ //
+ // .. code-block:: yaml
+ //
+ // http_filters:
+ // - name: envoy.filters.http.ext_authz
+ // typed_config:
+ // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
+ // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc.
+ // - name: envoy.filters.http.ext_authz
+ // typed_config:
+ // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
+ // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc.
+ //
+ string stat_prefix = 13;
}
// Configuration for buffering the request data.
@@ -134,6 +156,13 @@ message BufferSettings {
// The authorization request will be dispatched and no 413 HTTP error will be returned by the
// filter.
bool allow_partial_message = 2;
+
+ // If true, the body sent to the external authorization service is set with raw bytes; this sets
+ // the :ref:`raw_body`
+ // field of the HTTP request attribute context. Otherwise, the
+ // :ref:`body` field
+ // will be filled with the UTF-8 string request body.
+ bool pack_as_bytes = 3;
}
// HttpService is used for raw HTTP communication between the filter and the authorization service.
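For illustration, a filter configuration that buffers up to 8 KiB of the request body and forwards it as raw bytes; the cluster name and sizes are assumptions:

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.ext_authz
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
      grpc_service:
        envoy_grpc:
          cluster_name: ext_authz
      with_request_body:
        max_request_bytes: 8192
        allow_partial_message: true
        pack_as_bytes: true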
@@ -243,11 +272,7 @@ message ExtAuthzPerRoute {
}
}
-// Extra settings for the check request. You can use this to provide extra context for the
-// external authorization server on specific virtual hosts \ routes. For example, adding a context
-// extension on the virtual host level can give the ext-authz server information on what virtual
-// host is used without needing to parse the host header. If CheckSettings is specified in multiple
-// per-filter-configs, they will be merged in order, and the result will be used.
+// Extra settings for the check request.
message CheckSettings {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.http.ext_authz.v2.CheckSettings";
@@ -255,6 +280,12 @@ message CheckSettings {
// Context extensions to set on the CheckRequest's
// :ref:`AttributeContext.context_extensions`
//
+ // You can use this to provide extra context for the external authorization server on specific
+ // virtual hosts/routes. For example, adding a context extension on the virtual host level can
+ // give the ext-authz server information on what virtual host is used without needing to parse the
+ // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged
+ // in order, and the result will be used.
+ //
// Merge semantics for this field are such that keys from more specific configs override.
//
// .. note::
@@ -262,4 +293,8 @@ message CheckSettings {
// These settings are only applied to a filter configured with a
// :ref:`grpc_service`.
map<string, string> context_extensions = 1;
+
+ // When set to true, disable the configured :ref:`with_request_body
+ // ` for a route.
+ bool disable_request_body_buffering = 2;
}
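A sketch of a per-route override combining both fields; the context-extension value is illustrative:

.. code-block:: yaml

  typed_per_filter_config:
    envoy.filters.http.ext_authz:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute
      check_settings:
        context_extensions:
          virtual_host: local_service
        disable_request_body_buffering: true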
diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto
index 05ced9299258..ec8854f5d1be 100644
--- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto
+++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto
@@ -6,6 +6,7 @@ import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/config_source.proto";
import "envoy/config/core/v4alpha/grpc_service.proto";
import "envoy/config/core/v4alpha/http_uri.proto";
+import "envoy/type/matcher/v4alpha/metadata.proto";
import "envoy/type/matcher/v4alpha/string.proto";
import "envoy/type/v3/http_status.proto";
@@ -23,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO
// External Authorization :ref:`configuration overview `.
// [#extension: envoy.filters.http.ext_authz]
-// [#next-free-field: 13]
+// [#next-free-field: 15]
message ExtAuthz {
option (udpa.annotations.versioning).previous_message_type =
"envoy.extensions.filters.http.ext_authz.v3.ExtAuthz";
@@ -103,6 +104,10 @@ message ExtAuthz {
// If this field is not specified, the filter will be enabled for all requests.
config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9;
+ // Specifies if the filter is enabled with metadata matcher.
+ // If this field is not specified, the filter will be enabled for all requests.
+ type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14;
+
// Specifies whether to deny the requests, when the filter is disabled.
// If :ref:`runtime_key ` is specified,
// Envoy will lookup the runtime key to determine whether to deny request for
@@ -117,6 +122,23 @@ message ExtAuthz {
// When this field is true, Envoy will include the peer X.509 certificate, if available, in the
// :ref:`certificate`.
bool include_peer_certificate = 10;
+
+ // Optional additional prefix to use when emitting statistics. This allows distinguishing the
+ // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example:
+ //
+ // .. code-block:: yaml
+ //
+ // http_filters:
+ // - name: envoy.filters.http.ext_authz
+ // typed_config:
+ // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
+ // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc.
+ // - name: envoy.filters.http.ext_authz
+ // typed_config:
+ // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
+ // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc.
+ //
+ string stat_prefix = 13;
}
// Configuration for buffering the request data.
@@ -134,6 +156,13 @@ message BufferSettings {
// The authorization request will be dispatched and no 413 HTTP error will be returned by the
// filter.
bool allow_partial_message = 2;
+
+ // If true, the body sent to the external authorization service is set with raw bytes; it sets
+ // the :ref:`raw_body`
+ // field of the HTTP request attribute context. Otherwise, :ref:`
+ // body` will be filled
+ // with a UTF-8 string request body.
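+ //
+ // An illustrative buffer configuration enabling this flag (the byte limit is
+ // an arbitrary example):
+ //
+ // .. code-block:: yaml
+ //
+ //   with_request_body:
+ //     max_request_bytes: 1024
+ //     allow_partial_message: true
+ //     pack_as_bytes: true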
+ bool pack_as_bytes = 3;
}
// HttpService is used for raw HTTP communication between the filter and the authorization service.
@@ -243,11 +272,7 @@ message ExtAuthzPerRoute {
}
}
-// Extra settings for the check request. You can use this to provide extra context for the
-// external authorization server on specific virtual hosts \ routes. For example, adding a context
-// extension on the virtual host level can give the ext-authz server information on what virtual
-// host is used without needing to parse the host header. If CheckSettings is specified in multiple
-// per-filter-configs, they will be merged in order, and the result will be used.
+// Extra settings for the check request.
message CheckSettings {
option (udpa.annotations.versioning).previous_message_type =
"envoy.extensions.filters.http.ext_authz.v3.CheckSettings";
@@ -255,6 +280,12 @@ message CheckSettings {
// Context extensions to set on the CheckRequest's
// :ref:`AttributeContext.context_extensions`
//
+ // You can use this to provide extra context for the external authorization server on specific
+ // virtual hosts/routes. For example, adding a context extension on the virtual host level can
+ // give the ext-authz server information about which virtual host is used without needing to
+ // parse the host header. If CheckSettings is specified in multiple per-filter-configs, they
+ // will be merged in order, and the result will be used.
+ //
// Merge semantics for this field are such that keys from more specific configs override.
//
// .. note::
@@ -262,4 +293,8 @@ message CheckSettings {
// These settings are only applied to a filter configured with a
// :ref:`grpc_service`.
map context_extensions = 1;
+
+ // When set to true, disable the configured :ref:`with_request_body
+ // ` for a route.
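+ //
+ // For example, a route-level CheckSettings override could simply set
+ // (a minimal sketch):
+ //
+ // .. code-block:: yaml
+ //
+ //   check_settings:
+ //     disable_request_body_buffering: true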
+ bool disable_request_body_buffering = 2;
}
diff --git a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto
index 85d7cbe1cecd..b2c4ad2ee681 100644
--- a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto
+++ b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto
@@ -23,7 +23,7 @@ message FilterConfig {
// The content-type to pass to the upstream when the gRPC bridge filter is applied.
// The filter will also validate that the upstream responds with the same content type.
- string content_type = 1 [(validate.rules).string = {min_bytes: 1}];
+ string content_type = 1 [(validate.rules).string = {min_len: 1}];
// If true, Envoy will assume that the upstream doesn't understand gRPC frames and
// strip the gRPC frame from the request, and add it back in to the response. This will
diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto
index ace7c535069a..5e399790a7ec 100644
--- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto
+++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto
@@ -56,7 +56,7 @@ message Config {
string metadata_namespace = 1;
// The key to use within the namespace.
- string key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string key = 2 [(validate.rules).string = {min_len: 1}];
// The value to pair with the given key.
//
diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto
index 0d7c814584dc..5b06f1e78556 100644
--- a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto
+++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto
@@ -55,7 +55,7 @@ message Config {
string metadata_namespace = 1;
// The key to use within the namespace.
- string key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string key = 2 [(validate.rules).string = {min_len: 1}];
oneof value_type {
// The value to pair with the given key.
diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
index 39fe6187f64f..5588961bf512 100644
--- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
+++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto
@@ -62,7 +62,7 @@ message JwtProvider {
// Example: https://securetoken.google.com
// Example: 1234567-compute@developer.gserviceaccount.com
//
- string issuer = 1 [(validate.rules).string = {min_bytes: 1}];
+ string issuer = 1 [(validate.rules).string = {min_len: 1}];
// The list of JWT `audiences `_ are
// allowed to access. A JWT containing any of these audiences will be accepted. If not specified,
@@ -220,7 +220,7 @@ message JwtHeader {
// The HTTP header name.
string name = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// The value prefix. The value format is "value_prefix"
// For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the
@@ -414,7 +414,7 @@ message FilterStateRule {
"envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule";
// The filter state name to retrieve the `Router::StringAccessor` object.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// A map of string keys to requirements. The string key is the string value
// in the FilterState with the name specified in the *name* field above.
diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto
index 302cf7253dde..12d4fa5fe1d3 100644
--- a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto
+++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto
@@ -62,7 +62,7 @@ message JwtProvider {
// Example: https://securetoken.google.com
// Example: 1234567-compute@developer.gserviceaccount.com
//
- string issuer = 1 [(validate.rules).string = {min_bytes: 1}];
+ string issuer = 1 [(validate.rules).string = {min_len: 1}];
// The list of JWT `audiences `_ are
// allowed to access. A JWT containing any of these audiences will be accepted. If not specified,
@@ -220,7 +220,7 @@ message JwtHeader {
// The HTTP header name.
string name = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// The value prefix. The value format is "value_prefix"
// For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the
@@ -414,7 +414,7 @@ message FilterStateRule {
"envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule";
// The filter state name to retrieve the `Router::StringAccessor` object.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// A map of string keys to requirements. The string key is the string value
// in the FilterState with the name specified in the *name* field above.
diff --git a/api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD b/api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD
new file mode 100644
index 000000000000..ad2fc9a9a84f
--- /dev/null
+++ b/api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD
@@ -0,0 +1,13 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = [
+ "//envoy/config/core/v3:pkg",
+ "//envoy/type/v3:pkg",
+ "@com_github_cncf_udpa//udpa/annotations:pkg",
+ ],
+)
diff --git a/api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto b/api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto
new file mode 100644
index 000000000000..94f21edd3eed
--- /dev/null
+++ b/api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto
@@ -0,0 +1,70 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.http.local_ratelimit.v3;
+
+import "envoy/config/core/v3/base.proto";
+import "envoy/type/v3/http_status.proto";
+import "envoy/type/v3/token_bucket.proto";
+
+import "udpa/annotations/status.proto";
+import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3";
+option java_outer_classname = "LocalRateLimitProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Local Rate limit]
+// Local Rate limit :ref:`configuration overview `.
+// [#extension: envoy.filters.http.local_ratelimit]
+
+// [#next-free-field: 7]
+message LocalRateLimit {
+ // The human readable prefix to use when emitting stats.
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
+
+ // This field allows for a custom HTTP response status code to be returned to the downstream
+ // client when the request has been rate limited.
+ // Defaults to 429 (TooManyRequests).
+ //
+ // .. note::
+ // If this is set to < 400, 429 will be used instead.
+ type.v3.HttpStatus status = 2;
+
+ // The token bucket configuration to use for rate limiting requests that are processed by this
+ // filter. Each request processed by the filter consumes a single token. If the token is available,
+ // the request will be allowed. If no tokens are available, the request will receive the configured
+ // rate limit status.
+ //
+ // .. note::
+ // It's fine for the token bucket to be unset for the global configuration since the rate limit
+ // can be applied at the virtual host or route level. Thus, the token bucket must be set
+ // for the per-route configuration, otherwise the config will be rejected.
+ //
+ // .. note::
+ // When using per route configuration, the bucket becomes unique to that route.
+ //
+ // .. note::
+ // In the current implementation the token bucket's :ref:`fill_interval
+ // ` must be >= 50ms to avoid too aggressive
+ // refills.
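+ //
+ // An illustrative bucket allowing bursts of up to 100 requests, refilled at
+ // 10 tokens every 100ms (the numbers are arbitrary examples):
+ //
+ // .. code-block:: yaml
+ //
+ //   token_bucket:
+ //     max_tokens: 100
+ //     tokens_per_fill: 10
+ //     fill_interval: 0.1s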
+ type.v3.TokenBucket token_bucket = 3;
+
+ // If set, this will enable -- but not necessarily enforce -- the rate limit for the given
+ // fraction of requests.
+ // Defaults to 0% of requests for safety.
+ config.core.v3.RuntimeFractionalPercent filter_enabled = 4;
+
+ // If set, this will enforce the rate limit decisions for the given fraction of requests.
+ //
+ // Note: this only applies to the fraction of enabled requests.
+ //
+ // Defaults to 0% of requests for safety.
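+ //
+ // For example, to enforce the limit for half of the enabled requests
+ // (the runtime key below is a made-up name):
+ //
+ // .. code-block:: yaml
+ //
+ //   filter_enforced:
+ //     runtime_key: local_rate_limit_enforced
+ //     default_value:
+ //       numerator: 50
+ //       denominator: HUNDRED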
+ config.core.v3.RuntimeFractionalPercent filter_enforced = 5;
+
+ // Specifies a list of HTTP headers that should be added to each response for requests that
+ // have been rate limited.
+ repeated config.core.v3.HeaderValueOption response_headers_to_add = 6
+ [(validate.rules).repeated = {max_items: 10}];
+}
diff --git a/api/envoy/extensions/filters/http/lua/v3/lua.proto b/api/envoy/extensions/filters/http/lua/v3/lua.proto
index fc348c2365cd..1636c01ab1c7 100644
--- a/api/envoy/extensions/filters/http/lua/v3/lua.proto
+++ b/api/envoy/extensions/filters/http/lua/v3/lua.proto
@@ -25,7 +25,7 @@ message Lua {
// further loads code from disk if desired. Note that if JSON configuration is used, the code must
// be properly escaped. YAML configuration may be easier to read since YAML supports multi-line
// strings so complex scripts can be easily expressed inline in the configuration.
- string inline_code = 1 [(validate.rules).string = {min_bytes: 1}];
+ string inline_code = 1 [(validate.rules).string = {min_len: 1}];
// Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute
// `. The Lua source codes can be
diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
index 53678996de6c..e4be64167ed2 100644
--- a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
+++ b/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
@@ -26,7 +26,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
message OAuth2Credentials {
// The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.
- string client_id = 1 [(validate.rules).string = {min_bytes: 1}];
+ string client_id = 1 [(validate.rules).string = {min_len: 1}];
// The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server.
transport_sockets.tls.v3.SdsSecretConfig token_secret = 2
@@ -50,7 +50,7 @@ message OAuth2Config {
config.core.v3.HttpUri token_endpoint = 1;
// The endpoint redirect to for authorization in response to unauthorized requests.
- string authorization_endpoint = 2 [(validate.rules).string = {min_bytes: 1}];
+ string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}];
// Credentials used for OAuth.
OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}];
@@ -60,7 +60,7 @@ message OAuth2Config {
// documentation on :ref:`custom request headers `.
//
// This URI should not contain any query parameters.
- string redirect_uri = 4 [(validate.rules).string = {min_bytes: 1}];
+ string redirect_uri = 4 [(validate.rules).string = {min_len: 1}];
// Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server.
type.matcher.v3.PathMatcher redirect_path_matcher = 5
diff --git a/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto
index 547a3060e16b..ee51e1f96099 100644
--- a/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto
+++ b/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto
@@ -29,7 +29,7 @@ message OAuth2Credentials {
"envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials";
// The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.
- string client_id = 1 [(validate.rules).string = {min_bytes: 1}];
+ string client_id = 1 [(validate.rules).string = {min_len: 1}];
// The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server.
transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2
@@ -56,7 +56,7 @@ message OAuth2Config {
config.core.v4alpha.HttpUri token_endpoint = 1;
// The endpoint redirect to for authorization in response to unauthorized requests.
- string authorization_endpoint = 2 [(validate.rules).string = {min_bytes: 1}];
+ string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}];
// Credentials used for OAuth.
OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}];
@@ -66,7 +66,7 @@ message OAuth2Config {
// documentation on :ref:`custom request headers `.
//
// This URI should not contain any query parameters.
- string redirect_uri = 4 [(validate.rules).string = {min_bytes: 1}];
+ string redirect_uri = 4 [(validate.rules).string = {min_len: 1}];
// Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server.
type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5
diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto
index 781fddc1939c..bc58e7f9b2e1 100644
--- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto
+++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto
@@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// Rate limit :ref:`configuration overview `.
// [#extension: envoy.filters.http.ratelimit]
-// [#next-free-field: 9]
+// [#next-free-field: 10]
message RateLimit {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.http.rate_limit.v2.RateLimit";
@@ -34,7 +34,7 @@ message RateLimit {
}
// The rate limit domain to use when calling the rate limit service.
- string domain = 1 [(validate.rules).string = {min_bytes: 1}];
+ string domain = 1 [(validate.rules).string = {min_len: 1}];
// Specifies the rate limit configurations to be applied with the same
// stage number. If not set, the default stage number is 0.
@@ -60,7 +60,6 @@ message RateLimit {
// The filter's behaviour in case the rate limiting service does
// not respond back. When it is set to true, Envoy will not allow traffic in case of
// communication failure between rate limiting service and the proxy.
- // Defaults to false.
bool failure_mode_deny = 5;
// Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead
@@ -99,4 +98,25 @@ message RateLimit {
// Disabled by default.
XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8
[(validate.rules).enum = {defined_only: true}];
+
+ // Disables emitting the :ref:`x-envoy-ratelimited` header
+ // in case of rate limiting (i.e. 429 responses).
+ // When this header is not present, the request is potentially retriable.
+ bool disable_x_envoy_ratelimited_header = 9;
+}
+
+message RateLimitPerRoute {
+ enum VhRateLimitsOptions {
+ // Use the virtual host rate limits unless the route has a rate limit policy.
+ OVERRIDE = 0;
+
+ // Use the virtual host rate limits even if the route has a rate limit policy.
+ INCLUDE = 1;
+
+ // Ignore the virtual host rate limits even if the route does not have a rate limit policy.
+ IGNORE = 2;
+ }
+
+ // Specifies if the rate limit filter should include the virtual host rate limits.
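+ //
+ // For example, to apply the virtual host rate limits in addition to the
+ // route's own policy (a minimal sketch):
+ //
+ // .. code-block:: yaml
+ //
+ //   vh_rate_limits: INCLUDE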
+ VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}];
}
diff --git a/api/envoy/extensions/filters/http/squash/v3/squash.proto b/api/envoy/extensions/filters/http/squash/v3/squash.proto
index 0ea335a414fa..f9bc9cceceb9 100644
--- a/api/envoy/extensions/filters/http/squash/v3/squash.proto
+++ b/api/envoy/extensions/filters/http/squash/v3/squash.proto
@@ -24,7 +24,7 @@ message Squash {
"envoy.config.filter.http.squash.v2.Squash";
// The name of the cluster that hosts the Squash server.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// When the filter requests the Squash server to create a DebugAttachment, it will use this
// structure as template for the body of the request. It can contain reference to environment
diff --git a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto
index a812992a5b84..55eba141f45f 100644
--- a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto
+++ b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto
@@ -13,7 +13,10 @@ option java_outer_classname = "WasmProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
-// [[#not-implemented-hide:]
+// [#protodoc-title: Wasm]
+// [#extension: envoy.filters.http.wasm]
+// Wasm :ref:`configuration overview `.
+
message Wasm {
// General Plugin configuration.
envoy.extensions.wasm.v3.PluginConfig config = 1;
diff --git a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto
index 8fd0c63d0c82..fb8047d391e9 100644
--- a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto
+++ b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto
@@ -24,7 +24,7 @@ message ProxyProtocol {
string metadata_namespace = 1;
// The key to use within the namespace.
- string key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string key = 2 [(validate.rules).string = {min_len: 1}];
}
// A Rule defines what metadata to apply when a header is present or missing.
diff --git a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto
index b3af267a77ad..2ed14c7f0e23 100644
--- a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto
+++ b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto
@@ -30,11 +30,11 @@ message ClientSSLAuth {
// of principals. The service must support the expected :ref:`REST API
// `.
string auth_api_cluster = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
// The prefix to use when emitting :ref:`statistics
// `.
- string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];
// Time in milliseconds between principal refreshes from the
// authentication service. Default is 60000 (60s). The actual fetch time
diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto
index 749708880d71..646f053ca9b6 100644
--- a/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto
+++ b/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto
@@ -37,7 +37,7 @@ message DubboProxy {
"envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy";
// The human readable prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// Configure the protocol used.
ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];
@@ -62,7 +62,7 @@ message DubboFilter {
// The name of the filter to instantiate. The name must match a supported
// filter.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being
// instantiated. See the supported filters for further documentation.
diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto
index 4894c7693fd7..30499c27f6f0 100644
--- a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto
+++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto
@@ -37,7 +37,7 @@ message DubboProxy {
"envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy";
// The human readable prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// Configure the protocol used.
ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}];
@@ -62,7 +62,7 @@ message DubboFilter {
// The name of the filter to instantiate. The name must match a supported
// filter.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being
// instantiated. See the supported filters for further documentation.
diff --git a/api/envoy/extensions/filters/network/ext_authz/v3/BUILD b/api/envoy/extensions/filters/network/ext_authz/v3/BUILD
index a4e298b42619..a5c5b57b7227 100644
--- a/api/envoy/extensions/filters/network/ext_authz/v3/BUILD
+++ b/api/envoy/extensions/filters/network/ext_authz/v3/BUILD
@@ -8,6 +8,7 @@ api_proto_package(
deps = [
"//envoy/config/core/v3:pkg",
"//envoy/config/filter/network/ext_authz/v2:pkg",
+ "//envoy/type/matcher/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
],
)
diff --git a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto
index 50161f1cb92b..78f4167ccc33 100644
--- a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto
+++ b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto
@@ -4,6 +4,7 @@ package envoy.extensions.filters.network.ext_authz.v3;
import "envoy/config/core/v3/config_source.proto";
import "envoy/config/core/v3/grpc_service.proto";
+import "envoy/type/matcher/v3/metadata.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
@@ -23,13 +24,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// gRPC Authorization API defined by
// :ref:`CheckRequest `.
// A failed check will cause this filter to close the TCP connection.
-// [#next-free-field: 6]
+// [#next-free-field: 7]
message ExtAuthz {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.network.ext_authz.v2.ExtAuthz";
// The prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The external authorization gRPC service configuration.
// The default timeout is set to 200ms by this filter.
@@ -51,4 +52,8 @@ message ExtAuthz {
// version of Check{Request,Response} used on the wire.
config.core.v3.ApiVersion transport_api_version = 5
[(validate.rules).enum = {defined_only: true}];
+
+ // Specifies if the filter is enabled with a metadata matcher.
+ // If this field is not specified, the filter will be enabled for all requests.
+ type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6;
}
diff --git a/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD b/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD
new file mode 100644
index 000000000000..6d146b1c64d1
--- /dev/null
+++ b/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD
@@ -0,0 +1,14 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = [
+ "//envoy/config/core/v4alpha:pkg",
+ "//envoy/extensions/filters/network/ext_authz/v3:pkg",
+ "//envoy/type/matcher/v4alpha:pkg",
+ "@com_github_cncf_udpa//udpa/annotations:pkg",
+ ],
+)
diff --git a/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto
new file mode 100644
index 000000000000..f877a3ed8502
--- /dev/null
+++ b/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto
@@ -0,0 +1,59 @@
+syntax = "proto3";
+
+package envoy.extensions.filters.network.ext_authz.v4alpha;
+
+import "envoy/config/core/v4alpha/config_source.proto";
+import "envoy/config/core/v4alpha/grpc_service.proto";
+import "envoy/type/matcher/v4alpha/metadata.proto";
+
+import "udpa/annotations/status.proto";
+import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha";
+option java_outer_classname = "ExtAuthzProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;
+
+// [#protodoc-title: Network External Authorization]
+// The network layer external authorization service configuration
+// :ref:`configuration overview `.
+// [#extension: envoy.filters.network.ext_authz]
+
+// External Authorization filter calls out to an external service over the
+// gRPC Authorization API defined by
+// :ref:`CheckRequest `.
+// A failed check will cause this filter to close the TCP connection.
+// [#next-free-field: 7]
+message ExtAuthz {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.extensions.filters.network.ext_authz.v3.ExtAuthz";
+
+ // The prefix to use when emitting statistics.
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
+
+ // The external authorization gRPC service configuration.
+ // The default timeout is set to 200ms by this filter.
+ config.core.v4alpha.GrpcService grpc_service = 2;
+
+ // The filter's behaviour in case the external authorization service does
+ // not respond. When set to true, Envoy will allow traffic in case of
+ // communication failure between the authorization service and the proxy.
+ // Defaults to false.
+ bool failure_mode_allow = 3;
+
+ // Specifies if the peer certificate is sent to the external service.
+ //
+ // When this field is true, Envoy will include the peer X.509 certificate, if available, in the
+ // :ref:`certificate`.
+ bool include_peer_certificate = 4;
+
+ // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and
+ // version of Check{Request,Response} used on the wire.
+ config.core.v4alpha.ApiVersion transport_api_version = 5
+ [(validate.rules).enum = {defined_only: true}];
+
+ // Specifies if the filter is enabled with a metadata matcher.
+ // If this field is not specified, the filter will be enabled for all requests.
+ type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6;
+}
diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto
index 68c5c8cad2a3..a4c115c68da0 100644
--- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto
+++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto
@@ -252,7 +252,7 @@ message HttpConnectionManager {
// The human readable prefix to use when emitting statistics for the
// connection manager. See the :ref:`statistics documentation ` for
// more information.
- string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];
oneof route_specifier {
option (validate.required) = true;
@@ -571,27 +571,29 @@ message LocalReplyConfig {
// The configuration to form response body from the :ref:`command operators `
// and to specify response content type as one of: plain/text or application/json.
//
- // Example one: plain/text body_format.
+ // Example one: "plain/text" ``body_format``.
//
- // .. code-block::
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)%
+ // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
//
- // The following response body in `plain/text` format will be generated for a request with
+ // The following response body in "plain/text" format will be generated for a request with
// local reply body of "upstream connection error", response_code=503 and path=/foo.
//
- // .. code-block::
+ // .. code-block:: text
//
// upstream connection error:503:path=/foo
//
- // Example two: application/json body_format.
+ // Example two: "application/json" ``body_format``.
//
- // .. code-block::
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // json_format:
- // status: %RESPONSE_CODE%
- // message: %LOCAL_REPLY_BODY%
- // path: $REQ(:path)%
+ // json_format:
+ // status: "%RESPONSE_CODE%"
+ // message: "%LOCAL_REPLY_BODY%"
+ // path: "%REQ(:path)%"
//
// The following response body in "application/json" format would be generated for a request with
// local reply body of "upstream connection error", response_code=503 and path=/foo.
@@ -716,14 +718,18 @@ message ScopedRoutes {
// If an element contains no separator, the whole element is parsed as key and the
// fragment value is an empty string.
// If there are multiple values for a matched key, the first value is returned.
- string separator = 1 [(validate.rules).string = {min_bytes: 1}];
+ string separator = 1 [(validate.rules).string = {min_len: 1}];
// The key to match on.
- string key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string key = 2 [(validate.rules).string = {min_len: 1}];
}
// The name of the header field to extract the value from.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The element separator (e.g., ';' separates 'a;b;c;d').
// Default: empty string. This causes the entirety of the header field to be extracted.
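+ //
+ // As a sketch, extracting ``vip`` from a header like
+ // ``x-route-selector: vip=172.10.10.20,env=staging`` could use (the header
+ // name and keys are hypothetical):
+ //
+ // .. code-block:: yaml
+ //
+ //   header_value_extractor:
+ //     name: x-route-selector
+ //     element_separator: ","
+ //     element:
+ //       separator: "="
+ //       key: vip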
@@ -757,7 +763,7 @@ message ScopedRoutes {
}
// The name assigned to the scoped routing configuration.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The algorithm to use for constructing a scope key for each request.
ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];
@@ -807,15 +813,15 @@ message HttpFilter {
// The name of the filter configuration. The name is used as a fallback to
// select an extension if the type of the configuration proto is not
// sufficient. It also serves as a resource name in ExtensionConfigDS.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
- // Filter specific configuration which depends on the filter being instantiated. See the supported
- // filters for further documentation.
oneof config_type {
+ // Filter specific configuration which depends on the filter being instantiated. See the supported
+ // filters for further documentation.
google.protobuf.Any typed_config = 4;
// Configuration source specifier for an extension configuration discovery service.
- // In case of a failure and without the default configuration, the HTTP listener responds with 500.
+ // In case of a failure and without the default configuration, the HTTP listener responds with code 500.
// Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).
config.core.v3.ExtensionConfigSource config_discovery = 5;
}
diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto
index 9db92927ebbe..ceb7f4a65a1f 100644
--- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto
+++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto
@@ -251,7 +251,7 @@ message HttpConnectionManager {
// The human readable prefix to use when emitting statistics for the
// connection manager. See the :ref:`statistics documentation ` for
// more information.
- string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];
oneof route_specifier {
option (validate.required) = true;
@@ -573,27 +573,29 @@ message LocalReplyConfig {
// The configuration to form response body from the :ref:`command operators `
// and to specify response content type as one of: plain/text or application/json.
//
- // Example one: plain/text body_format.
+ // Example one: "plain/text" ``body_format``.
//
- // .. code-block::
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)%
+ // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
//
- // The following response body in `plain/text` format will be generated for a request with
+ // The following response body in "plain/text" format will be generated for a request with
// local reply body of "upstream connection error", response_code=503 and path=/foo.
//
- // .. code-block::
+ // .. code-block:: text
//
// upstream connection error:503:path=/foo
//
- // Example two: application/json body_format.
+ // Example two: "application/json" ``body_format``.
//
- // .. code-block::
+ // .. validated-code-block:: yaml
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
//
- // json_format:
- // status: %RESPONSE_CODE%
- // message: %LOCAL_REPLY_BODY%
- // path: $REQ(:path)%
+ // json_format:
+ // status: "%RESPONSE_CODE%"
+ // message: "%LOCAL_REPLY_BODY%"
+ // path: "%REQ(:path)%"
//
// The following response body in "application/json" format would be generated for a request with
// local reply body of "upstream connection error", response_code=503 and path=/foo.
@@ -721,14 +723,18 @@ message ScopedRoutes {
// If an element contains no separator, the whole element is parsed as key and the
// fragment value is an empty string.
// If there are multiple values for a matched key, the first value is returned.
- string separator = 1 [(validate.rules).string = {min_bytes: 1}];
+ string separator = 1 [(validate.rules).string = {min_len: 1}];
// The key to match on.
- string key = 2 [(validate.rules).string = {min_bytes: 1}];
+ string key = 2 [(validate.rules).string = {min_len: 1}];
}
// The name of the header field to extract the value from.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The element separator (e.g., ';' separates 'a;b;c;d').
// Default: empty string. This causes the entirety of the header field to be extracted.
@@ -762,7 +768,7 @@ message ScopedRoutes {
}
// The name assigned to the scoped routing configuration.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// The algorithm to use for constructing a scope key for each request.
ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];
@@ -813,15 +819,15 @@ message HttpFilter {
// The name of the filter configuration. The name is used as a fallback to
// select an extension if the type of the configuration proto is not
// sufficient. It also serves as a resource name in ExtensionConfigDS.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
- // Filter specific configuration which depends on the filter being instantiated. See the supported
- // filters for further documentation.
oneof config_type {
+ // Filter specific configuration which depends on the filter being instantiated. See the supported
+ // filters for further documentation.
google.protobuf.Any typed_config = 4;
// Configuration source specifier for an extension configuration discovery service.
- // In case of a failure and without the default configuration, the HTTP listener responds with 500.
+ // In case of a failure and without the default configuration, the HTTP listener responds with code 500.
// Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).
config.core.v4alpha.ExtensionConfigSource config_discovery = 5;
}
diff --git a/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto
index 497e688f4c3d..0fac07427d0c 100644
--- a/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto
+++ b/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto
@@ -20,5 +20,5 @@ message KafkaBroker {
"envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker";
// The prefix to use when emitting :ref:`statistics `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
}
diff --git a/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto b/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto
index 027bc0e3fc98..37eb8c62d0e2 100644
--- a/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto
+++ b/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto
@@ -24,7 +24,7 @@ message LocalRateLimit {
// The prefix to use when emitting :ref:`statistics
// `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The token bucket configuration to use for rate limiting connections that are processed by the
// filter's filter chain. Each incoming connection processed by the filter consumes a single
diff --git a/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto b/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto
index 7bd17600d145..ebdfb6f2fcc0 100644
--- a/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto
+++ b/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto
@@ -17,13 +17,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// MongoDB :ref:`configuration overview `.
// [#extension: envoy.filters.network.mongo_proxy]
+// [#next-free-field: 6]
message MongoProxy {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.network.mongo_proxy.v2.MongoProxy";
// The human readable prefix to use when emitting :ref:`statistics
// `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The optional path to use for writing Mongo access logs. If no access log
// path is specified, no access logs will be written. Note that access log is
@@ -39,4 +40,9 @@ message MongoProxy {
// Flag to specify whether :ref:`dynamic metadata
// ` should be emitted. Defaults to false.
bool emit_dynamic_metadata = 4;
+
+ // List of commands to emit metrics for. Defaults to "delete", "insert", and "update".
+ // Note that metrics will not be emitted for "find" commands, since those are considered
+ // queries, and metrics for those are emitted under a dedicated "query" namespace.
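+ //
+ // For example, to restrict emitted metrics to insert and update commands:
+ //
+ // .. code-block:: yaml
+ //
+ //   commands:
+ //   - insert
+ //   - update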
+ repeated string commands = 5;
}
diff --git a/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto
index 663449b27035..9dfdb14d3f11 100644
--- a/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto
+++ b/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto
@@ -21,7 +21,7 @@ message MySQLProxy {
// The human readable prefix to use when emitting :ref:`statistics
// `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// [#not-implemented-hide:] The optional path to use for writing MySQL access logs.
// If the access log field is empty, access logs will not be written.
diff --git a/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto
index b92d3cee2541..2fcdda846b6a 100644
--- a/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto
+++ b/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto
@@ -26,10 +26,10 @@ message RateLimit {
"envoy.config.filter.network.rate_limit.v2.RateLimit";
// The prefix to use when emitting :ref:`statistics `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The rate limit domain to use in the rate limit service request.
- string domain = 2 [(validate.rules).string = {min_bytes: 1}];
+ string domain = 2 [(validate.rules).string = {min_len: 1}];
// The rate limit descriptor list to use in the rate limit service request.
repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3
diff --git a/api/envoy/extensions/filters/network/rbac/v3/rbac.proto b/api/envoy/extensions/filters/network/rbac/v3/rbac.proto
index e62f7b4c419e..6b8d3b0181b9 100644
--- a/api/envoy/extensions/filters/network/rbac/v3/rbac.proto
+++ b/api/envoy/extensions/filters/network/rbac/v3/rbac.proto
@@ -46,7 +46,7 @@ message RBAC {
config.rbac.v3.RBAC shadow_rules = 2;
// The prefix to use when emitting statistics.
- string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];
// RBAC enforcement strategy. By default RBAC will be enforced only once
// when the first byte of data arrives from the downstream. When used in
diff --git a/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto b/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto
index 8452a89822c1..a1508997df62 100644
--- a/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto
+++ b/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto
@@ -46,7 +46,7 @@ message RBAC {
config.rbac.v4alpha.RBAC shadow_rules = 2;
// The prefix to use when emitting statistics.
- string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];
// RBAC enforcement strategy. By default RBAC will be enforced only once
// when the first byte of data arrives from the downstream. When used in
diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto
index 740095ac5120..4e1a8099fc37 100644
--- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto
+++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto
@@ -140,7 +140,7 @@ message RedisProxy {
// Specifies the cluster that requests will be mirrored to. The cluster must
// exist in the cluster manager configuration.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// If not specified or the runtime key is not present, all requests to the target cluster
// will be mirrored.
@@ -162,7 +162,7 @@ message RedisProxy {
bool remove_prefix = 2;
// Upstream cluster to forward the command to.
- string cluster = 3 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 3 [(validate.rules).string = {min_len: 1}];
// Indicates that the route has a request mirroring policy.
repeated RequestMirrorPolicy request_mirror_policy = 4;
@@ -213,7 +213,7 @@ message RedisProxy {
reserved "cluster";
// The prefix to use when emitting :ref:`statistics `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// Network settings for the connection pool to the upstream clusters.
ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}];
diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto
index ee77ab909592..e29a3d10af0a 100644
--- a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto
+++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto
@@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
message RocketmqProxy {
// The human readable prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The route table for the connection manager is specified in this property.
RouteConfiguration route_config = 2;
diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto
index 5fe5d33ffacf..899debcbde7b 100644
--- a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto
+++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto
@@ -48,7 +48,7 @@ message RouteMatch {
message RouteAction {
// Indicates the upstream cluster to which the request should be routed.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// Optional endpoint metadata match criteria used by the subset load balancer.
config.core.v3.Metadata metadata_match = 2;
diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto
index a765734e66db..cbc66fcd9979 100644
--- a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto
+++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto
@@ -25,7 +25,7 @@ message RocketmqProxy {
"envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy";
// The human readable prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The route table for the connection manager is specified in this property.
RouteConfiguration route_config = 2;
diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto
index 995e8bcb05e3..0925afef833d 100644
--- a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto
+++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto
@@ -60,7 +60,7 @@ message RouteAction {
"envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction";
// Indicates the upstream cluster to which the request should be routed.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// Optional endpoint metadata match criteria used by the subset load balancer.
config.core.v4alpha.Metadata metadata_match = 2;
diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto
index 3d305cadcf40..cbe7581588b9 100644
--- a/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto
+++ b/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto
@@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// TCP Proxy :ref:`configuration overview `.
// [#extension: envoy.filters.network.tcp_proxy]
-// [#next-free-field: 13]
+// [#next-free-field: 14]
message TcpProxy {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.network.tcp_proxy.v2.TcpProxy";
@@ -39,7 +39,7 @@ message TcpProxy {
"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight";
// Name of the upstream cluster.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// When a request matches the route, the choice of an upstream cluster is
// determined by its weight. The sum of weights across all entries in the
@@ -67,7 +67,7 @@ message TcpProxy {
"envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig";
// The hostname to send in the synthesized CONNECT headers to the upstream proxy.
- string hostname = 1 [(validate.rules).string = {min_bytes: 1}];
+ string hostname = 1 [(validate.rules).string = {min_len: 1}];
}
reserved 6;
@@ -76,7 +76,7 @@ message TcpProxy {
// The prefix to use when emitting :ref:`statistics
// `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
oneof cluster_specifier {
option (validate.required) = true;
@@ -134,4 +134,10 @@ message TcpProxy {
// payloads over a shared HTTP/2 tunnel. If this message is absent, the payload
// will be proxied upstream as per usual.
TunnelingConfig tunneling_config = 12;
+
+ // The maximum duration of a connection. The duration is defined as the period since a connection
+ // was established. If not set, there is no max duration. When max_downstream_connection_duration
+ // is reached, the connection will be closed. The duration must be at least 1ms.
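+ //
+ // For example, to cap downstream connections at five minutes:
+ //
+ // .. code-block:: yaml
+ //
+ //   max_downstream_connection_duration: 300s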
+ google.protobuf.Duration max_downstream_connection_duration = 13
+ [(validate.rules).duration = {gte {nanos: 1000000}}];
}
diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto
index 1857f2abcd4e..9a2f395176b1 100644
--- a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto
+++ b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto
@@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO
// TCP Proxy :ref:`configuration overview `.
// [#extension: envoy.filters.network.tcp_proxy]
-// [#next-free-field: 13]
+// [#next-free-field: 14]
message TcpProxy {
option (udpa.annotations.versioning).previous_message_type =
"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy";
@@ -39,7 +39,7 @@ message TcpProxy {
"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight";
// Name of the upstream cluster.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// When a request matches the route, the choice of an upstream cluster is
// determined by its weight. The sum of weights across all entries in the
@@ -67,7 +67,7 @@ message TcpProxy {
"envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig";
// The hostname to send in the synthesized CONNECT headers to the upstream proxy.
- string hostname = 1 [(validate.rules).string = {min_bytes: 1}];
+ string hostname = 1 [(validate.rules).string = {min_len: 1}];
}
reserved 6;
@@ -76,7 +76,7 @@ message TcpProxy {
// The prefix to use when emitting :ref:`statistics
// `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
oneof cluster_specifier {
option (validate.required) = true;
@@ -134,4 +134,10 @@ message TcpProxy {
// payloads over a shared HTTP/2 tunnel. If this message is absent, the payload
// will be proxied upstream as per usual.
TunnelingConfig tunneling_config = 12;
+
+ // The maximum duration of a connection. The duration is defined as the period since a connection
+ // was established. If not set, there is no max duration. When max_downstream_connection_duration
+ // is reached, the connection will be closed. The duration must be at least 1ms.
+ google.protobuf.Duration max_downstream_connection_duration = 13
+ [(validate.rules).duration = {gte {nanos: 1000000}}];
}
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto
index 4fc3289ae33d..c93b4d1e8e5a 100644
--- a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto
+++ b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto
@@ -25,7 +25,7 @@ message RateLimit {
"envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit";
// The rate limit domain to use in the rate limit service request.
- string domain = 1 [(validate.rules).string = {min_bytes: 1}];
+ string domain = 1 [(validate.rules).string = {min_len: 1}];
// Specifies the rate limit configuration stage. Each configured rate limit filter performs a
// rate limit check using descriptors configured in the
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto
index b7afc4f0b803..f00b0e6983d1 100644
--- a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto
+++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto
@@ -91,7 +91,7 @@ message RouteAction {
// Indicates a single upstream cluster to which the request should be routed.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// Multiple upstream clusters can be specified for a given route. The
// request is routed to one of the upstream clusters based on weights
@@ -103,9 +103,8 @@ message RouteAction {
// header is not found or the referenced cluster does not exist Envoy will
// respond with an unknown method exception or an internal error exception,
// respectively.
- string cluster_header = 6 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}
- ];
+ string cluster_header = 6
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
}
// Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in
@@ -138,7 +137,7 @@ message WeightedCluster {
"envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight";
// Name of the upstream cluster.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// When a request matches the route, the choice of an upstream cluster is determined by its
// weight. The sum of weights across all entries in the clusters array determines the total
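As a hedged sketch, a thrift RouteAction splitting traffic across weighted clusters (names and weights are hypothetical; each name must now be non-empty under min_len: 1):

  route:
    weighted_clusters:
      clusters:
      - name: cluster_a
        weight: 80
      - name: cluster_b
        weight: 20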
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto
index 74c71afb5424..2b9863e91ffa 100644
--- a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto
+++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto
@@ -72,7 +72,7 @@ message ThriftProxy {
ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];
// The human readable prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The route table for the connection manager is static and is specified in this property.
RouteConfiguration route_config = 4;
@@ -99,7 +99,7 @@ message ThriftFilter {
// [#comment:TODO(zuercher): Auto generate the following list]
// * :ref:`envoy.filters.thrift.router `
// * :ref:`envoy.filters.thrift.rate_limit `
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being instantiated. See the supported
// filters for further documentation.
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto
index 374cc131ddf8..b73a78c4f2cc 100644
--- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto
+++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto
@@ -91,7 +91,7 @@ message RouteAction {
// Indicates a single upstream cluster to which the request should be routed
// to.
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 1 [(validate.rules).string = {min_len: 1}];
// Multiple upstream clusters can be specified for a given route. The
// request is routed to one of the upstream clusters based on weights
@@ -103,9 +103,8 @@ message RouteAction {
// header is not found or the referenced cluster does not exist Envoy will
// respond with an unknown method exception or an internal error exception,
// respectively.
- string cluster_header = 6 [
- (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}
- ];
+ string cluster_header = 6
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
}
// Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in
@@ -138,7 +137,7 @@ message WeightedCluster {
"envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight";
// Name of the upstream cluster.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// When a request matches the route, the choice of an upstream cluster is determined by its
// weight. The sum of weights across all entries in the clusters array determines the total
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto
index 6bf055da3ce6..b75d0e39eaf2 100644
--- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto
+++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto
@@ -72,7 +72,7 @@ message ThriftProxy {
ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];
// The human readable prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// The route table for the connection manager is static and is specified in this property.
RouteConfiguration route_config = 4;
@@ -99,7 +99,7 @@ message ThriftFilter {
// [#comment:TODO(zuercher): Auto generate the following list]
// * :ref:`envoy.filters.thrift.router `
// * :ref:`envoy.filters.thrift.rate_limit `
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Filter specific configuration which depends on the filter being instantiated. See the supported
// filters for further documentation.
diff --git a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto
index 131582762b59..0c1ac6af440e 100644
--- a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto
+++ b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto
@@ -13,7 +13,10 @@ option java_outer_classname = "WasmProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
-// [[#not-implemented-hide:]
+// [#protodoc-title: Wasm]
+// [#extension: envoy.filters.network.wasm]
+// Wasm :ref:`configuration overview `.
+
message Wasm {
// General Plugin configuration.
envoy.extensions.wasm.v3.PluginConfig config = 1;
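With the filter no longer hidden, a hedged sketch of wiring it into a filter chain (module path hypothetical; the runtime values documented elsewhere in this diff are "v8" and "null"):

  - name: envoy.filters.network.wasm
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm
      config:
        vm_config:
          runtime: v8
          code:
            local:
              filename: /etc/envoy/example_filter.wasm   # hypothetical module path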
diff --git a/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto b/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto
index a90f777d79ec..eb2c202c58f1 100644
--- a/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto
+++ b/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto
@@ -23,7 +23,7 @@ message ZooKeeperProxy {
// The human readable prefix to use when emitting :ref:`statistics
// `.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
// [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs.
// If the access log field is empty, access logs will not be written.
diff --git a/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto
index d3f6123548f8..1e986434f777 100644
--- a/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto
+++ b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto
@@ -35,13 +35,13 @@ message UdpProxyConfig {
}
// The stat prefix used when emitting UDP proxy filter stats.
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];
oneof route_specifier {
option (validate.required) = true;
// The upstream cluster to connect to.
- string cluster = 2 [(validate.rules).string = {min_bytes: 1}];
+ string cluster = 2 [(validate.rules).string = {min_len: 1}];
}
// The idle timeout for sessions. Idle is defined as no datagrams between received or sent by
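A minimal UDP proxy sketch touching both revalidated fields (cluster name hypothetical):

  listener_filters:
  - name: envoy.filters.udp_listener.udp_proxy
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig
      stat_prefix: udp_ingress
      cluster: udp_backend      # satisfies the required route_specifier oneof
      idle_timeout: 60s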
diff --git a/api/envoy/extensions/stat_sinks/wasm/v3/BUILD b/api/envoy/extensions/stat_sinks/wasm/v3/BUILD
new file mode 100644
index 000000000000..c37174bdefc4
--- /dev/null
+++ b/api/envoy/extensions/stat_sinks/wasm/v3/BUILD
@@ -0,0 +1,12 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = [
+ "//envoy/extensions/wasm/v3:pkg",
+ "@com_github_cncf_udpa//udpa/annotations:pkg",
+ ],
+)
diff --git a/api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto b/api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto
new file mode 100644
index 000000000000..3fc5dae91795
--- /dev/null
+++ b/api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto
@@ -0,0 +1,23 @@
+syntax = "proto3";
+
+package envoy.extensions.stat_sinks.wasm.v3;
+
+import "envoy/extensions/wasm/v3/wasm.proto";
+
+import "udpa/annotations/status.proto";
+import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3";
+option java_outer_classname = "WasmProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Wasm]
+// Wasm :ref:`configuration overview `.
+// [#extension: envoy.stat_sinks.wasm]
+
+message Wasm {
+ // General Plugin configuration.
+ envoy.extensions.wasm.v3.PluginConfig config = 1;
+}
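The new package enables a Wasm-backed stats sink; a hedged bootstrap-level sketch (module path hypothetical):

  stats_sinks:
  - name: envoy.stat_sinks.wasm
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.stat_sinks.wasm.v3.Wasm
      config:
        vm_config:
          runtime: v8
          code:
            local:
              filename: /etc/envoy/stats_sink.wasm   # hypothetical module path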
diff --git a/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto b/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto
index 94359ce837bf..f41c8added21 100644
--- a/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto
+++ b/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto
@@ -20,8 +20,8 @@ message DatadogConfig {
"envoy.config.trace.v3.DatadogConfig";
// The cluster to use for submitting traces to the Datadog agent.
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];
// The name used for the service when traces are generated by envoy.
- string service_name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string service_name = 2 [(validate.rules).string = {min_len: 1}];
}
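For reference, the equivalent tracer wiring against the stable v3 type (the v4alpha package is not yet consumable; cluster name hypothetical):

  tracing:
    http:
      name: envoy.tracers.datadog
      typed_config:
        "@type": type.googleapis.com/envoy.config.trace.v3.DatadogConfig
        collector_cluster: datadog_agent   # must be non-empty (min_len: 1)
        service_name: envoy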
diff --git a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto b/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto
index d311304a3ddf..21455a974d3b 100644
--- a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto
+++ b/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto
@@ -25,7 +25,7 @@ message DynamicOtConfig {
// Dynamic library implementing the `OpenTracing API
// `_.
- string library = 1 [(validate.rules).string = {min_bytes: 1}];
+ string library = 1 [(validate.rules).string = {min_len: 1}];
// The configuration to use when creating a tracer from the given dynamic
// library.
diff --git a/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto b/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto
index 93ea47ba6a10..d7e306754dc9 100644
--- a/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto
+++ b/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto
@@ -35,11 +35,11 @@ message LightstepConfig {
}
// The cluster manager cluster that hosts the LightStep collectors.
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];
// File containing the access token to the `LightStep
// `_ API.
- string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}];
+ string access_token_file = 2 [(validate.rules).string = {min_len: 1}];
// Propagation modes to use by LightStep's tracer.
repeated PropagationMode propagation_modes = 3
diff --git a/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto b/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto
index 3abbcad2de15..a6974fcc0ea6 100644
--- a/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto
+++ b/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto
@@ -47,12 +47,12 @@ message ZipkinConfig {
// The cluster manager cluster that hosts the Zipkin collectors. Note that the
// Zipkin cluster must be defined in the :ref:`Bootstrap static cluster
// resources `.
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+ string collector_cluster = 1 [(validate.rules).string = {min_len: 1}];
// The API endpoint of the Zipkin service where the spans will be sent. When
// using a standard Zipkin installation, the API endpoint is typically
// /api/v1/spans, which is the default value.
- string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}];
+ string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}];
// Determines whether a 128bit trace id will be used when creating a new
// trace instance. The default value is false, which will result in a 64 bit trace id being used.
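A corresponding sketch for the Zipkin tracer, again shown with the v3 type (cluster name hypothetical):

  tracing:
    http:
      name: envoy.tracers.zipkin
      typed_config:
        "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig
        collector_cluster: zipkin          # must be defined as a bootstrap static cluster
        collector_endpoint: /api/v1/spans  # the documented default endpoint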
diff --git a/api/envoy/extensions/transport_sockets/alts/v3/alts.proto b/api/envoy/extensions/transport_sockets/alts/v3/alts.proto
index 6c001be1c746..85a8c66d0c0e 100644
--- a/api/envoy/extensions/transport_sockets/alts/v3/alts.proto
+++ b/api/envoy/extensions/transport_sockets/alts/v3/alts.proto
@@ -22,7 +22,7 @@ message Alts {
// The location of a handshaker service, this is usually 169.254.169.254:8080
// on GCE.
- string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}];
+ string handshaker_service = 1 [(validate.rules).string = {min_len: 1}];
// The acceptable service accounts from peer, peers not in the list will be rejected in the
// handshake validation step. If empty, no validation will be performed.
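A hedged sketch of the ALTS transport socket (the address follows the GCE default noted in the comment):

  transport_socket:
    name: envoy.transport_sockets.alts
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.transport_sockets.alts.v3.Alts
      handshaker_service: 169.254.169.254:8080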
diff --git a/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto
index c6c2ee9798d6..687226574d29 100644
--- a/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto
+++ b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto
@@ -6,7 +6,6 @@ import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/proxy_protocol.proto";
import "udpa/annotations/status.proto";
-import "udpa/annotations/versioning.proto";
import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3";
@@ -16,9 +15,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Upstream Proxy Protocol]
// [#extension: envoy.transport_sockets.upstream_proxy_protocol]
-// [#not-implemented-hide:]
+
// Configuration for PROXY protocol socket
message ProxyProtocolUpstreamTransport {
+ // The PROXY protocol settings.
config.core.v3.ProxyProtocolConfig config = 1;
// The underlying transport socket being wrapped.
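With the hide annotation dropped, a hedged sketch of an upstream cluster sending PROXY protocol v2 over a plain socket:

  transport_socket:
    name: envoy.transport_sockets.upstream_proxy_protocol
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.transport_sockets.proxy_protocol.v3.ProxyProtocolUpstreamTransport
      config:
        version: V2
      transport_socket:
        name: envoy.transport_sockets.raw_buffer   # the wrapped inner socket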
diff --git a/api/envoy/extensions/transport_sockets/tls/v3/common.proto b/api/envoy/extensions/transport_sockets/tls/v3/common.proto
index 5eab3c1060b5..a7b9360d248a 100644
--- a/api/envoy/extensions/transport_sockets/tls/v3/common.proto
+++ b/api/envoy/extensions/transport_sockets/tls/v3/common.proto
@@ -120,7 +120,7 @@ message PrivateKeyProvider {
// Private key method provider name. The name must match a
// supported private key method provider type.
- string provider_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string provider_name = 1 [(validate.rules).string = {min_len: 1}];
// Private key method provider specific configuration.
oneof config_type {
@@ -151,7 +151,9 @@ message TlsCertificate {
// TLS private key is not password encrypted.
config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true];
- // [#not-implemented-hide:]
+ // The OCSP response to be stapled with this certificate during the handshake.
+ // The response must be DER-encoded and may only be provided via ``filename`` or
+ // ``inline_bytes``. The response may pertain to only one certificate.
config.core.v3.DataSource ocsp_staple = 4;
// [#not-implemented-hide:]
@@ -205,7 +207,7 @@ message CertificateValidationContext {
ACCEPT_UNTRUSTED = 1;
}
- reserved 4;
+ reserved 4, 5;
reserved "verify_subject_alt_name";
@@ -264,7 +266,7 @@ message CertificateValidationContext {
// because SPKI is tied to a private key, so it doesn't change when the certificate
// is renewed using the same private key.
repeated string verify_certificate_spki = 3
- [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}];
+ [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}];
// An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that
// the SHA-256 of the DER-encoded presented certificate matches one of the specified values.
@@ -293,7 +295,7 @@ message CertificateValidationContext {
// ` are specified,
// a hash matching value from either of the lists will result in the certificate being accepted.
repeated string verify_certificate_hash = 2
- [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}];
+ [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}];
// An optional list of Subject Alternative name matchers. Envoy will verify that the
// Subject Alternative Name of the presented certificate matches one of the specified matches.
@@ -315,9 +317,6 @@ message CertificateValidationContext {
// `.
repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9;
- // [#not-implemented-hide:] Must present a signed time-stamped OCSP response.
- google.protobuf.BoolValue require_ocsp_staple = 5;
-
// [#not-implemented-hide:] Must present signed certificate time-stamp.
google.protobuf.BoolValue require_signed_certificate_timestamp = 6;
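A hedged sketch of attaching a staple to a TlsCertificate under the newly documented field (file paths hypothetical; the response must be DER-encoded):

  tls_certificates:
  - certificate_chain: {filename: /etc/envoy/cert.pem}
    private_key: {filename: /etc/envoy/key.pem}
    ocsp_staple: {filename: /etc/envoy/ocsp_response.der}   # filename or inline_bytes only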
diff --git a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto
index 80c68a56f5ce..f25370c3c9f6 100644
--- a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto
+++ b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto
@@ -12,6 +12,7 @@ import "udpa/annotations/migrate.proto";
import "udpa/annotations/sensitive.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3";
option java_outer_classname = "SecretProto";
@@ -33,7 +34,10 @@ message SdsSecretConfig {
// Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
// When both name and config are specified, then secret can be fetched and/or reloaded via
// SDS. When only name is specified, then secret will be loaded from static resources.
- string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"];
+ string name = 1 [
+ (validate.rules).string = {min_len: 1},
+ (udpa.annotations.field_migrate).oneof_promotion = "name_specifier"
+ ];
// Resource locator for SDS. This is mutually exclusive to *name*.
// [#not-implemented-hide:]
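Since the name is now validated as non-empty, a minimal SDS reference might look like this sketch (secret and cluster names hypothetical):

  tls_certificate_sds_secret_configs:
  - name: server_cert
    sds_config:
      api_config_source:
        api_type: GRPC
        grpc_services:
        - envoy_grpc: {cluster_name: sds_cluster}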
diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto
index f746f3d2f1cf..e11b2691978c 100644
--- a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto
+++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto
@@ -54,11 +54,33 @@ message UpstreamTlsContext {
google.protobuf.UInt32Value max_session_keys = 4;
}
-// [#next-free-field: 8]
+// [#next-free-field: 9]
message DownstreamTlsContext {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.auth.DownstreamTlsContext";
+ enum OcspStaplePolicy {
+ // OCSP responses are optional. If an OCSP response is absent
+ // or expired, the associated certificate will be used for
+ // connections without an OCSP staple.
+ LENIENT_STAPLING = 0;
+
+ // OCSP responses are optional. If an OCSP response is absent,
+ // the associated certificate will be used without an
+ // OCSP staple. If a response is provided but is expired,
+ // the associated certificate will not be used for
+ // subsequent connections. If no suitable certificate is found,
+ // the connection is rejected.
+ STRICT_STAPLING = 1;
+
+ // OCSP responses are required. Configuration will fail if
+ // a certificate is provided without an OCSP response. If a
+ // response expires, the associated certificate will not be
+ // used for connections. If no suitable certificate is found, the
+ // connection is rejected.
+ MUST_STAPLE = 2;
+ }
+
// Common TLS context settings.
CommonTlsContext common_tls_context = 1;
@@ -96,6 +118,11 @@ message DownstreamTlsContext {
lt {seconds: 4294967296}
gte {}
}];
+
+ // Config for whether to use certificates if they do not have
+ // an accompanying OCSP response or if the response expires at runtime.
+ // Defaults to LENIENT_STAPLING.
+ OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}];
}
// TLS context shared by both client and server TLS contexts.
@@ -108,7 +135,7 @@ message CommonTlsContext {
message CertificateProvider {
// opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify
// a root-certificate (validation context) or "TLS" to specify a new tls-certificate.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Provider specific config.
// Note: an implementation is expected to dedup multiple instances of the same config
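Tying the pieces together, a hedged DownstreamTlsContext sketch that opts into strict stapling (paths hypothetical; omitting the field keeps the LENIENT_STAPLING default):

  transport_socket:
    name: envoy.transport_sockets.tls
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
      ocsp_staple_policy: STRICT_STAPLING
      common_tls_context:
        tls_certificates:
        - certificate_chain: {filename: /etc/envoy/cert.pem}
          private_key: {filename: /etc/envoy/key.pem}
          ocsp_staple: {filename: /etc/envoy/ocsp_response.der}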
diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto
index 589dd17b543a..3608f93ffe30 100644
--- a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto
+++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto
@@ -121,7 +121,7 @@ message PrivateKeyProvider {
// Private key method provider name. The name must match a
// supported private key method provider type.
- string provider_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string provider_name = 1 [(validate.rules).string = {min_len: 1}];
// Private key method provider specific configuration.
oneof config_type {
@@ -153,7 +153,9 @@ message TlsCertificate {
// TLS private key is not password encrypted.
config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true];
- // [#not-implemented-hide:]
+ // The OCSP response to be stapled with this certificate during the handshake.
+ // The response must be DER-encoded and may only be provided via ``filename`` or
+ // ``inline_bytes``. The response may pertain to only one certificate.
config.core.v4alpha.DataSource ocsp_staple = 4;
// [#not-implemented-hide:]
@@ -207,7 +209,7 @@ message CertificateValidationContext {
ACCEPT_UNTRUSTED = 1;
}
- reserved 4;
+ reserved 4, 5;
reserved "verify_subject_alt_name";
@@ -266,7 +268,7 @@ message CertificateValidationContext {
// because SPKI is tied to a private key, so it doesn't change when the certificate
// is renewed using the same private key.
repeated string verify_certificate_spki = 3
- [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}];
+ [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}];
// An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that
// the SHA-256 of the DER-encoded presented certificate matches one of the specified values.
@@ -295,7 +297,7 @@ message CertificateValidationContext {
// ` are specified,
// a hash matching value from either of the lists will result in the certificate being accepted.
repeated string verify_certificate_hash = 2
- [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}];
+ [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}];
// An optional list of Subject Alternative name matchers. Envoy will verify that the
// Subject Alternative Name of the presented certificate matches one of the specified matches.
@@ -317,9 +319,6 @@ message CertificateValidationContext {
// `.
repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9;
- // [#not-implemented-hide:] Must present a signed time-stamped OCSP response.
- google.protobuf.BoolValue require_ocsp_staple = 5;
-
// [#not-implemented-hide:] Must present signed certificate time-stamp.
google.protobuf.BoolValue require_signed_certificate_timestamp = 6;
diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto
index 11306f21415a..9848eaadef0b 100644
--- a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto
+++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto
@@ -11,6 +11,7 @@ import "udpa/core/v1/resource_locator.proto";
import "udpa/annotations/sensitive.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha";
option java_outer_classname = "SecretProto";
@@ -35,7 +36,7 @@ message SdsSecretConfig {
// Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
// When both name and config are specified, then secret can be fetched and/or reloaded via
// SDS. When only name is specified, then secret will be loaded from static resources.
- string name = 1;
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Resource locator for SDS. This is mutually exclusive to *name*.
// [#not-implemented-hide:]
diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto
index 44963f687073..6a49cb352ec4 100644
--- a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto
+++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto
@@ -53,11 +53,33 @@ message UpstreamTlsContext {
google.protobuf.UInt32Value max_session_keys = 4;
}
-// [#next-free-field: 8]
+// [#next-free-field: 9]
message DownstreamTlsContext {
option (udpa.annotations.versioning).previous_message_type =
"envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext";
+ enum OcspStaplePolicy {
+ // OCSP responses are optional. If an OCSP response is absent
+ // or expired, the associated certificate will be used for
+ // connections without an OCSP staple.
+ LENIENT_STAPLING = 0;
+
+ // OCSP responses are optional. If an OCSP response is absent,
+ // the associated certificate will be used without an
+ // OCSP staple. If a response is provided but is expired,
+ // the associated certificate will not be used for
+ // subsequent connections. If no suitable certificate is found,
+ // the connection is rejected.
+ STRICT_STAPLING = 1;
+
+ // OCSP responses are required. Configuration will fail if
+ // a certificate is provided without an OCSP response. If a
+ // response expires, the associated certificate will not be
+ // used for connections. If no suitable certificate is found, the
+ // connection is rejected.
+ MUST_STAPLE = 2;
+ }
+
// Common TLS context settings.
CommonTlsContext common_tls_context = 1;
@@ -95,6 +117,11 @@ message DownstreamTlsContext {
lt {seconds: 4294967296}
gte {}
}];
+
+ // Config for whether to use certificates if they do not have
+ // an accompanying OCSP response or if the response expires at runtime.
+ // Defaults to LENIENT_STAPLING.
+ OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}];
}
// TLS context shared by both client and server TLS contexts.
@@ -111,7 +138,7 @@ message CommonTlsContext {
// opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify
// a root-certificate (validation context) or "TLS" to specify a new tls-certificate.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// Provider specific config.
// Note: an implementation is expected to dedup multiple instances of the same config
diff --git a/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto
index c6b02364aa2d..44e207172c9b 100644
--- a/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto
+++ b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto
@@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.generic.v3;
import "udpa/annotations/status.proto";
option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3";
-option java_outer_classname = "GenericConnectionPoolProto";
+option java_outer_classname = "GenericConnectionPoolProtoOuterClass";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
diff --git a/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto
index e4c2d6ff9b84..8318f3c666d9 100644
--- a/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto
+++ b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto
@@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.http.v3;
import "udpa/annotations/status.proto";
option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3";
-option java_outer_classname = "HttpConnectionPoolProto";
+option java_outer_classname = "HttpConnectionPoolProtoOuterClass";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
diff --git a/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto
index 5bc8734cb3f7..7c1d633432e9 100644
--- a/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto
+++ b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto
@@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.tcp.v3;
import "udpa/annotations/status.proto";
option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3";
-option java_outer_classname = "TcpConnectionPoolProto";
+option java_outer_classname = "TcpConnectionPoolProtoOuterClass";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
diff --git a/api/envoy/extensions/wasm/v3/wasm.proto b/api/envoy/extensions/wasm/v3/wasm.proto
index 26f458214466..b42fb75a0bf7 100644
--- a/api/envoy/extensions/wasm/v3/wasm.proto
+++ b/api/envoy/extensions/wasm/v3/wasm.proto
@@ -16,8 +16,8 @@ option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Wasm]
+// [#extension: envoy.bootstrap.wasm]
-// [[#not-implemented-hide:]
// Configuration for a Wasm VM.
// [#next-free-field: 7]
message VmConfig {
@@ -29,7 +29,7 @@ message VmConfig {
string vm_id = 1;
// The Wasm runtime type (either "v8" or "null" for code compiled into Envoy).
- string runtime = 2 [(validate.rules).string = {min_bytes: 1}];
+ string runtime = 2 [(validate.rules).string = {min_len: 1}];
// The Wasm code that Envoy will execute.
config.core.v3.AsyncDataSource code = 3;
@@ -51,7 +51,6 @@ message VmConfig {
bool nack_on_code_cache_miss = 6;
}
-// [[#not-implemented-hide:]
// Base Configuration for Wasm Plugins e.g. filters and services.
// [#next-free-field: 6]
message PluginConfig {
@@ -66,9 +65,9 @@ message PluginConfig {
string root_id = 2;
// Configuration for finding or starting VM.
- oneof vm_config {
- VmConfig inline_vm_config = 3;
- // In the future add referential VM configurations.
+ oneof vm {
+ VmConfig vm_config = 3;
+ // TODO: add referential VM configurations.
}
// Filter/service configuration used to configure or reconfigure a plugin
@@ -86,7 +85,6 @@ message PluginConfig {
bool fail_open = 5;
}
-// [[#not-implemented-hide:]
// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService
// ` This opaque configuration will be used to create a Wasm Service.
message WasmService {
diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
index 02636d0fb25f..fb2369089151 100644
--- a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
+++ b/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
@@ -23,7 +23,7 @@ message ProfileActionConfig {
google.protobuf.Duration profile_duration = 1;
// File path to the directory to output profiles.
- string profile_path = 2 [(validate.rules).string = {min_bytes: 1}];
+ string profile_path = 2 [(validate.rules).string = {min_len: 1}];
// Limits the max number of profiles that can be generated by this action
// over its lifetime to avoid filling the disk.
diff --git a/api/envoy/service/accesslog/v3/als.proto b/api/envoy/service/accesslog/v3/als.proto
index 3f5e37325cc5..5421c2304918 100644
--- a/api/envoy/service/accesslog/v3/als.proto
+++ b/api/envoy/service/accesslog/v3/als.proto
@@ -50,7 +50,7 @@ message StreamAccessLogsMessage {
// The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig
// `.
- string log_name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string log_name = 2 [(validate.rules).string = {min_len: 1}];
}
// Wrapper for batches of HTTP access log entries.
diff --git a/api/envoy/service/accesslog/v4alpha/als.proto b/api/envoy/service/accesslog/v4alpha/als.proto
index 4edb5eade0f2..e7e96583fd2c 100644
--- a/api/envoy/service/accesslog/v4alpha/als.proto
+++ b/api/envoy/service/accesslog/v4alpha/als.proto
@@ -50,7 +50,7 @@ message StreamAccessLogsMessage {
// The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig
// `.
- string log_name = 2 [(validate.rules).string = {min_bytes: 1}];
+ string log_name = 2 [(validate.rules).string = {min_len: 1}];
}
// Wrapper for batches of HTTP access log entries.
diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto
index 0f580fe7dc34..7dbfd3556968 100644
--- a/api/envoy/service/auth/v2/external_auth.proto
+++ b/api/envoy/service/auth/v2/external_auth.proto
@@ -43,7 +43,8 @@ message DeniedHttpResponse {
type.HttpStatus status = 1 [(validate.rules).message = {required: true}];
// This field allows the authorization service to send HTTP response headers
- // to the downstream client.
+ // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to
+ // false when used in this message.
repeated api.v2.core.HeaderValueOption headers = 2;
// This field allows the authorization service to send a response body data
@@ -55,9 +56,10 @@ message DeniedHttpResponse {
message OkHttpResponse {
// HTTP entity headers in addition to the original request headers. This allows the authorization
// service to append, to add or to override headers from the original request before
- // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`,
- // the filter will append the correspondent header value to the matched request header. Note that
- // by Leaving `append` as false, the filter will either add a new header, or override an existing
+ // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to
+ // false when used in this message. By setting the `append` field to `true`,
+ // the filter will append the corresponding header value to the matched request header.
+ // By leaving `append` as false, the filter will either add a new header, or override an existing
// one if there is a match.
repeated api.v2.core.HeaderValueOption headers = 2;
}
diff --git a/api/envoy/service/auth/v3/attribute_context.proto b/api/envoy/service/auth/v3/attribute_context.proto
index 3c4fe0af665e..cdf3ee9f96e4 100644
--- a/api/envoy/service/auth/v3/attribute_context.proto
+++ b/api/envoy/service/auth/v3/attribute_context.proto
@@ -97,7 +97,7 @@ message AttributeContext {
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
- // [#next-free-field: 12]
+ // [#next-free-field: 13]
message HttpRequest {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.auth.v2.AttributeContext.HttpRequest";
@@ -145,6 +145,12 @@ message AttributeContext {
// The HTTP request body.
string body = 11;
+
+ // The HTTP request body in bytes. This is used instead of
+ // :ref:`body ` when
+ // :ref:`pack_as_bytes `
+ // is set to true.
+ bytes raw_body = 12;
}
// The source of a network activity, such as starting a TCP connection.
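The new raw_body field is only populated when the ext_authz filter is asked to buffer the body as bytes; a hedged filter-side sketch (cluster name hypothetical):

  http_filters:
  - name: envoy.filters.http.ext_authz
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
      grpc_service:
        envoy_grpc: {cluster_name: ext_authz}
      with_request_body:
        max_request_bytes: 8192
        pack_as_bytes: true   # check requests then carry raw_body instead of body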
diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto
index 317d83abe485..9e2bf8fccd5b 100644
--- a/api/envoy/service/auth/v3/external_auth.proto
+++ b/api/envoy/service/auth/v3/external_auth.proto
@@ -50,7 +50,8 @@ message DeniedHttpResponse {
type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];
// This field allows the authorization service to send HTTP response headers
- // to the downstream client.
+ // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to
+ // false when used in this message.
repeated config.core.v3.HeaderValueOption headers = 2;
// This field allows the authorization service to send a response body data
@@ -59,18 +60,37 @@ message DeniedHttpResponse {
}
// HTTP attributes for an OK response.
+// [#next-free-field: 6]
message OkHttpResponse {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.auth.v2.OkHttpResponse";
// HTTP entity headers in addition to the original request headers. This allows the authorization
// service to append, to add or to override headers from the original request before
- // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`,
- // the filter will append the correspondent header value to the matched request header. Note that
- // by Leaving `append` as false, the filter will either add a new header, or override an existing
+ // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to
+ // false when used in this message. By setting the `append` field to `true`,
+ // the filter will append the corresponding header value to the matched request header.
+ // By leaving `append` as false, the filter will either add a new header, or override an existing
// one if there is a match.
repeated config.core.v3.HeaderValueOption headers = 2;
+ // HTTP entity headers to remove from the original request before dispatching
+ // it to the upstream. This allows the authorization service to act on auth
+ // related headers (like `Authorization`), process them, and consume them.
+ // Under this model, the upstream will either receive the request (if it's
+ // authorized) or not receive it (if it's not), but will not see headers
+ // containing authorization credentials.
+ //
+ // Pseudo headers (such as `:authority`, `:method`, `:path`, etc.), as well as
+ // the header `Host`, may not be removed, as that would make the request
+ // malformed. If mentioned in `headers_to_remove`, these special headers will
+ // be ignored.
+ //
+ // When using the HTTP service, this must instead be set by the HTTP
+ // authorization service as a comma-separated list, like so:
+ // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``.
+ repeated string headers_to_remove = 5;
+
// This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata
// `. Until it is removed,
// setting this field overrides :ref:`CheckResponse.dynamic_metadata
diff --git a/api/envoy/service/auth/v4alpha/attribute_context.proto b/api/envoy/service/auth/v4alpha/attribute_context.proto
index 24f728c7adef..a1bf9c9c62cb 100644
--- a/api/envoy/service/auth/v4alpha/attribute_context.proto
+++ b/api/envoy/service/auth/v4alpha/attribute_context.proto
@@ -97,7 +97,7 @@ message AttributeContext {
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
- // [#next-free-field: 12]
+ // [#next-free-field: 13]
message HttpRequest {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.auth.v3.AttributeContext.HttpRequest";
@@ -145,6 +145,12 @@ message AttributeContext {
// The HTTP request body.
string body = 11;
+
+ // The HTTP request body in bytes. This is used instead of
+ // :ref:`body ` when
+ // :ref:`pack_as_bytes `
+ // is set to true.
+ bytes raw_body = 12;
}
// The source of a network activity, such as starting a TCP connection.
diff --git a/api/envoy/service/auth/v4alpha/external_auth.proto b/api/envoy/service/auth/v4alpha/external_auth.proto
index cca67e6ecce9..06ccecec15da 100644
--- a/api/envoy/service/auth/v4alpha/external_auth.proto
+++ b/api/envoy/service/auth/v4alpha/external_auth.proto
@@ -50,7 +50,8 @@ message DeniedHttpResponse {
type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}];
// This field allows the authorization service to send HTTP response headers
- // to the downstream client.
+ // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to
+ // false when used in this message.
repeated config.core.v4alpha.HeaderValueOption headers = 2;
// This field allows the authorization service to send a response body data
@@ -59,6 +60,7 @@ message DeniedHttpResponse {
}
// HTTP attributes for an OK response.
+// [#next-free-field: 6]
message OkHttpResponse {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.auth.v3.OkHttpResponse";
@@ -69,11 +71,29 @@ message OkHttpResponse {
// HTTP entity headers in addition to the original request headers. This allows the authorization
// service to append, to add or to override headers from the original request before
- // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`,
- // the filter will append the correspondent header value to the matched request header. Note that
- // by Leaving `append` as false, the filter will either add a new header, or override an existing
+ // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to
+ // false when used in this message. By setting the `append` field to `true`,
+ // the filter will append the corresponding header value to the matched request header.
+ // By leaving `append` as false, the filter will either add a new header, or override an existing
// one if there is a match.
repeated config.core.v4alpha.HeaderValueOption headers = 2;
+
+ // HTTP entity headers to remove from the original request before dispatching
+ // it to the upstream. This allows the authorization service to act on auth
+ // related headers (like `Authorization`), process them, and consume them.
+ // Under this model, the upstream will either receive the request (if it's
+ // authorized) or not receive it (if it's not), but will not see headers
+ // containing authorization credentials.
+ //
+ // Pseudo headers (such as `:authority`, `:method`, `:path`, etc.), as well as
+ // the header `Host`, may not be removed, as that would make the request
+ // malformed. If mentioned in `headers_to_remove`, these special headers will
+ // be ignored.
+ //
+ // When using the HTTP service, this must instead be set by the HTTP
+ // authorization service as a comma-separated list, like so:
+ // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``.
+ repeated string headers_to_remove = 5;
}
// Intended for gRPC and Network Authorization servers `only`.
diff --git a/api/envoy/service/extension/v3/config_discovery.proto b/api/envoy/service/extension/v3/config_discovery.proto
index 652355b707e3..d0b703312346 100644
--- a/api/envoy/service/extension/v3/config_discovery.proto
+++ b/api/envoy/service/extension/v3/config_discovery.proto
@@ -16,7 +16,7 @@ option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;
-// [#protodoc-title: ExtensionConfigDS]
+// [#protodoc-title: Extension Config Discovery Service (ECDS)]
// Return extension configurations.
service ExtensionConfigDiscoveryService {
diff --git a/api/envoy/service/runtime/v3/rtds.proto b/api/envoy/service/runtime/v3/rtds.proto
index b12844233883..796b6fac24e6 100644
--- a/api/envoy/service/runtime/v3/rtds.proto
+++ b/api/envoy/service/runtime/v3/rtds.proto
@@ -52,7 +52,7 @@ message Runtime {
// Runtime resource name. This makes the Runtime a self-describing xDS
// resource.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
google.protobuf.Struct layer = 2;
}
diff --git a/api/envoy/service/status/v3/csds.proto b/api/envoy/service/status/v3/csds.proto
index 23f1352bf489..8e81dcdd2bff 100644
--- a/api/envoy/service/status/v3/csds.proto
+++ b/api/envoy/service/status/v3/csds.proto
@@ -9,6 +9,7 @@ import "envoy/type/matcher/v3/node.proto";
import "google/api/annotations.proto";
import "google/protobuf/struct.proto";
+import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
@@ -21,9 +22,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Client Status Discovery Service (CSDS)]
// CSDS is Client Status Discovery Service. It can be used to get the status of
-// an xDS-compliant client from the management server's point of view. In the
-// future, it can potentially be used as an interface to get the current
-// state directly from the client.
+// an xDS-compliant client from the management server's point of view. It can
+// also be used to get the current xDS states directly from the client.
service ClientStatusDiscoveryService {
rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {
}
@@ -34,7 +34,7 @@ service ClientStatusDiscoveryService {
}
}
-// Status of a config.
+// Status of a config from a management server view.
enum ConfigStatus {
// Status info is not available/unknown.
UNKNOWN = 0;
@@ -49,10 +49,30 @@ enum ConfigStatus {
// ACK/NACK.
STALE = 3;
- // Management server has sent the config to client but received NACK.
+ // Management server has sent the config to client but received NACK. The
+ // attached config dump will be the latest config (the rejected one), since
+ // it is the persisted version in the management server.
ERROR = 4;
}
+// Config status from a client-side view.
+enum ClientConfigStatus {
+ // Config status is not available/unknown.
+ CLIENT_UNKNOWN = 0;
+
+ // Client requested the config but hasn't received any config from management
+ // server yet.
+ CLIENT_REQUESTED = 1;
+
+ // Client received the config and replied with ACK.
+ CLIENT_ACKED = 2;
+
+ // Client received the config and replied with NACK. Notably, the attached
+ // config dump is not the NACKed version, but the most recent accepted one. If
+ // no config is accepted yet, the attached config dump will be empty.
+ CLIENT_NACKED = 3;
+}
+
// Request for client status of clients identified by a list of NodeMatchers.
message ClientStatusRequest {
option (udpa.annotations.versioning).previous_message_type =
@@ -67,12 +87,20 @@ message ClientStatusRequest {
}
// Detailed config (per xDS) with status.
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message PerXdsConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.status.v2.PerXdsConfig";
- ConfigStatus status = 1;
+ // Config status generated by management servers. Will not be present if the
+ // CSDS server is an xDS client.
+ ConfigStatus status = 1 [(udpa.annotations.field_migrate).oneof_promotion = "status_config"];
+
+ // Client config status is populated by xDS clients. Will not be present if
+ // the CSDS server is an xDS server. No matter what the client config status
+ // is, xDS clients should always dump the most recent accepted xDS config.
+ ClientConfigStatus client_status = 7
+ [(udpa.annotations.field_migrate).oneof_promotion = "status_config"];
oneof per_xds_config {
admin.v3.ListenersConfigDump listener_config = 2;
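As a hedged sketch, a ClientStatusRequest selecting one node by exact id (id hypothetical); the response's PerXdsConfig then carries status and/or the new client_status:

  node_matchers:
  - node_id:
      exact: envoy-node-1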
diff --git a/api/envoy/service/status/v4alpha/csds.proto b/api/envoy/service/status/v4alpha/csds.proto
index 37758954cadb..e1556de8b913 100644
--- a/api/envoy/service/status/v4alpha/csds.proto
+++ b/api/envoy/service/status/v4alpha/csds.proto
@@ -21,9 +21,8 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO
// [#protodoc-title: Client Status Discovery Service (CSDS)]
// CSDS is Client Status Discovery Service. It can be used to get the status of
-// an xDS-compliant client from the management server's point of view. In the
-// future, it can potentially be used as an interface to get the current
-// state directly from the client.
+// an xDS-compliant client from the management server's point of view. It can
+// also be used to get the current xDS states directly from the client.
service ClientStatusDiscoveryService {
rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) {
}
@@ -34,7 +33,7 @@ service ClientStatusDiscoveryService {
}
}
-// Status of a config.
+// Status of a config from a management server view.
enum ConfigStatus {
// Status info is not available/unknown.
UNKNOWN = 0;
@@ -49,10 +48,30 @@ enum ConfigStatus {
// ACK/NACK.
STALE = 3;
- // Management server has sent the config to client but received NACK.
+ // Management server has sent the config to client but received NACK. The
+ // attached config dump will be the latest config (the rejected one), since
+ // it is the persisted version in the management server.
ERROR = 4;
}
+// Config status from a client-side view.
+enum ClientConfigStatus {
+ // Config status is not available/unknown.
+ CLIENT_UNKNOWN = 0;
+
+ // Client requested the config but hasn't received any config from management
+ // server yet.
+ CLIENT_REQUESTED = 1;
+
+ // Client received the config and replied with ACK.
+ CLIENT_ACKED = 2;
+
+ // Client received the config and replied with NACK. Notably, the attached
+ // config dump is not the NACKed version, but the most recent accepted one. If
+ // no config is accepted yet, the attached config dump will be empty.
+ CLIENT_NACKED = 3;
+}
+
// Request for client status of clients identified by a list of NodeMatchers.
message ClientStatusRequest {
option (udpa.annotations.versioning).previous_message_type =
@@ -67,12 +86,21 @@ message ClientStatusRequest {
}
// Detailed config (per xDS) with status.
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message PerXdsConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.service.status.v3.PerXdsConfig";
- ConfigStatus status = 1;
+ oneof status_config {
+ // Config status generated by management servers. Will not be present if the
+ // CSDS server is an xDS client.
+ ConfigStatus status = 1;
+
+ // Client config status is populated by xDS clients. Will not be present if
+ // the CSDS server is an xDS server. No matter what the client config status
+ // is, xDS clients should always dump the most recent accepted xDS config.
+ ClientConfigStatus client_status = 7;
+ }
oneof per_xds_config {
admin.v4alpha.ListenersConfigDump listener_config = 2;
diff --git a/api/envoy/service/tap/v2alpha/BUILD b/api/envoy/service/tap/v2alpha/BUILD
index 267aeaa0efab..8e0561a169c5 100644
--- a/api/envoy/service/tap/v2alpha/BUILD
+++ b/api/envoy/service/tap/v2alpha/BUILD
@@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2
api_proto_package(
has_services = True,
deps = [
- "//envoy/api/v2:pkg",
"//envoy/api/v2/core:pkg",
"//envoy/api/v2/route:pkg",
"//envoy/data/tap/v2alpha:pkg",
diff --git a/api/envoy/service/tap/v2alpha/tapds.proto b/api/envoy/service/tap/v2alpha/tapds.proto
deleted file mode 100644
index 81b9cb0e447b..000000000000
--- a/api/envoy/service/tap/v2alpha/tapds.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-syntax = "proto3";
-
-package envoy.service.tap.v2alpha;
-
-import "envoy/api/v2/discovery.proto";
-import "envoy/service/tap/v2alpha/common.proto";
-
-import "google/api/annotations.proto";
-
-import "udpa/annotations/status.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.service.tap.v2alpha";
-option java_outer_classname = "TapdsProto";
-option java_multiple_files = true;
-option java_generic_services = true;
-option (udpa.annotations.file_status).package_version_status = FROZEN;
-
-// [#protodoc-title: Tap discovery service]
-
-// [#not-implemented-hide:] Tap discovery service.
-service TapDiscoveryService {
- rpc StreamTapConfigs(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) {
- }
-
- rpc DeltaTapConfigs(stream api.v2.DeltaDiscoveryRequest)
- returns (stream api.v2.DeltaDiscoveryResponse) {
- }
-
- rpc FetchTapConfigs(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) {
- option (google.api.http).post = "/v2/discovery:tap_configs";
- option (google.api.http).body = "*";
- }
-}
-
-// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name
-// The filter TapDS config references this name.
-message TapResource {
- // The name of the tap configuration.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
-
- // Tap config to apply
- TapConfig config = 2;
-}
diff --git a/api/envoy/service/tap/v3/BUILD b/api/envoy/service/tap/v3/BUILD
index 0aa82fa145be..5ee1ce553f48 100644
--- a/api/envoy/service/tap/v3/BUILD
+++ b/api/envoy/service/tap/v3/BUILD
@@ -8,9 +8,7 @@ api_proto_package(
has_services = True,
deps = [
"//envoy/config/core/v3:pkg",
- "//envoy/config/tap/v3:pkg",
"//envoy/data/tap/v3:pkg",
- "//envoy/service/discovery/v3:pkg",
"//envoy/service/tap/v2alpha:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
],
diff --git a/api/envoy/service/tap/v3/tapds.proto b/api/envoy/service/tap/v3/tapds.proto
deleted file mode 100644
index 51393d6e14c7..000000000000
--- a/api/envoy/service/tap/v3/tapds.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-syntax = "proto3";
-
-package envoy.service.tap.v3;
-
-import "envoy/config/tap/v3/common.proto";
-import "envoy/service/discovery/v3/discovery.proto";
-
-import "google/api/annotations.proto";
-
-import "udpa/annotations/status.proto";
-import "udpa/annotations/versioning.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.service.tap.v3";
-option java_outer_classname = "TapdsProto";
-option java_multiple_files = true;
-option java_generic_services = true;
-option (udpa.annotations.file_status).package_version_status = ACTIVE;
-
-// [#protodoc-title: Tap discovery service]
-
-// [#not-implemented-hide:] Tap discovery service.
-service TapDiscoveryService {
- rpc StreamTapConfigs(stream discovery.v3.DiscoveryRequest)
- returns (stream discovery.v3.DiscoveryResponse) {
- }
-
- rpc DeltaTapConfigs(stream discovery.v3.DeltaDiscoveryRequest)
- returns (stream discovery.v3.DeltaDiscoveryResponse) {
- }
-
- rpc FetchTapConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {
- option (google.api.http).post = "/v3/discovery:tap_configs";
- option (google.api.http).body = "*";
- }
-}
-
-// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name
-// The filter TapDS config references this name.
-message TapResource {
- option (udpa.annotations.versioning).previous_message_type =
- "envoy.service.tap.v2alpha.TapResource";
-
- // The name of the tap configuration.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
-
- // Tap config to apply
- config.tap.v3.TapConfig config = 2;
-}
diff --git a/api/envoy/service/tap/v4alpha/BUILD b/api/envoy/service/tap/v4alpha/BUILD
index 8e407d4f61e3..cb89a6907d9a 100644
--- a/api/envoy/service/tap/v4alpha/BUILD
+++ b/api/envoy/service/tap/v4alpha/BUILD
@@ -8,9 +8,7 @@ api_proto_package(
has_services = True,
deps = [
"//envoy/config/core/v4alpha:pkg",
- "//envoy/config/tap/v4alpha:pkg",
"//envoy/data/tap/v3:pkg",
- "//envoy/service/discovery/v4alpha:pkg",
"//envoy/service/tap/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
],
diff --git a/api/envoy/service/tap/v4alpha/tapds.proto b/api/envoy/service/tap/v4alpha/tapds.proto
deleted file mode 100644
index a041beea2697..000000000000
--- a/api/envoy/service/tap/v4alpha/tapds.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-syntax = "proto3";
-
-package envoy.service.tap.v4alpha;
-
-import "envoy/config/tap/v4alpha/common.proto";
-import "envoy/service/discovery/v4alpha/discovery.proto";
-
-import "google/api/annotations.proto";
-
-import "udpa/annotations/status.proto";
-import "udpa/annotations/versioning.proto";
-import "validate/validate.proto";
-
-option java_package = "io.envoyproxy.envoy.service.tap.v4alpha";
-option java_outer_classname = "TapdsProto";
-option java_multiple_files = true;
-option java_generic_services = true;
-option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;
-
-// [#protodoc-title: Tap discovery service]
-
-// [#not-implemented-hide:] Tap discovery service.
-service TapDiscoveryService {
- rpc StreamTapConfigs(stream discovery.v4alpha.DiscoveryRequest)
- returns (stream discovery.v4alpha.DiscoveryResponse) {
- }
-
- rpc DeltaTapConfigs(stream discovery.v4alpha.DeltaDiscoveryRequest)
- returns (stream discovery.v4alpha.DeltaDiscoveryResponse) {
- }
-
- rpc FetchTapConfigs(discovery.v4alpha.DiscoveryRequest)
- returns (discovery.v4alpha.DiscoveryResponse) {
- option (google.api.http).post = "/v3/discovery:tap_configs";
- option (google.api.http).body = "*";
- }
-}
-
-// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name
-// The filter TapDS config references this name.
-message TapResource {
- option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.TapResource";
-
- // The name of the tap configuration.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
-
- // Tap config to apply
- config.tap.v4alpha.TapConfig config = 2;
-}
diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto
index 2cbc602564c5..ed58d04adb02 100644
--- a/api/envoy/type/matcher/metadata.proto
+++ b/api/envoy/type/matcher/metadata.proto
@@ -83,12 +83,12 @@ message MetadataMatcher {
option (validate.required) = true;
// If specified, use the key to retrieve the value in a Struct.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
}
// The filter name to retrieve the Struct from the Metadata.
- string filter = 1 [(validate.rules).string = {min_bytes: 1}];
+ string filter = 1 [(validate.rules).string = {min_len: 1}];
// The path to retrieve the Value from the Struct.
repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];
diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto
index b23c0bff3075..6c499235bbe2 100644
--- a/api/envoy/type/matcher/regex.proto
+++ b/api/envoy/type/matcher/regex.proto
@@ -48,7 +48,7 @@ message RegexMatcher {
}
// The regex match string. The string must be supported by the configured engine.
- string regex = 2 [(validate.rules).string = {min_bytes: 1}];
+ string regex = 2 [(validate.rules).string = {min_len: 1}];
}
// Describes how to match a string and then produce a new string using a regular
diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto
index 431043e00ec1..499eaf21775f 100644
--- a/api/envoy/type/matcher/string.proto
+++ b/api/envoy/type/matcher/string.proto
@@ -34,7 +34,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *abc.xyz*
- string prefix = 2 [(validate.rules).string = {min_bytes: 1}];
+ string prefix = 2 [(validate.rules).string = {min_len: 1}];
// The input string must have the suffix specified here.
// Note: empty prefix is not allowed, please use regex instead.
@@ -42,7 +42,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *xyz.abc*
- string suffix = 3 [(validate.rules).string = {min_bytes: 1}];
+ string suffix = 3 [(validate.rules).string = {min_len: 1}];
// The input string must match the regular expression specified here.
// The regex grammar is defined `here
diff --git a/api/envoy/type/matcher/struct.proto b/api/envoy/type/matcher/struct.proto
index f65b1d121845..10d4672e0622 100644
--- a/api/envoy/type/matcher/struct.proto
+++ b/api/envoy/type/matcher/struct.proto
@@ -72,7 +72,7 @@ message StructMatcher {
option (validate.required) = true;
// If specified, use the key to retrieve the value in a Struct.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
}
diff --git a/api/envoy/type/matcher/v3/metadata.proto b/api/envoy/type/matcher/v3/metadata.proto
index 65ec4f47ffff..a7184ee98050 100644
--- a/api/envoy/type/matcher/v3/metadata.proto
+++ b/api/envoy/type/matcher/v3/metadata.proto
@@ -89,12 +89,12 @@ message MetadataMatcher {
option (validate.required) = true;
// If specified, use the key to retrieve the value in a Struct.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
}
// The filter name to retrieve the Struct from the Metadata.
- string filter = 1 [(validate.rules).string = {min_bytes: 1}];
+ string filter = 1 [(validate.rules).string = {min_len: 1}];
// The path to retrieve the Value from the Struct.
repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];
diff --git a/api/envoy/type/matcher/v3/regex.proto b/api/envoy/type/matcher/v3/regex.proto
index 6087c6f90fad..f5913c460c46 100644
--- a/api/envoy/type/matcher/v3/regex.proto
+++ b/api/envoy/type/matcher/v3/regex.proto
@@ -54,7 +54,7 @@ message RegexMatcher {
}
// The regex match string. The string must be supported by the configured engine.
- string regex = 2 [(validate.rules).string = {min_bytes: 1}];
+ string regex = 2 [(validate.rules).string = {min_len: 1}];
}
// Describes how to match a string and then produce a new string using a regular
diff --git a/api/envoy/type/matcher/v3/string.proto b/api/envoy/type/matcher/v3/string.proto
index d453d43d3f85..7a91b58f3bc6 100644
--- a/api/envoy/type/matcher/v3/string.proto
+++ b/api/envoy/type/matcher/v3/string.proto
@@ -41,7 +41,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *abc.xyz*
- string prefix = 2 [(validate.rules).string = {min_bytes: 1}];
+ string prefix = 2 [(validate.rules).string = {min_len: 1}];
// The input string must have the suffix specified here.
// Note: empty prefix is not allowed, please use regex instead.
@@ -49,7 +49,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *xyz.abc*
- string suffix = 3 [(validate.rules).string = {min_bytes: 1}];
+ string suffix = 3 [(validate.rules).string = {min_len: 1}];
// The input string must match the regular expression specified here.
RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];
@@ -60,7 +60,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *xyz.abc.def*
- string contains = 7 [(validate.rules).string = {min_bytes: 1}];
+ string contains = 7 [(validate.rules).string = {min_len: 1}];
}
// If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no
diff --git a/api/envoy/type/matcher/v3/struct.proto b/api/envoy/type/matcher/v3/struct.proto
index b88d7b11bc2a..c753d07a5c0a 100644
--- a/api/envoy/type/matcher/v3/struct.proto
+++ b/api/envoy/type/matcher/v3/struct.proto
@@ -78,7 +78,7 @@ message StructMatcher {
option (validate.required) = true;
// If specified, use the key to retrieve the value in a Struct.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
}
diff --git a/api/envoy/type/matcher/v4alpha/metadata.proto b/api/envoy/type/matcher/v4alpha/metadata.proto
index 8abe14e7b667..35af650391ff 100644
--- a/api/envoy/type/matcher/v4alpha/metadata.proto
+++ b/api/envoy/type/matcher/v4alpha/metadata.proto
@@ -90,12 +90,12 @@ message MetadataMatcher {
option (validate.required) = true;
// If specified, use the key to retrieve the value in a Struct.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
}
// The filter name to retrieve the Struct from the Metadata.
- string filter = 1 [(validate.rules).string = {min_bytes: 1}];
+ string filter = 1 [(validate.rules).string = {min_len: 1}];
// The path to retrieve the Value from the Struct.
repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}];
diff --git a/api/envoy/type/matcher/v4alpha/regex.proto b/api/envoy/type/matcher/v4alpha/regex.proto
index 087c5e3f7292..537635ec87d0 100644
--- a/api/envoy/type/matcher/v4alpha/regex.proto
+++ b/api/envoy/type/matcher/v4alpha/regex.proto
@@ -49,7 +49,7 @@ message RegexMatcher {
}
// The regex match string. The string must be supported by the configured engine.
- string regex = 2 [(validate.rules).string = {min_bytes: 1}];
+ string regex = 2 [(validate.rules).string = {min_len: 1}];
}
// Describes how to match a string and then produce a new string using a regular
diff --git a/api/envoy/type/matcher/v4alpha/string.proto b/api/envoy/type/matcher/v4alpha/string.proto
index fc17946fe3b5..1bc0118ced9b 100644
--- a/api/envoy/type/matcher/v4alpha/string.proto
+++ b/api/envoy/type/matcher/v4alpha/string.proto
@@ -42,7 +42,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *abc.xyz*
- string prefix = 2 [(validate.rules).string = {min_bytes: 1}];
+ string prefix = 2 [(validate.rules).string = {min_len: 1}];
// The input string must have the suffix specified here.
// Note: empty prefix is not allowed, please use regex instead.
@@ -50,7 +50,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *xyz.abc*
- string suffix = 3 [(validate.rules).string = {min_bytes: 1}];
+ string suffix = 3 [(validate.rules).string = {min_len: 1}];
// The input string must match the regular expression specified here.
RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];
@@ -61,7 +61,7 @@ message StringMatcher {
// Examples:
//
// * *abc* matches the value *xyz.abc.def*
- string contains = 7 [(validate.rules).string = {min_bytes: 1}];
+ string contains = 7 [(validate.rules).string = {min_len: 1}];
}
// If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no
diff --git a/api/envoy/type/matcher/v4alpha/struct.proto b/api/envoy/type/matcher/v4alpha/struct.proto
index 643cc5a47570..328ac555bd81 100644
--- a/api/envoy/type/matcher/v4alpha/struct.proto
+++ b/api/envoy/type/matcher/v4alpha/struct.proto
@@ -79,7 +79,7 @@ message StructMatcher {
option (validate.required) = true;
// If specified, use the key to retrieve the value in a Struct.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
}
diff --git a/api/envoy/type/metadata/v3/metadata.proto b/api/envoy/type/metadata/v3/metadata.proto
index ddcce6882057..b971d8debbe5 100644
--- a/api/envoy/type/metadata/v3/metadata.proto
+++ b/api/envoy/type/metadata/v3/metadata.proto
@@ -49,13 +49,13 @@ message MetadataKey {
option (validate.required) = true;
// If specified, use the key to retrieve the value in a Struct.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
}
}
// The key name of Metadata to retrieve the Struct from the metadata.
// Typically, it represents a builtin subsystem or custom extension.
- string key = 1 [(validate.rules).string = {min_bytes: 1}];
+ string key = 1 [(validate.rules).string = {min_len: 1}];
// The path to retrieve the Value from the Struct. It can be a prefix or a full path,
// e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example,
diff --git a/api/envoy/type/tracing/v3/custom_tag.proto b/api/envoy/type/tracing/v3/custom_tag.proto
index 42518ead59d1..bcebe5779ba1 100644
--- a/api/envoy/type/tracing/v3/custom_tag.proto
+++ b/api/envoy/type/tracing/v3/custom_tag.proto
@@ -26,7 +26,7 @@ message CustomTag {
"envoy.type.tracing.v2.CustomTag.Literal";
// Static literal value to populate the tag value.
- string value = 1 [(validate.rules).string = {min_bytes: 1}];
+ string value = 1 [(validate.rules).string = {min_len: 1}];
}
// Environment type custom tag with environment name and default value.
@@ -35,7 +35,7 @@ message CustomTag {
"envoy.type.tracing.v2.CustomTag.Environment";
// Environment variable name to obtain the value to populate the tag value.
- string name = 1 [(validate.rules).string = {min_bytes: 1}];
+ string name = 1 [(validate.rules).string = {min_len: 1}];
// When the environment variable is not found,
// the tag value will be populated with this default value if specified,
@@ -50,7 +50,7 @@ message CustomTag {
// Header name to obtain the value to populate the tag value.
string name = 1
- [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
+ [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];
// When the header does not exist,
// the tag value will be populated with this default value if specified,
@@ -80,7 +80,7 @@ message CustomTag {
}
// Used to populate the tag name.
- string tag = 1 [(validate.rules).string = {min_bytes: 1}];
+ string tag = 1 [(validate.rules).string = {min_len: 1}];
// Used to specify what kind of custom tag.
oneof type {
diff --git a/api/envoy/watchdog/v3alpha/BUILD b/api/envoy/watchdog/v3alpha/BUILD
new file mode 100644
index 000000000000..ee92fb652582
--- /dev/null
+++ b/api/envoy/watchdog/v3alpha/BUILD
@@ -0,0 +1,9 @@
+# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+
+load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_package(
+ deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
+)
diff --git a/api/envoy/watchdog/v3alpha/README.md b/api/envoy/watchdog/v3alpha/README.md
new file mode 100644
index 000000000000..c8433b9c05b5
--- /dev/null
+++ b/api/envoy/watchdog/v3alpha/README.md
@@ -0,0 +1,2 @@
+This contains watchdog actions that are part of core Envoy, and therefore cannot
+be in the extensions directory.
diff --git a/api/envoy/watchdog/v3alpha/abort_action.proto b/api/envoy/watchdog/v3alpha/abort_action.proto
new file mode 100644
index 000000000000..3f47fddaa77e
--- /dev/null
+++ b/api/envoy/watchdog/v3alpha/abort_action.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+
+package envoy.watchdog.v3alpha;
+
+import "google/protobuf/duration.proto";
+
+import "udpa/annotations/status.proto";
+import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.watchdog.v3alpha";
+option java_outer_classname = "AbortActionProto";
+option java_multiple_files = true;
+option (udpa.annotations.file_status).work_in_progress = true;
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Watchdog action that kills a stuck thread to terminate the process.]
+
+// A GuardDogAction that will terminate the process by killing the
+// stuck thread. This would allow easier access to the call stack of the stuck
+// thread since we would run signal handlers on that thread. By default
+// this will be registered to run as the last watchdog action on KILL and
+// MULTIKILL events if those are enabled.
+message AbortActionConfig {
+ // How long to wait for the thread to respond to the thread kill function
+ // before killing the process from this action. This is a blocking action.
+ // By default this is 5 seconds.
+ google.protobuf.Duration wait_duration = 1;
+}
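+
+// A minimal sketch of how this action might be enabled from a bootstrap config,
+// assuming the v3 Watchdog message exposes an `actions` list of typed extension
+// configs keyed by watchdog event; the field layout and the
+// `envoy.watchdog.abort_action` name are assumptions, not part of this patch:
+//
+//   watchdog:
+//     actions:
+//     - event: KILL
+//       config:
+//         name: envoy.watchdog.abort_action
+//         typed_config:
+//           "@type": type.googleapis.com/envoy.watchdog.v3alpha.AbortActionConfig
+//           wait_duration: 5s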
diff --git a/api/versioning/BUILD b/api/versioning/BUILD
index c93b1f7d84c5..2e0a1cd4997d 100644
--- a/api/versioning/BUILD
+++ b/api/versioning/BUILD
@@ -54,6 +54,7 @@ proto_library(
"//envoy/extensions/filters/http/aws_request_signing/v3:pkg",
"//envoy/extensions/filters/http/buffer/v3:pkg",
"//envoy/extensions/filters/http/cache/v3alpha:pkg",
+ "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg",
"//envoy/extensions/filters/http/compressor/v3:pkg",
"//envoy/extensions/filters/http/cors/v3:pkg",
"//envoy/extensions/filters/http/csrf/v3:pkg",
@@ -72,6 +73,7 @@ proto_library(
"//envoy/extensions/filters/http/health_check/v3:pkg",
"//envoy/extensions/filters/http/ip_tagging/v3:pkg",
"//envoy/extensions/filters/http/jwt_authn/v3:pkg",
+ "//envoy/extensions/filters/http/local_ratelimit/v3:pkg",
"//envoy/extensions/filters/http/lua/v3:pkg",
"//envoy/extensions/filters/http/oauth2/v3alpha:pkg",
"//envoy/extensions/filters/http/on_demand/v3:pkg",
@@ -118,6 +120,7 @@ proto_library(
"//envoy/extensions/network/socket_interface/v3:pkg",
"//envoy/extensions/retry/host/omit_host_metadata/v3:pkg",
"//envoy/extensions/retry/priority/previous_priorities/v3:pkg",
+ "//envoy/extensions/stat_sinks/wasm/v3:pkg",
"//envoy/extensions/transport_sockets/alts/v3:pkg",
"//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg",
"//envoy/extensions/transport_sockets/quic/v3:pkg",
@@ -151,6 +154,7 @@ proto_library(
"//envoy/type/metadata/v3:pkg",
"//envoy/type/tracing/v3:pkg",
"//envoy/type/v3:pkg",
+ "//envoy/watchdog/v3alpha:pkg",
],
)
diff --git a/api/xds_protocol.rst b/api/xds_protocol.rst
index c7cbe6f03bcc..641d30794006 100644
--- a/api/xds_protocol.rst
+++ b/api/xds_protocol.rst
@@ -695,10 +695,16 @@ An example minimal ``bootstrap.yaml`` fragment for ADS configuration is:
address:
port_value:
lb_policy: ROUND_ROBIN
- http2_protocol_options: {}
+ # It is recommended to configure either HTTP/2 or TCP keepalives in order to detect
+ # connection issues, and allow Envoy to reconnect. TCP keepalive is less expensive, but
+ # may be inadequate if there is a TCP proxy between Envoy and the management server.
+ # HTTP/2 keepalive is slightly more expensive, but may detect issues through more types
+ # of intermediate proxies.
+ http2_protocol_options:
+ connection_keepalive:
+ interval: 30s
+ timeout: 5s
upstream_connection_options:
- # configure a TCP keep-alive to detect and reconnect to the admin
- # server in the event of a TCP socket disconnection
tcp_keepalive:
...
admin:
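+        # As a sketch of the cheaper TCP keepalive alternative mentioned above
+        # (field names from core.v3.TcpKeepalive; the values are illustrative only):
+        #
+        #   upstream_connection_options:
+        #     tcp_keepalive:
+        #       keepalive_probes: 3
+        #       keepalive_time: 300
+        #       keepalive_interval: 30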
diff --git a/bazel/BUILD b/bazel/BUILD
index 0cdca5a06d7f..d03b931018a3 100644
--- a/bazel/BUILD
+++ b/bazel/BUILD
@@ -84,6 +84,33 @@ config_setting(
},
)
+config_setting(
+ name = "clang_cl_opt_build",
+ values = {
+ "cpu": "x64_windows",
+ "define": "clang_cl=1",
+ "compilation_mode": "opt",
+ },
+)
+
+config_setting(
+ name = "clang_cl_dbg_build",
+ values = {
+ "cpu": "x64_windows",
+ "define": "clang_cl=1",
+ "compilation_mode": "dbg",
+ },
+)
+
+config_setting(
+ name = "clang_cl_fastbuild_build",
+ values = {
+ "cpu": "x64_windows",
+ "define": "clang_cl=1",
+ "compilation_mode": "fastbuild",
+ },
+)
+
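+# As a sketch, a debug clang-cl build matching clang_cl_dbg_build above could be
+# requested with the exact values these settings key on:
+#   bazel build //source/exe:envoy-static --cpu=x64_windows --define clang_cl=1 -c dbg
+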
config_setting(
name = "opt_build",
values = {"compilation_mode": "opt"},
@@ -155,6 +182,37 @@ config_setting(
values = {"define": "tcmalloc=debug"},
)
+config_setting(
+ name = "gperftools_tcmalloc",
+ values = {"define": "tcmalloc=gperftools"},
+)
+
+# As select() can't be nested, we need these specialized settings to avoid ambiguity when choosing
+# tcmalloc's flavor for x86_64 builds.
+config_setting(
+ name = "disable_tcmalloc_on_linux_x86_64",
+ values = {
+ "define": "tcmalloc=disabled",
+ "cpu": "k8",
+ },
+)
+
+config_setting(
+ name = "gperftools_tcmalloc_on_linux_x86_64",
+ values = {
+ "define": "tcmalloc=gperftools",
+ "cpu": "k8",
+ },
+)
+
+config_setting(
+ name = "debug_tcmalloc_on_linux_x86_64",
+ values = {
+ "define": "tcmalloc=debug",
+ "cpu": "k8",
+ },
+)
+
config_setting(
name = "disable_signal_trace",
values = {"define": "signal_trace=disabled"},
@@ -256,6 +314,27 @@ config_setting(
values = {"define": "quiche=enabled"},
)
+# TODO: consider converting WAVM VM support to an extension (https://github.com/envoyproxy/envoy/issues/12574)
+config_setting(
+ name = "wasm_all",
+ values = {"define": "wasm=enabled"},
+)
+
+config_setting(
+ name = "wasm_wavm",
+ values = {"define": "wasm=wavm"},
+)
+
+config_setting(
+ name = "wasm_v8",
+ values = {"define": "wasm=v8"},
+)
+
+config_setting(
+ name = "wasm_none",
+ values = {"define": "wasm=disabled"},
+)
+
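+# Each setting above keys on one value of the `wasm` define; as a sketch:
+#   bazel build //source/exe:envoy-static --define wasm=v8        # V8 runtime only
+#   bazel build //source/exe:envoy-static --define wasm=disabled  # no WASM runtime
+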
# Alias pointing to the selected version of BoringSSL:
# - BoringSSL FIPS from @boringssl_fips//:ssl,
# - non-FIPS BoringSSL from @boringssl//:ssl.
diff --git a/bazel/README.md b/bazel/README.md
index 951c76dc53e4..b8bb300a2a77 100644
--- a/bazel/README.md
+++ b/bazel/README.md
@@ -29,10 +29,10 @@ to find the right version of Bazel and set the version to `USE_BAZEL_VERSION` en
## Production environments
To build Envoy with Bazel in a production environment, where the [Envoy
-dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements) are typically
+dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#requirements) are typically
independently sourced, the following steps should be followed:
-1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements).
+1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#requirements).
1. `bazel build -c opt //source/exe:envoy-static` from the repository root.
## Quick start Bazel build for developers
@@ -127,6 +127,11 @@ for how to update or override dependencies.
startup --output_base=C:/_eb
```
+ Bazel also creates file symlinks when building Envoy. It's strongly recommended to enable file symlink support
+ using [Bazel's instructions](https://docs.bazel.build/versions/master/windows.html#enable-symlink-support).
+ For other common issues, see the
+ [Using Bazel on Windows](https://docs.bazel.build/versions/master/windows.html) page.
+
[python3](https://www.python.org/downloads/): Specifically, the Windows-native flavor distributed
by python.org. The POSIX flavor available via MSYS2, the Windows Store flavor and other distributions
will not work. Add a symlink for `python3.exe` pointing to the installed `python.exe` for Envoy scripts
@@ -145,7 +150,8 @@ for how to update or override dependencies.
package. Earlier versions of VC++ Build Tools/Visual Studio are not recommended or supported.
If installed in a non-standard filesystem location, be sure to set the `BAZEL_VC` environment variable
to the path of the VC++ package to allow Bazel to find your installation of VC++. NOTE: ensure that
- the `link.exe` that resolves on your PATH is from VC++ Build Tools and not `/usr/bin/link.exe` from MSYS2.
+ the `link.exe` that resolves on your PATH is from VC++ Build Tools and not `/usr/bin/link.exe` from MSYS2,
+ which is determined by their relative ordering in your PATH.
```
set BAZEL_VC=%USERPROFILE%\VSBT2019\VC
set PATH=%PATH%;%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64
@@ -160,10 +166,11 @@ for how to update or override dependencies.
set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja
```
- [MSYS2 shell](https://msys2.github.io/): Set the `BAZEL_SH` environment variable to the path
- of the installed MSYS2 `bash.exe` executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment
- variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell
- behaves as expected.
+ [MSYS2 shell](https://msys2.github.io/): Install to a path with no spaces, e.g. C:\msys32.
+
+ Set the `BAZEL_SH` environment variable to the path of the installed MSYS2 `bash.exe`
+ executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment variable to a value
+ of `*` is often advisable to ensure argument parsing in the MSYS2 shell behaves as expected.
```
set PATH=%PATH%;%USERPROFILE%\msys64\usr\bin
set BAZEL_SH=%USERPROFILE%\msys64\usr\bin\bash.exe
@@ -181,7 +188,7 @@ for how to update or override dependencies.
The TMPDIR path and MSYS2 `mktemp` command are used frequently by the `rules_foreign_cc`
component of Bazel as well as Envoy's test scripts, causing problems if not set to a path
accessible to both Windows and msys commands. [Note the `ci/windows_ci_steps.sh` script
- which builds envoy and run tests in CI) creates this symlink automatically.]
+   which builds envoy and runs tests in CI creates this symlink automatically.]
In the MSYS2 shell, install additional packages via pacman:
```
@@ -212,7 +219,8 @@ for how to update or override dependencies.
in your shell for buildifier to work.
1. `go get -u github.com/bazelbuild/buildtools/buildozer` to install buildozer. You may need to set `BUILDOZER_BIN` to `$GOPATH/bin/buildozer`
in your shell for buildozer to work.
-1. `bazel build //source/exe:envoy-static` from the Envoy source directory.
+1. `bazel build //source/exe:envoy-static` from the Envoy source directory. Add `-c opt` for an optimized release build or
+ `-c dbg` for an unoptimized, fully instrumented debugging build.
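+
+   For example, a fully instrumented debugging build using the flags described above:
+
+   ```
+   bazel build -c dbg //source/exe:envoy-static
+   ```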
## Building Envoy with the CI Docker image
@@ -228,7 +236,7 @@ From a Windows host with Docker installed, the Windows containers feature enable
MSYS2 or Git bash), run:
```
-./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh'
+./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh'
```
See also the [documentation](https://github.com/envoyproxy/envoy/tree/master/ci) for developer use of the
@@ -599,7 +607,8 @@ The following optional features can be disabled on the Bazel build command-line:
* Google C++ gRPC client with `--define google_grpc=disabled`
* Backtracing on signals with `--define signal_trace=disabled`
* Active stream state dump on signals with `--define signal_trace=disabled` or `--define disable_object_dump_on_signal_trace=disabled`
-* tcmalloc with `--define tcmalloc=disabled`
+* tcmalloc with `--define tcmalloc=disabled`. You can also choose Gperftools' implementation of
+  tcmalloc with `--define tcmalloc=gperftools`, which is the default for non-x86 builds (see the
+  sketch after this list).
* deprecated features with `--define deprecated_features=disabled`
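+
+  As a concrete sketch of the allocator switch described above:
+
+  ```
+  bazel build -c opt //source/exe:envoy-static --define tcmalloc=gperftools
+  ```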
@@ -618,7 +627,8 @@ The following optional features can be enabled on the Bazel build command-line:
`--define log_debug_assert_in_release=enabled`. The default behavior is to compile debug assertions out of
release builds so that the condition is not evaluated. This option has no effect in debug builds.
* memory-debugging (scribbling over memory after allocation and before freeing) with
- `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant mode BoringSSL.
+  `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant BoringSSL, and
+  that tcmalloc is then built from the Gperftools sources.
* Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with
  `--define path_normalization_by_default=true`. Note this still could be disabled by explicit xDS config.
* Manual stamping via VersionInfo with `--define manual_stamp=manual_stamp`.
diff --git a/bazel/coverage/collect_cc_coverage.sh b/bazel/coverage/collect_cc_coverage.sh
index 53926e5cb6af..f0b85c5c0ddb 100755
--- a/bazel/coverage/collect_cc_coverage.sh
+++ b/bazel/coverage/collect_cc_coverage.sh
@@ -41,6 +41,8 @@
# gcda or profraw) and uses either lcov or gcov to get the coverage data.
# The coverage data is placed in $COVERAGE_OUTPUT_FILE.
+read -ra COVERAGE_GCOV_OPTIONS <<< "${COVERAGE_GCOV_OPTIONS:-}"
+
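+# For example (illustrative): COVERAGE_GCOV_OPTIONS="-l -p" becomes the array
+# ("-l" "-p"), so the later "${COVERAGE_GCOV_OPTIONS[@]}" expansion passes each
+# option to gcov as its own word, while an unset variable yields an empty array.
+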
# Checks if clang llvm coverage should be used instead of lcov.
function uses_llvm() {
if stat "${COVERAGE_DIR}"/*.profraw >/dev/null 2>&1; then
@@ -68,24 +70,24 @@ function init_gcov() {
# $COVERAGE_DIR.
# Writes the collected coverage into the given output file.
function llvm_coverage() {
- local output_file="${1}"; shift
+ local output_file="${1}" object_file object_files object_param=()
+ shift
export LLVM_PROFILE_FILE="${COVERAGE_DIR}/%h-%p-%m.profraw"
"${COVERAGE_GCOV_PATH}" merge -output "${output_file}.data" \
"${COVERAGE_DIR}"/*.profraw
- local object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \
+ object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \
| grep ELF | grep -v "LSB core" | sed 's,:.*,,')"
-
- local object_param=""
+
for object_file in ${object_files}; do
- object_param+=" -object ${object_file}"
+ object_param+=(-object "${object_file}")
done
llvm-cov export -instr-profile "${output_file}.data" -format=lcov \
-ignore-filename-regex='.*external/.+' \
-ignore-filename-regex='/tmp/.+' \
- ${object_param} | sed 's#/proc/self/cwd/##' > "${output_file}"
+ "${object_param[@]}" | sed 's#/proc/self/cwd/##' > "${output_file}"
}
# Generates a code coverage report in gcov intermediate text format by invoking
@@ -97,17 +99,15 @@ function llvm_coverage() {
# - output_file The location of the file where the generated code coverage
# report is written.
function gcov_coverage() {
- local output_file="${1}"; shift
-
- # We'll save the standard output of each the gcov command in this log.
- local gcov_log="$output_file.gcov.log"
+ local gcda gcno_path line output_file="${1}"
+ shift
# Copy .gcno files next to their corresponding .gcda files in $COVERAGE_DIR
# because gcov expects them to be in the same directory.
while read -r line; do
if [[ ${line: -4} == "gcno" ]]; then
gcno_path=${line}
- local gcda="${COVERAGE_DIR}/$(dirname ${gcno_path})/$(basename ${gcno_path} .gcno).gcda"
+ gcda="${COVERAGE_DIR}/$(dirname "${gcno_path}")/$(basename "${gcno_path}" .gcno).gcda"
# If the gcda file was not found we skip generating coverage from the gcno
# file.
if [[ -f "$gcda" ]]; then
@@ -115,7 +115,7 @@ function gcov_coverage() {
# We overcome this by copying the gcno to $COVERAGE_DIR where the gcda
# files are expected to be.
if [ ! -f "${COVERAGE_DIR}/${gcno_path}" ]; then
- mkdir -p "${COVERAGE_DIR}/$(dirname ${gcno_path})"
+ mkdir -p "${COVERAGE_DIR}/$(dirname "${gcno_path}")"
cp "$ROOT/${gcno_path}" "${COVERAGE_DIR}/${gcno_path}"
fi
# Invoke gcov to generate a code coverage report with the flags:
@@ -134,12 +134,12 @@ function gcov_coverage() {
# Don't generate branch coverage (-b) because of a gcov issue that
# segfaults when both -i and -b are used (see
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84879).
- "${GCOV}" -i $COVERAGE_GCOV_OPTIONS -o "$(dirname ${gcda})" "${gcda}"
+ "${GCOV}" -i "${COVERAGE_GCOV_OPTIONS[@]}" -o "$(dirname "${gcda}")" "${gcda}"
# Append all .gcov files in the current directory to the output file.
- cat *.gcov >> "$output_file"
+ cat ./*.gcov >> "$output_file"
# Delete the .gcov files.
- rm *.gcov
+ rm ./*.gcov
fi
fi
done < "${COVERAGE_MANIFEST}"
diff --git a/bazel/coverage/fuzz_coverage_wrapper.sh b/bazel/coverage/fuzz_coverage_wrapper.sh
index 42d705f7f134..f185b7d37083 100755
--- a/bazel/coverage/fuzz_coverage_wrapper.sh
+++ b/bazel/coverage/fuzz_coverage_wrapper.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-set -x
+set -ex
TEST_BINARY=$1
shift
@@ -9,9 +9,14 @@ shift
rm -rf fuzz_corpus
mkdir -p fuzz_corpus/seed_corpus
-cp -r $@ fuzz_corpus/seed_corpus
+cp -r "$@" fuzz_corpus/seed_corpus
# TODO(asraa): When fuzz targets are stable, remove error suppression and run coverage while fuzzing.
-LLVM_PROFILE_FILE= ${TEST_BINARY} fuzz_corpus -seed=${FUZZ_CORPUS_SEED:-1} -max_total_time=${FUZZ_CORPUS_TIME:-60} -max_len=2048 -rss_limit_mb=8192 || true
+LLVM_PROFILE_FILE='' ${TEST_BINARY} fuzz_corpus -seed="${FUZZ_CORPUS_SEED:-1}" -max_total_time="${FUZZ_CORPUS_TIME:-60}" -max_len=2048 -rss_limit_mb=8192 -timeout=30 || :
-${TEST_BINARY} fuzz_corpus -rss_limit_mb=8192 -runs=0
+# Passing files instead of a directory will run fuzzing as a regression test.
+# TODO(asraa): Remove manual `|| :`, but this shouldn't be necessary.
+_CORPUS="$(find fuzz_corpus -type f)"
+while read -r line; do CORPUS+=("$line"); done \
+ <<< "$_CORPUS"
+${TEST_BINARY} "${CORPUS[@]}" -rss_limit_mb=8192 || :
diff --git a/bazel/crates.bzl b/bazel/crates.bzl
new file mode 100644
index 000000000000..d4373143ddd4
--- /dev/null
+++ b/bazel/crates.bzl
@@ -0,0 +1,113 @@
+"""
+cargo-raze crate workspace functions
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
+
+def _new_http_archive(name, **kwargs):
+ if not native.existing_rule(name):
+ http_archive(name = name, **kwargs)
+
+def _new_git_repository(name, **kwargs):
+ if not native.existing_rule(name):
+ new_git_repository(name = name, **kwargs)
+
+def raze_fetch_remote_crates():
+ _new_http_archive(
+ name = "raze__ahash__0_3_8",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/ahash/ahash-0.3.8.crate",
+ type = "tar.gz",
+ strip_prefix = "ahash-0.3.8",
+ build_file = Label("//bazel/external/cargo/remote:ahash-0.3.8.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__autocfg__1_0_0",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/autocfg/autocfg-1.0.0.crate",
+ type = "tar.gz",
+ strip_prefix = "autocfg-1.0.0",
+ build_file = Label("//bazel/external/cargo/remote:autocfg-1.0.0.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__cfg_if__0_1_10",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/cfg-if/cfg-if-0.1.10.crate",
+ type = "tar.gz",
+ strip_prefix = "cfg-if-0.1.10",
+ build_file = Label("//bazel/external/cargo/remote:cfg-if-0.1.10.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__hashbrown__0_7_2",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/hashbrown/hashbrown-0.7.2.crate",
+ type = "tar.gz",
+ strip_prefix = "hashbrown-0.7.2",
+ build_file = Label("//bazel/external/cargo/remote:hashbrown-0.7.2.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__libc__0_2_74",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/libc/libc-0.2.74.crate",
+ type = "tar.gz",
+ strip_prefix = "libc-0.2.74",
+ build_file = Label("//bazel/external/cargo/remote:libc-0.2.74.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__log__0_4_11",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/log/log-0.4.11.crate",
+ type = "tar.gz",
+ strip_prefix = "log-0.4.11",
+ build_file = Label("//bazel/external/cargo/remote:log-0.4.11.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__memory_units__0_4_0",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/memory_units/memory_units-0.4.0.crate",
+ type = "tar.gz",
+ strip_prefix = "memory_units-0.4.0",
+ build_file = Label("//bazel/external/cargo/remote:memory_units-0.4.0.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__proxy_wasm__0_1_2",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/proxy-wasm/proxy-wasm-0.1.2.crate",
+ type = "tar.gz",
+ strip_prefix = "proxy-wasm-0.1.2",
+ build_file = Label("//bazel/external/cargo/remote:proxy-wasm-0.1.2.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__wee_alloc__0_4_5",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/wee_alloc/wee_alloc-0.4.5.crate",
+ type = "tar.gz",
+ strip_prefix = "wee_alloc-0.4.5",
+ build_file = Label("//bazel/external/cargo/remote:wee_alloc-0.4.5.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__winapi__0_3_9",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi/winapi-0.3.9.crate",
+ type = "tar.gz",
+ strip_prefix = "winapi-0.3.9",
+ build_file = Label("//bazel/external/cargo/remote:winapi-0.3.9.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__winapi_i686_pc_windows_gnu__0_4_0",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-i686-pc-windows-gnu/winapi-i686-pc-windows-gnu-0.4.0.crate",
+ type = "tar.gz",
+ strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0",
+ build_file = Label("//bazel/external/cargo/remote:winapi-i686-pc-windows-gnu-0.4.0.BUILD"),
+ )
+
+ _new_http_archive(
+ name = "raze__winapi_x86_64_pc_windows_gnu__0_4_0",
+ url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-x86_64-pc-windows-gnu/winapi-x86_64-pc-windows-gnu-0.4.0.crate",
+ type = "tar.gz",
+ strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0",
+ build_file = Label("//bazel/external/cargo/remote:winapi-x86_64-pc-windows-gnu-0.4.0.BUILD"),
+ )
diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl
index 92c837a4f06a..dc02f5056cd0 100644
--- a/bazel/dependency_imports.bzl
+++ b/bazel/dependency_imports.bzl
@@ -5,6 +5,8 @@ load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies")
load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository")
+load("@io_bazel_rules_rust//rust:repositories.bzl", "rust_repositories")
+load("@io_bazel_rules_rust//:workspace.bzl", "bazel_version")
load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install = "pip_install")
load("@configs_pip3//:requirements.bzl", configs_pip_install = "pip_install")
load("@headersplit_pip3//:requirements.bzl", headersplit_pip_install = "pip_install")
@@ -23,8 +25,10 @@ def envoy_dependency_imports(go_version = GO_VERSION):
rbe_toolchains_config()
gazelle_dependencies()
apple_rules_dependencies()
+ rust_repositories()
+ bazel_version(name = "bazel_version")
upb_bazel_version_repository(name = "upb_bazel_version")
- antlr_dependencies(471)
+ antlr_dependencies(472)
custom_exec_properties(
name = "envoy_large_machine_exec_property",
@@ -33,6 +37,7 @@ def envoy_dependency_imports(go_version = GO_VERSION):
},
)
+ # These dependencies, like most of the Go in this repository, exist only for the API.
go_repository(
name = "org_golang_google_grpc",
build_file_proto_mode = "disable",
@@ -40,14 +45,12 @@ def envoy_dependency_imports(go_version = GO_VERSION):
sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=",
version = "v1.29.1",
)
-
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
sum = "h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=",
version = "v0.0.0-20190813141303-74dc4d7220e7",
)
-
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl
index 16adfb38a439..d966bdf5a104 100644
--- a/bazel/envoy_binary.bzl
+++ b/bazel/envoy_binary.bzl
@@ -60,8 +60,13 @@ def _envoy_linkopts():
"-pagezero_size 10000",
"-image_base 100000000",
],
+ "@envoy//bazel:clang_cl_opt_build": [
+ "-DEFAULTLIB:ws2_32.lib",
+ "-DEFAULTLIB:iphlpapi.lib",
+ "-DEBUG:FULL",
+ "-WX",
+ ],
"@envoy//bazel:windows_x86_64": [
- "-DEFAULTLIB:advapi32.lib",
"-DEFAULTLIB:ws2_32.lib",
"-DEFAULTLIB:iphlpapi.lib",
"-WX",
diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl
index bdeb501e3068..e95329095dca 100644
--- a/bazel/envoy_build_system.bzl
+++ b/bazel/envoy_build_system.bzl
@@ -19,6 +19,10 @@ load(
_envoy_select_google_grpc = "envoy_select_google_grpc",
_envoy_select_hot_restart = "envoy_select_hot_restart",
_envoy_select_new_codecs_in_integration_tests = "envoy_select_new_codecs_in_integration_tests",
+ _envoy_select_wasm = "envoy_select_wasm",
+ _envoy_select_wasm_all_v8_wavm_none = "envoy_select_wasm_all_v8_wavm_none",
+ _envoy_select_wasm_v8 = "envoy_select_wasm_v8",
+ _envoy_select_wasm_wavm = "envoy_select_wasm_wavm",
)
load(
":envoy_test.bzl",
@@ -176,6 +180,10 @@ def envoy_google_grpc_external_deps():
envoy_select_boringssl = _envoy_select_boringssl
envoy_select_google_grpc = _envoy_select_google_grpc
envoy_select_hot_restart = _envoy_select_hot_restart
+envoy_select_wasm = _envoy_select_wasm
+envoy_select_wasm_all_v8_wavm_none = _envoy_select_wasm_all_v8_wavm_none
+envoy_select_wasm_wavm = _envoy_select_wasm_wavm
+envoy_select_wasm_v8 = _envoy_select_wasm_v8
envoy_select_new_codecs_in_integration_tests = _envoy_select_new_codecs_in_integration_tests
# Binary wrappers (from envoy_binary.bzl)
diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl
index 10b3448c00ee..96ca7a504f9f 100644
--- a/bazel/envoy_internal.bzl
+++ b/bazel/envoy_internal.bzl
@@ -32,9 +32,17 @@ def envoy_copts(repository, test = False):
"-DNOMCX",
"-DNOIME",
"-DNOCRYPT",
- # this is to silence the incorrect MSVC compiler warning when trying to convert between
- # std::optional data types while conversions between primitive types are producing no error
+ # Ignore unguarded gcc pragmas in quiche (unrecognized by MSVC)
+ # TODO(wrowe,sunjayBhatia): Drop this change when fixed in bazel/external/quiche.genrule_cmd
+ "-wd4068",
+ # Silence incorrect MSVC compiler warnings when converting between std::optional
+ # data types (while conversions between primitive types are producing no error)
"-wd4244",
+ # Allow inline functions to be undefined
+ "-wd4506",
+ # Allow 'nodiscard' function return values to be discarded
+ # TODO(wrowe,sunjayBhatia): Drop this option when all causes are fixed
+ "-wd4834",
]
return select({
@@ -48,8 +56,11 @@ def envoy_copts(repository, test = False):
repository + "//bazel:windows_opt_build": [],
repository + "//bazel:windows_fastbuild_build": [],
repository + "//bazel:windows_dbg_build": [],
+ repository + "//bazel:clang_cl_opt_build": [] if test else ["-Z7", "-fstandalone-debug"],
+ repository + "//bazel:clang_cl_fastbuild_build": ["-fno-standalone-debug"],
+ repository + "//bazel:clang_cl_dbg_build": ["-fstandalone-debug"],
}) + select({
- repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++2a-extensions"],
+ repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++2a-extensions", "-Wrange-loop-analysis"],
repository + "//bazel:gcc_build": ["-Wno-maybe-uninitialized"],
"//conditions:default": [],
}) + select({
@@ -57,10 +68,13 @@ def envoy_copts(repository, test = False):
"//conditions:default": [],
}) + select({
repository + "//bazel:disable_tcmalloc": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"],
- "//conditions:default": ["-DTCMALLOC"],
- }) + select({
- repository + "//bazel:debug_tcmalloc": ["-DENVOY_MEMORY_DEBUG_ENABLED=1"],
- "//conditions:default": [],
+ repository + "//bazel:disable_tcmalloc_on_linux_x86_64": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"],
+ repository + "//bazel:gperftools_tcmalloc": ["-DGPERFTOOLS_TCMALLOC"],
+ repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": ["-DGPERFTOOLS_TCMALLOC"],
+ repository + "//bazel:debug_tcmalloc": ["-DENVOY_MEMORY_DEBUG_ENABLED=1", "-DGPERFTOOLS_TCMALLOC"],
+ repository + "//bazel:debug_tcmalloc_on_linux_x86_64": ["-DENVOY_MEMORY_DEBUG_ENABLED=1", "-DGPERFTOOLS_TCMALLOC"],
+ repository + "//bazel:linux_x86_64": ["-DTCMALLOC"],
+ "//conditions:default": ["-DGPERFTOOLS_TCMALLOC"],
}) + select({
repository + "//bazel:disable_signal_trace": [],
"//conditions:default": ["-DENVOY_HANDLE_SIGNALS"],
@@ -115,6 +129,12 @@ def envoy_stdlib_deps():
def tcmalloc_external_dep(repository):
return select({
repository + "//bazel:disable_tcmalloc": None,
+ repository + "//bazel:disable_tcmalloc_on_linux_x86_64": None,
+ repository + "//bazel:debug_tcmalloc": envoy_external_dep_path("gperftools"),
+ repository + "//bazel:debug_tcmalloc_on_linux_x86_64": envoy_external_dep_path("gperftools"),
+ repository + "//bazel:gperftools_tcmalloc": envoy_external_dep_path("gperftools"),
+ repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": envoy_external_dep_path("gperftools"),
+ repository + "//bazel:linux_x86_64": envoy_external_dep_path("tcmalloc"),
"//conditions:default": envoy_external_dep_path("gperftools"),
})
diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl
index 471c8b72eec7..5eb90df500c0 100644
--- a/bazel/envoy_library.bzl
+++ b/bazel/envoy_library.bzl
@@ -20,6 +20,12 @@ load(
def tcmalloc_external_deps(repository):
return select({
repository + "//bazel:disable_tcmalloc": [],
+ repository + "//bazel:disable_tcmalloc_on_linux_x86_64": [],
+ repository + "//bazel:debug_tcmalloc": [envoy_external_dep_path("gperftools")],
+ repository + "//bazel:debug_tcmalloc_on_linux_x86_64": [envoy_external_dep_path("gperftools")],
+ repository + "//bazel:gperftools_tcmalloc": [envoy_external_dep_path("gperftools")],
+ repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": [envoy_external_dep_path("gperftools")],
+ repository + "//bazel:linux_x86_64": [envoy_external_dep_path("tcmalloc")],
"//conditions:default": [envoy_external_dep_path("gperftools")],
})
@@ -98,7 +104,8 @@ def envoy_cc_library(
tags = [],
deps = [],
strip_include_prefix = None,
- textual_hdrs = None):
+ textual_hdrs = None,
+ defines = []):
if tcmalloc_dep:
deps += tcmalloc_external_deps(repository)
@@ -123,6 +130,7 @@ def envoy_cc_library(
alwayslink = 1,
linkstatic = envoy_linkstatic(),
strip_include_prefix = strip_include_prefix,
+ defines = defines,
)
# Intended for usage by external consumers. This allows them to disambiguate
diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl
index 107ad2a21bde..5a33e4da515d 100644
--- a/bazel/envoy_select.bzl
+++ b/bazel/envoy_select.bzl
@@ -32,6 +32,36 @@ def envoy_select_hot_restart(xs, repository = ""):
"//conditions:default": xs,
})
+# Selects the given values depending on the WASM runtimes enabled in the current build.
+def envoy_select_wasm(xs):
+ return select({
+ "@envoy//bazel:wasm_none": [],
+ "//conditions:default": xs,
+ })
+
+def envoy_select_wasm_v8(xs):
+ return select({
+ "@envoy//bazel:wasm_wavm": [],
+ "@envoy//bazel:wasm_none": [],
+ "//conditions:default": xs,
+ })
+
+def envoy_select_wasm_wavm(xs):
+ return select({
+ "@envoy//bazel:wasm_all": xs,
+ "@envoy//bazel:wasm_wavm": xs,
+ "//conditions:default": [],
+ })
+
+def envoy_select_wasm_all_v8_wavm_none(xs1, xs2, xs3, xs4):
+ return select({
+ "@envoy//bazel:wasm_all": xs1,
+ "@envoy//bazel:wasm_v8": xs2,
+ "@envoy//bazel:wasm_wavm": xs3,
+ "@envoy//bazel:wasm_none": xs4,
+ "//conditions:default": xs2,
+ })
+
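+# Hypothetical BUILD usage (sketch), selecting sources per configured runtime:
+#
+#   srcs = envoy_select_wasm_all_v8_wavm_none(
+#       ["all_runtimes.cc"],  # wasm=enabled
+#       ["v8_only.cc"],       # wasm=v8 (also the default)
+#       ["wavm_only.cc"],     # wasm=wavm
+#       [],                   # wasm=disabled
+#   )
+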
# Select the given values if use of legacy codecs in tests is on in the current build.
def envoy_select_new_codecs_in_integration_tests(xs, repository = ""):
return select({
diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl
index 454733300dd8..1d5720149428 100644
--- a/bazel/envoy_test.bzl
+++ b/bazel/envoy_test.bzl
@@ -29,6 +29,7 @@ def _envoy_cc_test_infrastructure_library(
tags = [],
include_prefix = None,
copts = [],
+ alwayslink = 1,
**kargs):
# Add implicit tcmalloc external dependency(if available) in order to enable CPU and heap profiling in tests.
deps += tcmalloc_external_deps(repository)
@@ -44,7 +45,7 @@ def _envoy_cc_test_infrastructure_library(
],
tags = tags,
include_prefix = include_prefix,
- alwayslink = 1,
+ alwayslink = alwayslink,
linkstatic = envoy_linkstatic(),
**kargs
)
@@ -58,7 +59,6 @@ def _envoy_test_linkopts():
"-image_base 100000000",
],
"@envoy//bazel:windows_x86_64": [
- "-DEFAULTLIB:advapi32.lib",
"-DEFAULTLIB:ws2_32.lib",
"-DEFAULTLIB:iphlpapi.lib",
"-WX",
@@ -205,6 +205,7 @@ def envoy_cc_test_library(
tags = [],
include_prefix = None,
copts = [],
+ alwayslink = 1,
**kargs):
deps = deps + [
repository + "//test/test_common:printers_includes",
@@ -222,6 +223,7 @@ def envoy_cc_test_library(
include_prefix,
copts,
visibility = ["//visibility:public"],
+ alwayslink = alwayslink,
**kargs
)
diff --git a/bazel/external/cargo/BUILD b/bazel/external/cargo/BUILD
new file mode 100644
index 000000000000..e216296d130d
--- /dev/null
+++ b/bazel/external/cargo/BUILD
@@ -0,0 +1,23 @@
+"""
+cargo-raze workspace build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+ "notice", # See individual crates for specific licenses
+])
+
+alias(
+ name = "log",
+ actual = "@raze__log__0_4_11//:log",
+ tags = ["cargo-raze"],
+)
+
+alias(
+ name = "proxy_wasm",
+ actual = "@raze__proxy_wasm__0_1_2//:proxy_wasm",
+ tags = ["cargo-raze"],
+)
diff --git a/bazel/external/cargo/remote/BUILD b/bazel/external/cargo/remote/BUILD
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/bazel/external/cargo/remote/ahash-0.3.8.BUILD b/bazel/external/cargo/remote/ahash-0.3.8.BUILD
new file mode 100644
index 000000000000..a34e9e1685cf
--- /dev/null
+++ b/bazel/external/cargo/remote/ahash-0.3.8.BUILD
@@ -0,0 +1,46 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Unsupported target "ahash" with type "bench" omitted
+
+rust_library(
+ name = "ahash",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2018",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.3.8",
+ deps = [
+ ],
+)
+
+# Unsupported target "bench" with type "test" omitted
+# Unsupported target "map" with type "bench" omitted
+# Unsupported target "map_tests" with type "test" omitted
+# Unsupported target "nopanic" with type "test" omitted
diff --git a/bazel/external/cargo/remote/autocfg-1.0.0.BUILD b/bazel/external/cargo/remote/autocfg-1.0.0.BUILD
new file mode 100644
index 000000000000..9f51a3e4cd37
--- /dev/null
+++ b/bazel/external/cargo/remote/autocfg-1.0.0.BUILD
@@ -0,0 +1,45 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # Apache-2.0 from expression "Apache-2.0 OR MIT"
+])
+
+rust_library(
+ name = "autocfg",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "1.0.0",
+ deps = [
+ ],
+)
+
+# Unsupported target "integers" with type "example" omitted
+# Unsupported target "paths" with type "example" omitted
+# Unsupported target "rustflags" with type "test" omitted
+# Unsupported target "traits" with type "example" omitted
+# Unsupported target "versions" with type "example" omitted
diff --git a/bazel/external/cargo/remote/cfg-if-0.1.10.BUILD b/bazel/external/cargo/remote/cfg-if-0.1.10.BUILD
new file mode 100644
index 000000000000..b36c1413e5b0
--- /dev/null
+++ b/bazel/external/cargo/remote/cfg-if-0.1.10.BUILD
@@ -0,0 +1,41 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_library(
+ name = "cfg_if",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2018",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.1.10",
+ deps = [
+ ],
+)
+
+# Unsupported target "xcrate" with type "test" omitted
diff --git a/bazel/external/cargo/remote/hashbrown-0.7.2.BUILD b/bazel/external/cargo/remote/hashbrown-0.7.2.BUILD
new file mode 100644
index 000000000000..54276e05010e
--- /dev/null
+++ b/bazel/external/cargo/remote/hashbrown-0.7.2.BUILD
@@ -0,0 +1,50 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # Apache-2.0 from expression "Apache-2.0 OR MIT"
+])
+
+# Unsupported target "bench" with type "bench" omitted
+# Unsupported target "build-script-build" with type "custom-build" omitted
+
+rust_library(
+ name = "hashbrown",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ "ahash",
+ "inline-more",
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2018",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.7.2",
+ deps = [
+ "@raze__ahash__0_3_8//:ahash",
+ ],
+)
+
+# Unsupported target "hasher" with type "test" omitted
+# Unsupported target "rayon" with type "test" omitted
+# Unsupported target "serde" with type "test" omitted
+# Unsupported target "set" with type "test" omitted
diff --git a/bazel/external/cargo/remote/libc-0.2.74.BUILD b/bazel/external/cargo/remote/libc-0.2.74.BUILD
new file mode 100644
index 000000000000..76a2773d1a4c
--- /dev/null
+++ b/bazel/external/cargo/remote/libc-0.2.74.BUILD
@@ -0,0 +1,42 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Unsupported target "build-script-build" with type "custom-build" omitted
+# Unsupported target "const_fn" with type "test" omitted
+
+rust_library(
+ name = "libc",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.2.74",
+ deps = [
+ ],
+)
diff --git a/bazel/external/cargo/remote/log-0.4.11.BUILD b/bazel/external/cargo/remote/log-0.4.11.BUILD
new file mode 100644
index 000000000000..9596e2448ecc
--- /dev/null
+++ b/bazel/external/cargo/remote/log-0.4.11.BUILD
@@ -0,0 +1,46 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Unsupported target "build-script-build" with type "custom-build" omitted
+# Unsupported target "filters" with type "test" omitted
+
+rust_library(
+ name = "log",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ "--cfg=atomic_cas",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.4.11",
+ deps = [
+ "@raze__cfg_if__0_1_10//:cfg_if",
+ ],
+)
+
+# Unsupported target "macros" with type "test" omitted
diff --git a/bazel/external/cargo/remote/memory_units-0.4.0.BUILD b/bazel/external/cargo/remote/memory_units-0.4.0.BUILD
new file mode 100644
index 000000000000..c5c3c3987128
--- /dev/null
+++ b/bazel/external/cargo/remote/memory_units-0.4.0.BUILD
@@ -0,0 +1,39 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "reciprocal", # MPL-2.0 from expression "MPL-2.0"
+])
+
+rust_library(
+ name = "memory_units",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.4.0",
+ deps = [
+ ],
+)
diff --git a/bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD b/bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD
new file mode 100644
index 000000000000..2f9895fea7fa
--- /dev/null
+++ b/bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD
@@ -0,0 +1,47 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # Apache-2.0 from expression "Apache-2.0"
+])
+
+# Unsupported target "hello_world" with type "example" omitted
+# Unsupported target "http_auth_random" with type "example" omitted
+# Unsupported target "http_body" with type "example" omitted
+# Unsupported target "http_headers" with type "example" omitted
+
+rust_library(
+ name = "proxy_wasm",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2018",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.1.2",
+ deps = [
+ "@raze__hashbrown__0_7_2//:hashbrown",
+ "@raze__log__0_4_11//:log",
+ "@raze__wee_alloc__0_4_5//:wee_alloc",
+ ],
+)
diff --git a/bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD b/bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD
new file mode 100644
index 000000000000..ab49873603cd
--- /dev/null
+++ b/bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD
@@ -0,0 +1,46 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "reciprocal", # MPL-2.0 from expression "MPL-2.0"
+])
+
+# Unsupported target "build-script-build" with type "custom-build" omitted
+
+rust_library(
+ name = "wee_alloc",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ "default",
+ "size_classes",
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.4.5",
+ deps = [
+ "@raze__cfg_if__0_1_10//:cfg_if",
+ "@raze__libc__0_2_74//:libc",
+ "@raze__memory_units__0_4_0//:memory_units",
+ ],
+)
diff --git a/bazel/external/cargo/remote/winapi-0.3.9.BUILD b/bazel/external/cargo/remote/winapi-0.3.9.BUILD
new file mode 100644
index 000000000000..2495dd1d900e
--- /dev/null
+++ b/bazel/external/cargo/remote/winapi-0.3.9.BUILD
@@ -0,0 +1,44 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Unsupported target "build-script-build" with type "custom-build" omitted
+
+rust_library(
+ name = "winapi",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ "memoryapi",
+ "synchapi",
+ "winbase",
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.3.9",
+ deps = [
+ ],
+)
diff --git a/bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD b/bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD
new file mode 100644
index 000000000000..d6c1545143fe
--- /dev/null
+++ b/bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD
@@ -0,0 +1,41 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Unsupported target "build-script-build" with type "custom-build" omitted
+
+rust_library(
+ name = "winapi_i686_pc_windows_gnu",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.4.0",
+ deps = [
+ ],
+)
diff --git a/bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD b/bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD
new file mode 100644
index 000000000000..e666870dbd05
--- /dev/null
+++ b/bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD
@@ -0,0 +1,41 @@
+"""
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+load(
+ "@io_bazel_rules_rust//rust:rust.bzl",
+ "rust_library",
+)
+
+package(default_visibility = [
+ # Public for visibility by "@raze__crate__version//" targets.
+ #
+ # Prefer access through "//bazel/external/cargo", which limits external
+ # visibility to explicit Cargo.toml dependencies.
+ "//visibility:public",
+])
+
+licenses([
+ "notice", # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Unsupported target "build-script-build" with type "custom-build" omitted
+
+rust_library(
+ name = "winapi_x86_64_pc_windows_gnu",
+ srcs = glob(["**/*.rs"]),
+ crate_features = [
+ ],
+ crate_root = "src/lib.rs",
+ crate_type = "lib",
+ edition = "2015",
+ rustc_flags = [
+ "--cap-lints=allow",
+ ],
+ tags = ["cargo-raze"],
+ version = "0.4.0",
+ deps = [
+ ],
+)
diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch
deleted file mode 100644
index fb33ca4475fb..000000000000
--- a/bazel/external/googleurl.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-# TODO(dio): Consider to remove this patch when we have the ability to compile the project using
-# clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974.
-
-diff --git a/base/compiler_specific.h b/base/compiler_specific.h
-index 0cd36dc..8c4cbd4 100644
---- a/base/compiler_specific.h
-+++ b/base/compiler_specific.h
-@@ -7,10 +7,6 @@
-
- #include "build/build_config.h"
-
--#if defined(COMPILER_MSVC) && !defined(__clang__)
--#error "Only clang-cl is supported on Windows, see https://crbug.com/988071"
--#endif
--
- // Annotate a variable indicating it's ok if the variable is not used.
- // (Typically used to silence a compiler warning when the assignment
- // is important for some other reason.)
-@@ -55,8 +51,12 @@
- // prevent code folding, see gurl_base::debug::Alias.
- // Use like:
- // void NOT_TAIL_CALLED FooBar();
--#if defined(__clang__) && __has_attribute(not_tail_called)
-+#if defined(__clang__)
-+#if defined(__has_attribute)
-+#if __has_attribute(not_tail_called)
- #define NOT_TAIL_CALLED __attribute__((not_tail_called))
-+#endif
-+#endif
- #else
- #define NOT_TAIL_CALLED
- #endif
-@@ -226,7 +226,9 @@
- #endif
- #endif
-
--#if defined(__clang__) && __has_attribute(uninitialized)
-+#if defined(__clang__)
-+#if defined(__has_attribute)
-+#if __has_attribute(uninitialized)
- // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
- // the specified variable.
- // Library-wide alternative is
-@@ -257,6 +259,8 @@
- // E.g. platform, bot, benchmark or test name in patch description or next to
- // the attribute.
- #define STACK_UNINITIALIZED __attribute__((uninitialized))
-+#endif
-+#endif
- #else
- #define STACK_UNINITIALIZED
- #endif
diff --git a/bazel/external/icuuc.BUILD b/bazel/external/icuuc.BUILD
deleted file mode 100644
index 305d0db952b1..000000000000
--- a/bazel/external/icuuc.BUILD
+++ /dev/null
@@ -1,55 +0,0 @@
-load("@rules_cc//cc:defs.bzl", "cc_library")
-
-licenses(["notice"]) # Apache 2
-
-exports_files(["LICENSE"])
-
-icuuc_copts = [
- "-DU_STATIC_IMPLEMENTATION",
- "-DU_COMMON_IMPLEMENTATION",
- "-DU_HAVE_STD_ATOMICS",
-] + select({
- "@envoy//bazel:apple": [
- "-Wno-shorten-64-to-32",
- "-Wno-unused-variable",
- ],
- "@envoy//bazel:windows_x86_64": [
- "/utf-8",
- "/DLOCALE_ALLOW_NEUTRAL_NAMES=0",
- ],
- # TODO(dio): Add "@envoy//bazel:android" when we have it.
- # "@envoy//bazel:android": [
- # "-fdata-sections",
- # "-DU_HAVE_NL_LANGINFO_CODESET=0",
- # "-Wno-deprecated-declarations",
- # ],
- "//conditions:default": [],
-})
-
-cc_library(
- name = "headers",
- hdrs = glob(["source/common/unicode/*.h"]),
- includes = ["source/common"],
- visibility = ["//visibility:public"],
-)
-
-cc_library(
- name = "common",
- hdrs = glob(["source/common/unicode/*.h"]),
- includes = ["source/common"],
- visibility = ["//visibility:public"],
- deps = [":icuuc"],
-)
-
-cc_library(
- name = "icuuc",
- srcs = glob([
- "source/common/*.c",
- "source/common/*.cpp",
- "source/stubdata/*.cpp",
- ]),
- hdrs = glob(["source/common/*.h"]),
- copts = icuuc_copts,
- visibility = ["//visibility:private"],
- deps = [":headers"],
-)
diff --git a/bazel/external/proxy_wasm_cpp_host.BUILD b/bazel/external/proxy_wasm_cpp_host.BUILD
index 4cb87cf98ec1..1b3f0829d7b2 100644
--- a/bazel/external/proxy_wasm_cpp_host.BUILD
+++ b/bazel/external/proxy_wasm_cpp_host.BUILD
@@ -1,4 +1,10 @@
load("@rules_cc//cc:defs.bzl", "cc_library")
+load(
+ "@envoy//bazel:envoy_build_system.bzl",
+ "envoy_select_wasm_all_v8_wavm_none",
+ "envoy_select_wasm_v8",
+ "envoy_select_wasm_wavm",
+)
licenses(["notice"]) # Apache 2
@@ -14,14 +20,44 @@ cc_library(
cc_library(
name = "lib",
- srcs = glob(
- [
- "src/**/*.h",
- "src/**/*.cc",
- ],
- exclude = ["src/**/wavm*"],
+ # Note that the select cannot appear in the glob.
+ srcs = envoy_select_wasm_all_v8_wavm_none(
+ glob(
+ [
+ "src/**/*.h",
+ "src/**/*.cc",
+ ],
+ ),
+ glob(
+ [
+ "src/**/*.h",
+ "src/**/*.cc",
+ ],
+ exclude = ["src/wavm/*"],
+ ),
+ glob(
+ [
+ "src/**/*.h",
+ "src/**/*.cc",
+ ],
+ exclude = ["src/v8/*"],
+ ),
+ glob(
+ [
+ "src/**/*.h",
+ "src/**/*.cc",
+ ],
+ exclude = [
+ "src/wavm/*",
+ "src/v8/*",
+ ],
+ ),
),
- copts = ["-std=c++14"],
+ copts = envoy_select_wasm_wavm([
+ '-DWAVM_API=""',
+ "-Wno-non-virtual-dtor",
+ "-Wno-old-style-cast",
+ ]),
deps = [
":include",
"//external:abseil_flat_hash_map",
@@ -29,9 +65,12 @@ cc_library(
"//external:abseil_strings",
"//external:protobuf",
"//external:ssl",
- "//external:wee8",
"//external:zlib",
"@proxy_wasm_cpp_sdk//:api_lib",
"@proxy_wasm_cpp_sdk//:common_lib",
- ],
+ ] + envoy_select_wasm_wavm([
+ "@envoy//bazel/foreign_cc:wavm",
+ ]) + envoy_select_wasm_v8([
+ "//external:wee8",
+ ]),
)
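The envoy_select_wasm_* helpers loaded at the top of this file gate sources, copts, and deps on which Wasm runtimes are configured. A minimal sketch of the single-runtime selectors, assuming config_setting labels such as @envoy//bazel:wasm_wavm and @envoy//bazel:wasm_v8 (the actual definitions live in @envoy//bazel:envoy_build_system.bzl and may differ):

    def envoy_select_wasm_wavm(xs):
        # Contribute `xs` only when the WAVM runtime is enabled.
        return select({
            "@envoy//bazel:wasm_wavm": xs,
            "//conditions:default": [],
        })

    def envoy_select_wasm_v8(xs):
        # Contribute `xs` only when the V8 (wee8) runtime is enabled.
        return select({
            "@envoy//bazel:wasm_v8": xs,
            "//conditions:default": [],
        })

envoy_select_wasm_all_v8_wavm_none presumably follows the same pattern with four branches, matching the four globs above (all runtimes, V8 only, WAVM only, none).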
diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD
index b641e9d59e84..7541909aa191 100644
--- a/bazel/external/quiche.BUILD
+++ b/bazel/external/quiche.BUILD
@@ -53,23 +53,21 @@ genrule(
# These options are only used to suppress errors in brought-in QUICHE tests.
# Use #pragma GCC diagnostic ignored in integration code to suppress these errors.
+quiche_common_copts = [
+ "-Wno-unused-function",
+ # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames.
+ "-Wno-invalid-offsetof",
+ "-Wno-range-loop-analysis",
+]
+
quiche_copts = select({
- "@envoy//bazel:windows_x86_64": [],
- "//conditions:default": [
- # Remove these after upstream fix.
- "-Wno-unused-parameter",
- "-Wno-unused-function",
- "-Wno-return-type",
- "-Wno-unknown-warning-option",
- "-Wno-deprecated-copy",
- "-Wno-ignored-qualifiers",
- "-Wno-sign-compare",
- "-Wno-inconsistent-missing-override",
- # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames.
- "-Wno-invalid-offsetof",
- # to suppress errors re: size_t vs. int comparisons
+ # Ignore unguarded #pragma GCC statements in QUICHE sources
+ "@envoy//bazel:windows_x86_64": ["-wd4068"],
+ # Remove these after upstream fix.
+ "@envoy//bazel:gcc_build": [
"-Wno-sign-compare",
- ],
+ ] + quiche_common_copts,
+ "//conditions:default": quiche_common_copts,
})
test_suite(
@@ -2247,6 +2245,12 @@ envoy_cc_library(
"quiche/quic/core/frames/quic_window_update_frame.h",
],
copts = quiche_copts,
+    # TODO: Work around an initializer in an anonymous union that breaks the
+    # Windows fastbuild build. Remove this after the upstream fix.
+ defines = select({
+ "@envoy//bazel:windows_x86_64": ["QUIC_FRAME_DEBUG=0"],
+ "//conditions:default": [],
+ }),
repository = "@envoy",
tags = ["nofips"],
visibility = ["//visibility:public"],
@@ -3534,6 +3538,23 @@ envoy_cc_test_library(
],
)
+envoy_cc_library(
+ name = "quic_test_tools_flow_controller_peer_lib",
+ srcs = [
+ "quiche/quic/test_tools/quic_flow_controller_peer.cc",
+ ],
+ hdrs = [
+ "quiche/quic/test_tools/quic_flow_controller_peer.h",
+ ],
+ copts = quiche_copts,
+ repository = "@envoy",
+ tags = ["nofips"],
+ deps = [
+ ":quic_core_packets_lib",
+ ":quic_core_session_lib",
+ ],
+)
+
envoy_cc_test_library(
name = "quic_test_tools_framer_peer_lib",
srcs = ["quiche/quic/test_tools/quic_framer_peer.cc"],
@@ -3667,6 +3688,7 @@ envoy_cc_test_library(
":quic_core_session_lib",
":quic_core_stream_send_buffer_lib",
":quic_platform_base",
+ ":quic_test_tools_flow_controller_peer_lib",
":quic_test_tools_stream_send_buffer_peer_lib",
],
)
@@ -3831,7 +3853,6 @@ envoy_cc_library(
hdrs = [
"quiche/common/platform/api/quiche_arraysize.h",
"quiche/common/platform/api/quiche_logging.h",
- "quiche/common/platform/api/quiche_map_util.h",
"quiche/common/platform/api/quiche_optional.h",
"quiche/common/platform/api/quiche_ptr_util.h",
"quiche/common/platform/api/quiche_str_cat.h",
diff --git a/bazel/external/wee8.BUILD b/bazel/external/wee8.BUILD
index b61f95748672..3a62ecd9ebf4 100644
--- a/bazel/external/wee8.BUILD
+++ b/bazel/external/wee8.BUILD
@@ -13,6 +13,10 @@ cc_library(
"wee8/include/v8-version.h",
"wee8/third_party/wasm-api/wasm.hh",
],
+ copts = [
+ "-Wno-range-loop-analysis",
+ ],
+ defines = ["ENVOY_WASM_V8"],
includes = [
"wee8/include",
"wee8/third_party",
diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd
index 8cb0e24c5f49..d8cbd1981a64 100644
--- a/bazel/external/wee8.genrule_cmd
+++ b/bazel/external/wee8.genrule_cmd
@@ -19,7 +19,7 @@ pushd $$ROOT/wee8
rm -rf out/wee8
# Export compiler configuration.
-export CXXFLAGS="$${CXXFLAGS-} -Wno-deprecated-copy -Wno-unknown-warning-option"
+export CXXFLAGS="$${CXXFLAGS-} -Wno-sign-compare -Wno-deprecated-copy -Wno-unknown-warning-option -Wno-range-loop-analysis"
if [[ ( `uname` == "Darwin" && $${CXX-} == "" ) || $${CXX-} == *"clang"* ]]; then
export IS_CLANG=true
export CC=$${CC:-clang}
diff --git a/bazel/external/wee8.patch b/bazel/external/wee8.patch
index ad1c20b6c00b..cce3eecde614 100644
--- a/bazel/external/wee8.patch
+++ b/bazel/external/wee8.patch
@@ -34,7 +34,7 @@
#endif
--- wee8/build/config/sanitizers/sanitizers.gni
+++ wee8/build/config/sanitizers/sanitizers.gni
-@@ -147,7 +147,7 @@ if (!is_a_target_toolchain) {
+@@ -150,7 +150,7 @@ if (!is_a_target_toolchain) {
# standard system libraries. We have instrumented system libraries for msan,
# which requires them to prevent false positives.
# TODO(thakis): Maybe remove this variable.
@@ -43,7 +43,7 @@
# Whether we are doing a fuzzer build. Normally this should be checked instead
# of checking "use_libfuzzer || use_afl" because often developers forget to
-@@ -195,8 +195,7 @@ assert(!using_sanitizer || is_clang,
+@@ -198,8 +198,7 @@ assert(!using_sanitizer || is_clang,
assert(!is_cfi || is_clang,
"is_cfi requires setting is_clang = true in 'gn args'")
diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD
index 6357444b4d0b..c2a214747107 100644
--- a/bazel/foreign_cc/BUILD
+++ b/bazel/foreign_cc/BUILD
@@ -67,16 +67,13 @@ configure_make(
# TODO(htuch): Remove when #6084 is fixed
"//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"},
"//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"},
- "//bazel:windows_dbg_build": {"WINDOWS_DBG_BUILD": "debug"},
"//conditions:default": {},
}),
lib_source = "@com_github_moonjit_moonjit//:all",
make_commands = [],
out_include_dir = "include/moonjit-2.2",
- static_libraries = select({
- "//bazel:windows_x86_64": ["lua51.lib"],
- "//conditions:default": ["libluajit-5.1.a"],
- }),
+ static_libraries = ["libluajit-5.1.a"],
+ tags = ["skip_on_windows"],
)
envoy_cmake_external(
@@ -115,8 +112,9 @@ envoy_cmake_external(
"CMAKE_USE_GSSAPI": "off",
"HTTP_ONLY": "on",
"CMAKE_INSTALL_LIBDIR": "lib",
- # Explicitly enable Unix sockets and disable crypto for Windows
- "USE_UNIX_SOCKETS": "on",
+        # TODO: Re-enable Unix sockets once afunix.h is correctly detected
+        # "USE_UNIX_SOCKETS": "on",
+        # Explicitly disable native Windows crypto for Windows
"CURL_DISABLE_CRYPTO_AUTH": "on",
# C-Ares.
"ENABLE_ARES": "on",
@@ -137,7 +135,6 @@ envoy_cmake_external(
"ZLIB_LIBRARY": "$EXT_BUILD_DEPS/zlib",
"ZLIB_INCLUDE_DIR": "$EXT_BUILD_DEPS/zlib/include",
"CMAKE_CXX_COMPILER_FORCED": "on",
- "CMAKE_C_FLAGS_BAZEL": "-fPIC",
},
defines = ["CURL_STATICLIB"],
generate_crosstool_file = True,
@@ -149,8 +146,8 @@ envoy_cmake_external(
deps = [
":ares",
":nghttp2",
- ":zlib",
"//external:ssl",
+ "//external:zlib",
],
)
@@ -192,6 +189,109 @@ envoy_cmake_external(
}),
)
+envoy_cmake_external(
+ name = "llvm",
+ cache_entries = {
+        # Disable both BUILD and INCLUDE, since some of the INCLUDE
+        # targets build code instead of only generating build files.
+ "LLVM_BUILD_DOCS": "off",
+ "LLVM_INCLUDE_DOCS": "off",
+ "LLVM_BUILD_EXAMPLES": "off",
+ "LLVM_INCLUDE_EXAMPLES": "off",
+ "LLVM_BUILD_RUNTIME": "off",
+ "LLVM_BUILD_RUNTIMES": "off",
+ "LLVM_INCLUDE_RUNTIMES": "off",
+ "LLVM_BUILD_TESTS": "off",
+ "LLVM_INCLUDE_TESTS": "off",
+ "LLVM_BUILD_TOOLS": "off",
+ "LLVM_INCLUDE_TOOLS": "off",
+ "LLVM_BUILD_UTILS": "off",
+ "LLVM_INCLUDE_UTILS": "off",
+ "LLVM_ENABLE_LIBEDIT": "off",
+ "LLVM_ENABLE_LIBXML2": "off",
+ "LLVM_ENABLE_TERMINFO": "off",
+ "LLVM_ENABLE_ZLIB": "off",
+ "LLVM_TARGETS_TO_BUILD": "X86",
+ "CMAKE_CXX_COMPILER_FORCED": "on",
+ # Workaround for the issue with statically linked libstdc++
+ # using -l:libstdc++.a.
+ "CMAKE_CXX_FLAGS": "-lstdc++",
+ },
+ env_vars = {
+        # Workaround for the -DDEBUG flag added in fastbuild on macOS,
+        # which conflicts with the DEBUG macro used in LLVM.
+ "CFLAGS": "-UDEBUG",
+ "CXXFLAGS": "-UDEBUG",
+ "ASMFLAGS": "-UDEBUG",
+ },
+ lib_source = "@org_llvm_llvm//:all",
+ static_libraries = select({
+ "//conditions:default": [
+ # Order from llvm-config --libnames.
+ "libLLVMLTO.a",
+ "libLLVMPasses.a",
+ "libLLVMObjCARCOpts.a",
+ "libLLVMSymbolize.a",
+ "libLLVMDebugInfoPDB.a",
+ "libLLVMDebugInfoDWARF.a",
+ "libLLVMFuzzMutate.a",
+ "libLLVMTableGen.a",
+ "libLLVMDlltoolDriver.a",
+ "libLLVMLineEditor.a",
+ "libLLVMOrcJIT.a",
+ "libLLVMCoverage.a",
+ "libLLVMMIRParser.a",
+ "libLLVMObjectYAML.a",
+ "libLLVMLibDriver.a",
+ "libLLVMOption.a",
+ "libLLVMWindowsManifest.a",
+ "libLLVMX86Disassembler.a",
+ "libLLVMX86AsmParser.a",
+ "libLLVMX86CodeGen.a",
+ "libLLVMGlobalISel.a",
+ "libLLVMSelectionDAG.a",
+ "libLLVMAsmPrinter.a",
+ "libLLVMDebugInfoCodeView.a",
+ "libLLVMDebugInfoMSF.a",
+ "libLLVMX86Desc.a",
+ "libLLVMMCDisassembler.a",
+ "libLLVMX86Info.a",
+ "libLLVMX86Utils.a",
+ "libLLVMMCJIT.a",
+ "libLLVMInterpreter.a",
+ "libLLVMExecutionEngine.a",
+ "libLLVMRuntimeDyld.a",
+ "libLLVMCodeGen.a",
+ "libLLVMTarget.a",
+ "libLLVMCoroutines.a",
+ "libLLVMipo.a",
+ "libLLVMInstrumentation.a",
+ "libLLVMVectorize.a",
+ "libLLVMScalarOpts.a",
+ "libLLVMLinker.a",
+ "libLLVMIRReader.a",
+ "libLLVMAsmParser.a",
+ "libLLVMInstCombine.a",
+ "libLLVMTransformUtils.a",
+ "libLLVMBitWriter.a",
+ "libLLVMAnalysis.a",
+ "libLLVMProfileData.a",
+ "libLLVMObject.a",
+ "libLLVMMCParser.a",
+ "libLLVMMC.a",
+ "libLLVMBitReader.a",
+ "libLLVMBitstreamReader.a",
+ "libLLVMCore.a",
+ "libLLVMBinaryFormat.a",
+ "libLLVMSupport.a",
+ "libLLVMDemangle.a",
+ "libLLVMRemarks.a",
+ "libLLVMCFGuard.a",
+ "libLLVMTextAPI.a",
+ ],
+ }),
+)
+
envoy_cmake_external(
name = "nghttp2",
cache_entries = {
@@ -211,10 +311,38 @@ envoy_cmake_external(
}),
)
+envoy_cmake_external(
+ name = "wavm",
+ binaries = ["wavm"],
+ cache_entries = {
+ "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm",
+ "WAVM_ENABLE_STATIC_LINKING": "on",
+ "WAVM_ENABLE_RELEASE_ASSERTS": "on",
+ "WAVM_ENABLE_UNWIND": "no",
+ # Workaround for the issue with statically linked libstdc++
+ # using -l:libstdc++.a.
+ "CMAKE_CXX_FLAGS": "-lstdc++ -Wno-unused-command-line-argument",
+ },
+ defines = ["ENVOY_WASM_WAVM"],
+ env_vars = {
+        # Workaround for the -DDEBUG flag added in fastbuild on macOS,
+        # which conflicts with the DEBUG macro used in LLVM.
+ "CFLAGS": "-UDEBUG",
+ "CXXFLAGS": "-UDEBUG",
+ "ASMFLAGS": "-UDEBUG",
+ },
+ lib_source = "@com_github_wavm_wavm//:all",
+ static_libraries = select({
+ "//conditions:default": [
+ "libWAVM.a",
+ ],
+ }),
+ deps = [":llvm"],
+)
+
envoy_cmake_external(
name = "zlib",
cache_entries = {
- "BUILD_SHARED_LIBS": "off",
"CMAKE_CXX_COMPILER_FORCED": "on",
"CMAKE_C_COMPILER_FORCED": "on",
"SKIP_BUILD_EXAMPLES": "on",
diff --git a/bazel/foreign_cc/curl-revert-cmake-minreqver.patch b/bazel/foreign_cc/curl-revert-cmake-minreqver.patch
deleted file mode 100644
index 78ba60fdb34b..000000000000
--- a/bazel/foreign_cc/curl-revert-cmake-minreqver.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-# Curl 7.69.1 introduces a range-bound cmake revisions between 3.0 and 3.16
-# but this causes the Win32 build to be broken (and is unwise as cmake
-# has already released 3.17)
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index b13616fc7..8b6d77542 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -38,8 +38,7 @@
- # To check:
- # (From Daniel Stenberg) The cmake build selected to run gcc with -fPIC on my box while the plain configure script did not.
- # (From Daniel Stenberg) The gcc command line use neither -g nor any -O options. As a developer, I also treasure our configure scripts's --enable-debug option that sets a long range of "picky" compiler options.
--cmake_minimum_required(VERSION 3.0...3.16 FATAL_ERROR)
--
-+cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
- set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}")
- include(Utilities)
- include(Macros)
diff --git a/bazel/foreign_cc/curl.patch b/bazel/foreign_cc/curl.patch
new file mode 100644
index 000000000000..7c2a7bc129e0
--- /dev/null
+++ b/bazel/foreign_cc/curl.patch
@@ -0,0 +1,29 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index ec1cfa782..0c5a72f00 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -42,0 +42,5 @@
++# Work around a CMake bug triggered by curl's declared max CMake policy version, see https://gitlab.kitware.com/cmake/cmake/-/issues/21288
++if(POLICY CMP0091)
++ cmake_policy(SET CMP0091 OLD)
++endif()
++
+@@ -249,3 +254,6 @@
+- set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>")
+- set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /MT")
+- set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /MTd")
++ foreach(build_suffix "" _DEBUG _RELEASE _MINSIZEREL _RELWITHDEBINFO)
++ set(flags_var CMAKE_C_FLAGS${build_suffix})
++ if("${${flags_var}}" MATCHES "/MD")
++ string(REGEX REPLACE "/MD" "/MT" ${flags_var} "${${flags_var}}")
++ endif()
++ endforeach()
+diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
+index 911c9096d..ba6af1bf1 100644
+--- a/lib/CMakeLists.txt
++++ b/lib/CMakeLists.txt
+@@ -91,4 +91,0 @@ add_library(
+-if(MSVC AND NOT BUILD_SHARED_LIBS)
+- set_target_properties(${LIB_NAME} PROPERTIES STATIC_LIBRARY_FLAGS ${CMAKE_EXE_LINKER_FLAGS})
+-endif()
+-
diff --git a/bazel/foreign_cc/llvm.patch b/bazel/foreign_cc/llvm.patch
new file mode 100644
index 000000000000..cd02f2842401
--- /dev/null
+++ b/bazel/foreign_cc/llvm.patch
@@ -0,0 +1,25 @@
+# Workaround for Envoy's CMAKE_BUILD_TYPE=Bazel.
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -247,7 +247,7 @@
+ string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
+
+ if (CMAKE_BUILD_TYPE AND
+- NOT uppercase_CMAKE_BUILD_TYPE MATCHES "^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL)$")
++ NOT uppercase_CMAKE_BUILD_TYPE MATCHES "^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL|BAZEL)$")
+ message(FATAL_ERROR "Invalid value for CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
+ endif()
+
+# Workaround for a missing -fuse-ld flag in CXXFLAGS, which results in
+# different linkers being used during configure and compilation phases.
+--- a/cmake/modules/HandleLLVMOptions.cmake
++++ b/cmake/modules/HandleLLVMOptions.cmake
+@@ -718,8 +718,6 @@ endif()
+ if (UNIX AND CMAKE_GENERATOR STREQUAL "Ninja")
+ include(CheckLinkerFlag)
+ check_linker_flag("-Wl,--color-diagnostics" LINKER_SUPPORTS_COLOR_DIAGNOSTICS)
+- append_if(LINKER_SUPPORTS_COLOR_DIAGNOSTICS "-Wl,--color-diagnostics"
+- CMAKE_EXE_LINKER_FLAGS CMAKE_MODULE_LINKER_FLAGS CMAKE_SHARED_LINKER_FLAGS)
+ endif()
+
+ # Add flags for add_dead_strip().
diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch
index b454b7dfd149..c0fb0da819fd 100644
--- a/bazel/foreign_cc/luajit.patch
+++ b/bazel/foreign_cc/luajit.patch
@@ -1,5 +1,5 @@
diff --git a/src/Makefile b/src/Makefile
-index f56465d..5d91fa7 100644
+index e65b55e..f0a61dd 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -27,7 +27,7 @@ NODOTABIVER= 51
@@ -33,96 +33,96 @@ index f56465d..5d91fa7 100644
#
# Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter.
#XCFLAGS+= -DLUAJIT_DISABLE_JIT
-@@ -111,7 +111,7 @@ XCFLAGS=
- #XCFLAGS+= -DLUAJIT_NUMMODE=2
- #
- # Enable GC64 mode for x64.
--#XCFLAGS+= -DLUAJIT_ENABLE_GC64
-+XCFLAGS+= -DLUAJIT_ENABLE_GC64
- #
- ##############################################################################
-
-@@ -587,7 +587,7 @@ endif
-
+@@ -591,7 +591,7 @@ endif
+
Q= @
E= @echo
-#Q=
+Q=
#E= @:
-
- ##############################################################################
-EOF
---- a/src/msvcbuild.bat 2020-08-13 18:42:05.667354300 +0000
-+++ b/src/msvcbuild.bat 2020-08-13 19:03:25.092297900 +0000
-@@ -14,7 +14,7 @@
- @if not defined INCLUDE goto :FAIL
- @setlocal
--@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline
-+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT
- @set LJLINK=link /nologo
- @set LJMT=mt /nologo
- @set LJLIB=lib /nologo /nodefaultlib
-@@ -25,7 +25,7 @@
- @set LJLIBNAME=lua51.lib
- @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c
-
--%LJCOMPILE% host\minilua.c
-+%LJCOMPILE% /O2 host\minilua.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /out:minilua.exe minilua.obj
- @if errorlevel 1 goto :BAD
-@@ -48,7 +48,7 @@
- minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
- @if errorlevel 1 goto :BAD
-
--%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c
-+%LJCOMPILE% /O2 /I "." /I %DASMDIR% host\buildvm*.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /out:buildvm.exe buildvm*.obj
- @if errorlevel 1 goto :BAD
-@@ -72,24 +72,35 @@
-
- @if "%1" neq "debug" goto :NODEBUG
- @shift
--@set LJCOMPILE=%LJCOMPILE% /Zi
-+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7
- @set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no
-+@set LJCRTDBG=d
-+@goto :ENDDEBUG
- :NODEBUG
-+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7
-+@set LJLINK=%LJLINK% /release /incremental:no
-+@set LJCRTDBG=
-+:ENDDEBUG
- @if "%1"=="amalg" goto :AMALGDLL
- @if "%1"=="static" goto :STATIC
--%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
-+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%
-+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj
- @if errorlevel 1 goto :BAD
- @goto :MTDLL
- :STATIC
-+@shift
-+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG%
- %LJCOMPILE% lj_*.c lib_*.c
- @if errorlevel 1 goto :BAD
- %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj
- @if errorlevel 1 goto :BAD
- @goto :MTDLL
- :AMALGDLL
--%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c
-+@shift
-+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%
-+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj
- @if errorlevel 1 goto :BAD
+ ##############################################################################
+diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat
+index ae035dc..0e7eac9 100644
+--- a/src/msvcbuild.bat
++++ b/src/msvcbuild.bat
+@@ -13,9 +13,7 @@
+ @if not defined INCLUDE goto :FAIL
+
+ @setlocal
+-@rem Add more debug flags here, e.g. DEBUGCFLAGS=/DLUA_USE_APICHECK
+-@set DEBUGCFLAGS=
+-@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline
++@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT
+ @set LJLINK=link /nologo
+ @set LJMT=mt /nologo
+ @set LJLIB=lib /nologo /nodefaultlib
+@@ -24,10 +22,9 @@
+ @set DASC=vm_x64.dasc
+ @set LJDLLNAME=lua51.dll
+ @set LJLIBNAME=lua51.lib
+-@set BUILDTYPE=release
+ @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c
+
+-%LJCOMPILE% host\minilua.c
++%LJCOMPILE% /O2 host\minilua.c
+ @if errorlevel 1 goto :BAD
+ %LJLINK% /out:minilua.exe minilua.obj
+ @if errorlevel 1 goto :BAD
+@@ -51,7 +48,7 @@ if exist minilua.exe.manifest^
+ minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
+ @if errorlevel 1 goto :BAD
+
+-%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c
++%LJCOMPILE% /O2 /I "." /I %DASMDIR% host\buildvm*.c
+ @if errorlevel 1 goto :BAD
+ %LJLINK% /out:buildvm.exe buildvm*.obj
+ @if errorlevel 1 goto :BAD
+@@ -75,26 +72,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+
+ @if "%1" neq "debug" goto :NODEBUG
+ @shift
+-@set BUILDTYPE=debug
+-@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS%
+-@set LJLINK=%LJLINK% /opt:ref /opt:icf /incremental:no
++@set LJCOMPILE=%LJCOMPILE% /O0 /Z7
++@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no
++@set LJCRTDBG=d
++@goto :ENDDEBUG
+ :NODEBUG
+-@set LJLINK=%LJLINK% /%BUILDTYPE%
++@set LJCOMPILE=%LJCOMPILE% /O2 /Z7
++@set LJLINK=%LJLINK% /release /incremental:no
++@set LJCRTDBG=
++:ENDDEBUG
+ @if "%1"=="amalg" goto :AMALGDLL
+ @if "%1"=="static" goto :STATIC
+-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
++@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%
++%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
+ @if errorlevel 1 goto :BAD
+ %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj
+ @if errorlevel 1 goto :BAD
+ @goto :MTDLL
+ :STATIC
++@shift
++@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG%
+ %LJCOMPILE% lj_*.c lib_*.c
+ @if errorlevel 1 goto :BAD
+ %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj
+ @if errorlevel 1 goto :BAD
+ @goto :MTDLL
+ :AMALGDLL
+-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c
++@shift
++@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%
++%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c
+ @if errorlevel 1 goto :BAD
+ %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj
+ @if errorlevel 1 goto :BAD
diff --git a/build.py b/build.py
new file mode 100755
-index 0000000..9c71271
+index 0000000..3eb74ff
--- /dev/null
+++ b/build.py
@@ -0,0 +1,56 @@
@@ -168,7 +168,7 @@ index 0000000..9c71271
+ dst_dir = os.getcwd() + "/luajit"
+ shutil.copytree(src_dir, os.path.basename(src_dir))
+ os.chdir(os.path.basename(src_dir) + "/src")
-+ os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static')
++ os.system('msvcbuild.bat ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static')
+ os.makedirs(dst_dir + "/lib", exist_ok=True)
+ shutil.copy("lua51.lib", dst_dir + "/lib")
+ os.makedirs(dst_dir + "/include/luajit-2.1", exist_ok=True)
diff --git a/bazel/foreign_cc/moonjit.patch b/bazel/foreign_cc/moonjit.patch
index 99ac22fb04fe..5bb745875132 100644
--- a/bazel/foreign_cc/moonjit.patch
+++ b/bazel/foreign_cc/moonjit.patch
@@ -3,7 +3,7 @@ new file mode 100644
index 00000000..dab3606c
--- /dev/null
+++ b/build.py
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+import argparse
@@ -41,24 +41,7 @@ index 00000000..dab3606c
+
+ os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix))
+
-+def win_main():
-+ src_dir = os.path.dirname(os.path.realpath(__file__))
-+ dst_dir = os.getcwd() + "/moonjit"
-+ shutil.copytree(src_dir, os.path.basename(src_dir))
-+ os.chdir(os.path.basename(src_dir) + "/src")
-+ os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static')
-+ os.makedirs(dst_dir + "/lib", exist_ok=True)
-+ shutil.copy("lua51.lib", dst_dir + "/lib")
-+ os.makedirs(dst_dir + "/include/moonjit-2.2", exist_ok=True)
-+ for header in ["lauxlib.h", "luaconf.h", "lua.h", "lua.hpp", "luajit.h", "lualib.h"]:
-+ shutil.copy(header, dst_dir + "/include/moonjit-2.2")
-+ os.makedirs(dst_dir + "/bin", exist_ok=True)
-+ shutil.copy("luajit.exe", dst_dir + "/bin")
-+
-+if os.name == 'nt':
-+ win_main()
-+else:
-+ main()
++main()
+
diff --git a/src/Makefile b/src/Makefile
index dad9aeec..e10b3118 100644
@@ -104,78 +87,3 @@ index dad9aeec..e10b3118 100644
#E= @:
##############################################################################
-diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat
-index c2d2c212..71f24422 100644
---- a/src/msvcbuild.bat
-+++ b/src/msvcbuild.bat
-@@ -15,7 +15,7 @@
- @setlocal
- @rem Add more debug flags here, e.g. DEBUGCFLAGS=/DLUA_USE_APICHECK
- @set DEBUGCFLAGS=
--@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline
-+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT
- @set LJLINK=link /nologo
- @set LJMT=mt /nologo
- @set LJLIB=lib /nologo /nodefaultlib
-@@ -24,10 +24,9 @@
- @set DASC=vm_x86.dasc
- @set LJDLLNAME=lua51.dll
- @set LJLIBNAME=lua51.lib
--@set BUILDTYPE=release
- @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_utf8.c
-
--%LJCOMPILE% host\minilua.c
-+%LJCOMPILE% /O2 host\minilua.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /out:minilua.exe minilua.obj
- @if errorlevel 1 goto :BAD
-@@ -50,7 +49,7 @@ if exist minilua.exe.manifest^
- minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
- @if errorlevel 1 goto :BAD
-
--%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c
-+%LJCOMPILE% /O2 /I "." /I %DASMDIR% host\buildvm*.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /out:buildvm.exe buildvm*.obj
- @if errorlevel 1 goto :BAD
-@@ -74,25 +73,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
-
- @if "%1" neq "debug" goto :NODEBUG
- @shift
--@set BUILDTYPE=debug
--@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS%
-+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7
-+@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no
-+@set LJCRTDBG=d
-+@goto :ENDDEBUG
- :NODEBUG
--@set LJLINK=%LJLINK% /%BUILDTYPE%
-+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7
-+@set LJLINK=%LJLINK% /release /incremental:no
-+@set LJCRTDBG=
-+:ENDDEBUG
- @if "%1"=="amalg" goto :AMALGDLL
- @if "%1"=="static" goto :STATIC
--%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
-+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%
-+LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj
- @if errorlevel 1 goto :BAD
- @goto :MTDLL
- :STATIC
-+@shift
-+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG%
- %LJCOMPILE% lj_*.c lib_*.c
- @if errorlevel 1 goto :BAD
- %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj
- @if errorlevel 1 goto :BAD
- @goto :MTDLL
- :AMALGDLL
--%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c
-+@shift
-+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG%
-+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c
- @if errorlevel 1 goto :BAD
- %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj
- @if errorlevel 1 goto :BAD
diff --git a/bazel/gen_sh_test_runner.sh b/bazel/gen_sh_test_runner.sh
index 8e33707db49b..5665ce914814 100755
--- a/bazel/gen_sh_test_runner.sh
+++ b/bazel/gen_sh_test_runner.sh
@@ -14,7 +14,7 @@ TEST_NAME="${RAW_TEST_NAME//./_}"
EXEC_ARGS="\"$1\""
shift
-for a in $@
+for a in "$@"
do
EXEC_ARGS="${EXEC_ARGS}, \"$a\""
done
diff --git a/bazel/get_workspace_status b/bazel/get_workspace_status
index 82bb7593c7f3..4cfce22fd2d2 100755
--- a/bazel/get_workspace_status
+++ b/bazel/get_workspace_status
@@ -29,21 +29,14 @@ then
fi
# The code below presents an implementation that works for git repository
-git_rev=$(git rev-parse HEAD)
-if [[ $? != 0 ]];
-then
- exit 1
-fi
+git_rev=$(git rev-parse HEAD) || exit 1
echo "BUILD_SCM_REVISION ${git_rev}"
echo "STABLE_BUILD_SCM_REVISION ${git_rev}"
# Check whether there are any uncommitted changes
-git diff-index --quiet HEAD --
-if [[ $? == 0 ]];
-then
- tree_status="Clean"
-else
+tree_status="Clean"
+git diff-index --quiet HEAD -- || {
tree_status="Modified"
-fi
+}
echo "BUILD_SCM_STATUS ${tree_status}"
echo "STABLE_BUILD_SCM_STATUS ${tree_status}"
diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl
index ce30752e1e94..5047a52141f0 100644
--- a/bazel/repositories.bzl
+++ b/bazel/repositories.bzl
@@ -1,9 +1,10 @@
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load(":dev_binding.bzl", "envoy_dev_binding")
load(":genrule_repository.bzl", "genrule_repository")
load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive")
-load(":repository_locations.bzl", "DEPENDENCY_ANNOTATIONS", "DEPENDENCY_REPOSITORIES", "USE_CATEGORIES", "USE_CATEGORIES_WITH_CPE_OPTIONAL")
+load("@envoy_api//bazel:external_deps.bzl", "load_repository_locations")
+load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC")
load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load(":crates.bzl", "raze_fetch_remote_crates")
PPC_SKIP_TARGETS = ["envoy.filters.http.lua"]
@@ -16,70 +17,29 @@ WINDOWS_SKIP_TARGETS = [
# Make all contents of an external repository accessible under a filegroup. Used for external HTTP
# archives, e.g. cares.
-BUILD_ALL_CONTENT = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
-
-def _fail_missing_attribute(attr, key):
- fail("The '%s' attribute must be defined for external dependecy " % attr + key)
-
-# Method for verifying content of the DEPENDENCY_REPOSITORIES defined in bazel/repository_locations.bzl
-# Verification is here so that bazel/repository_locations.bzl can be loaded into other tools written in Python,
-# and as such needs to be free of bazel specific constructs.
-#
-# We also remove the attributes for further consumption in this file, since rules such as http_archive
-# don't recognize them.
-def _repository_locations():
- locations = {}
- for key, location in DEPENDENCY_REPOSITORIES.items():
- mutable_location = dict(location)
- locations[key] = mutable_location
-
- if "sha256" not in location or len(location["sha256"]) == 0:
- _fail_missing_attribute("sha256", key)
-
- if "project_name" not in location:
- _fail_missing_attribute("project_name", key)
- mutable_location.pop("project_name")
-
- if "project_url" not in location:
- _fail_missing_attribute("project_url", key)
- mutable_location.pop("project_url")
-
- if "version" not in location:
- _fail_missing_attribute("version", key)
- mutable_location.pop("version")
-
- if "use_category" not in location:
- _fail_missing_attribute("use_category", key)
- mutable_location.pop("use_category")
-
- if "cpe" in location:
- mutable_location.pop("cpe")
- elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]:
- _fail_missing_attribute("cpe", key)
-
- for category in location["use_category"]:
- if category not in USE_CATEGORIES:
- fail("Unknown use_category value '" + category + "' for dependecy " + key)
-
- return locations
-
-REPOSITORY_LOCATIONS = _repository_locations()
-
-# To initialize http_archive REPOSITORY_LOCATIONS dictionaries must be stripped of annotations.
-# See repository_locations.bzl for the list of annotation attributes.
-def _get_location(dependency):
- stripped = dict(REPOSITORY_LOCATIONS[dependency])
- for attribute in DEPENDENCY_ANNOTATIONS:
- stripped.pop(attribute, None)
- return stripped
-
-def _repository_impl(name, **kwargs):
+def _build_all_content(exclude = []):
+ return """filegroup(name = "all", srcs = glob(["**"], exclude={}), visibility = ["//visibility:public"])""".format(repr(exclude))
+
+BUILD_ALL_CONTENT = _build_all_content()
+
+REPOSITORY_LOCATIONS = load_repository_locations(REPOSITORY_LOCATIONS_SPEC)
+
+# Use this macro to reference any HTTP archive from bazel/repository_locations.bzl.
+def external_http_archive(name, **kwargs):
envoy_http_archive(
name,
locations = REPOSITORY_LOCATIONS,
**kwargs
)
+# Use this macro to reference any genrule_repository sourced from bazel/repository_locations.bzl.
+def external_genrule_repository(name, **kwargs):
+ location = REPOSITORY_LOCATIONS[name]
+ genrule_repository(
+ name = name,
+ **dict(location, **kwargs)
+ )
+
def _default_envoy_build_config_impl(ctx):
ctx.file("WORKSPACE", "")
ctx.file("BUILD.bazel", "")
@@ -95,26 +55,26 @@ _default_envoy_build_config = repository_rule(
# Python dependencies.
def _python_deps():
# TODO(htuch): convert these to pip3_import.
- _repository_impl(
+ external_http_archive(
name = "com_github_twitter_common_lang",
build_file = "@envoy//bazel/external:twitter_common_lang.BUILD",
)
- _repository_impl(
+ external_http_archive(
name = "com_github_twitter_common_rpc",
build_file = "@envoy//bazel/external:twitter_common_rpc.BUILD",
)
- _repository_impl(
+ external_http_archive(
name = "com_github_twitter_common_finagle_thrift",
build_file = "@envoy//bazel/external:twitter_common_finagle_thrift.BUILD",
)
- _repository_impl(
+ external_http_archive(
name = "six",
build_file = "@com_google_protobuf//third_party:six.BUILD",
)
# Bazel native C++ dependencies. For dependencies that don't provide autoconf/automake builds.
def _cc_deps():
- _repository_impl("grpc_httpjson_transcoding")
+ external_http_archive("grpc_httpjson_transcoding")
native.bind(
name = "path_matcher",
actual = "@grpc_httpjson_transcoding//src:path_matcher",
@@ -128,13 +88,17 @@ def _go_deps(skip_targets):
# Keep the skip_targets check around until Istio Proxy has stopped using
# it to exclude the Go rules.
if "io_bazel_rules_go" not in skip_targets:
- _repository_impl(
+ external_http_archive(
name = "io_bazel_rules_go",
# TODO(wrowe, sunjayBhatia): remove when Windows RBE supports batch file invocation
patch_args = ["-p1"],
patches = ["@envoy//bazel:rules_go.patch"],
)
- _repository_impl("bazel_gazelle")
+ external_http_archive("bazel_gazelle")
+
+def _rust_deps():
+ external_http_archive("io_bazel_rules_rust")
+ raze_fetch_remote_crates()
def envoy_dependencies(skip_targets = []):
# Setup Envoy developer tools.
@@ -172,6 +136,7 @@ def envoy_dependencies(skip_targets = []):
_com_github_google_benchmark()
_com_github_google_jwt_verify()
_com_github_google_libprotobuf_mutator()
+ _com_github_google_tcmalloc()
_com_github_gperftools_gperftools()
_com_github_grpc_grpc()
_com_github_jbeder_yaml_cpp()
@@ -198,14 +163,13 @@ def envoy_dependencies(skip_targets = []):
_proxy_wasm_cpp_sdk()
_proxy_wasm_cpp_host()
_emscripten_toolchain()
- _repository_impl("com_googlesource_code_re2")
+ external_http_archive("com_googlesource_code_re2")
_com_google_cel_cpp()
- _repository_impl("com_github_google_flatbuffers")
- _repository_impl("bazel_toolchains")
- _repository_impl("bazel_compdb")
- _repository_impl("envoy_build_tools")
- _repository_impl("rules_cc")
- _org_unicode_icuuc()
+ external_http_archive("com_github_google_flatbuffers")
+ external_http_archive("bazel_toolchains")
+ external_http_archive("bazel_compdb")
+ external_http_archive("envoy_build_tools")
+ external_http_archive("rules_cc")
# Unconditional, since we use this only for compiler-agnostic fuzzing utils.
_org_llvm_releases_compiler_rt()
@@ -213,8 +177,12 @@ def envoy_dependencies(skip_targets = []):
_python_deps()
_cc_deps()
_go_deps(skip_targets)
+ _rust_deps()
_kafka_deps()
+ _org_llvm_llvm()
+ _com_github_wavm_wavm()
+
switched_rules_by_language(
name = "com_google_googleapis_imports",
cc = True,
@@ -230,25 +198,22 @@ def envoy_dependencies(skip_targets = []):
)
def _boringssl():
- _repository_impl(
+ external_http_archive(
name = "boringssl",
patch_args = ["-p1"],
patches = ["@envoy//bazel:boringssl_static.patch"],
)
def _boringssl_fips():
- location = REPOSITORY_LOCATIONS["boringssl_fips"]
- genrule_repository(
+ external_genrule_repository(
name = "boringssl_fips",
- urls = location["urls"],
- sha256 = location["sha256"],
genrule_cmd_file = "@envoy//bazel/external:boringssl_fips.genrule_cmd",
build_file = "@envoy//bazel/external:boringssl_fips.BUILD",
patches = ["@envoy//bazel/external:boringssl_fips.patch"],
)
def _com_github_circonus_labs_libcircllhist():
- _repository_impl(
+ external_http_archive(
name = "com_github_circonus_labs_libcircllhist",
build_file = "@envoy//bazel/external:libcircllhist.BUILD",
)
@@ -258,11 +223,9 @@ def _com_github_circonus_labs_libcircllhist():
)
def _com_github_c_ares_c_ares():
- location = _get_location("com_github_c_ares_c_ares")
- http_archive(
+ external_http_archive(
name = "com_github_c_ares_c_ares",
build_file_content = BUILD_ALL_CONTENT,
- **location
)
native.bind(
name = "ares",
@@ -270,7 +233,7 @@ def _com_github_c_ares_c_ares():
)
def _com_github_cyan4973_xxhash():
- _repository_impl(
+ external_http_archive(
name = "com_github_cyan4973_xxhash",
build_file = "@envoy//bazel/external:xxhash.BUILD",
)
@@ -280,7 +243,7 @@ def _com_github_cyan4973_xxhash():
)
def _com_github_envoyproxy_sqlparser():
- _repository_impl(
+ external_http_archive(
name = "com_github_envoyproxy_sqlparser",
build_file = "@envoy//bazel/external:sqlparser.BUILD",
)
@@ -290,7 +253,7 @@ def _com_github_envoyproxy_sqlparser():
)
def _com_github_mirror_tclap():
- _repository_impl(
+ external_http_archive(
name = "com_github_mirror_tclap",
build_file = "@envoy//bazel/external:tclap.BUILD",
patch_args = ["-p1"],
@@ -306,7 +269,7 @@ def _com_github_mirror_tclap():
)
def _com_github_fmtlib_fmt():
- _repository_impl(
+ external_http_archive(
name = "com_github_fmtlib_fmt",
build_file = "@envoy//bazel/external:fmtlib.BUILD",
)
@@ -316,7 +279,7 @@ def _com_github_fmtlib_fmt():
)
def _com_github_gabime_spdlog():
- _repository_impl(
+ external_http_archive(
name = "com_github_gabime_spdlog",
build_file = "@envoy//bazel/external:spdlog.BUILD",
)
@@ -326,10 +289,8 @@ def _com_github_gabime_spdlog():
)
def _com_github_google_benchmark():
- location = _get_location("com_github_google_benchmark")
- http_archive(
+ external_http_archive(
name = "com_github_google_benchmark",
- **location
)
native.bind(
name = "benchmark",
@@ -337,13 +298,13 @@ def _com_github_google_benchmark():
)
def _com_github_google_libprotobuf_mutator():
- _repository_impl(
+ external_http_archive(
name = "com_github_google_libprotobuf_mutator",
build_file = "@envoy//bazel/external:libprotobuf_mutator.BUILD",
)
def _com_github_jbeder_yaml_cpp():
- _repository_impl(
+ external_http_archive(
name = "com_github_jbeder_yaml_cpp",
)
native.bind(
@@ -352,11 +313,9 @@ def _com_github_jbeder_yaml_cpp():
)
def _com_github_libevent_libevent():
- location = _get_location("com_github_libevent_libevent")
- http_archive(
+ external_http_archive(
name = "com_github_libevent_libevent",
build_file_content = BUILD_ALL_CONTENT,
- **location
)
native.bind(
name = "event",
@@ -364,7 +323,7 @@ def _com_github_libevent_libevent():
)
def _net_zlib():
- _repository_impl(
+ external_http_archive(
name = "net_zlib",
build_file_content = BUILD_ALL_CONTENT,
patch_args = ["-p1"],
@@ -383,16 +342,19 @@ def _net_zlib():
)
def _com_github_zlib_ng_zlib_ng():
- _repository_impl(
+ external_http_archive(
name = "com_github_zlib_ng_zlib_ng",
build_file_content = BUILD_ALL_CONTENT,
)
def _com_google_cel_cpp():
- _repository_impl("com_google_cel_cpp")
- _repository_impl("rules_antlr")
- location = _get_location("antlr4_runtimes")
- http_archive(
+ external_http_archive("com_google_cel_cpp")
+ external_http_archive("rules_antlr")
+
+ # Parser dependencies
+ # TODO: upgrade this when cel is upgraded to use the latest version
+ external_http_archive(name = "rules_antlr")
+ external_http_archive(
name = "antlr4_runtimes",
build_file_content = """
package(default_visibility = ["//visibility:public"])
@@ -406,12 +368,10 @@ cc_library(
patch_args = ["-p1"],
# Patches ASAN violation of initialization fiasco
patches = ["@envoy//bazel:antlr.patch"],
- **location
)
def _com_github_nghttp2_nghttp2():
- location = _get_location("com_github_nghttp2_nghttp2")
- http_archive(
+ external_http_archive(
name = "com_github_nghttp2_nghttp2",
build_file_content = BUILD_ALL_CONTENT,
patch_args = ["-p1"],
@@ -420,7 +380,6 @@ def _com_github_nghttp2_nghttp2():
# https://github.com/nghttp2/nghttp2/pull/1395
# https://github.com/envoyproxy/envoy/pull/8572#discussion_r334067786
patches = ["@envoy//bazel/foreign_cc:nghttp2.patch"],
- **location
)
native.bind(
name = "nghttp2",
@@ -428,7 +387,7 @@ def _com_github_nghttp2_nghttp2():
)
def _io_opentracing_cpp():
- _repository_impl(
+ external_http_archive(
name = "io_opentracing_cpp",
patch_args = ["-p1"],
# Workaround for LSAN false positive in https://github.com/envoyproxy/envoy/issues/7647
@@ -440,15 +399,15 @@ def _io_opentracing_cpp():
)
def _com_lightstep_tracer_cpp():
- _repository_impl("com_lightstep_tracer_cpp")
+ external_http_archive("com_lightstep_tracer_cpp")
native.bind(
name = "lightstep",
actual = "@com_lightstep_tracer_cpp//:manual_tracer_lib",
)
def _com_github_datadog_dd_opentracing_cpp():
- _repository_impl("com_github_datadog_dd_opentracing_cpp")
- _repository_impl(
+ external_http_archive("com_github_datadog_dd_opentracing_cpp")
+ external_http_archive(
name = "com_github_msgpack_msgpack_c",
build_file = "@com_github_datadog_dd_opentracing_cpp//:bazel/external/msgpack.BUILD",
)
@@ -458,7 +417,7 @@ def _com_github_datadog_dd_opentracing_cpp():
)
def _com_github_tencent_rapidjson():
- _repository_impl(
+ external_http_archive(
name = "com_github_tencent_rapidjson",
build_file = "@envoy//bazel/external:rapidjson.BUILD",
)
@@ -468,7 +427,7 @@ def _com_github_tencent_rapidjson():
)
def _com_github_nodejs_http_parser():
- _repository_impl(
+ external_http_archive(
name = "com_github_nodejs_http_parser",
build_file = "@envoy//bazel/external:http-parser.BUILD",
)
@@ -478,7 +437,7 @@ def _com_github_nodejs_http_parser():
)
def _com_google_googletest():
- _repository_impl("com_google_googletest")
+ external_http_archive("com_google_googletest")
native.bind(
name = "googletest",
actual = "@com_google_googletest//:gtest",
@@ -489,7 +448,7 @@ def _com_google_googletest():
# pull in more bits of abseil as needed, and is now the preferred
# method for pure Bazel deps.
def _com_google_absl():
- _repository_impl("com_google_absl")
+ external_http_archive("com_google_absl")
native.bind(
name = "abseil_any",
actual = "@com_google_absl//absl/types:any",
@@ -592,8 +551,8 @@ def _com_google_absl():
)
def _com_google_protobuf():
- _repository_impl("rules_python")
- _repository_impl(
+ external_http_archive("rules_python")
+ external_http_archive(
"com_google_protobuf",
patches = ["@envoy//bazel:protobuf.patch"],
patch_args = ["-p1"],
@@ -624,10 +583,8 @@ def _com_google_protobuf():
)
def _io_opencensus_cpp():
- location = _get_location("io_opencensus_cpp")
- http_archive(
+ external_http_archive(
name = "io_opencensus_cpp",
- **location
)
native.bind(
name = "opencensus_trace",
@@ -668,15 +625,21 @@ def _io_opencensus_cpp():
def _com_github_curl():
# Used by OpenCensus Zipkin exporter.
- location = _get_location("com_github_curl")
- http_archive(
+ external_http_archive(
name = "com_github_curl",
build_file_content = BUILD_ALL_CONTENT + """
cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy//bazel/foreign_cc:curl"])
""",
- patches = ["@envoy//bazel/foreign_cc:curl-revert-cmake-minreqver.patch"],
+        # Patch curl 7.72.0 due to CMake's problematic implementation of policy `CMP0091`,
+        # introduced in CMake 3.15 and deprecated in CMake 3.18. Curl capping the CMake
+        # policy version at 3.16 breaks the Envoy Windows fastbuild target.
+        # Also fix a faulty assumption when creating a static library with LLVM
+        # `lld-link.exe`, where dynamic link flags were added, breaking the Envoy
+        # clang-cl library archive step.
+ # Upstream patch submitted: https://github.com/curl/curl/pull/6050
+ # TODO(https://github.com/envoyproxy/envoy/issues/11816): This patch is obsoleted
+ # by elimination of the curl dependency.
+ patches = ["@envoy//bazel/foreign_cc:curl.patch"],
patch_args = ["-p1"],
- **location
)
native.bind(
name = "curl",
@@ -684,13 +647,11 @@ cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy/
)
def _com_googlesource_chromium_v8():
- location = _get_location("com_googlesource_chromium_v8")
- genrule_repository(
+ external_genrule_repository(
name = "com_googlesource_chromium_v8",
genrule_cmd_file = "@envoy//bazel/external:wee8.genrule_cmd",
build_file = "@envoy//bazel/external:wee8.BUILD",
patches = ["@envoy//bazel/external:wee8.patch"],
- **location
)
native.bind(
name = "wee8",
@@ -698,11 +659,8 @@ def _com_googlesource_chromium_v8():
)
def _com_googlesource_quiche():
- location = REPOSITORY_LOCATIONS["com_googlesource_quiche"]
- genrule_repository(
+ external_genrule_repository(
name = "com_googlesource_quiche",
- urls = location["urls"],
- sha256 = location["sha256"],
genrule_cmd_file = "@envoy//bazel/external:quiche.genrule_cmd",
build_file = "@envoy//bazel/external:quiche.BUILD",
)
@@ -728,10 +686,8 @@ def _com_googlesource_quiche():
)
def _com_googlesource_googleurl():
- _repository_impl(
+ external_http_archive(
name = "com_googlesource_googleurl",
- patches = ["@envoy//bazel/external:googleurl.patch"],
- patch_args = ["-p1"],
)
native.bind(
name = "googleurl",
@@ -739,14 +695,14 @@ def _com_googlesource_googleurl():
)
def _org_llvm_releases_compiler_rt():
- _repository_impl(
+ external_http_archive(
name = "org_llvm_releases_compiler_rt",
build_file = "@envoy//bazel/external:compiler_rt.BUILD",
)
def _com_github_grpc_grpc():
- _repository_impl("com_github_grpc_grpc")
- _repository_impl("build_bazel_rules_apple")
+ external_http_archive("com_github_grpc_grpc")
+ external_http_archive("build_bazel_rules_apple")
# Rebind some stuff to match what the gRPC Bazel is expecting.
native.bind(
@@ -788,7 +744,7 @@ def _com_github_grpc_grpc():
)
def _upb():
- _repository_impl(
+ external_http_archive(
name = "upb",
patches = ["@envoy//bazel:upb.patch"],
patch_args = ["-p1"],
@@ -800,23 +756,28 @@ def _upb():
)
def _proxy_wasm_cpp_sdk():
- _repository_impl(name = "proxy_wasm_cpp_sdk")
+ external_http_archive(name = "proxy_wasm_cpp_sdk")
def _proxy_wasm_cpp_host():
- _repository_impl(
+ external_http_archive(
name = "proxy_wasm_cpp_host",
build_file = "@envoy//bazel/external:proxy_wasm_cpp_host.BUILD",
)
def _emscripten_toolchain():
- _repository_impl(
+ external_http_archive(
name = "emscripten_toolchain",
- build_file_content = BUILD_ALL_CONTENT,
- patch_cmds = REPOSITORY_LOCATIONS["emscripten_toolchain"]["patch_cmds"],
+ build_file_content = _build_all_content(exclude = [
+ "upstream/emscripten/cache/is_vanilla.txt",
+ ".emscripten_sanity",
+ ]),
+ patch_cmds = [
+ "[[ \"$(uname -m)\" == \"x86_64\" ]] && ./emsdk install 1.39.6-upstream && ./emsdk activate --embedded 1.39.6-upstream || true",
+ ],
)
def _com_github_google_jwt_verify():
- _repository_impl("com_github_google_jwt_verify")
+ external_http_archive("com_github_google_jwt_verify")
native.bind(
name = "jwt_verify_lib",
@@ -824,14 +785,12 @@ def _com_github_google_jwt_verify():
)
def _com_github_luajit_luajit():
- location = _get_location("com_github_luajit_luajit")
- http_archive(
+ external_http_archive(
name = "com_github_luajit_luajit",
build_file_content = BUILD_ALL_CONTENT,
patches = ["@envoy//bazel/foreign_cc:luajit.patch"],
patch_args = ["-p1"],
patch_cmds = ["chmod u+x build.py"],
- **location
)
native.bind(
@@ -840,14 +799,12 @@ def _com_github_luajit_luajit():
)
def _com_github_moonjit_moonjit():
- location = _get_location("com_github_moonjit_moonjit")
- http_archive(
+ external_http_archive(
name = "com_github_moonjit_moonjit",
build_file_content = BUILD_ALL_CONTENT,
patches = ["@envoy//bazel/foreign_cc:moonjit.patch"],
patch_args = ["-p1"],
patch_cmds = ["chmod u+x build.py"],
- **location
)
native.bind(
@@ -855,19 +812,48 @@ def _com_github_moonjit_moonjit():
actual = "@envoy//bazel/foreign_cc:moonjit",
)
+def _com_github_google_tcmalloc():
+ external_http_archive(
+ name = "com_github_google_tcmalloc",
+ )
+
+ native.bind(
+ name = "tcmalloc",
+ actual = "@com_github_google_tcmalloc//tcmalloc",
+ )
+
def _com_github_gperftools_gperftools():
- location = _get_location("com_github_gperftools_gperftools")
- http_archive(
+ external_http_archive(
name = "com_github_gperftools_gperftools",
build_file_content = BUILD_ALL_CONTENT,
- **location
)
-
native.bind(
name = "gperftools",
actual = "@envoy//bazel/foreign_cc:gperftools",
)
+def _org_llvm_llvm():
+ external_http_archive(
+ name = "org_llvm_llvm",
+ build_file_content = BUILD_ALL_CONTENT,
+ patch_args = ["-p1"],
+ patches = ["@envoy//bazel/foreign_cc:llvm.patch"],
+ )
+ native.bind(
+ name = "llvm",
+ actual = "@envoy//bazel/foreign_cc:llvm",
+ )
+
+def _com_github_wavm_wavm():
+ external_http_archive(
+ name = "com_github_wavm_wavm",
+ build_file_content = BUILD_ALL_CONTENT,
+ )
+ native.bind(
+ name = "wavm",
+ actual = "@envoy//bazel/foreign_cc:wavm",
+ )
+
def _kafka_deps():
# This archive contains Kafka client source code.
# We are using request/response message format files to generate parser code.
@@ -883,37 +869,28 @@ filegroup(
visibility = ["//visibility:public"],
)
"""
- http_archive(
+ external_http_archive(
name = "kafka_source",
build_file_content = KAFKASOURCE_BUILD_CONTENT,
patches = ["@envoy//bazel/external:kafka_int32.patch"],
- **_get_location("kafka_source")
)
    # This archive provides Kafka (and Zookeeper) binaries that are used during Kafka integration
    # tests.
- http_archive(
+ external_http_archive(
name = "kafka_server_binary",
build_file_content = BUILD_ALL_CONTENT,
- **_get_location("kafka_server_binary")
)
    # This archive provides a Kafka client in Python, so we can use it to interact with the Kafka
    # server during integration tests.
- http_archive(
+ external_http_archive(
name = "kafka_python_client",
build_file_content = BUILD_ALL_CONTENT,
- **_get_location("kafka_python_client")
- )
-
-def _org_unicode_icuuc():
- _repository_impl(
- name = "org_unicode_icuuc",
- build_file = "@envoy//bazel/external:icuuc.BUILD",
)
def _foreign_cc_dependencies():
- _repository_impl("rules_foreign_cc")
+ external_http_archive("rules_foreign_cc")
def _is_linux(ctxt):
return ctxt.os.name == "linux"
diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl
index 8e19344926b8..3aafc9528d80 100644
--- a/bazel/repositories_extra.bzl
+++ b/bazel/repositories_extra.bzl
@@ -7,59 +7,94 @@ def _python_deps():
pip_repositories()
pip3_import(
+ name = "config_validation_pip3",
+ requirements = "@envoy//tools/config_validation:requirements.txt",
+ extra_pip_args = ["--require-hashes"],
+
# project_name = "PyYAML",
# project_url = "https://github.com/yaml/pyyaml",
# version = "5.3.1",
- # use_category = ["other"],
+ # last_update = "2020-03-18"
+ # use_category = ["devtools"],
# cpe = "cpe:2.3:a:pyyaml:pyyaml:*",
- name = "config_validation_pip3",
- requirements = "@envoy//tools/config_validation:requirements.txt",
- extra_pip_args = ["--require-hashes"],
)
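
--require-hashes puts pip into hash-checking mode: every entry in the referenced requirements.txt must be pinned to an exact version and carry one or more --hash options, or the import fails. An illustrative entry (the digest below is a placeholder, not the real pin):

PyYAML==5.3.1 \
    --hash=sha256:<digest-of-the-published-sdist-or-wheel>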
pip3_import(
+ name = "configs_pip3",
+ requirements = "@envoy//configs:requirements.txt",
+ extra_pip_args = ["--require-hashes"],
+
# project_name = "Jinja",
# project_url = "http://palletsprojects.com/p/jinja",
# version = "2.11.2",
+ # last_update = "2020-04-13"
# use_category = ["test"],
# cpe = "cpe:2.3:a:palletsprojects:jinja:*",
- name = "configs_pip3",
- requirements = "@envoy//configs:requirements.txt",
- extra_pip_args = ["--require-hashes"],
+
+ # project_name = "MarkupSafe",
+ # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/",
+ # version = "1.1.1",
+ # last_update = "2019-02-23"
+ # use_category = ["test"],
)
pip3_import(
+ name = "kafka_pip3",
+ requirements = "@envoy//source/extensions/filters/network/kafka:requirements.txt",
+ extra_pip_args = ["--require-hashes"],
+
# project_name = "Jinja",
# project_url = "http://palletsprojects.com/p/jinja",
# version = "2.11.2",
+ # last_update = "2020-04-13"
# use_category = ["test"],
# cpe = "cpe:2.3:a:palletsprojects:jinja:*",
- name = "kafka_pip3",
- requirements = "@envoy//source/extensions/filters/network/kafka:requirements.txt",
- extra_pip_args = ["--require-hashes"],
+
+ # project_name = "MarkupSafe",
+ # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/",
+ # version = "1.1.1",
+ # last_update = "2019-02-23"
+ # use_category = ["test"],
)
pip3_import(
name = "headersplit_pip3",
requirements = "@envoy//tools/envoy_headersplit:requirements.txt",
extra_pip_args = ["--require-hashes"],
+
+ # project_name = "Clang",
+ # project_url = "https://clang.llvm.org/",
+ # version = "10.0.1",
+ # last_update = "2020-07-21"
+ # use_category = ["devtools"],
+ # cpe = "cpe:2.3:a:llvm:clang:*",
)
pip3_import(
+ name = "protodoc_pip3",
+ requirements = "@envoy//tools/protodoc:requirements.txt",
+ extra_pip_args = ["--require-hashes"],
+
# project_name = "PyYAML",
# project_url = "https://github.com/yaml/pyyaml",
# version = "5.3.1",
- # use_category = ["other"],
+ # last_update = "2020-03-18"
+ # use_category = ["docs"],
# cpe = "cpe:2.3:a:pyyaml:pyyaml:*",
- name = "protodoc_pip3",
- requirements = "@envoy//tools/protodoc:requirements.txt",
- extra_pip_args = ["--require-hashes"],
)
pip3_import(
+ name = "thrift_pip3",
+ requirements = "@envoy//test/extensions/filters/network/thrift_proxy:requirements.txt",
+ extra_pip_args = ["--require-hashes"],
+
# project_name = "Apache Thrift",
# project_url = "http://thrift.apache.org/",
# version = "0.11.0",
- # use_category = ["dataplane"],
+ # last_update = "2017-12-07"
+ # use_category = ["test"],
# cpe = "cpe:2.3:a:apache:thrift:*",
- name = "thrift_pip3",
- requirements = "@envoy//test/extensions/filters/network/thrift_proxy:requirements.txt",
- extra_pip_args = ["--require-hashes"],
+
+ # project_name = "Six: Python 2 and 3 Compatibility Library",
+ # project_url = "https://six.readthedocs.io/",
+ # version = "1.15.0",
+ # last_update = "2020-05-21"
+ # use_category = ["test"],
)
# Envoy deps that rely on a first stage of dependency loading in envoy_dependencies().
diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl
index e1cddcc8513d..6a631c5a3e6b 100644
--- a/bazel/repository_locations.bzl
+++ b/bazel/repository_locations.bzl
@@ -1,65 +1,29 @@
-# Validation of content in this file is done on the bazel/repositories.bzl file to make it free of bazel
-# constructs. This is to allow this file to be loaded into Python based build and maintenance tools.
-
-# Envoy dependencies may be annotated with the following attributes:
-DEPENDENCY_ANNOTATIONS = [
- # List of the categories describing how the dependency is being used. This attribute is used
- # for automatic tracking of security posture of Envoy's dependencies.
- # Possible values are documented in the USE_CATEGORIES list below.
- # This attribute is mandatory for each dependency.
- "use_category",
-
- # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID
- # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is preferred. See
- # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements
- # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned.
- # This attribute is optional for components with use categories listed in the
- # USE_CATEGORIES_WITH_CPE_OPTIONAL
- "cpe",
-]
-
-# NOTE: If a dependency's use case is either dataplane or controlplane, the other uses need not
-# be declared.
-USE_CATEGORIES = [
- # This dependency is used in API protos.
- "api",
- # This dependency is used in build process.
- "build",
- # This dependency is used to process xDS requests.
- "controlplane",
- # This dependency is used in processing downstream or upstream requests.
- "dataplane",
- # This dependency is used for logging, metrics or tracing. It may process untrusted input.
- "observability",
- # This dependency does not handle untrusted data and is used for various utility purposes.
- "other",
- # This dependency is used for unit tests.
- "test",
-]
-
-# Components with these use categories are not required to specify the 'cpe' annotation.
-USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test"]
-
-DEPENDENCY_REPOSITORIES_SPEC = dict(
+# This should match the schema defined in external_deps.bzl.
+REPOSITORY_LOCATIONS_SPEC = dict(
bazel_compdb = dict(
- project_name = "bazil-compilation-database",
+ project_name = "bazel-compilation-database",
+ project_desc = "Clang JSON compilation database support for Bazel",
project_url = "https://github.com/grailbio/bazel-compilation-database",
version = "0.4.5",
sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4",
strip_prefix = "bazel-compilation-database-{version}",
urls = ["https://github.com/grailbio/bazel-compilation-database/archive/{version}.tar.gz"],
+ last_updated = "2020-08-01",
use_category = ["build"],
),
bazel_gazelle = dict(
project_name = "Gazelle",
+ project_desc = "Bazel BUILD file generator for Go projects",
project_url = "https://github.com/bazelbuild/bazel-gazelle",
version = "0.21.1",
sha256 = "cdb02a887a7187ea4d5a27452311a75ed8637379a1287d8eeb952138ea485f7d",
urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz"],
+ last_updated = "2020-05-28",
use_category = ["build"],
),
bazel_toolchains = dict(
project_name = "bazel-toolchains",
+ project_desc = "Bazel toolchain configs for RBE",
project_url = "https://github.com/bazelbuild/bazel-toolchains",
version = "3.4.1",
sha256 = "7ebb200ed3ca3d1f7505659c7dfed01c4b5cb04c3a6f34140726fe22f5d35e86",
@@ -68,238 +32,304 @@ DEPENDENCY_REPOSITORIES_SPEC = dict(
"https://github.com/bazelbuild/bazel-toolchains/releases/download/{version}/bazel-toolchains-{version}.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/{version}.tar.gz",
],
+ last_updated = "2020-08-10",
use_category = ["build"],
),
build_bazel_rules_apple = dict(
project_name = "Apple Rules for Bazel",
+ project_desc = "Bazel rules for Apple platforms",
project_url = "https://github.com/bazelbuild/rules_apple",
version = "0.19.0",
sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42",
urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"],
+ last_updated = "2020-10-10",
use_category = ["build"],
),
envoy_build_tools = dict(
project_name = "envoy-build-tools",
+ project_desc = "Common build tools shared by the Envoy/UDPA ecosystem",
project_url = "https://github.com/envoyproxy/envoy-build-tools",
- version = "2d13ad4157997715a4939bd218a89c81c26ff28e",
- sha256 = "0dc8ce5eb645ae069ce710c1010975456f723ffd4fc788a03dacfcd0647b05b9",
+ version = "0ba5aa98a6e6c5efcc63f53602f69548d2417683",
+ sha256 = "dc3881d16e7b0c855a7279f5757d55e4aa55fe2befbd9e34215b971818622f9e",
strip_prefix = "envoy-build-tools-{version}",
- # 2020-08-21
urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"],
+ last_updated = "2020-10-01",
use_category = ["build"],
),
boringssl = dict(
project_name = "BoringSSL",
+ project_desc = "Minimal OpenSSL fork",
project_url = "https://github.com/google/boringssl",
- version = "597b810379e126ae05d32c1d94b1a9464385acd0",
- sha256 = "1ea42456c020daf0a9b0f9e8d8bc3a403c9314f4f54230c617257af996cd5fa6",
+ version = "2192bbc878822cf6ab5977d4257a1339453d9d39",
+ sha256 = "bb55b0ed2f0cb548b5dce6a6b8307ce37f7f748eb9f1be6bfe2d266ff2b4d52b",
strip_prefix = "boringssl-{version}",
# To update BoringSSL, which tracks Chromium releases:
# 1. Open https://omahaproxy.appspot.com/ and note <current_version> of linux/stable release.
# 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags/<current_version>/DEPS and note <boringssl_revision>.
# 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges <boringssl_revision>.
#
- # chromium-85.0.4183.83
- # 2020-06-23
+ # chromium-86.0.4240.80
urls = ["https://github.com/google/boringssl/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2020-07-30",
+ cpe = "cpe:2.3:a:google:boringssl:*",
),
boringssl_fips = dict(
project_name = "BoringSSL (FIPS)",
+ project_desc = "FIPS compliant BoringSSL",
project_url = "https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md",
version = "fips-20190808",
sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8",
urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2019-08-08",
+ cpe = "cpe:2.3:a:google:boringssl:*",
),
com_google_absl = dict(
project_name = "Abseil",
+ project_desc = "Open source collection of C++ libraries drawn from the most fundamental pieces of Google’s internal codebase",
project_url = "https://abseil.io/",
- version = "ce4bc927755fdf0ed03d679d9c7fa041175bb3cb",
- sha256 = "573baccd67aa591b8c7209bfb0c77e0d15633d77ced39d1ccbb1232828f7f7d9",
+ version = "093cc27604df1c4a179b73bc3f00d4d1ce2ce113",
+ sha256 = "55d33c75aff05a8c4a55bdf0eddad66c71a963107bc2add96cf8eb88ddb47a80",
strip_prefix = "abseil-cpp-{version}",
- # 2020-08-08
urls = ["https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz"],
- use_category = ["dataplane", "controlplane"],
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-10-01",
cpe = "N/A",
),
com_github_c_ares_c_ares = dict(
project_name = "c-ares",
+ project_desc = "C library for asynchronous DNS requests",
project_url = "https://c-ares.haxx.se/",
version = "1.16.1",
sha256 = "d08312d0ecc3bd48eee0a4cc0d2137c9f194e0a28de2028928c0f6cae85f86ce",
strip_prefix = "c-ares-{version}",
urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-05-11",
cpe = "cpe:2.3:a:c-ares_project:c-ares:*",
),
com_github_circonus_labs_libcircllhist = dict(
project_name = "libcircllhist",
+ project_desc = "An implementation of Circonus log-linear histograms",
project_url = "https://github.com/circonus-labs/libcircllhist",
- # 2019-02-11
version = "63a16dd6f2fc7bc841bb17ff92be8318df60e2e1",
sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c",
strip_prefix = "libcircllhist-{version}",
urls = ["https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz"],
- use_category = ["observability"],
+ use_category = ["controlplane", "observability_core", "dataplane_core"],
+ last_updated = "2019-02-11",
cpe = "N/A",
),
com_github_cyan4973_xxhash = dict(
project_name = "xxHash",
+ project_desc = "Extremely fast hash algorithm",
project_url = "https://github.com/Cyan4973/xxHash",
version = "0.7.3",
sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7",
strip_prefix = "xxHash-{version}",
urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"],
- use_category = ["dataplane", "controlplane"],
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-03-04",
cpe = "N/A",
),
com_github_envoyproxy_sqlparser = dict(
project_name = "C++ SQL Parser Library",
+ project_desc = "Forked from Hyrise SQL Parser",
project_url = "https://github.com/envoyproxy/sql-parser",
- # 2020-06-10
version = "3b40ba2d106587bdf053a292f7e3bb17e818a57f",
sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71",
strip_prefix = "sql-parser-{version}",
urls = ["https://github.com/envoyproxy/sql-parser/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.filters.network.mysql_proxy",
+ "envoy.filters.network.postgres_proxy",
+ ],
+ last_updated = "2020-06-10",
cpe = "N/A",
),
com_github_mirror_tclap = dict(
project_name = "tclap",
+ project_desc = "Small, flexible library that provides a simple interface for defining and accessing command line arguments",
project_url = "http://tclap.sourceforge.net",
version = "1-2-1",
sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f",
strip_prefix = "tclap-tclap-{version}-release-final",
urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"],
+ last_updated = "2017-11-10",
use_category = ["other"],
),
com_github_fmtlib_fmt = dict(
project_name = "fmt",
+ project_desc = "{fmt} is an open-source formatting library providing a fast and safe alternative to C stdio and C++ iostreams",
project_url = "https://fmt.dev",
version = "7.0.3",
sha256 = "decfdf9ad274070fa85f26407b816f5a4d82205ae86bac1990be658d0795ea4d",
strip_prefix = "fmt-{version}",
urls = ["https://github.com/fmtlib/fmt/releases/download/{version}/fmt-{version}.zip"],
- use_category = ["observability"],
- cpe = "N/A",
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-08-07",
+ cpe = "cpe:2.3:a:fmt:fmt:*",
),
com_github_gabime_spdlog = dict(
project_name = "spdlog",
+ project_desc = "Very fast, header-only/compiled, C++ logging library",
project_url = "https://github.com/gabime/spdlog",
version = "1.7.0",
sha256 = "f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62",
strip_prefix = "spdlog-{version}",
urls = ["https://github.com/gabime/spdlog/archive/v{version}.tar.gz"],
- use_category = ["observability"],
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-07-09",
cpe = "N/A",
),
com_github_google_libprotobuf_mutator = dict(
project_name = "libprotobuf-mutator",
+ project_desc = "Library to randomly mutate protobuffers",
project_url = "https://github.com/google/libprotobuf-mutator",
- # 2020-08-18
version = "8942a9ba43d8bb196230c321d46d6a137957a719",
sha256 = "49a26dbe77c75f2eca1dd8a9fbdb31c4496d9af42df027ff57569c5a7a5d980d",
strip_prefix = "libprotobuf-mutator-{version}",
urls = ["https://github.com/google/libprotobuf-mutator/archive/{version}.tar.gz"],
- use_category = ["test"],
+ last_updated = "2020-08-18",
+ use_category = ["test_only"],
+ ),
+ com_github_google_tcmalloc = dict(
+ project_name = "tcmalloc",
+ project_desc = "Fast, multi-threaded malloc implementation",
+ project_url = "https://github.com/google/tcmalloc",
+ version = "d1311bf409db47c3441d3de6ea07d768c6551dec",
+ sha256 = "e22444b6544edd81f11c987dd5e482a2e00bbff717badb388779ca57525dad50",
+ strip_prefix = "tcmalloc-{version}",
+ urls = ["https://github.com/google/tcmalloc/archive/{version}.tar.gz"],
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-09-16",
+ cpe = "N/A",
),
com_github_gperftools_gperftools = dict(
project_name = "gperftools",
+ project_desc = "tcmalloc and profiling libraries",
project_url = "https://github.com/gperftools/gperftools",
version = "2.8",
sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e",
strip_prefix = "gperftools-{version}",
urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"],
- use_category = ["test"],
+ last_updated = "2020-07-06",
+ use_category = ["dataplane_core", "controlplane"],
+ cpe = "cpe:2.3:a:gperftools_project:gperftools:*",
),
com_github_grpc_grpc = dict(
project_name = "gRPC",
+ project_desc = "gRPC C core library",
project_url = "https://grpc.io",
# TODO(JimmyCYJ): Bump to release 1.27
# This sha on grpc:v1.25.x branch is specifically chosen to fix gRPC STS call credential options.
- # 2020-02-11
version = "d8f4928fa779f6005a7fe55a176bdb373b0f910f",
sha256 = "bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123",
strip_prefix = "grpc-{version}",
urls = ["https://github.com/grpc/grpc/archive/{version}.tar.gz"],
- use_category = ["dataplane", "controlplane"],
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-02-11",
cpe = "cpe:2.3:a:grpc:grpc:*",
),
com_github_luajit_luajit = dict(
project_name = "LuaJIT",
+ project_desc = "Just-In-Time compiler for Lua",
project_url = "https://luajit.org",
- version = "2.1.0-beta3",
- sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8",
+ # The last release version, 2.1.0-beta3, has a number of CVEs filed
+ # against it. These may not impact correct non-malicious Lua code, but for prudence we bump.
+ version = "1d8b747c161db457e032a023ebbff511f5de5ec2",
+ sha256 = "20a159c38a98ecdb6368e8d655343b6036622a29a1621da9dc303f7ed9bf37f3",
strip_prefix = "LuaJIT-{version}",
- urls = ["https://github.com/LuaJIT/LuaJIT/archive/v{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ urls = ["https://github.com/LuaJIT/LuaJIT/archive/{version}.tar.gz"],
+ last_updated = "2020-10-13",
+ use_category = ["dataplane_ext"],
+ extensions = ["envoy.filters.http.lua"],
+ cpe = "cpe:2.3:a:luajit:luajit:*",
),
com_github_moonjit_moonjit = dict(
project_name = "Moonjit",
+ project_desc = "LuaJIT fork with wider platform support",
project_url = "https://github.com/moonjit/moonjit",
version = "2.2.0",
sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6",
strip_prefix = "moonjit-{version}",
urls = ["https://github.com/moonjit/moonjit/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ use_category = ["dataplane_ext"],
+ extensions = ["envoy.filters.http.lua"],
+ last_updated = "2020-01-14",
+ cpe = "cpe:2.3:a:moonjit_project:moonjit:*",
),
com_github_nghttp2_nghttp2 = dict(
project_name = "Nghttp2",
+ project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in Cimplementation of HTTP/2 and its header compression algorithm HPACK in C",
project_url = "https://nghttp2.org",
version = "1.41.0",
sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8",
strip_prefix = "nghttp2-{version}",
urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2020-06-03",
cpe = "cpe:2.3:a:nghttp2:nghttp2:*",
),
io_opentracing_cpp = dict(
project_name = "OpenTracing",
+ project_desc = "Vendor-neutral APIs and instrumentation for distributed tracing",
project_url = "https://opentracing.io",
version = "1.5.1",
sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301",
strip_prefix = "opentracing-cpp-{version}",
urls = ["https://github.com/opentracing/opentracing-cpp/archive/v{version}.tar.gz"],
- use_category = ["observability"],
+ use_category = ["observability_ext"],
+ extensions = [
+ "envoy.tracers.datadog",
+ "envoy.tracers.dynamic_ot",
+ "envoy.tracers.lightstep",
+ ],
+ last_updated = "2019-01-16",
cpe = "N/A",
),
com_lightstep_tracer_cpp = dict(
project_name = "lightstep-tracer-cpp",
+ project_desc = "LightStep distributed tracing library for C++",
project_url = "https://github.com/lightstep/lightstep-tracer-cpp",
- # 2020-08-24
version = "1942b3f142e218ebc143a043f32e3278dafec9aa",
sha256 = "3238921a8f578beb26c2215cd277e8f6752f3d29b020b881d60d96a240a38aed",
strip_prefix = "lightstep-tracer-cpp-{version}",
urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/{version}.tar.gz"],
- use_category = ["observability"],
+ use_category = ["observability_ext"],
+ extensions = ["envoy.tracers.lightstep"],
+ last_updated = "2020-08-24",
cpe = "N/A",
),
com_github_datadog_dd_opentracing_cpp = dict(
project_name = "Datadog OpenTracing C++ Client",
+ project_desc = "Datadog OpenTracing C++ Client",
project_url = "https://github.com/DataDog/dd-opentracing-cpp",
version = "1.1.5",
sha256 = "b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924",
strip_prefix = "dd-opentracing-cpp-{version}",
urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v{version}.tar.gz"],
- use_category = ["observability"],
+ use_category = ["observability_ext"],
+ extensions = ["envoy.tracers.datadog"],
+ last_updated = "2020-05-15",
cpe = "N/A",
),
com_github_google_benchmark = dict(
project_name = "Benchmark",
+ project_desc = "Library to benchmark code snippets",
project_url = "https://github.com/google/benchmark",
version = "1.5.1",
sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2",
strip_prefix = "benchmark-{version}",
urls = ["https://github.com/google/benchmark/archive/v{version}.tar.gz"],
- use_category = ["test"],
+ use_category = ["test_only"],
+ last_updated = "2020-06-09",
),
com_github_libevent_libevent = dict(
project_name = "libevent",
+ project_desc = "Event notification library",
project_url = "https://libevent.org",
# This SHA includes the new "prepare" and "check" watchers, used for event loop performance
# stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition
@@ -311,286 +341,408 @@ DEPENDENCY_REPOSITORIES_SPEC = dict(
# This also includes the wepoll backend for Windows (see
# https://github.com/libevent/libevent/pull/1006)
# TODO(adip): Update to v2.2 when it is released.
- # 2020-07-31
version = "62c152d9a7cd264b993dad730c4163c6ede2e0a3",
sha256 = "4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213",
strip_prefix = "libevent-{version}",
urls = ["https://github.com/libevent/libevent/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-07-31",
cpe = "cpe:2.3:a:libevent_project:libevent:*",
),
+ # This should be removed, see https://github.com/envoyproxy/envoy/issues/13261.
net_zlib = dict(
project_name = "zlib",
+ project_desc = "zlib compression library",
project_url = "https://zlib.net",
version = "79baebe50e4d6b73ae1f8b603f0ef41300110aa3",
# Use the dev branch of zlib to resolve fuzz bugs and out of bound
# errors resulting in crashes in zlib 1.2.11.
# TODO(asraa): Remove when zlib > 1.2.11 is released.
- # 2019-04-14 development branch
sha256 = "155a8f8c1a753fb05b16a1b0cc0a0a9f61a78e245f9e0da483d13043b3bcbf2e",
strip_prefix = "zlib-{version}",
urls = ["https://github.com/madler/zlib/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2019-04-14",
cpe = "cpe:2.3:a:gnu:zlib:*",
),
com_github_zlib_ng_zlib_ng = dict(
project_name = "zlib-ng",
+ project_desc = "zlib fork (higher performance)",
project_url = "https://github.com/zlib-ng/zlib-ng",
version = "193d8fd7dfb7927facab7a3034daa27ad5b9df1c",
sha256 = "5fe543e8d007b9e7b729f3d6b3a5ee1f9b68d0eef5f6af1393745a4dcd472a98",
- strip_prefix = "zlib-ng-193d8fd7dfb7927facab7a3034daa27ad5b9df1c",
- # 2020-08-16 develop branch.
- urls = ["https://github.com/zlib-ng/zlib-ng/archive/193d8fd7dfb7927facab7a3034daa27ad5b9df1c.tar.gz"],
- use_category = ["dataplane"],
+ strip_prefix = "zlib-ng-{version}",
+ urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"],
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2020-08-16",
cpe = "N/A",
),
com_github_jbeder_yaml_cpp = dict(
project_name = "yaml-cpp",
+ project_desc = "YAML parser and emitter in C++ matching the YAML 1.2 spec",
project_url = "https://github.com/jbeder/yaml-cpp",
- # 2020-07-28
version = "98acc5a8874faab28b82c28936f4b400b389f5d6",
sha256 = "79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f",
strip_prefix = "yaml-cpp-{version}",
urls = ["https://github.com/jbeder/yaml-cpp/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ # YAML is also used for runtime as well as controlplane. It shouldn't appear on the
+ # dataplane but we can't verify this automatically due to code structure today.
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2020-07-28",
+ cpe = "cpe:2.3:a:yaml-cpp_project:yaml-cpp:*",
),
com_github_msgpack_msgpack_c = dict(
project_name = "msgpack for C/C++",
+ project_desc = "MessagePack is an efficient binary serialization format",
project_url = "https://github.com/msgpack/msgpack-c",
version = "3.3.0",
sha256 = "6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b",
strip_prefix = "msgpack-{version}",
urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-{version}/msgpack-{version}.tar.gz"],
- use_category = ["observability"],
+ use_category = ["observability_ext"],
+ extensions = ["envoy.tracers.datadog"],
+ last_updated = "2020-06-05",
cpe = "N/A",
),
com_github_google_jwt_verify = dict(
project_name = "jwt_verify_lib",
+ project_desc = "JWT verification library for C++",
project_url = "https://github.com/google/jwt_verify_lib",
- # 2020-07-09
version = "7276a339af8426724b744216f619c99152f8c141",
sha256 = "f1fde4f3ebb3b2d841332c7a02a4b50e0529a19709934c63bc6208d1bbe28fb1",
strip_prefix = "jwt_verify_lib-{version}",
urls = ["https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = ["envoy.filters.http.jwt_authn"],
+ last_updated = "2020-07-09",
cpe = "N/A",
),
com_github_nodejs_http_parser = dict(
project_name = "HTTP Parser",
+ project_desc = "Parser for HTTP messages written in C",
project_url = "https://github.com/nodejs/http-parser",
- # 2020-07-10
# This SHA includes the fix for https://github.com/nodejs/http-parser/issues/517, which allows (opt-in)
# serving requests with both Content-Length and Transfer-Encoding: chunked headers set.
version = "4f15b7d510dc7c6361a26a7c6d2f7c3a17f8d878",
sha256 = "6a12896313ce1ca630cf516a0ee43a79b5f13f5a5d8143f56560ac0b21c98fac",
strip_prefix = "http-parser-{version}",
urls = ["https://github.com/nodejs/http-parser/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2020-07-10",
cpe = "cpe:2.3:a:nodejs:node.js:*",
),
com_github_tencent_rapidjson = dict(
project_name = "RapidJSON",
+ project_desc = "Fast JSON parser/generator for C++",
project_url = "https://rapidjson.org",
- # Changes through 2019-12-02
version = "dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1",
sha256 = "a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b",
strip_prefix = "rapidjson-{version}",
urls = ["https://github.com/Tencent/rapidjson/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ # We're mostly using com_google_protobuf for JSON, but there are some extensions and
+ # hard-to-disentangle uses on the dataplane, e.g. header_formatter and the Squash filter.
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2019-12-02",
cpe = "cpe:2.3:a:tencent:rapidjson:*",
),
com_github_twitter_common_lang = dict(
project_name = "twitter.common.lang (Thrift)",
+ project_desc = "twitter.common Python language and compatibility facilities",
project_url = "https://pypi.org/project/twitter.common.lang",
version = "0.3.9",
sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1",
strip_prefix = "twitter.common.lang-{version}/src",
urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ last_updated = "2018-06-26",
+ use_category = ["test_only"],
),
com_github_twitter_common_rpc = dict(
project_name = "twitter.common.rpc (Thrift)",
+ project_desc = "twitter.common Thrift helpers including Finagle and SSL transports",
project_url = "https://pypi.org/project/twitter.common.rpc",
version = "0.3.9",
sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514",
strip_prefix = "twitter.common.rpc-{version}/src",
urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ last_updated = "2018-06-26",
+ use_category = ["test_only"],
),
com_github_twitter_common_finagle_thrift = dict(
project_name = "twitter.common.finagle-thrift",
+ project_desc = "twitter.common Thrift stubs for Zipkin RPC tracing support in Finagle",
project_url = "https://pypi.org/project/twitter.common.finagle-thrift",
version = "0.3.9",
sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a",
strip_prefix = "twitter.common.finagle-thrift-{version}/src",
urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ last_updated = "2018-06-26",
+ use_category = ["test_only"],
),
com_google_googletest = dict(
project_name = "Google Test",
+ project_desc = "Google's C++ test framework",
project_url = "https://github.com/google/googletest",
- version = "1.10.0",
- sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb",
- strip_prefix = "googletest-release-{version}",
- urls = ["https://github.com/google/googletest/archive/release-{version}.tar.gz"],
- use_category = ["test"],
+ # Pick up the fix for MOCK_METHOD compilation with clang-cl for Windows (resolved after 1.10.0);
+ # see https://github.com/google/googletest/issues/2490
+ version = "a4ab0abb93620ce26efad9de9296b73b16e88588",
+ sha256 = "7897bfaa5ad39a479177cfb5c3ce010184dbaee22a7c3727b212282871918751",
+ strip_prefix = "googletest-{version}",
+ urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"],
+ last_updated = "2020-09-10",
+ use_category = ["test_only"],
),
com_google_protobuf = dict(
project_name = "Protocol Buffers",
+ project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data",
project_url = "https://developers.google.com/protocol-buffers",
version = "3.10.1",
sha256 = "d7cfd31620a352b2ee8c1ed883222a0d77e44346643458e062e86b1d069ace3e",
strip_prefix = "protobuf-{version}",
urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"],
- use_category = ["dataplane", "controlplane"],
- cpe = "N/A",
+ use_category = ["dataplane_core", "controlplane"],
+ last_updated = "2020-10-24",
+ cpe = "cpe:2.3:a:google:protobuf:*",
),
grpc_httpjson_transcoding = dict(
project_name = "grpc-httpjson-transcoding",
+ project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC",
project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding",
- # 2020-03-02
version = "faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6",
sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5",
strip_prefix = "grpc-httpjson-transcoding-{version}",
urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = ["envoy.filters.http.grpc_json_transcoder"],
+ last_updated = "2020-03-02",
cpe = "N/A",
),
io_bazel_rules_go = dict(
project_name = "Go rules for Bazel",
+ project_desc = "Bazel rules for the Go language",
project_url = "https://github.com/bazelbuild/rules_go",
version = "0.23.7",
sha256 = "0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616",
urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"],
- use_category = ["build"],
+ use_category = ["build", "api"],
+ last_updated = "2020-08-06",
+ implied_untracked_deps = [
+ "com_github_golang_protobuf",
+ "io_bazel_rules_nogo",
+ "org_golang_google_protobuf",
+ "org_golang_x_tools",
+ ],
),
rules_cc = dict(
project_name = "C++ rules for Bazel",
+ project_desc = "Bazel rules for the C++ language",
project_url = "https://github.com/bazelbuild/rules_cc",
- # 2020-05-13
# TODO(lizan): pin to a point release when there's a released version.
version = "818289e5613731ae410efb54218a4077fb9dbb03",
sha256 = "9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0",
strip_prefix = "rules_cc-{version}",
urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"],
+ last_updated = "2020-05-13",
use_category = ["build"],
),
rules_foreign_cc = dict(
project_name = "Rules for using foreign build systems in Bazel",
+ project_desc = "Rules for using foreign build systems in Bazel",
project_url = "https://github.com/bazelbuild/rules_foreign_cc",
- # 2020-08-21
version = "594bf4d7731e606a705f3ad787dd0a70c5a28b30",
sha256 = "2b1cf88de0b6e0195f6571cfde3a5bd406d11b42117d6adef2395c9525a1902e",
strip_prefix = "rules_foreign_cc-{version}",
urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"],
+ last_updated = "2020-08-21",
use_category = ["build"],
),
rules_python = dict(
project_name = "Python rules for Bazel",
+ project_desc = "Bazel rules for the Python language",
project_url = "https://github.com/bazelbuild/rules_python",
- # 2020-04-09
# TODO(htuch): revert to a point release when pip3_import appears.
version = "a0fbf98d4e3a232144df4d0d80b577c7a693b570",
sha256 = "76a8fd4e7eca2a3590f816958faa0d83c9b2ce9c32634c5c375bcccf161d3bb5",
strip_prefix = "rules_python-{version}",
urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"],
+ last_updated = "2020-04-09",
use_category = ["build"],
),
six = dict(
project_name = "Six",
+ project_desc = "Python 2 and 3 compatibility library",
project_url = "https://pypi.org/project/six",
version = "1.12.0",
sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73",
urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-{version}.tar.gz"],
+ last_updated = "2019-11-17",
use_category = ["other"],
),
+ org_llvm_llvm = dict(
+ project_name = "LLVM",
+ project_desc = "LLVM Compiler Infrastructure",
+ project_url = "https://llvm.org",
+ version = "10.0.0",
+ sha256 = "df83a44b3a9a71029049ec101fb0077ecbbdf5fe41e395215025779099a98fdf",
+ strip_prefix = "llvm-{version}.src",
+ urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/llvm-{version}.src.tar.xz"],
+ last_updated = "2020-10-09",
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ cpe = "cpe:2.3:a:llvm:*:*",
+ ),
+ com_github_wavm_wavm = dict(
+ project_name = "WAVM",
+ project_desc = "WebAssembly Virtual Machine",
+ project_url = "https://wavm.github.io",
+ version = "e8155f1f3af88b4d08802716a7054950ef18d827",
+ sha256 = "cc3fcaf05d57010c9cf8eb920234679dede6c780137b55001fd34e4d14806f7c",
+ strip_prefix = "WAVM-{version}",
+ urls = ["https://github.com/WAVM/WAVM/archive/{version}.tar.gz"],
+ last_updated = "2020-10-09",
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ cpe = "cpe:2.3:a:webassembly_virtual_machine_project:webassembly_virtual_machine:*",
+ ),
io_opencensus_cpp = dict(
project_name = "OpenCensus C++",
+ project_desc = "OpenCensus tracing library",
project_url = "https://github.com/census-instrumentation/opencensus-cpp",
- # 2020-06-01
- version = "7877337633466358ed680f9b26967da5b310d7aa",
- sha256 = "12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212",
+ version = "ba631066779a534267fdb1321b19850eb2b0c000",
+ sha256 = "f239a40803f6e2e42b57c9e68771b0990c4ca8b2d76b440073cdf14f4211ad26",
strip_prefix = "opencensus-cpp-{version}",
urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz"],
- use_category = ["observability"],
+ use_category = ["observability_ext"],
+ extensions = ["envoy.tracers.opencensus"],
+ last_updated = "2020-10-13",
cpe = "N/A",
),
+ # This should be removed, see https://github.com/envoyproxy/envoy/issues/11816.
com_github_curl = dict(
project_name = "curl",
+ project_desc = "Library for transferring data with URLs",
project_url = "https://curl.haxx.se",
- version = "7.69.1",
- sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98",
+ version = "7.72.0",
+ sha256 = "d4d5899a3868fbb6ae1856c3e55a32ce35913de3956d1973caccd37bd0174fa2",
strip_prefix = "curl-{version}",
urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ use_category = ["dataplane_ext", "observability_ext"],
+ extensions = [
+ "envoy.filters.http.aws_lambda",
+ "envoy.filters.http.aws_request_signing",
+ "envoy.grpc_credentials.aws_iam",
+ "envoy.tracers.opencensus",
+ ],
+ last_updated = "2020-08-19",
+ cpe = "cpe:2.3:a:haxx:curl:*",
),
com_googlesource_chromium_v8 = dict(
project_name = "V8",
+ project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++",
project_url = "https://v8.dev",
version = "8.5.210.20",
# This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh
# and contains complete checkout of V8 with all dependencies necessary to build wee8.
sha256 = "ef404643d7da6854b76b9fb9950a79a1acbd037b7a26f02c585ac379b0f7dee1",
urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"],
- use_category = ["dataplane"],
- cpe = "N/A",
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ last_updated = "2020-08-31",
+ cpe = "cpe:2.3:a:google:v8:*",
),
com_googlesource_quiche = dict(
project_name = "QUICHE",
+ project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols",
project_url = "https://quiche.googlesource.com/quiche",
- # Static snapshot of https://quiche.googlesource.com/quiche/+archive/96bd860bec207d4b722ab7f319fa47be129a85cd.tar.gz
- version = "96bd860bec207d4b722ab7f319fa47be129a85cd",
- sha256 = "d7129a2f41f2bd00a8a38b33f9b7b955d3e7de3dec20f69b70d7000d3a856360",
+ # Static snapshot of https://quiche.googlesource.com/quiche/+archive/f555d99a084cdd086a349548c70fb558ac5847cf.tar.gz
+ version = "f555d99a084cdd086a349548c70fb558ac5847cf",
+ sha256 = "1833f08e7b0f18b49d7498b029b7f3e6559a82113ec82a98a9e945553756e351",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = ["envoy.transport_sockets.quic"],
+ last_updated = "2020-09-18",
cpe = "N/A",
),
com_googlesource_googleurl = dict(
project_name = "Chrome URL parsing library",
+ project_desc = "Chrome URL parsing library",
project_url = "https://quiche.googlesource.com/googleurl",
# Static snapshot of https://quiche.googlesource.com/googleurl/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz.
- # 2020-08-05
version = "ef0d23689e240e6c8de4c3a5296b209128c87373",
sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = [],
+ last_updated = "2020-08-05",
cpe = "N/A",
),
com_google_cel_cpp = dict(
- project_name = "Common Expression Language C++",
+ project_name = "Common Expression Language (CEL) C++ library",
+ project_desc = "Common Expression Language (CEL) C++ library",
project_url = "https://opensource.google/projects/cel",
- # 2020-07-14
version = "b9453a09b28a1531c4917e8792b3ea61f6b1a447",
sha256 = "cad7d01139947d78e413d112cb8f7431fbb33cf66b0adf9c280824803fc2a72e",
strip_prefix = "cel-cpp-{version}",
urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.rbac",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.rbac",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ last_updated = "2020-07-14",
cpe = "N/A",
),
com_github_google_flatbuffers = dict(
project_name = "FlatBuffers",
+ project_desc = "Cross platform serialization library architected for maximum memory efficiency",
project_url = "https://github.com/google/flatbuffers",
version = "a83caf5910644ba1c421c002ef68e42f21c15f9f",
sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a",
strip_prefix = "flatbuffers-{version}",
urls = ["https://github.com/google/flatbuffers/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ last_updated = "2020-07-29",
cpe = "N/A",
),
com_googlesource_code_re2 = dict(
project_name = "RE2",
+ project_desc = "RE2, a regular expression library",
project_url = "https://github.com/google/re2",
- # 2020-07-06
version = "2020-07-06",
sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f",
strip_prefix = "re2-{version}",
urls = ["https://github.com/google/re2/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["controlplane", "dataplane_core"],
+ last_updated = "2020-07-06",
cpe = "N/A",
),
# Included to access FuzzedDataProvider.h. This is compiler agnostic but
@@ -598,132 +750,163 @@ DEPENDENCY_REPOSITORIES_SPEC = dict(
# Clang variant as we are not a Clang-LLVM only shop today.
org_llvm_releases_compiler_rt = dict(
project_name = "compiler-rt",
+ project_desc = "LLVM compiler runtime library",
project_url = "https://compiler-rt.llvm.org",
version = "10.0.0",
sha256 = "6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75",
# Only allow peeking at fuzzer related files for now.
strip_prefix = "compiler-rt-{version}.src",
urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"],
- use_category = ["test"],
+ last_updated = "2020-03-24",
+ use_category = ["test_only"],
),
upb = dict(
project_name = "upb",
+ project_desc = "A small protobuf implementation in C (gRPC dependency)",
project_url = "https://github.com/protocolbuffers/upb",
- # 2019-11-19
version = "8a3ae1ef3e3e3f26b45dec735c5776737fc7247f",
sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47",
strip_prefix = "upb-{version}",
urls = ["https://github.com/protocolbuffers/upb/archive/{version}.tar.gz"],
- use_category = ["dataplane", "controlplane"],
+ use_category = ["controlplane"],
+ last_updated = "2019-11-19",
cpe = "N/A",
),
kafka_source = dict(
project_name = "Kafka (source)",
+ project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "2.4.1",
sha256 = "740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd",
strip_prefix = "kafka-{version}/clients/src/main/resources/common/message",
urls = ["https://github.com/apache/kafka/archive/{version}.zip"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = ["envoy.filters.network.kafka_broker"],
+ last_updated = "2020-08-26",
cpe = "cpe:2.3:a:apache:kafka:*",
),
kafka_server_binary = dict(
project_name = "Kafka (server binary)",
+ project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "2.4.1",
sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a",
strip_prefix = "kafka_2.12-{version}",
urls = ["https://mirrors.gigenet.com/apache/kafka/{version}/kafka_2.12-{version}.tgz"],
- use_category = ["test"],
+ last_updated = "2020-08-26",
+ use_category = ["test_only"],
),
kafka_python_client = dict(
project_name = "Kafka (Python client)",
+ project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "2.0.1",
sha256 = "05f7c6eecb402f11fcb7e524c903f1ba1c38d3bdc9bf42bc8ec3cf7567b9f979",
strip_prefix = "kafka-python-{version}",
urls = ["https://github.com/dpkp/kafka-python/archive/{version}.tar.gz"],
- use_category = ["test"],
- ),
- org_unicode_icuuc = dict(
- project_name = "International Components for Unicode",
- project_url = "https://github.com/unicode-org/icu",
- version = "67.1",
- strip_prefix = "icu",
- sha256 = "94a80cd6f251a53bd2a997f6f1b5ac6653fe791dfab66e1eb0227740fb86d5dc",
- urls = ["https://github.com/unicode-org/icu/releases/download/release-{dash_version}/icu4c-{underscore_version}-src.tgz"],
- use_category = ["dataplane"],
- cpe = "cpe:2.3:a:icu-project:international_components_for_unicode",
+ last_updated = "2020-08-26",
+ use_category = ["test_only"],
),
proxy_wasm_cpp_sdk = dict(
project_name = "WebAssembly for Proxies (C++ SDK)",
+ project_desc = "WebAssembly for Proxies (C++ SDK)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk",
- version = "5cec30b448975e1fd3f4117311f0957309df5cb0",
- sha256 = "7d9e1f2e299215ed3e5fa8c8149740872b1100cfe3230fc639f967d9dcfd812e",
+ version = "7afb39d868a973caa6216a535c24e37fb666b6f3",
+ sha256 = "213d0b441bcc3df2c87933b24a593b5fd482fa8f4db158b707c60005b9e70040",
strip_prefix = "proxy-wasm-cpp-sdk-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ last_updated = "2020-10-09",
cpe = "N/A",
),
proxy_wasm_cpp_host = dict(
project_name = "WebAssembly for Proxies (C++ host implementation)",
+ project_desc = "WebAssembly for Proxies (C++ host implementation)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host",
- version = "928db4d79ec7b90aea3ad13ea5df36dc60c9c31d",
- sha256 = "494d3f81156b92bac640c26000497fbf3a7b1bc35f9789594280450c6e5d8129",
+ version = "c5658d34979abece30882b1eeaa95b6ee965d825",
+ sha256 = "dc3a794424b7679c3dbcf23548e202aa01e9f9093791b95446b99e8524e03c4f",
strip_prefix = "proxy-wasm-cpp-host-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"],
- use_category = ["dataplane"],
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ last_updated = "2020-10-16",
cpe = "N/A",
),
+ # TODO: upgrade to the latest version (1.41 currently fails tests)
emscripten_toolchain = dict(
project_name = "Emscripten SDK",
+ project_desc = "Emscripten SDK (use by Wasm)",
project_url = "https://github.com/emscripten-core/emsdk",
- version = "dec8a63594753fe5f4ad3b47850bf64d66c14a4e",
- sha256 = "2bdbee6947e32ad1e03cd075b48fda493ab16157b2b0225b445222cd528e1843",
- patch_cmds = [
- "./emsdk install 1.39.19-upstream",
- "./emsdk activate --embedded 1.39.19-upstream",
- ],
+ version = "1.39.6",
+ sha256 = "4ac0f1f3de8b3f1373d435cd7e58bd94de4146e751f099732167749a229b443b",
strip_prefix = "emsdk-{version}",
urls = ["https://github.com/emscripten-core/emsdk/archive/{version}.tar.gz"],
use_category = ["build"],
+ last_updated = "2020-10-09",
+ ),
+ io_bazel_rules_rust = dict(
+ project_name = "Bazel rust rules",
+ project_desc = "Bazel rust rules (used by Wasm)",
+ project_url = "https://github.com/bazelbuild/rules_rust",
+ version = "fb90a7484800157fbb8a5904fbeb608dc1effc0c",
+ sha256 = "cbb253b8c5ab1a3c1787790f900e7d6774e95ba038714fc0f710935e62f30f5f",
+ # Last commit where "out_binary = True" works.
+ # See: https://github.com/bazelbuild/rules_rust/issues/386
+ strip_prefix = "rules_rust-{version}",
+ urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"],
+ use_category = ["test_only"],
+ last_updated = "2020-10-15",
),
rules_antlr = dict(
project_name = "ANTLR Rules for Bazel",
+ project_desc = "Bazel rules for ANTLR",
project_url = "https://github.com/marcohu/rules_antlr",
version = "3cc2f9502a54ceb7b79b37383316b23c4da66f9a",
sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429",
strip_prefix = "rules_antlr-{version}",
urls = ["https://github.com/marcohu/rules_antlr/archive/{version}.tar.gz"],
- use_category = ["build"],
+ # ANTLR has a runtime component, so it is not a pure build dependency.
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ last_updated = "2020-07-29",
+ cpe = "N/A",
),
antlr4_runtimes = dict(
project_name = "ANTLR v4",
+ project_desc = "ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files",
project_url = "https://github.com/antlr/antlr4",
- version = "4.7.1",
- sha256 = "4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574",
+ version = "4.7.2",
+ sha256 = "46f5e1af5f4bd28ade55cb632f9a069656b31fc8c2408f9aa045f9b5f5caad64",
strip_prefix = "antlr4-{version}",
urls = ["https://github.com/antlr/antlr4/archive/{version}.tar.gz"],
- use_category = ["build"],
+ use_category = ["dataplane_ext"],
+ extensions = [
+ "envoy.access_loggers.wasm",
+ "envoy.bootstrap.wasm",
+ "envoy.filters.http.wasm",
+ "envoy.filters.network.wasm",
+ "envoy.stat_sinks.wasm",
+ ],
+ last_updated = "2020-10-09",
+ cpe = "N/A",
),
)
-
-def _format_version(s, version):
- return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_"))
-
-# Interpolate {version} in the above dependency specs. This code should be capable of running in both Python
-# and Starlark.
-def _dependency_repositories():
- locations = {}
- for key, location in DEPENDENCY_REPOSITORIES_SPEC.items():
- mutable_location = dict(location)
- locations[key] = mutable_location
-
- # Fixup with version information.
- if "version" in location:
- if "strip_prefix" in location:
- mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"])
- mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]]
- return locations
-
-DEPENDENCY_REPOSITORIES = _dependency_repositories()
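
The interpolation removed here (and presumably relocated beside the new external_http_archive loading path) expands the {version} placeholders used throughout REPOSITORY_LOCATIONS_SPEC above. A sketch, following the deleted _format_version helper:

def _format(s, version):
    # Same substitutions as the removed _format_version: plain, dashed and
    # underscored renderings of the version string.
    return s.format(
        version = version,
        dash_version = version.replace(".", "-"),
        underscore_version = version.replace(".", "_"),
    )

# e.g. for the c-ares entry: _format("cares-{underscore_version}", "1.16.1") == "cares-1_16_1"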
diff --git a/bazel/setup_clang.sh b/bazel/setup_clang.sh
index 0ed987b9d4d0..d0e58478dc0a 100755
--- a/bazel/setup_clang.sh
+++ b/bazel/setup_clang.sh
@@ -9,9 +9,10 @@ if [[ ! -e "${LLVM_PREFIX}/bin/llvm-config" ]]; then
exit 1
fi
-export PATH="$(${LLVM_PREFIX}/bin/llvm-config --bindir):${PATH}"
+PATH="$("${LLVM_PREFIX}"/bin/llvm-config --bindir):${PATH}"
+export PATH
-RT_LIBRARY_PATH="$(dirname $(find $(llvm-config --libdir) -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1))"
+RT_LIBRARY_PATH="$(dirname "$(find "$(llvm-config --libdir)" -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1)")"
echo "# Generated file, do not edit. If you want to disable clang, just delete this file.
build:clang --action_env='PATH=${PATH}'
@@ -28,5 +29,4 @@ build:clang-asan --linkopt=-fsanitize=vptr,function
build:clang-asan --linkopt='-L${RT_LIBRARY_PATH}'
build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a
build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a
-" > ${BAZELRC_FILE}
-
+" > "${BAZELRC_FILE}"
diff --git a/bazel/setup_local_tsan.sh b/bazel/setup_local_tsan.sh
index c805704af9e8..a5bd56ceb812 100755
--- a/bazel/setup_local_tsan.sh
+++ b/bazel/setup_local_tsan.sh
@@ -15,5 +15,4 @@ build:local-tsan --config=libc++
build:local-tsan --config=clang-tsan
build:local-tsan --linkopt=-L${LIBCXX_PREFIX}/lib
build:local-tsan --linkopt=-Wl,-rpath,${LIBCXX_PREFIX}/lib
-" > ${BAZELRC_FILE}
-
+" > "${BAZELRC_FILE}"
diff --git a/bazel/sh_test_wrapper.sh b/bazel/sh_test_wrapper.sh
index 9e2f1138dea7..8a290d6684d5 100755
--- a/bazel/sh_test_wrapper.sh
+++ b/bazel/sh_test_wrapper.sh
@@ -4,8 +4,8 @@
# TODO(lizan): remove when we have a solution for
# https://github.com/bazelbuild/bazel/issues/3510
-cd $(dirname "$0")
+cd "$(dirname "$0")" || exit 1
if [ $# -gt 0 ]; then
- "./$@"
+ "./${1}" "${@:2}"
fi
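
Both this wrapper and test_for_benchmark_wrapper.sh below get the same ShellCheck-driven rewrite: "./$@" concatenates a string with an array expansion (SC2145), so the ./ prefix attaches only to the first word. The behavior was already correct; the replacement just states it explicitly. A small demo of the equivalent expansions:

set -- foo bar
printf '%s\n' "./$@"             # ./foo, then bar: prefix binds to word one
printf '%s\n' "./${1}" "${@:2}"  # identical expansion, spelled out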
diff --git a/bazel/test_for_benchmark_wrapper.sh b/bazel/test_for_benchmark_wrapper.sh
index 37de6d0d0d81..0a53ca0ada3e 100755
--- a/bazel/test_for_benchmark_wrapper.sh
+++ b/bazel/test_for_benchmark_wrapper.sh
@@ -3,4 +3,4 @@
# Set the benchmark time to 0 to just verify that the benchmark runs to
# completion. We're interacting with two different flag parsers, so the order
# of flags and the -- matters.
-"${TEST_SRCDIR}/envoy/$@" --skip_expensive_benchmarks -- --benchmark_min_time=0
+"${TEST_SRCDIR}/envoy/${1}" "${@:2}" --skip_expensive_benchmarks -- --benchmark_min_time=0
diff --git a/bazel/wasm/wasm.bzl b/bazel/wasm/wasm.bzl
index 65fefcb49e90..a3d89067e496 100644
--- a/bazel/wasm/wasm.bzl
+++ b/bazel/wasm/wasm.bzl
@@ -1,6 +1,7 @@
+load("@io_bazel_rules_rust//rust:rust.bzl", "rust_binary")
load("@rules_cc//cc:defs.bzl", "cc_binary")
-def _wasm_transition_impl(settings, attr):
+def _wasm_cc_transition_impl(settings, attr):
return {
"//command_line_option:cpu": "wasm32",
"//command_line_option:crosstool_top": "@proxy_wasm_cpp_sdk//toolchain:emscripten",
@@ -11,46 +12,89 @@ def _wasm_transition_impl(settings, attr):
"//command_line_option:cxxopt": [],
"//command_line_option:linkopt": [],
"//command_line_option:collect_code_coverage": "false",
+ "//command_line_option:fission": "no",
}
-wasm_transition = transition(
- implementation = _wasm_transition_impl,
+def _wasm_rust_transition_impl(settings, attr):
+ return {
+ "//command_line_option:platforms": "@io_bazel_rules_rust//rust/platform:wasm",
+ }
+
+wasm_cc_transition = transition(
+ implementation = _wasm_cc_transition_impl,
inputs = [],
outputs = [
"//command_line_option:cpu",
"//command_line_option:crosstool_top",
"//command_line_option:copt",
"//command_line_option:cxxopt",
+ "//command_line_option:fission",
"//command_line_option:linkopt",
"//command_line_option:collect_code_coverage",
],
)
+wasm_rust_transition = transition(
+ implementation = _wasm_rust_transition_impl,
+ inputs = [],
+ outputs = [
+ "//command_line_option:platforms",
+ ],
+)
+
def _wasm_binary_impl(ctx):
out = ctx.actions.declare_file(ctx.label.name)
- ctx.actions.run_shell(
- command = 'cp "{}" "{}"'.format(ctx.files.binary[0].path, out.path),
- outputs = [out],
- inputs = ctx.files.binary,
- )
+ if ctx.attr.precompile:
+ ctx.actions.run(
+ executable = ctx.executable._compile_tool,
+ arguments = [ctx.files.binary[0].path, out.path],
+ outputs = [out],
+ inputs = ctx.files.binary,
+ )
+ else:
+ ctx.actions.run(
+ executable = "cp",
+ arguments = [ctx.files.binary[0].path, out.path],
+ outputs = [out],
+ inputs = ctx.files.binary,
+ )
- return [DefaultInfo(runfiles = ctx.runfiles([out]))]
+ return [DefaultInfo(files = depset([out]), runfiles = ctx.runfiles([out]))]
+
+def _wasm_attrs(transition):
+ return {
+ "binary": attr.label(mandatory = True, cfg = transition),
+ "precompile": attr.bool(default = False),
+ # This is deliberately in target configuration to avoid compiling v8 twice.
+ "_compile_tool": attr.label(default = "@envoy//test/tools/wee8_compile:wee8_compile_tool", executable = True, cfg = "target"),
+ "_whitelist_function_transition": attr.label(default = "@bazel_tools//tools/whitelists/function_transition_whitelist"),
+ }
# WASM binary rule implementation.
# This copies the binary specified in binary attribute in WASM configuration to
# target configuration, so a binary in non-WASM configuration can depend on it.
-wasm_binary = rule(
+wasm_cc_binary_rule = rule(
implementation = _wasm_binary_impl,
- attrs = {
- "binary": attr.label(mandatory = True, cfg = wasm_transition),
- "_whitelist_function_transition": attr.label(default = "@bazel_tools//tools/whitelists/function_transition_whitelist"),
- },
+ attrs = _wasm_attrs(wasm_cc_transition),
+)
+
+wasm_rust_binary_rule = rule(
+ implementation = _wasm_binary_impl,
+ attrs = _wasm_attrs(wasm_rust_transition),
)
-def wasm_cc_binary(name, **kwargs):
+def wasm_cc_binary(name, tags = [], repository = "", **kwargs):
wasm_name = "_wasm_" + name
- kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib"])
- kwargs.setdefault("linkopts", ["--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js"])
+ kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib", "@envoy//source/extensions/common/wasm/ext:jslib"])
+
+ if repository == "@envoy":
+ envoy_js = "--js-library external/envoy/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js"
+ else:
+ envoy_js = "--js-library source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js"
+ kwargs.setdefault("linkopts", [
+ envoy_js,
+ "--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js",
+ ])
kwargs.setdefault("visibility", ["//visibility:public"])
cc_binary(
name = wasm_name,
@@ -61,7 +105,34 @@ def wasm_cc_binary(name, **kwargs):
**kwargs
)
- wasm_binary(
+ wasm_cc_binary_rule(
+ name = name,
+ binary = ":" + wasm_name,
+ tags = tags + ["manual"],
+ )
+
+def envoy_wasm_cc_binary(name, tags = [], **kwargs):
+ wasm_cc_binary(name, tags, repository = "", **kwargs)
+
+def wasm_rust_binary(name, tags = [], **kwargs):
+ wasm_name = "_wasm_" + name.replace(".", "_")
+ kwargs.setdefault("visibility", ["//visibility:public"])
+
+ rust_binary(
+ name = wasm_name,
+ edition = "2018",
+ crate_type = "cdylib",
+ out_binary = True,
+ tags = ["manual"],
+ **kwargs
+ )
+
+ wasm_rust_binary_rule(
name = name,
+ precompile = select({
+ "@envoy//bazel:linux_x86_64": True,
+ "//conditions:default": False,
+ }),
binary = ":" + wasm_name,
+ tags = tags + ["manual"],
)
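For orientation, a plausible command-line use of the new macros; the label below is hypothetical. On `linux_x86_64`, the `select()` above additionally runs the module through `wee8_compile_tool` before the copy step:

```bash
# Build a Rust Wasm module defined with wasm_rust_binary (label assumed):
bazel build //examples/wasm:my_rust_filter.wasm
```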
diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy
index eecb68be7d17..435250d08185 100644
--- a/ci/Dockerfile-envoy
+++ b/ci/Dockerfile-envoy
@@ -28,7 +28,7 @@ RUN mkdir -p /etc/envoy
ARG ENVOY_BINARY_SUFFIX=_stripped
ADD ${TARGETPLATFORM}/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/
-ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml
+ADD configs/google_com_proxy.yaml /etc/envoy/envoy.yaml
EXPOSE 10000
diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine
index de13be43162d..b7bfba617f80 100644
--- a/ci/Dockerfile-envoy-alpine
+++ b/ci/Dockerfile-envoy-alpine
@@ -1,7 +1,7 @@
FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31
RUN mkdir -p /etc/envoy
-ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml
+ADD configs/google_com_proxy.yaml /etc/envoy/envoy.yaml
RUN apk add --no-cache shadow su-exec \
&& addgroup -S envoy && adduser --no-create-home -S envoy -G envoy
diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows
new file mode 100644
index 000000000000..4b0db0161531
--- /dev/null
+++ b/ci/Dockerfile-envoy-windows
@@ -0,0 +1,19 @@
+ARG BUILD_OS=mcr.microsoft.com/windows/servercore
+ARG BUILD_TAG=ltsc2019
+
+FROM $BUILD_OS:$BUILD_TAG
+
+RUN mkdir "C:\\Program\ Files\\envoy"
+RUN setx path "%path%;c:\Program Files\envoy"
+ADD ["windows/amd64/envoy.exe", "C:/Program Files/envoy/"]
+
+RUN mkdir "C:\\ProgramData\\envoy"
+ADD ["configs/google_com_proxy.yaml", "C:/ProgramData/envoy/envoy.yaml"]
+# Replace temp path with Windows temp path
+RUN powershell -Command "(cat C:\ProgramData\envoy\envoy.yaml -raw) -replace '/tmp/','C:\Windows\Temp\' | Set-Content -Encoding Ascii C:\ProgramData\envoy\envoy.yaml"
+
+EXPOSE 10000
+
+COPY ci/docker-entrypoint.bat C:/
+ENTRYPOINT ["C:/docker-entrypoint.bat"]
+CMD ["envoy.exe", "-c", "C:\\ProgramData\\envoy\\envoy.yaml"]
diff --git a/ci/README.md b/ci/README.md
index 46b1e1c65dae..028e31263b30 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -5,7 +5,7 @@ and an image based on Windows2019.
## Ubuntu Envoy image
-The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:<hash>`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks,
+The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:<hash>`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CI checks,
where `<hash>` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers
may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8)
repo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image.
@@ -15,9 +15,9 @@ binary built from the latest tip of master that passed tests.
## Alpine Envoy image
-Minimal images based on Alpine Linux allow for quicker deployment of Envoy. Two Alpine based images are built,
-one with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols and one stripped of them (`envoyproxy/envoy-alpine`).
-Both images are pushed with two different tags: `<hash>` and `latest`. Parallel to the Ubuntu images above, `<hash>` corresponds to the
+Minimal images based on Alpine Linux allow for quicker deployment of Envoy. The Alpine base image is built only with symbols stripped.
+To get the binary with symbols, use the corresponding Ubuntu based debug image. The image is pushed with two different tags:
+`<hash>` and `latest`. Parallel to the Ubuntu images above, `<hash>` corresponds to the
master commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of master that passed tests.
## Windows 2019 Envoy image
@@ -50,20 +50,26 @@ run `./ci/do_ci.sh` as described below.
# Building and running tests as a developer
-## On Linux
+The `./ci/run_envoy_docker.sh` script can be used to set up a Docker container on Linux and Windows
+to build an Envoy static binary and run tests.
-An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is:
+The build image defaults to `envoyproxy/envoy-build-ubuntu` on Linux and
+`envoyproxy/envoy-build-windows2019` on Windows, but you can choose the build image by setting
+`IMAGE_NAME` in the environment.
+
+In case your setup is behind a proxy, set `http_proxy` and `https_proxy` to the proxy servers before
+invoking the build.
```bash
-./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'
+IMAGE_NAME=envoyproxy/envoy-build-ubuntu http_proxy=http://proxy.foo.com:8080 https_proxy=http://proxy.bar.com:8080 ./ci/run_envoy_docker.sh '<command>'
```
-The build image defaults to `envoyproxy/envoy-build-ubuntu`, but you can choose build image by setting `IMAGE_NAME` in the environment.
+## On Linux
-In case your setup is behind a proxy, set `http_proxy` and `https_proxy` to the proxy servers before invoking the build.
+An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is:
```bash
-IMAGE_NAME=envoyproxy/envoy-build-ubuntu http_proxy=http://proxy.foo.com:8080 https_proxy=http://proxy.bar.com:8080 ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'
+./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'
```
The Envoy binary can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-fastbuild` on the Docker host. You
@@ -139,15 +145,27 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh <target>'` targets are:
An example basic invocation to build the Envoy static binary and run tests is:
```bash
-./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh'
+./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh'
+```
+
+You can edit `./ci/windows_ci_steps.sh` to change `bazel` arguments, tests to run, etc., as well
+as set environment variables to adjust your container build environment as described above.
+
+The Envoy binary can be found in `C:\Windows\Temp\envoy-docker-build\envoy\source\exe` on the Docker host. You
+can control this by setting `ENVOY_DOCKER_BUILD_DIR` in the environment, e.g. to
+generate the binary in `C:\Users\foo\build\envoy\source\exe` you can run:
+
+```bash
+ENVOY_DOCKER_BUILD_DIR="C:\Users\foo\build" ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'
```
-You can modify `./ci/windows_ci_steps.sh` to modify `bazel` arguments, tests to run, etc.
+Note the quotation marks around the `ENVOY_DOCKER_BUILD_DIR` value, which preserve the backslashes in the
+path.
If you would like to run an interactive session to keep the build container running (to persist your local build environment), run:
```bash
-./ci/run_envoy_docker_windows.sh 'bash'
+./ci/run_envoy_docker.sh 'bash'
```
From an interactive session, you can invoke `bazel` manually or use the `./ci/windows_ci_steps.sh` script to build and run tests.
@@ -171,10 +189,10 @@ This builds the Ubuntu based `envoyproxy/envoy-build-ubuntu` image, and the final
# macOS Build Flow
-The macOS CI build is part of the [CircleCI](https://circleci.com/gh/envoyproxy/envoy) workflow.
+The macOS CI build is part of the [Azure Pipelines](https://dev.azure.com/cncf/envoy/_build) workflow.
Dependencies are installed by the `ci/mac_ci_setup.sh` script, via [Homebrew](https://brew.sh),
-which is pre-installed on the CircleCI macOS image. The dependencies are cached are re-installed
-on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that
+which is pre-installed on the [Azure Pipelines macOS image](https://github.com/actions/virtual-environments/blob/main/images/macos/macos-10.15-Readme.md).
+The dependencies are cached and re-installed on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that
build and test Envoy. Note that the full version of Xcode (not just Command Line Tools) is required.
# Coverity Scan Build Flow
diff --git a/ci/api_mirror.sh b/ci/api_mirror.sh
index 077cdd1d3cfe..03e8ab85d80c 100755
--- a/ci/api_mirror.sh
+++ b/ci/api_mirror.sh
@@ -3,16 +3,15 @@
set -e
CHECKOUT_DIR=../data-plane-api
+MAIN_BRANCH="refs/heads/master"
+API_MAIN_BRANCH="master"
-if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ]
-then
+if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then
echo "Cloning..."
- git clone git@github.com:envoyproxy/data-plane-api "$CHECKOUT_DIR"
+ git clone git@github.com:envoyproxy/data-plane-api "$CHECKOUT_DIR" -b "${API_MAIN_BRANCH}"
- git -C "$CHECKOUT_DIR" config user.name "data-plane-api(CircleCI)"
+ git -C "$CHECKOUT_DIR" config user.name "data-plane-api(Azure Pipelines)"
git -C "$CHECKOUT_DIR" config user.email data-plane-api@users.noreply.github.com
- git -C "$CHECKOUT_DIR" fetch
- git -C "$CHECKOUT_DIR" checkout -B master origin/master
# Determine last envoyproxy/envoy SHA in envoyproxy/data-plane-api
MIRROR_MSG="Mirrored from https://github.com/envoyproxy/envoy"
@@ -40,6 +39,6 @@ then
done
echo "Pushing..."
- git -C "$CHECKOUT_DIR" push origin master
+ git -C "$CHECKOUT_DIR" push origin "${API_MAIN_BRANCH}"
echo "Done"
fi
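Note the shape of the new guard: `AZP_BRANCH` carries a full Git ref (assumed to be set by the pipeline from `Build.SourceBranch`), so the comparison is against `refs/heads/master` rather than a bare branch name as under CircleCI. In isolation:

```bash
#!/bin/bash
AZP_BRANCH="refs/heads/master"   # value assumed; normally set by the pipeline
MAIN_BRANCH="refs/heads/master"
if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then
  echo "master build: mirroring enabled"
fi
```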
diff --git a/ci/build_setup.ps1 b/ci/build_setup.ps1
deleted file mode 100755
index 9d64fff8f1ca..000000000000
--- a/ci/build_setup.ps1
+++ /dev/null
@@ -1,21 +0,0 @@
-$ErrorActionPreference = "Stop";
-trap { $host.SetShouldExit(1) }
-
-if ("$env:NUM_CPUS" -eq "") {
- $env:NUM_CPUS = (Get-WmiObject -class Win32_computersystem).NumberOfLogicalProcessors
-}
-
-if ("$env:ENVOY_BAZEL_ROOT" -eq "") {
- Write-Host "ENVOY_BAZEL_ROOT must be set!"
- throw
-}
-
-mkdir -force "$env:ENVOY_BAZEL_ROOT" > $nul
-
-$env:ENVOY_SRCDIR = [System.IO.Path]::GetFullPath("$PSScriptRoot\..")
-
-echo "ENVOY_BAZEL_ROOT: $env:ENVOY_BAZEL_ROOT"
-echo "ENVOY_SRCDIR: $env:ENVOY_SRCDIR"
-
-$env:BAZEL_BASE_OPTIONS="--output_base=$env:ENVOY_BAZEL_ROOT"
-$env:BAZEL_BUILD_OPTIONS="--config=msvc-cl --features=compiler_param_file --strategy=Genrule=standalone --spawn_strategy=standalone --verbose_failures --jobs=$env:NUM_CPUS --show_task_finish --test_output=all $env:BAZEL_BUILD_EXTRA_OPTIONS $env:BAZEL_EXTRA_TEST_OPTIONS"
diff --git a/ci/build_setup.sh b/ci/build_setup.sh
index ab8705edccce..f9275c2543c8 100755
--- a/ci/build_setup.sh
+++ b/ci/build_setup.sh
@@ -6,17 +6,25 @@ set -e
export PPROF_PATH=/thirdparty_build/bin/pprof
-[ -z "${NUM_CPUS}" ] && NUM_CPUS=`grep -c ^processor /proc/cpuinfo`
+[ -z "${NUM_CPUS}" ] && NUM_CPUS=$(grep -c ^processor /proc/cpuinfo)
[ -z "${ENVOY_SRCDIR}" ] && export ENVOY_SRCDIR=/source
[ -z "${ENVOY_BUILD_TARGET}" ] && export ENVOY_BUILD_TARGET=//source/exe:envoy-static
[ -z "${ENVOY_BUILD_DEBUG_INFORMATION}" ] && export ENVOY_BUILD_DEBUG_INFORMATION=//source/exe:envoy-static.dwp
-[ -z "${ENVOY_BUILD_ARCH}" ] && export ENVOY_BUILD_ARCH=$(uname -m)
+[ -z "${ENVOY_BUILD_ARCH}" ] && {
+ ENVOY_BUILD_ARCH=$(uname -m)
+ export ENVOY_BUILD_ARCH
+}
+
+read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}"
+read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}"
+read -ra BAZEL_OPTIONS <<< "${BAZEL_OPTIONS:-}"
+
echo "ENVOY_SRCDIR=${ENVOY_SRCDIR}"
echo "ENVOY_BUILD_TARGET=${ENVOY_BUILD_TARGET}"
echo "ENVOY_BUILD_ARCH=${ENVOY_BUILD_ARCH}"
function setup_gcc_toolchain() {
- if [[ ! -z "${ENVOY_STDLIB}" && "${ENVOY_STDLIB}" != "libstdc++" ]]; then
+ if [[ -n "${ENVOY_STDLIB}" && "${ENVOY_STDLIB}" != "libstdc++" ]]; then
echo "gcc toolchain doesn't support ${ENVOY_STDLIB}."
exit 1
fi
@@ -26,7 +34,7 @@ function setup_gcc_toolchain() {
export BAZEL_COMPILER=gcc
echo "$CC/$CXX toolchain configured"
else
- export BAZEL_BUILD_OPTIONS="--config=remote-gcc ${BAZEL_BUILD_OPTIONS}"
+ BAZEL_BUILD_OPTIONS=("--config=remote-gcc" "${BAZEL_BUILD_OPTIONS[@]}")
fi
}
@@ -34,15 +42,15 @@ function setup_clang_toolchain() {
ENVOY_STDLIB="${ENVOY_STDLIB:-libc++}"
if [[ -z "${ENVOY_RBE}" ]]; then
if [[ "${ENVOY_STDLIB}" == "libc++" ]]; then
- export BAZEL_BUILD_OPTIONS="--config=libc++ ${BAZEL_BUILD_OPTIONS}"
+ BAZEL_BUILD_OPTIONS=("--config=libc++" "${BAZEL_BUILD_OPTIONS[@]}")
else
- export BAZEL_BUILD_OPTIONS="--config=clang ${BAZEL_BUILD_OPTIONS}"
+ BAZEL_BUILD_OPTIONS=("--config=clang" "${BAZEL_BUILD_OPTIONS[@]}")
fi
else
if [[ "${ENVOY_STDLIB}" == "libc++" ]]; then
- export BAZEL_BUILD_OPTIONS="--config=remote-clang-libc++ ${BAZEL_BUILD_OPTIONS}"
+ BAZEL_BUILD_OPTIONS=("--config=remote-clang-libc++" "${BAZEL_BUILD_OPTIONS[@]}")
else
- export BAZEL_BUILD_OPTIONS="--config=remote-clang ${BAZEL_BUILD_OPTIONS}"
+ BAZEL_BUILD_OPTIONS=("--config=remote-clang" "${BAZEL_BUILD_OPTIONS[@]}")
fi
fi
echo "clang toolchain with ${ENVOY_STDLIB} configured"
@@ -61,7 +69,7 @@ export PATH=/opt/llvm/bin:${PATH}
export CLANG_FORMAT="${CLANG_FORMAT:-clang-format}"
if [[ -f "/etc/redhat-release" ]]; then
- export BAZEL_BUILD_EXTRA_OPTIONS+="--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1"
+ BAZEL_BUILD_EXTRA_OPTIONS+=("--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1")
fi
function cleanup() {
@@ -76,16 +84,28 @@ trap cleanup EXIT
export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}"
"$(dirname "$0")"/../bazel/setup_clang.sh "${LLVM_ROOT}"
-[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results"
+[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=("--nocache_test_results")
-export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS}"
+# TODO(phlax): deprecate/remove this - I believe it was made redundant here:
+# https://github.com/envoyproxy/envoy/commit/3ebedeb708a23062332a6fcdf33b462b7070adba#diff-2fa22a1337effee365a51e6844be0ab3
+export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS[*]}"
# Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks
# to save disk space.
-export BAZEL_BUILD_OPTIONS=" ${BAZEL_OPTIONS} --verbose_failures --show_task_finish --experimental_generate_json_trace_profile \
- --test_output=errors --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \
- ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}"
-
-[[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --flaky_test_attempts=2 --test_env=HEAPCHECK="
+BAZEL_BUILD_OPTIONS=(
+ "${BAZEL_OPTIONS[@]}"
+ "--verbose_failures"
+ "--show_task_finish"
+ "--experimental_generate_json_trace_profile"
+ "--test_output=errors"
+ "--repository_cache=${BUILD_DIR}/repository_cache"
+ "--experimental_repository_cache_hardlinks"
+ "${BAZEL_BUILD_EXTRA_OPTIONS[@]}"
+ "${BAZEL_EXTRA_TEST_OPTIONS[@]}")
+
+[[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]] && BAZEL_BUILD_OPTIONS+=(
+ "--define" "wasm=disabled"
+ "--flaky_test_attempts=2"
+ "--test_env=HEAPCHECK=")
[[ "${BAZEL_EXPUNGE}" == "1" ]] && bazel clean --expunge
@@ -119,6 +139,7 @@ export BUILDOZER_BIN="${BUILDOZER_BIN:-/usr/local/bin/buildozer}"
# source tree is different than the current workspace, the setup step is
# skipped.
if [[ "$1" != "-nofetch" && "${ENVOY_SRCDIR}" == "$(bazel info workspace)" ]]; then
+ # shellcheck source=ci/filter_example_setup.sh
. "$(dirname "$0")"/filter_example_setup.sh
else
echo "Skip setting up Envoy Filter Example."
diff --git a/ci/do_ci.ps1 b/ci/do_ci.ps1
deleted file mode 100755
index 86f98f74e49e..000000000000
--- a/ci/do_ci.ps1
+++ /dev/null
@@ -1,69 +0,0 @@
-$ErrorActionPreference = "Stop";
-trap { $host.SetShouldExit(1) }
-
-. "$PSScriptRoot\build_setup.ps1"
-Write-Host "building using $env:NUM_CPUS CPUs"
-
-function bazel_binary_build($type) {
- echo "Building..."
- bazel $env:BAZEL_BASE_OPTIONS.Split(" ") build $env:BAZEL_BUILD_OPTIONS.Split(" ") -c $type "//source/exe:envoy-static"
- $exit = $LASTEXITCODE
- if ($exit -ne 0) {
- exit $exit
- }
-}
-
-function bazel_test($type, $test) {
- if ($test) {
- echo "running windows tests $test"
- bazel $env:BAZEL_BASE_OPTIONS.Split(" ") test $env:BAZEL_BUILD_OPTIONS.Split(" ") -c $type --build_tests_only $test
- } else {
- echo "running all windows tests"
- bazel $env:BAZEL_BASE_OPTIONS.Split(" ") test $env:BAZEL_BUILD_OPTIONS.Split(" ") -c $type "//test/..." --test_tag_filters=-skip_on_windows --build_tests_only --test_summary=terse --test_output=errors
- }
- exit $LASTEXITCODE
-}
-
-$action, $test = $args
-
-switch ($action) {
- "bazel.release" {
- echo "bazel release build with tests..."
- bazel_binary_build "opt"
- bazel_test "opt" $test
- }
- "bazel.release.server_only" {
- echo "bazel release build..."
- bazel_binary_build "opt"
- }
- "bazel.release.test_only" {
- echo "bazel release build with tests..."
- bazel_test "opt" $test
- }
- "bazel.debug" {
- echo "bazel debug build with tests..."
- bazel_binary_build "dbg"
- bazel_test "dbg" $test
- }
- "bazel.debug.server_only" {
- echo "bazel debug build..."
- bazel_binary_build "dbg"
- }
- "bazel.debug.test_only" {
- echo "bazel debug build with tests..."
- bazel_test "dbg" $test
- }
- "bazel.dev" {
- echo "bazel fastbuild build with tests..."
- bazel_binary_build "fastbuild"
- bazel_test "fastbuild" $test
- }
- "bazel.dev.test_only" {
- echo "bazel fastbuild build with tests..."
- bazel_test "fastbuild" $test
- }
- default {
- echo "unknown action: $action"
- exit 1
- }
-}
diff --git a/ci/do_ci.sh b/ci/do_ci.sh
index 2f5f183ea937..c9c268e70e04 100755
--- a/ci/do_ci.sh
+++ b/ci/do_ci.sh
@@ -14,7 +14,9 @@ fi
SRCDIR="${PWD}"
NO_BUILD_SETUP="${NO_BUILD_SETUP:-}"
if [[ -z "$NO_BUILD_SETUP" ]]; then
+ # shellcheck source=ci/setup_cache.sh
. "$(dirname "$0")"/setup_cache.sh
+ # shellcheck source=ci/build_setup.sh
. "$(dirname "$0")"/build_setup.sh $build_setup_args
fi
cd "${SRCDIR}"
@@ -38,21 +40,21 @@ function collect_build_profile() {
}
function bazel_with_collection() {
+ local failed_logs
declare -r BAZEL_OUTPUT="${ENVOY_SRCDIR}"/bazel.output.txt
- bazel $* | tee "${BAZEL_OUTPUT}"
+ bazel "$@" | tee "${BAZEL_OUTPUT}"
declare BAZEL_STATUS="${PIPESTATUS[0]}"
if [ "${BAZEL_STATUS}" != "0" ]
then
- declare -r FAILED_TEST_LOGS="$(grep " /build.*test.log" "${BAZEL_OUTPUT}" | sed -e 's/ \/build.*\/testlogs\/\(.*\)/\1/')"
pushd bazel-testlogs
- for f in ${FAILED_TEST_LOGS}
- do
- cp --parents -f $f "${ENVOY_FAILED_TEST_LOGS}"
- done
+ failed_logs=$(grep " /build.*test.log" "${BAZEL_OUTPUT}" | sed -e 's/ \/build.*\/testlogs\/\(.*\)/\1/')
+ while read -r f; do
+ cp --parents -f "$f" "${ENVOY_FAILED_TEST_LOGS}"
+ done <<< "$failed_logs"
popd
exit "${BAZEL_STATUS}"
fi
- collect_build_profile $1
+ collect_build_profile "$1"
run_process_test_result
}
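Two details of the rewritten function are worth calling out: `"$@"` forwards the caller's arguments verbatim (the old `$*` re-split them), and `${PIPESTATUS[0]}` recovers bazel's exit code, which `$?` would lose behind `tee`. A reduced sketch:

```bash
#!/bin/bash
# The pipeline's $? reflects tee, not the failing first stage:
false | tee /dev/null
echo "last: $? first: ${PIPESTATUS[0]}"   # prints "last: 0 first: 1"
```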
@@ -112,9 +114,9 @@ function bazel_binary_build() {
ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#')
# This is a workaround for https://github.com/bazelbuild/bazel/issues/11834
- [[ ! -z "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"*
+ [[ -n "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"*
- bazel build ${BAZEL_BUILD_OPTIONS} -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS}
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS}
collect_build_profile "${BINARY_TYPE}"_build
# Copy the built envoy binary somewhere that we can access outside of the
@@ -124,7 +126,7 @@ function bazel_binary_build() {
if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then
# Generate dwp file for debugging since we used split DWARF to reduce binary
# size
- bazel build ${BAZEL_BUILD_OPTIONS} -c "${COMPILE_TYPE}" "${ENVOY_BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS}
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS}
# Copy the debug information
cp_debug_info_for_outside_access envoy
fi
@@ -142,12 +144,12 @@ CI_TARGET=$1
shift
if [[ $# -ge 1 ]]; then
- COVERAGE_TEST_TARGETS=$*
- TEST_TARGETS="$COVERAGE_TEST_TARGETS"
+ COVERAGE_TEST_TARGETS=("$@")
+ TEST_TARGETS=("$@")
else
# Coverage test will add QUICHE tests by itself.
- COVERAGE_TEST_TARGETS=//test/...
- TEST_TARGETS="${COVERAGE_TEST_TARGETS} @com_googlesource_quiche//:ci_tests"
+ COVERAGE_TEST_TARGETS=("//test/...")
+ TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "@com_googlesource_quiche//:ci_tests")
fi
if [[ "$CI_TARGET" == "bazel.release" ]]; then
@@ -157,11 +159,11 @@ if [[ "$CI_TARGET" == "bazel.release" ]]; then
# toolchain is kept consistent. This ifdef is checked in
# test/common/stats/stat_test_utility.cc when computing
# Stats::TestUtil::MemoryTest::mode().
- [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true"
+ [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]] && BAZEL_BUILD_OPTIONS+=("--test_env=ENVOY_MEMORY_TEST_EXACT=true")
setup_clang_toolchain
- echo "Testing ${TEST_TARGETS} with options: ${BAZEL_BUILD_OPTIONS}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS}
+ echo "Testing ${TEST_TARGETS[*]} with options: ${BAZEL_BUILD_OPTIONS[*]}"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c opt "${TEST_TARGETS[@]}"
echo "bazel release build with tests..."
bazel_binary_build release
@@ -178,26 +180,26 @@ elif [[ "$CI_TARGET" == "bazel.sizeopt.server_only" ]]; then
exit 0
elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then
setup_clang_toolchain
- echo "Testing ${TEST_TARGETS}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=sizeopt ${TEST_TARGETS}
+ echo "Testing ${TEST_TARGETS[*]}"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" --config=sizeopt "${TEST_TARGETS[@]}"
echo "bazel size optimized build with tests..."
bazel_binary_build sizeopt
exit 0
elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then
- BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK="
+ BAZEL_BUILD_OPTIONS+=("--test_env=HEAPCHECK=")
setup_gcc_toolchain
- echo "Testing ${TEST_TARGETS}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS}
+ echo "Testing ${TEST_TARGETS[*]}"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild "${TEST_TARGETS[@]}"
echo "bazel release build with gcc..."
- bazel_binary_build release
+ bazel_binary_build fastbuild
exit 0
elif [[ "$CI_TARGET" == "bazel.debug" ]]; then
setup_clang_toolchain
- echo "Testing ${TEST_TARGETS}"
- bazel test ${BAZEL_BUILD_OPTIONS} -c dbg ${TEST_TARGETS}
+ echo "Testing ${TEST_TARGETS[*]}"
+ bazel test "${BAZEL_BUILD_OPTIONS[@]}" -c dbg "${TEST_TARGETS[@]}"
echo "bazel debug build with tests..."
bazel_binary_build debug
@@ -209,36 +211,38 @@ elif [[ "$CI_TARGET" == "bazel.debug.server_only" ]]; then
exit 0
elif [[ "$CI_TARGET" == "bazel.asan" ]]; then
setup_clang_toolchain
- BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-asan --build_tests_only"
+ BAZEL_BUILD_OPTIONS+=(-c opt --copt -g "--config=clang-asan" "--build_tests_only")
echo "bazel ASAN/UBSAN debug build with tests"
- echo "Building and testing envoy tests ${TEST_TARGETS}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS}
+ echo "Building and testing envoy tests ${TEST_TARGETS[*]}"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}"
if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then
echo "Building and testing envoy-filter-example tests..."
pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${ENVOY_FILTER_EXAMPLE_TESTS}
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${ENVOY_FILTER_EXAMPLE_TESTS[@]}"
popd
fi
- if [ "${CI_SKIP_INTEGRATION_TEST_TRAFFIC_TAPPING}" != "1" ] ; then
+ # TODO(mattklein123): This part of the test is now flaky in CI and it's unclear why, possibly
+  # due to a sandboxing issue. Debug and enable it again.
+ # if [ "${CI_SKIP_INTEGRATION_TEST_TRAFFIC_TAPPING}" != "1" ] ; then
# Also validate that integration test traffic tapping (useful when debugging etc.)
# works. This requires that we set TAP_PATH. We do this under bazel.asan to
# ensure a debug build in CI.
- echo "Validating integration test traffic tapping..."
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} \
- --run_under=@envoy//bazel/test:verify_tap_test.sh \
- //test/extensions/transport_sockets/tls/integration:ssl_integration_test
- fi
+ # echo "Validating integration test traffic tapping..."
+ # bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" \
+ # --run_under=@envoy//bazel/test:verify_tap_test.sh \
+ # //test/extensions/transport_sockets/tls/integration:ssl_integration_test
+ # fi
exit 0
elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then
setup_clang_toolchain
echo "bazel TSAN debug build with tests"
- echo "Building and testing envoy tests ${TEST_TARGETS}"
- bazel_with_collection test --config=rbe-toolchain-tsan ${BAZEL_BUILD_OPTIONS} -c dbg --build_tests_only ${TEST_TARGETS}
+ echo "Building and testing envoy tests ${TEST_TARGETS[*]}"
+ bazel_with_collection test --config=rbe-toolchain-tsan "${BAZEL_BUILD_OPTIONS[@]}" -c dbg --build_tests_only "${TEST_TARGETS[@]}"
if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then
echo "Building and testing envoy-filter-example tests..."
pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan ${ENVOY_FILTER_EXAMPLE_TESTS}
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c dbg --config=clang-tsan "${ENVOY_FILTER_EXAMPLE_TESTS[@]}"
popd
fi
exit 0
@@ -246,10 +250,10 @@ elif [[ "$CI_TARGET" == "bazel.msan" ]]; then
ENVOY_STDLIB=libc++
setup_clang_toolchain
# rbe-toolchain-msan must come first to win library link order.
- BAZEL_BUILD_OPTIONS="--config=rbe-toolchain-msan ${BAZEL_BUILD_OPTIONS} -c dbg --build_tests_only"
+ BAZEL_BUILD_OPTIONS=("--config=rbe-toolchain-msan" "${BAZEL_BUILD_OPTIONS[@]}" "-c" "dbg" "--build_tests_only")
echo "bazel MSAN debug build with tests"
- echo "Building and testing envoy tests ${TEST_TARGETS}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS}
+ echo "Building and testing envoy tests ${TEST_TARGETS[*]}"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}"
exit 0
elif [[ "$CI_TARGET" == "bazel.dev" ]]; then
setup_clang_toolchain
@@ -258,8 +262,8 @@ elif [[ "$CI_TARGET" == "bazel.dev" ]]; then
echo "Building..."
bazel_binary_build fastbuild
- echo "Building and testing ${TEST_TARGETS}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild ${TEST_TARGETS}
+ echo "Building and testing ${TEST_TARGETS[*]}"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild "${TEST_TARGETS[@]}"
# TODO(foreseeable): consolidate this and the API tool tests in a dedicated target.
bazel_with_collection test //tools/envoy_headersplit:headersplit_test --spawn_strategy=local
bazel_with_collection test //tools/envoy_headersplit:replace_includes_test --spawn_strategy=local
@@ -268,72 +272,77 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then
# Right now, none of the available compile-time options conflict with each other. If this
# changes, this build type may need to be broken up.
# TODO(mpwarres): remove quiche=enabled once QUICHE is built by default.
- COMPILE_TIME_OPTIONS="\
- --define signal_trace=disabled \
- --define hot_restart=disabled \
- --define google_grpc=disabled \
- --define boringssl=fips \
- --define log_debug_assert_in_release=enabled \
- --define quiche=enabled \
- --define path_normalization_by_default=true \
- --define deprecated_features=disabled \
- --define use_new_codecs_in_integration_tests=true \
- --define zlib=ng \
- "
+ COMPILE_TIME_OPTIONS=(
+ "--define" "signal_trace=disabled"
+ "--define" "hot_restart=disabled"
+ "--define" "google_grpc=disabled"
+ "--define" "boringssl=fips"
+ "--define" "log_debug_assert_in_release=enabled"
+ "--define" "quiche=enabled"
+ "--define" "wasm=disabled"
+ "--define" "path_normalization_by_default=true"
+ "--define" "deprecated_features=disabled"
+ "--define" "use_new_codecs_in_integration_tests=false"
+ "--define" "tcmalloc=gperftools"
+ "--define" "zlib=ng")
+
ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}"
setup_clang_toolchain
# This doesn't go into CI but is available for developer convenience.
echo "bazel with different compiletime options build with tests..."
- if [[ "${TEST_TARGETS}" == "//test/..." ]]; then
+ if [[ "${TEST_TARGETS[*]}" == "//test/..." ]]; then
cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}"
- TEST_TARGETS="@envoy//test/..."
+ TEST_TARGETS=("@envoy//test/...")
fi
# Building all the dependencies from scratch to link them against libc++.
- echo "Building and testing ${TEST_TARGETS}"
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only
+ echo "Building and testing ${TEST_TARGETS[*]}"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg "${TEST_TARGETS[@]}" --test_tag_filters=-nofips --build_tests_only
# Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in
# integration tests with asan.
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only
# "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only
# these tests under "-c opt" to save time in CI.
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test
echo "Building binary..."
- bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips
collect_build_profile build
exit 0
elif [[ "$CI_TARGET" == "bazel.api" ]]; then
+ # Use libstdc++ because the API booster links to prebuilt libclang*/libLLVM* installed in /opt/llvm/lib,
+ # which is built with libstdc++. Using libstdc++ for the whole API CI job avoids an unnecessary rebuild.
+ ENVOY_STDLIB="libstdc++"
setup_clang_toolchain
+ export LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config
echo "Validating API structure..."
./tools/api/validate_structure.py
+ echo "Testing API and API Boosting..."
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \
+ @envoy_api_canonical//tools:tap2pcap_test @envoy_dev//clang_tools/api_booster/...
echo "Building API..."
- bazel build ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_api_canonical//envoy/...
- echo "Testing API..."
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \
- @envoy_api_canonical//tools:tap2pcap_test
- echo "Testing API boosting (unit tests)..."
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_dev//clang_tools/api_booster/...
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//envoy/...
echo "Testing API boosting (golden C++ tests)..."
# We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet.
- LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config BAZEL_BUILD_OPTIONS="--config=clang" python3.8 ./tools/api_boost/api_boost_test.py
+ BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" python3.8 ./tools/api_boost/api_boost_test.py
exit 0
elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then
setup_clang_toolchain
- echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS}"
+ echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS[*]}"
[[ "$CI_TARGET" == "bazel.fuzz_coverage" ]] && export FUZZ_COVERAGE=true
- test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS}
+ # We use custom BAZEL_BUILD_OPTIONS here to cover profiler's code.
+ BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]} --define tcmalloc=gperftools" test/run_envoy_bazel_coverage.sh "${COVERAGE_TEST_TARGETS[@]}"
collect_build_profile coverage
exit 0
elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then
# clang-tidy will warn on standard library issues with libc++
ENVOY_STDLIB="libstdc++"
setup_clang_toolchain
- NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh "$@"
+ BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh "$@"
exit 0
elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then
# Coverity Scan version 2017.07 fails to analyze the entirety of the Envoy
@@ -343,7 +352,7 @@ elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then
setup_gcc_toolchain
echo "bazel Coverity Scan build"
echo "Building..."
- /build/cov-analysis/bin/cov-build --dir "${ENVOY_BUILD_DIR}"/cov-int bazel build --action_env=LD_PRELOAD ${BAZEL_BUILD_OPTIONS} \
+ /build/cov-analysis/bin/cov-build --dir "${ENVOY_BUILD_DIR}"/cov-int bazel build --action_env=LD_PRELOAD "${BAZEL_BUILD_OPTIONS[@]}" \
-c opt "${ENVOY_BUILD_TARGET}"
# tar up the coverity results
tar czvf "${ENVOY_BUILD_DIR}"/envoy-coverity-output.tgz -C "${ENVOY_BUILD_DIR}" cov-int
@@ -354,29 +363,31 @@ elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then
exit 0
elif [[ "$CI_TARGET" == "bazel.fuzz" ]]; then
setup_clang_toolchain
- FUZZ_TEST_TARGETS="$(bazel query "attr('tags','fuzzer',${TEST_TARGETS})")"
- echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS}"
+ mapfile -t FUZZ_TEST_TARGETS < <(bazel query "attr('tags','fuzzer',${TEST_TARGETS[*]})")
+ echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS[*]}"
echo "Building envoy fuzzers and executing 100 fuzz iterations..."
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} --test_arg="-runs=10"
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" --config=asan-fuzzer "${FUZZ_TEST_TARGETS[@]}" --test_arg="-runs=10"
exit 0
elif [[ "$CI_TARGET" == "fix_format" ]]; then
# proto_format.sh needs to build protobuf.
setup_clang_toolchain
+
echo "fix_format..."
./tools/code_format/check_format.py fix
./tools/code_format/format_python_tools.sh fix
- ./tools/proto_format/proto_format.sh fix --test
+ BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/proto_format/proto_format.sh fix --test
exit 0
elif [[ "$CI_TARGET" == "check_format" ]]; then
# proto_format.sh needs to build protobuf.
setup_clang_toolchain
+
echo "check_format_test..."
./tools/code_format/check_format_test_helper.sh --log=WARN
echo "check_format..."
./tools/code_format/check_shellcheck_format.sh
./tools/code_format/check_format.py check
./tools/code_format/format_python_tools.sh check
- ./tools/proto_format/proto_format.sh check --test
+ BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/proto_format/proto_format.sh check --test
exit 0
elif [[ "$CI_TARGET" == "check_repositories" ]]; then
echo "check_repositories..."
@@ -400,13 +411,23 @@ elif [[ "$CI_TARGET" == "fix_spelling_pedantic" ]]; then
exit 0
elif [[ "$CI_TARGET" == "docs" ]]; then
echo "generating docs..."
- docs/build.sh
+ # Validate dependency relationships between core/extensions and external deps.
+ tools/dependency/validate_test.py
+ tools/dependency/validate.py
+ # Validate the CVE scanner works. TODO(htuch): create a dedicated tools CI target.
+ python3.8 tools/dependency/cve_scan_test.py
+ # Build docs.
+ BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" docs/build.sh
exit 0
elif [[ "$CI_TARGET" == "verify_examples" ]]; then
echo "verify examples..."
docker load < "$ENVOY_DOCKER_BUILD_DIR/docker/envoy-docker-images.tar.xz"
- images=($(docker image list --format "{{.Repository}}"))
- tags=($(docker image list --format "{{.Tag}}"))
+ _images=$(docker image list --format "{{.Repository}}")
+ while read -r line; do images+=("$line"); done \
+ <<< "$_images"
+ _tags=$(docker image list --format "{{.Tag}}")
+ while read -r line; do tags+=("$line"); done \
+ <<< "$_tags"
for i in "${!images[@]}"; do
if [[ "${images[i]}" =~ "envoy" ]]; then
docker tag "${images[$i]}:${tags[$i]}" "${images[$i]}:latest"
@@ -417,6 +438,7 @@ elif [[ "$CI_TARGET" == "verify_examples" ]]; then
sudo apt-get install -y -qq --no-install-recommends redis-tools
export DOCKER_NO_PULL=1
umask 027
+ chmod -R o-rwx examples/
ci/verify_examples.sh
exit 0
else
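One recurring pattern above: Bash arrays cannot be exported, so whenever `BAZEL_BUILD_OPTIONS` must reach a child process (`proto_format.sh`, `docs/build.sh`, the coverage script) it is flattened back to a single string with `[*]` for just that invocation. A sketch with illustrative values:

```bash
#!/bin/bash
OPTS=(--verbose_failures "--repository_cache=/tmp/cache")

# "[*]" joins the array with spaces for the child's environment only:
BAZEL_BUILD_OPTIONS="${OPTS[*]}" env | grep '^BAZEL_BUILD_OPTIONS='
```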
diff --git a/ci/do_circle_ci.sh b/ci/do_circle_ci.sh
deleted file mode 100755
index 29469a24b814..000000000000
--- a/ci/do_circle_ci.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# Workaround for argument too long issue in protoc
-ulimit -s 16384
-
-# bazel uses jgit internally and the default circle-ci .gitconfig says to
-# convert https://github.com to ssh://git@github.com, which jgit does not support.
-if [[ -e "~/.gitconfig" ]]; then
- mv ~/.gitconfig ~/.gitconfig_save
-fi
-
-# Workaround for not using ci/run_envoy_docker.sh
-# Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI
-# Docker image gets confused as it has no passwd entry when running non-root
-# unless we do this.
-FAKE_HOME=/tmp/fake_home
-mkdir -p "${FAKE_HOME}"
-export HOME="${FAKE_HOME}"
-export PYTHONUSERBASE="${FAKE_HOME}"
-export USER=bazel
-
-export ENVOY_SRCDIR="$(pwd)"
-
-# xlarge resource_class.
-# See note: https://circleci.com/docs/2.0/configuration-reference/#resource_class for why we
-# hard code this (basically due to how docker works).
-export NUM_CPUS=6
-
-# CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only.
-# IPv6 tests are run with Azure Pipelines.
-export BAZEL_BUILD_EXTRA_OPTIONS+="--test_env=ENVOY_IP_TEST_VERSIONS=v4only --local_cpu_resources=${NUM_CPUS} \
- --action_env=HOME --action_env=PYTHONUSERBASE --test_env=HOME --test_env=PYTHONUSERBASE"
-
-function finish {
- echo "disk space at end of build:"
- df -h
-}
-trap finish EXIT
-
-echo "disk space at beginning of build:"
-df -h
-
-ci/do_ci.sh $*
diff --git a/ci/docker-entrypoint.bat b/ci/docker-entrypoint.bat
new file mode 100644
index 000000000000..ed746d98da69
--- /dev/null
+++ b/ci/docker-entrypoint.bat
@@ -0,0 +1,21 @@
+@echo off
+setlocal
+
+set CMD=%*%
+
+REM if the first argument looks like a parameter (i.e. starts with '-'), run Envoy
+set first_arg=%1%
+if "%first_arg:~0,1%" == "-" (
+ set CMD=envoy.exe %CMD%
+)
+
+if /i "%1" == "envoy" set is_envoy=1
+if /i "%1" == "envoy.exe" set is_envoy=1
+if defined is_envoy (
+ REM set the log level if the $loglevel variable is set
+ if defined loglevel (
+ set CMD=%CMD% --log-level %loglevel%
+ )
+)
+
+%CMD%
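Usage sketches for the new entrypoint (the image tag is assumed): a first argument starting with `-` gets `envoy.exe` prepended, while an explicit `envoy`/`envoy.exe` first argument additionally honours the `loglevel` environment variable:

```bash
# "-" prefix: the entrypoint runs "envoy.exe --log-level trace":
docker run envoy-windows:dev --log-level trace

# explicit binary name: "--log-level debug" is appended from %loglevel%:
docker run -e loglevel=debug envoy-windows:dev envoy.exe -c "C:\ProgramData\envoy\envoy.yaml"
```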
diff --git a/ci/docker-entrypoint.sh b/ci/docker-entrypoint.sh
index 677e617e9fce..4815acb1956a 100755
--- a/ci/docker-entrypoint.sh
+++ b/ci/docker-entrypoint.sh
@@ -1,6 +1,8 @@
#!/usr/bin/env sh
set -e
+loglevel="${loglevel:-}"
+
# if the first argument looks like a parameter (i.e. starts with '-'), run Envoy
if [ "${1#-}" != "$1" ]; then
set -- envoy "$@"
diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh
index d4bb8e5e20e2..3bd584923bdf 100755
--- a/ci/docker_ci.sh
+++ b/ci/docker_ci.sh
@@ -4,6 +4,10 @@
# CI logs.
set -e
+function is_windows() {
+ [[ "$(uname -s)" == *NT* ]]
+}
+
ENVOY_DOCKER_IMAGE_DIRECTORY="${ENVOY_DOCKER_IMAGE_DIRECTORY:-${BUILD_STAGINGDIRECTORY:-.}/build_images}"
# Setting environments for buildx tools
@@ -12,7 +16,7 @@ config_env() {
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
# Remove older build instance
- docker buildx rm multi-builder | true
+ docker buildx rm multi-builder || :
docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64
}
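The `| true` → `|| :` change fixes an operator mix-up: the old form piped stdout into `true`, which reads nothing, rather than tolerating a failure. `|| :` states the intent directly, `:` being the shell's no-op builtin:

```bash
set -e
# Survives a missing builder instead of aborting the script (docker assumed):
docker buildx rm multi-builder || :
```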
@@ -20,10 +24,12 @@ build_platforms() {
TYPE=$1
FILE_SUFFIX="${TYPE/-debug/}"
- if [[ -z "${FILE_SUFFIX}" ]]; then
- echo "linux/arm64,linux/amd64"
+ if is_windows; then
+ echo "windows/amd64"
+ elif [[ -z "${FILE_SUFFIX}" ]]; then
+ echo "linux/arm64,linux/amd64"
else
- echo "linux/amd64"
+ echo "linux/amd64"
fi
}
@@ -31,56 +37,67 @@ build_args() {
TYPE=$1
FILE_SUFFIX="${TYPE/-debug/}"
- echo "-f ci/Dockerfile-envoy${FILE_SUFFIX}"
- [[ "${TYPE}" == *-debug ]] && echo "--build-arg ENVOY_BINARY_SUFFIX="
- if [[ "${TYPE}" == "-google-vrp" ]]; then
- echo "--build-arg ENVOY_VRP_BASE_IMAGE=${VRP_BASE_IMAGE}"
+ printf ' -f ci/Dockerfile-envoy%s' "${FILE_SUFFIX}"
+ if [[ "${TYPE}" == *-debug ]]; then
+ printf ' --build-arg ENVOY_BINARY_SUFFIX='
+ elif [[ "${TYPE}" == "-google-vrp" ]]; then
+ printf ' --build-arg ENVOY_VRP_BASE_IMAGE=%s' "${VRP_BASE_IMAGE}"
fi
}
use_builder() {
- TYPE=$1
- if [[ "${TYPE}" == "-google-vrp" ]]; then
- docker buildx use default
- else
- docker buildx use multi-builder
+ # BuildKit is not available for Windows images; skip this
+ if ! is_windows; then
+ TYPE=$1
+ if [[ "${TYPE}" == "-google-vrp" ]]; then
+ docker buildx use default
+ else
+ docker buildx use multi-builder
+ fi
fi
}
IMAGES_TO_SAVE=()
build_images() {
+ local _args args=()
TYPE=$1
BUILD_TAG=$2
use_builder "${TYPE}"
- ARGS="$(build_args ${TYPE})"
- PLATFORM="$(build_platforms ${TYPE})"
+ _args=$(build_args "${TYPE}")
+ read -ra args <<< "$_args"
+ PLATFORM="$(build_platforms "${TYPE}")"
- docker buildx build --platform "${PLATFORM}" ${ARGS} -t "${BUILD_TAG}" .
+ docker "${BUILD_COMMAND[@]}" --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" .
- PLATFORM="$(build_platforms ${TYPE} | tr ',' ' ')"
- # docker buildx load cannot have multiple platform, load individually
+ PLATFORM="$(build_platforms "${TYPE}" | tr ',' ' ')"
for ARCH in ${PLATFORM}; do
- if [[ "${ARCH}" == "linux/amd64" ]]; then
+ if [[ "${ARCH}" == "linux/amd64" ]] || [[ "${ARCH}" == "windows/amd64" ]]; then
IMAGE_TAG="${BUILD_TAG}"
else
IMAGE_TAG="${BUILD_TAG}-${ARCH/linux\//}"
fi
- docker buildx build --platform "${ARCH}" ${ARGS} -t "${IMAGE_TAG}" . --load
IMAGES_TO_SAVE+=("${IMAGE_TAG}")
+
+ # docker buildx load cannot have multiple platform, load individually
+ if ! is_windows; then
+ docker "${BUILD_COMMAND[@]}" --platform "${ARCH}" "${args[@]}" -t "${IMAGE_TAG}" . --load
+ fi
done
}
push_images() {
+ local _args args=()
TYPE=$1
BUILD_TAG=$2
use_builder "${TYPE}"
- ARGS="$(build_args ${TYPE})"
- PLATFORM="$(build_platforms ${TYPE})"
+ _args=$(build_args "${TYPE}")
+ read -ra args <<< "$_args"
+ PLATFORM="$(build_platforms "${TYPE}")"
# docker buildx doesn't do push with default builder
- docker buildx build --platform "${PLATFORM}" ${ARGS} -t ${BUILD_TAG} . --push || \
+ docker "${BUILD_COMMAND[@]}" --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" . --push || \
docker push "${BUILD_TAG}"
}
@@ -90,7 +107,7 @@ RELEASE_TAG_REGEX="^refs/tags/v.*"
# For master builds and release branch builds use the dev repo. Otherwise we assume it's a tag and
# we push to the primary repo.
-if [[ "${AZP_BRANCH}" =~ "${RELEASE_TAG_REGEX}" ]]; then
+if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
IMAGE_POSTFIX=""
IMAGE_NAME="${AZP_BRANCH/refs\/tags\//}"
else
@@ -101,14 +118,22 @@ fi
# This prefix is altered for the private security images on setec builds.
DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}"
-# "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency.
-BUILD_TYPES=("" "-debug" "-alpine" "-alpine-debug" "-google-vrp")
-# Configure docker-buildx tools
-config_env
+if is_windows; then
+ BUILD_TYPES=("-windows")
+ # BuildKit is not available for Windows images; use the standard build command
+ BUILD_COMMAND=("build")
+else
+ # "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency.
+ BUILD_TYPES=("" "-debug" "-alpine" "-google-vrp")
-# VRP base image is only for amd64
-VRP_BASE_IMAGE="${DOCKER_IMAGE_PREFIX}${IMAGE_POSTFIX}:${IMAGE_NAME}"
+ # Configure docker-buildx tools
+ BUILD_COMMAND=("buildx" "build")
+ config_env
+
+ # VRP base image is only for Linux amd64
+ VRP_BASE_IMAGE="${DOCKER_IMAGE_PREFIX}${IMAGE_POSTFIX}:${IMAGE_NAME}"
+fi
# Test the docker build in all cases, but use a local tag that we will overwrite before push in the
# cases where we do push.
diff --git a/ci/docker_rebuild_google-vrp.sh b/ci/docker_rebuild_google-vrp.sh
index 3a9bb5f711dc..4f3149e6732f 100755
--- a/ci/docker_rebuild_google-vrp.sh
+++ b/ci/docker_rebuild_google-vrp.sh
@@ -23,7 +23,8 @@ set -e
# this local dep which is fairly stable.
BASE_DOCKER_IMAGE="envoyproxy/envoy-dev:latest"
-declare -r BUILD_DIR="$(mktemp -d)"
+BUILD_DIR="$(mktemp -d)"
+declare -r BUILD_DIR
cp ci/Dockerfile-envoy-google-vrp "${BUILD_DIR}"
declare -r DOCKER_BUILD_FILE="${BUILD_DIR}"/Dockerfile-envoy-google-vrp
diff --git a/ci/envoy_build_sha.sh b/ci/envoy_build_sha.sh
index 6ea4600faeef..e2923189e35e 100644
--- a/ci/envoy_build_sha.sh
+++ b/ci/envoy_build_sha.sh
@@ -1,2 +1,4 @@
-ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu $(dirname $0)/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq)
+#!/bin/bash
+
+ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu "$(dirname "$0")"/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq)
[[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".bazelrc envoyproxy/envoy-build-ubuntu hashes are inconsistent!" && exit 1)
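The consistency check on the last line leans on a compact awk idiom: assigning `$1=$1` forces awk to rebuild the record, collapsing the padding `wc -l` may emit, and the trailing `1` prints it, so the count compares cleanly against `1`:

```bash
printf '   1   \n' | awk '{$1=$1};1'   # prints "1"
```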
diff --git a/ci/filter_example_mirror.sh b/ci/filter_example_mirror.sh
index 1d6d5ae05b23..8602b1677e4b 100755
--- a/ci/filter_example_mirror.sh
+++ b/ci/filter_example_mirror.sh
@@ -4,16 +4,15 @@ set -e
ENVOY_SRCDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")/../" && pwd)
CHECKOUT_DIR=../envoy-filter-example
+MAIN_BRANCH="refs/heads/master"
+FILTER_EXAMPLE_MAIN_BRANCH="master"
-if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ]
-then
+if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then
echo "Cloning..."
- git clone git@github.com:envoyproxy/envoy-filter-example "$CHECKOUT_DIR"
+ git clone git@github.com:envoyproxy/envoy-filter-example "$CHECKOUT_DIR" -b "${FILTER_EXAMPLE_MAIN_BRANCH}"
- git -C "$CHECKOUT_DIR" config user.name "envoy-filter-example(CircleCI)"
+ git -C "$CHECKOUT_DIR" config user.name "envoy-filter-example(Azure Pipelines)"
git -C "$CHECKOUT_DIR" config user.email envoy-filter-example@users.noreply.github.com
- git -C "$CHECKOUT_DIR" fetch
- git -C "$CHECKOUT_DIR" checkout -B master origin/master
echo "Updating Submodule..."
# Update submodule to latest Envoy SHA
@@ -26,6 +25,6 @@ then
echo "Committing, and Pushing..."
git -C "$CHECKOUT_DIR" commit -a -m "Update Envoy submodule to $ENVOY_SHA"
- git -C "$CHECKOUT_DIR" push origin master
+ git -C "$CHECKOUT_DIR" push origin "${FILTER_EXAMPLE_MAIN_BRANCH}"
echo "Done"
fi
diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh
index 4101c63445ee..774464f15a7c 100644
--- a/ci/filter_example_setup.sh
+++ b/ci/filter_example_setup.sh
@@ -5,10 +5,14 @@
set -e
# This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to.
-ENVOY_FILTER_EXAMPLE_GITSHA="493e2e5bee10bbed1c3c097e09d83d7f672a9f2e"
+ENVOY_FILTER_EXAMPLE_GITSHA="bebd0b2422ea7739905f1793565681d7266491e6"
ENVOY_FILTER_EXAMPLE_SRCDIR="${BUILD_DIR}/envoy-filter-example"
-export ENVOY_FILTER_EXAMPLE_TESTS="//:echo2_integration_test //http-filter-example:http_filter_integration_test //:envoy_binary_test"
+# shellcheck disable=SC2034
+ENVOY_FILTER_EXAMPLE_TESTS=(
+ "//:echo2_integration_test"
+ "//http-filter-example:http_filter_integration_test"
+ "//:envoy_binary_test")
if [[ ! -d "${ENVOY_FILTER_EXAMPLE_SRCDIR}/.git" ]]; then
rm -rf "${ENVOY_FILTER_EXAMPLE_SRCDIR}"
@@ -23,4 +27,4 @@ ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_FILTER_EXAMPLE_SRCD
cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/
cp -f "$(bazel info workspace)"/*.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/
-FILTER_WORKSPACE_SET=1
+export FILTER_WORKSPACE_SET=1
diff --git a/ci/flaky_test/requirements.txt b/ci/flaky_test/requirements.txt
index 7d942590e612..3368c5b2bff0 100644
--- a/ci/flaky_test/requirements.txt
+++ b/ci/flaky_test/requirements.txt
@@ -1,4 +1,73 @@
-multidict
-yarl
-wheel==0.35.1
-slackclient==2.8.0
\ No newline at end of file
+aiohttp==3.6.2 \
+ --hash=sha256:1e984191d1ec186881ffaed4581092ba04f7c61582a177b187d3a2f07ed9719e \
+ --hash=sha256:259ab809ff0727d0e834ac5e8a283dc5e3e0ecc30c4d80b3cd17a4139ce1f326 \
+ --hash=sha256:2f4d1a4fdce595c947162333353d4a44952a724fba9ca3205a3df99a33d1307a \
+ --hash=sha256:32e5f3b7e511aa850829fbe5aa32eb455e5534eaa4b1ce93231d00e2f76e5654 \
+ --hash=sha256:344c780466b73095a72c616fac5ea9c4665add7fc129f285fbdbca3cccf4612a \
+ --hash=sha256:460bd4237d2dbecc3b5ed57e122992f60188afe46e7319116da5eb8a9dfedba4 \
+ --hash=sha256:4c6efd824d44ae697814a2a85604d8e992b875462c6655da161ff18fd4f29f17 \
+ --hash=sha256:50aaad128e6ac62e7bf7bd1f0c0a24bc968a0c0590a726d5a955af193544bcec \
+ --hash=sha256:6206a135d072f88da3e71cc501c59d5abffa9d0bb43269a6dcd28d66bfafdbdd \
+ --hash=sha256:65f31b622af739a802ca6fd1a3076fd0ae523f8485c52924a89561ba10c49b48 \
+ --hash=sha256:ae55bac364c405caa23a4f2d6cfecc6a0daada500274ffca4a9230e7129eac59 \
+ --hash=sha256:b778ce0c909a2653741cb4b1ac7015b5c130ab9c897611df43ae6a58523cb965
+async-timeout==3.0.1 \
+ --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \
+ --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3
+attrs==20.2.0 \
+ --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \
+ --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc
+chardet==3.0.4 \
+ --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
+ --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691
+idna==2.10 \
+ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
+ --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0
+idna_ssl==1.1.0 \
+ --hash=sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c
+multidict==4.7.6 \
+ --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \
+ --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \
+ --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \
+ --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \
+ --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \
+ --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \
+ --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \
+ --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \
+ --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \
+ --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \
+ --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \
+ --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \
+ --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \
+ --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \
+ --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \
+ --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \
+ --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d
+slackclient==2.9.1 \
+ --hash=sha256:214edd4a494cc74353c8084ec184ff97a116d4b12cde287f805a9af948ef39ae \
+ --hash=sha256:3a3e84fd4f13d9715740c13ce6c3c25b970147aeeeec22ef137d796124dfcf08
+typing-extensions==3.7.4.3 \
+ --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \
+ --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \
+ --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f
+wheel==0.35.1 \
+ --hash=sha256:497add53525d16c173c2c1c733b8f655510e909ea78cc0e29d374243544b77a2 \
+ --hash=sha256:99a22d87add3f634ff917310a3d87e499f19e663413a52eb9232c447aa646c9f
+yarl==1.6.0 \
+ --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \
+ --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \
+ --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \
+ --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \
+ --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \
+ --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \
+ --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \
+ --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \
+ --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \
+ --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \
+ --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \
+ --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \
+ --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \
+ --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \
+ --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \
+ --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \
+ --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a
diff --git a/ci/flaky_test/run_process_xml.sh b/ci/flaky_test/run_process_xml.sh
index a5c5043c92d4..38496128bb91 100755
--- a/ci/flaky_test/run_process_xml.sh
+++ b/ci/flaky_test/run_process_xml.sh
@@ -1,10 +1,13 @@
#!/bin/bash
+export ENVOY_SRCDIR=${ENVOY_SRCDIR:-.}
+
+# shellcheck source=tools/shell_utils.sh
. "${ENVOY_SRCDIR}"/tools/shell_utils.sh
if [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then
export MULTIDICT_NO_EXTENSIONS=1
- export YARL_NO_EXTENSIONS=1
+ export YARL_NO_EXTENSIONS=1
fi
-python_venv process_xml $1
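+# python_venv (defined in tools/shell_utils.sh) sets up a Python virtualenv and runs the
+# process_xml tool inside it; the single argument is forwarded to the tool.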
+python_venv process_xml "$1"
diff --git a/ci/flaky_test/run_process_xml_mac.sh b/ci/flaky_test/run_process_xml_mac.sh
deleted file mode 100755
index 9dad6b7ea7fd..000000000000
--- a/ci/flaky_test/run_process_xml_mac.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-pip3 install slackclient
-./ci/flaky_test/process_xml.py
diff --git a/ci/go_mirror.sh b/ci/go_mirror.sh
index 80be4cc0b532..63f96d0d7969 100755
--- a/ci/go_mirror.sh
+++ b/ci/go_mirror.sh
@@ -2,7 +2,11 @@
set -e
-if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ]
-then
- tools/api/generate_go_protobuf.py
+MAIN_BRANCH="refs/heads/master"
+
+# shellcheck source=ci/setup_cache.sh
+. "$(dirname "$0")"/setup_cache.sh
+
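+# Only mirror the Go protobufs on post-merge master builds; AZP_BRANCH holds the full
+# git ref, hence the refs/heads/ prefix above.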
+if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then
+ BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS}" tools/api/generate_go_protobuf.py
fi
diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh
index 9303907a1be9..d69562ced31f 100755
--- a/ci/mac_ci_setup.sh
+++ b/ci/mac_ci_setup.sh
@@ -6,7 +6,13 @@
# https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for
# a list of pre-installed tools in the macOS image.
+# https://github.com/actions/virtual-environments/issues/1811
+brew uninstall openssl@1.0.2t
+
export HOMEBREW_NO_AUTO_UPDATE=1
+HOMEBREW_RETRY_ATTEMPTS=10
+HOMEBREW_RETRY_INTERVAL=3
+
function is_installed {
brew ls --versions "$1" >/dev/null
@@ -20,7 +26,21 @@ function install {
fi
}
-if ! brew update; then
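+# Retry a command up to HOMEBREW_RETRY_ATTEMPTS times, sleeping HOMEBREW_RETRY_INTERVAL
+# seconds between attempts; returns non-zero only if every attempt fails.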
+function retry () {
+ local returns=1 i=1
+ while ((i<=HOMEBREW_RETRY_ATTEMPTS)); do
+ if "$@"; then
+ returns=0
+ break
+ else
+ sleep "$HOMEBREW_RETRY_INTERVAL";
+ ((i++))
+ fi
+ done
+ return "$returns"
+}
+
+if ! retry brew update; then
echo "Failed to update homebrew"
exit 1
fi
@@ -31,12 +51,6 @@ do
is_installed "${DEP}" || install "${DEP}"
done
-if [ -n "$CIRCLECI" ]; then
- # bazel uses jgit internally and the default circle-ci .gitconfig says to
- # convert https://github.com to ssh://git@github.com, which jgit does not support.
- mv ~/.gitconfig ~/.gitconfig_save
-fi
-
# Required because bazel and a foreign bazelisk are installed in the latest macOS VM image,
# we have to unlink/overwrite them to install bazelisk
echo "Installing bazelisk"
@@ -48,4 +62,4 @@ fi
bazel version
-pip3 install slackclient
+pip3 install virtualenv
diff --git a/ci/mac_ci_steps.sh b/ci/mac_ci_steps.sh
index 41e01d0fd134..5ebaba83ce95 100755
--- a/ci/mac_ci_steps.sh
+++ b/ci/mac_ci_steps.sh
@@ -11,13 +11,23 @@ trap finish EXIT
echo "disk space at beginning of build:"
df -h
+# shellcheck source=ci/setup_cache.sh
. "$(dirname "$0")"/setup_cache.sh
+read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}"
+read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}"
+
# TODO(zuercher): remove --flaky_test_attempts when https://github.com/envoyproxy/envoy/issues/2428
# is resolved.
-BAZEL_BUILD_OPTIONS="--curses=no --show_task_finish --verbose_failures \
- --action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin --test_output=all \
- --flaky_test_attempts=integration@2 ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}"
+BAZEL_BUILD_OPTIONS=(
+ "--curses=no"
+ --show_task_finish
+ --verbose_failures
+ "--action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin"
+ "--test_output=all"
+ "--flaky_test_attempts=integration@2"
+ "${BAZEL_BUILD_EXTRA_OPTIONS[@]}"
+ "${BAZEL_EXTRA_TEST_OPTIONS[@]}")
# Build envoy and run tests as separate steps so that failure output
# is somewhat more deterministic (rather than interleaving the build
@@ -26,10 +36,13 @@ BAZEL_BUILD_OPTIONS="--curses=no --show_task_finish --verbose_failures \
if [[ $# -gt 0 ]]; then
TEST_TARGETS=$*
else
- TEST_TARGETS=//test/integration/...
+ TEST_TARGETS='//test/integration/...'
fi
if [[ "$TEST_TARGETS" == "//test/..." || "$TEST_TARGETS" == "//test/integration/..." ]]; then
- bazel build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static
+ bazel build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static
fi
-bazel test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS}
+bazel test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS}"
+
+# Additionally run macOS specific test suites
+bazel test "${BAZEL_BUILD_OPTIONS[@]}" //test/common/network:apple_dns_impl_test
diff --git a/ci/repokitteh/modules/azure_pipelines.star b/ci/repokitteh/modules/azure_pipelines.star
index 7d80c149b5cd..655ba0e50863 100644
--- a/ci/repokitteh/modules/azure_pipelines.star
+++ b/ci/repokitteh/modules/azure_pipelines.star
@@ -25,7 +25,7 @@ def _get_azp_checks():
return checks
def _retry(config, comment_id, command):
- msgs = "Retrying Azure Pipelines, to retry CircleCI checks, use `/retest-circle`.\n"
+ msgs = "Retrying Azure Pipelines.\n"
checks = _get_azp_checks()
retried_checks = []
diff --git a/ci/repokitteh/modules/newcontributor.star b/ci/repokitteh/modules/newcontributor.star
new file mode 100644
index 000000000000..4cf644bc200f
--- /dev/null
+++ b/ci/repokitteh/modules/newcontributor.star
@@ -0,0 +1,31 @@
+
+NEW_CONTRIBUTOR_MESSAGE = """
+Hi @%s, welcome and thank you for your contribution.
+
+We will try to review your Pull Request as quickly as possible.
+
+In the meantime, please take a look at the [contribution guidelines](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md) if you have not done so already.
+
+"""
+
+def get_pr_author_association(issue_number):
+ return github.call(
+ method="GET",
+ path="repos/envoyproxy/envoy/pulls/%s" % issue_number)["json"]["author_association"]
+
+def is_newcontributor(issue_number):
+ return get_pr_author_association(issue_number) == "FIRST_TIME_CONTRIBUTOR"
+
+def should_message_newcontributor(action, issue_number):
+ return (
+ action == 'opened'
+ and is_newcontributor(issue_number))
+
+def send_newcontributor_message(sender):
+ github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender)
+
+def _pr(action, issue_number, sender, config):
+ if should_message_newcontributor(action, issue_number):
+ send_newcontributor_message(sender)
+
+handlers.pull_request(func=_pr)
diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh
index d594553b6cb2..040b5a46b895 100755
--- a/ci/run_clang_tidy.sh
+++ b/ci/run_clang_tidy.sh
@@ -37,7 +37,7 @@ function exclude_win32_impl() {
# Do not run clang-tidy against macOS impl
# TODO: We should run clang-tidy against macOS impl for completeness
function exclude_macos_impl() {
- grep -v source/common/filesystem/kqueue/
+ grep -v source/common/filesystem/kqueue/ | grep -v source/common/network/apple_dns_impl | grep -v test/common/network/apple_dns_impl_test
}
# Do not run incremental clang-tidy on check_format testdata files.
@@ -50,33 +50,59 @@ function exclude_headersplit_testdata() {
grep -v tools/envoy_headersplit/
}
+# Do not run clang-tidy against Chromium URL import, this needs to largely
+# reflect the upstream structure.
+function exclude_chromium_url() {
+ grep -v source/common/chromium_url/
+}
+
# Exclude files in third_party which are temporary forks from other OSS projects.
function exclude_third_party() {
grep -v third_party/
}
+# Exclude files which are part of the Wasm emscripten environment
+function exclude_wasm_emscripten() {
+ grep -v source/extensions/common/wasm/ext
+}
+
+# Exclude files which are part of the Wasm SDK
+function exclude_wasm_sdk() {
+ grep -v proxy_wasm_cpp_sdk
+}
+
+# Exclude files which are part of the Wasm Host environment
+function exclude_wasm_host() {
+ grep -v proxy_wasm_cpp_host
+}
+
+# Exclude proxy-wasm test_data.
+function exclude_wasm_test_data() {
+ grep -v wasm/test_data
+}
+
function filter_excludes() {
- exclude_check_format_testdata | exclude_headersplit_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party
+ exclude_check_format_testdata | exclude_headersplit_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data
}
function run_clang_tidy() {
python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \
- -clang-tidy-binary=${CLANG_TIDY} \
- -clang-apply-replacements-binary=${CLANG_APPLY_REPLACEMENTS} \
- -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p ${SRCDIR} -quiet \
- ${APPLY_CLANG_TIDY_FIXES:+-fix} $@
+ -clang-tidy-binary="${CLANG_TIDY}" \
+ -clang-apply-replacements-binary="${CLANG_APPLY_REPLACEMENTS}" \
+ -export-fixes=${FIX_YAML} -j "${NUM_CPUS:-0}" -p "${SRCDIR}" -quiet \
+ ${APPLY_CLANG_TIDY_FIXES:+-fix} "$@"
}
function run_clang_tidy_diff() {
- git diff $1 | filter_excludes | \
+ git diff "$1" | filter_excludes | \
python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \
- -clang-tidy-binary=${CLANG_TIDY} \
- -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p 1 -quiet
+ -clang-tidy-binary="${CLANG_TIDY}" \
+ -export-fixes="${FIX_YAML}" -j "${NUM_CPUS:-0}" -p 1 -quiet
}
if [[ $# -gt 0 ]]; then
- echo "Running clang-tidy on: $@"
- run_clang_tidy $@
+ echo "Running clang-tidy on: $*"
+ run_clang_tidy "$@"
elif [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then
echo "Running a full clang-tidy"
run_clang_tidy
@@ -87,15 +113,15 @@ else
elif [[ "${BUILD_REASON}" == *CI ]]; then
DIFF_REF="HEAD^"
else
- DIFF_REF=$(${ENVOY_SRCDIR}/tools/git/last_github_commit.sh)
+ DIFF_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh)
fi
fi
- echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse ${DIFF_REF})), current HEAD ($(git rev-parse HEAD))"
- run_clang_tidy_diff ${DIFF_REF}
+ echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse "${DIFF_REF}")), current HEAD ($(git rev-parse HEAD))"
+ run_clang_tidy_diff "${DIFF_REF}"
fi
if [[ -s "${FIX_YAML}" ]]; then
echo "clang-tidy check failed, potentially fixed by clang-apply-replacements:"
- cat ${FIX_YAML}
+ cat "${FIX_YAML}"
exit 1
fi
diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh
index 5bafffb89522..842b51b6ce89 100755
--- a/ci/run_envoy_docker.sh
+++ b/ci/run_envoy_docker.sh
@@ -2,36 +2,95 @@
set -e
-. $(dirname $0)/envoy_build_sha.sh
+# shellcheck source=ci/envoy_build_sha.sh
+. "$(dirname "$0")"/envoy_build_sha.sh
-# We run as root and later drop permissions. This is required to setup the USER
-# in useradd below, which is need for correct Python execution in the Docker
-# environment.
-USER=root
-USER_GROUP=root
+function is_windows() {
+ [[ "$(uname -s)" == *NT* ]]
+}
+
+read -ra ENVOY_DOCKER_OPTIONS <<< "${ENVOY_DOCKER_OPTIONS:-}"
+
+# TODO(phlax): uppercase these env vars
+export HTTP_PROXY="${http_proxy:-}"
+export HTTPS_PROXY="${https_proxy:-}"
+export NO_PROXY="${no_proxy:-}"
+
+if is_windows; then
+ [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019"
+ # TODO(sunjayBhatia): Currently ENVOY_DOCKER_OPTIONS is ignored on Windows because
+ # CI sets it to a Linux-specific value. Undo this once https://github.com/envoyproxy/envoy/issues/13272
+ # is resolved.
+ ENVOY_DOCKER_OPTIONS=()
+ DEFAULT_ENVOY_DOCKER_BUILD_DIR=C:/Windows/Temp/envoy-docker-build
+ BUILD_DIR_MOUNT_DEST=C:/build
+ # Replace MSYS-style drive letter (/c/) with drive letter designation (C:/)
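+ # e.g. "/c/Users/build/envoy" becomes "c:/Users/build/envoy"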
+ SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#")
+ SOURCE_DIR_MOUNT_DEST=C:/source
+ START_COMMAND=("bash" "-c" "cd source && $*")
+else
+ [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-ubuntu"
+ # We run as root and later drop permissions. This is required to setup the USER
+ # in useradd below, which is needed for correct Python execution in the Docker
+ # environment.
+ ENVOY_DOCKER_OPTIONS+=(-u root:root)
+ ENVOY_DOCKER_OPTIONS+=(-v /var/run/docker.sock:/var/run/docker.sock)
+ ENVOY_DOCKER_OPTIONS+=(--cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN)
+ DEFAULT_ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build
+ BUILD_DIR_MOUNT_DEST=/build
+ SOURCE_DIR="${PWD}"
+ SOURCE_DIR_MOUNT_DEST=/source
+ START_COMMAND=("/bin/bash" "-lc" "groupadd --gid $(id -g) -f envoygroup \
+ && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home --home-dir /build envoybuild \
+ && usermod -a -G pcap envoybuild \
+ && sudo -EHs -u envoybuild bash -c 'cd /source && $*'")
+fi
-[[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-ubuntu"
# The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker
# images').
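+# e.g. IMAGE_NAME=envoyproxy/envoy-build-ubuntu IMAGE_ID=<tag> ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release'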
[[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}"
-[[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build
+[[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR="${DEFAULT_ENVOY_DOCKER_BUILD_DIR}"
+# Replace backslash with forward slash for Windows style paths
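+# e.g. "C:\Windows\Temp\envoy-docker-build" becomes "C:/Windows/Temp/envoy-docker-build"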
+ENVOY_DOCKER_BUILD_DIR="${ENVOY_DOCKER_BUILD_DIR//\\//}"
+mkdir -p "${ENVOY_DOCKER_BUILD_DIR}"
-[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=" -it"
-[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=" -v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)"
+[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=("-it")
+[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=(-v "$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)")
+[[ -n "${SSH_AUTH_SOCK}" ]] && ENVOY_DOCKER_OPTIONS+=(-v "${SSH_AUTH_SOCK}:${SSH_AUTH_SOCK}" -e SSH_AUTH_SOCK)
export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}"
-mkdir -p "${ENVOY_DOCKER_BUILD_DIR}"
# Since we specify an explicit hash, docker-run will pull from the remote repo if missing.
-docker run --rm ${ENVOY_DOCKER_OPTIONS} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} -e NO_PROXY=${no_proxy} \
- -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock \
- -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE -e ENVOY_STDLIB -e BUILD_REASON \
- -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY -e ENVOY_BUILD_IMAGE \
- -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \
- -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME -e BAZELISK_BASE_URL -e ENVOY_BUILD_ARCH -e SLACK_TOKEN -e BUILD_URI\
- -e REPO_URI -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \
- /bin/bash -lc "\
- groupadd --gid $(id -g) -f envoygroup \
- && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home --home-dir /build envoybuild \
- && usermod -a -G pcap envoybuild \
- && sudo -EHs -u envoybuild bash -c \"cd /source && $*\""
+docker run --rm \
+ "${ENVOY_DOCKER_OPTIONS[@]}" \
+ -v "${ENVOY_DOCKER_BUILD_DIR}":"${BUILD_DIR_MOUNT_DEST}" \
+ -v "${SOURCE_DIR}":"${SOURCE_DIR_MOUNT_DEST}" \
+ -e AZP_BRANCH \
+ -e HTTP_PROXY \
+ -e HTTPS_PROXY \
+ -e NO_PROXY \
+ -e BAZEL_STARTUP_OPTIONS \
+ -e BAZEL_BUILD_EXTRA_OPTIONS \
+ -e BAZEL_EXTRA_TEST_OPTIONS \
+ -e BAZEL_REMOTE_CACHE \
+ -e ENVOY_STDLIB \
+ -e BUILD_REASON \
+ -e BAZEL_REMOTE_INSTANCE \
+ -e GCP_SERVICE_ACCOUNT_KEY \
+ -e NUM_CPUS \
+ -e ENVOY_RBE \
+ -e FUZZIT_API_KEY \
+ -e ENVOY_BUILD_IMAGE \
+ -e ENVOY_SRCDIR \
+ -e ENVOY_BUILD_TARGET \
+ -e SYSTEM_PULLREQUEST_TARGETBRANCH \
+ -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \
+ -e GCS_ARTIFACT_BUCKET \
+ -e BUILD_SOURCEBRANCHNAME \
+ -e BAZELISK_BASE_URL \
+ -e ENVOY_BUILD_ARCH \
+ -e SLACK_TOKEN \
+ -e BUILD_URI \
+ -e REPO_URI \
+ "${ENVOY_BUILD_IMAGE}" \
+ "${START_COMMAND[@]}"
diff --git a/ci/run_envoy_docker_windows.sh b/ci/run_envoy_docker_windows.sh
deleted file mode 100644
index a1f4e7372b52..000000000000
--- a/ci/run_envoy_docker_windows.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# The image tag for the Windows image is the same as the Linux one so we use the same mechanism to find it
-. $(dirname $0)/envoy_build_sha.sh
-
-[[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019"
-# The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker
-# images').
-[[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}"
-
-ENVOY_SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#")
-
-[[ -f .git ]] && [[ ! -d .git ]] && GIT_VOLUME_OPTION="-v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)"
-
-[[ -t 1 ]] && DOCKER_TTY_OPTION=-it
-
-export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}"
-
-# Since we specify an explicit hash, docker-run will pull from the remote repo if missing.
-docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \
- ${GIT_VOLUME_OPTION} -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE \
- -e ENVOY_STDLIB -e BUILD_REASON -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE \
- -e ENVOY_BUILD_IMAGE -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -v ${ENVOY_SOURCE_DIR}:C:/source \
- "${ENVOY_BUILD_IMAGE}" \
- bash -c "cd source && $*"
diff --git a/ci/setup_cache.sh b/ci/setup_cache.sh
index f615b8b41d5d..0733f679b784 100755
--- a/ci/setup_cache.sh
+++ b/ci/setup_cache.sh
@@ -2,7 +2,7 @@
set -e
-if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then
+if [[ -n "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then
# mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all
# users by default.
GCP_SERVICE_ACCOUNT_KEY_FILE=$(mktemp -t gcp_service_account.XXXXXX.json)
@@ -20,11 +20,11 @@ if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then
fi
-if [[ ! -z "${BAZEL_REMOTE_CACHE}" ]]; then
+if [[ -n "${BAZEL_REMOTE_CACHE}" ]]; then
export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_cache=${BAZEL_REMOTE_CACHE}"
echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE}."
- if [[ ! -z "${BAZEL_REMOTE_INSTANCE}" ]]; then
+ if [[ -n "${BAZEL_REMOTE_INSTANCE}" ]]; then
export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_instance_name=${BAZEL_REMOTE_INSTANCE}"
echo "instance_name: ${BAZEL_REMOTE_INSTANCE}."
elif [[ -z "${ENVOY_RBE}" ]]; then
diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh
index 7bd5b0201359..755abf3a39d5 100755
--- a/ci/upload_gcs_artifact.sh
+++ b/ci/upload_gcs_artifact.sh
@@ -22,5 +22,5 @@ BRANCH=${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}
GCS_LOCATION="${GCS_ARTIFACT_BUCKET}/${BRANCH}/${TARGET_SUFFIX}"
echo "Uploading to gs://${GCS_LOCATION} ..."
-gsutil -mq rsync -dr ${SOURCE_DIRECTORY} gs://${GCS_LOCATION}
+gsutil -mq rsync -dr "${SOURCE_DIRECTORY}" "gs://${GCS_LOCATION}"
echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}/index.html"
diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh
index 4e459464aeda..d034a4a30cec 100755
--- a/ci/verify_examples.sh
+++ b/ci/verify_examples.sh
@@ -3,7 +3,7 @@
TESTFILTER="${1:-*}"
FAILED=()
SRCDIR="${SRCDIR:-$(pwd)}"
-EXCLUDED_BUILD_CONFIGS=${EXCLUDED_BUILD_CONFIGS:-"^./jaeger-native-tracing|docker-compose"}
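+# Example directories matching this regex are skipped by run_examples below.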
+EXCLUDED_EXAMPLES=${EXCLUDED_EXAMPLES:-"wasm"}
trap_errors () {
@@ -30,7 +30,7 @@ trap exit 1 INT
run_examples () {
local examples example
cd "${SRCDIR}/examples" || exit 1
- examples=$(find . -mindepth 1 -maxdepth 1 -type d -name "$TESTFILTER" | sort)
+ examples=$(find . -mindepth 1 -maxdepth 1 -type d -name "$TESTFILTER" | grep -vE "${EXCLUDED_EXAMPLES}" | sort)
for example in $examples; do
pushd "$example" > /dev/null || return 1
./verify.sh
@@ -38,26 +38,8 @@ run_examples () {
done
}
-verify_build_configs () {
- local config configs missing
- missing=()
- cd "${SRCDIR}/examples" || return 1
- configs="$(find . -name "*.yaml" -o -name "*.lua" | grep -vE "${EXCLUDED_BUILD_CONFIGS}" | cut -d/ -f2-)"
- for config in $configs; do
- grep "\"$config\"" BUILD || missing+=("$config")
- done
- if [[ -n "${missing[*]}" ]]; then
- for config in "${missing[@]}"; do
- echo "Missing config: $config" >&2
- done
- return 1
- fi
-}
-
-verify_build_configs
run_examples
-
if [[ "${#FAILED[@]}" -ne "0" ]]; then
echo "TESTS FAILED:"
for failed in "${FAILED[@]}"; do
diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh
index 498445d9b949..ff77a9ea1465 100755
--- a/ci/windows_ci_steps.sh
+++ b/ci/windows_ci_steps.sh
@@ -11,38 +11,74 @@ trap finish EXIT
echo "disk space at beginning of build:"
df -h
+# shellcheck source=ci/setup_cache.sh
. "$(dirname "$0")"/setup_cache.sh
+read -ra BAZEL_STARTUP_OPTIONS <<< "${BAZEL_STARTUP_OPTIONS:-}"
+# Default to msvc-cl if not overridden
+read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:---config=msvc-cl}"
+read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}"
+
# Set up TMPDIR so both bash and non-bash programs can access it
# e.g. TMPDIR=/d/tmp, make a link from /d/d to /d so both bash and Windows programs resolve the
# same path
# This is due to this issue: https://github.com/bazelbuild/rules_foreign_cc/issues/334
# rules_foreign_cc does not currently use bazel output/temp directories by default, it uses mktemp
# which respects the value of the TMPDIR environment variable
-drive="$(readlink -f $TMPDIR | cut -d '/' -f2)"
+drive="$(readlink -f "$TMPDIR" | cut -d '/' -f2)"
if [ ! -e "/$drive/$drive" ]; then
/c/windows/system32/cmd.exe /c "mklink /d $drive:\\$drive $drive:\\"
fi
-BAZEL_STARTUP_OPTIONS="--output_base=c:/_eb"
-# Default to msvc-cl if not overridden
-BAZEL_BUILD_EXTRA_OPTIONS=${BAZEL_BUILD_EXTRA_OPTIONS:---config=msvc-cl}
-BAZEL_BUILD_OPTIONS="-c opt --show_task_finish --verbose_failures \
- --test_output=errors ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}"
+BUILD_DIR=${BUILD_DIR:-/c/build}
+if [[ ! -d "${BUILD_DIR}" ]]
+then
+ echo "${BUILD_DIR} mount missing - did you forget -v :${BUILD_DIR}? Creating."
+ mkdir -p "${BUILD_DIR}"
+fi
+
+# Environment setup.
+export TEST_TMPDIR=${BUILD_DIR}/tmp
+
+[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=(--nocache_test_results)
+
+BAZEL_STARTUP_OPTIONS+=("--output_base=c:/_eb")
+BAZEL_BUILD_OPTIONS=(
+ -c opt
+ --show_task_finish
+ --verbose_failures
+ --define "wasm=disabled"
+ "--test_output=errors"
+ "${BAZEL_BUILD_EXTRA_OPTIONS[@]}"
+ "${BAZEL_EXTRA_TEST_OPTIONS[@]}")
+
+# Also setup some space for building Envoy standalone.
+ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy
+mkdir -p "${ENVOY_BUILD_DIR}"
+
+# This is where we copy build deliverables to.
+ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe
+mkdir -p "${ENVOY_DELIVERY_DIR}"
# Test to validate updates of all dependency libraries in bazel/external and bazel/foreign_cc
-# bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //bazel/... --build_tag_filters=-skip_on_windows
+# bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //bazel/... --build_tag_filters=-skip_on_windows
# Complete envoy-static build (nothing needs to be skipped, build failure indicates broken dependencies)
-bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static
+bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static
+
+# Copy binary to delivery directory
+cp -f bazel-bin/source/exe/envoy-static.exe "${ENVOY_DELIVERY_DIR}/envoy.exe"
+
+# Copy for azp, creating a tar archive
+tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_DELIVERY_DIR}" envoy.exe
# Test invocations of known-working tests on Windows
-bazel ${BAZEL_STARTUP_OPTIONS} test ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows,-flaky_on_windows --build_tests_only
+bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows,-flaky_on_windows --build_tests_only
# Build tests that are known-flaky or known-failing to ensure no compilation regressions
-bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,fails_on_windows,flaky_on_windows --build_tests_only
+bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=-skip_on_windows,fails_on_windows,flaky_on_windows --build_tests_only
# Summarize tests bypassed to monitor the progress of porting to Windows
-echo Tests bypassed as skip_on_windows: `bazel query 'kind(".*test rule", attr("tags", "skip_on_windows", //test/...))' 2>/dev/null | sort | wc -l` known unbuildable or inapplicable tests
-echo Tests bypassed as fails_on_windows: `bazel query 'kind(".*test rule", attr("tags", "fails_on_windows", //test/...))' 2>/dev/null | sort | wc -l` known incompatible tests
-echo Tests bypassed as flaky_on_windows: `bazel query 'kind(".*test rule", attr("tags", "flaky_on_windows", //test/...))' 2>/dev/null | sort | wc -l` known unstable tests
+echo "Tests bypassed as skip_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "skip_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known unbuildable or inapplicable tests"
+echo "Tests bypassed as fails_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "fails_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known incompatible tests"
+echo "Tests bypassed as flaky_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "flaky_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known unstable tests"
diff --git a/configs/BUILD b/configs/BUILD
index 128ec6642118..ca9a10935694 100644
--- a/configs/BUILD
+++ b/configs/BUILD
@@ -39,11 +39,17 @@ genrule(
srcs = [
":configs",
"//examples:configs",
+ "//docs:configs",
"//test/config/integration/certs",
],
outs = ["example_configs.tar"],
- cmd = "$(location configgen.sh) $(location configgen) $(@D) $(locations :configs) " +
- "$(locations //examples:configs) $(locations //test/config/integration/certs)",
+ cmd = (
+ "$(location configgen.sh) $(location configgen) $(@D) " +
+ "$(locations :configs) " +
+ "$(locations //examples:configs) " +
+ "$(locations //docs:configs) " +
+ "$(locations //test/config/integration/certs)"
+ ),
tools = [
"configgen.sh",
":configgen",
diff --git a/configs/Dockerfile b/configs/Dockerfile
index 2d7b7a6a5e3b..ac1bc7aeece8 100644
--- a/configs/Dockerfile
+++ b/configs/Dockerfile
@@ -3,5 +3,5 @@
FROM envoyproxy/envoy-dev:latest
RUN apt-get update
-COPY google_com_proxy.v2.yaml /etc/envoy.yaml
+COPY google_com_proxy.yaml /etc/envoy.yaml
CMD /usr/local/bin/envoy -c /etc/envoy.yaml
diff --git a/configs/access_log_format_helper.template.yaml b/configs/access_log_format_helper.template.yaml
new file mode 100644
index 000000000000..9861a51e9bfb
--- /dev/null
+++ b/configs/access_log_format_helper.template.yaml
@@ -0,0 +1,15 @@
+{% macro ingress_sampled_log() -%}
+ log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"}
+{% endmacro %}
+
+{% macro ingress_full() -%}
+ log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"}
+{% endmacro %}
+
+{% macro egress_error_log() -%}
+ log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"}
+{% endmacro %}
+
+{% macro egress_error_amazon_service() -%}
+ log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n"}
+{% endmacro %}
diff --git a/configs/access_log_format_helper_v2.template.yaml b/configs/access_log_format_helper_v2.template.yaml
deleted file mode 100644
index 7a5d711c088b..000000000000
--- a/configs/access_log_format_helper_v2.template.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{% macro ingress_sampled_log() -%}
- format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"
-{% endmacro %}
-
-{% macro ingress_full() -%}
- format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"
-{% endmacro %}
-
-{% macro egress_error_log() -%}
- format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"
-{% endmacro %}
-
-{% macro egress_error_amazon_service() -%}
- format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n"
-{% endmacro %}
diff --git a/configs/configgen.py b/configs/configgen.py
index d5409c481a91..8f5e20cd562d 100755
--- a/configs/configgen.py
+++ b/configs/configgen.py
@@ -111,16 +111,16 @@ def generate_config(template_path, template, output_file, **context):
# Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners,
# as well as a listener for the double proxy to connect to via SSL client authentication.
generate_config(SCRIPT_DIR,
- 'envoy_front_proxy_v2.template.yaml',
- '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR),
+ 'envoy_front_proxy.template.yaml',
+ '{}/envoy_front_proxy.yaml'.format(OUT_DIR),
clusters=front_envoy_clusters,
tracing=tracing_enabled)
# Generate a demo config for the double proxy. This sets up both an HTTP and HTTPS listeners,
# and backhauls the traffic to the main front proxy.
generate_config(SCRIPT_DIR,
- 'envoy_double_proxy_v2.template.yaml',
- '{}/envoy_double_proxy.v2.yaml'.format(OUT_DIR),
+ 'envoy_double_proxy.template.yaml',
+ '{}/envoy_double_proxy.yaml'.format(OUT_DIR),
tracing=tracing_enabled)
# Generate a demo config for the service to service (local) proxy. This sets up several different
@@ -132,14 +132,12 @@ def generate_config(template_path, template, output_file, **context):
# that Envoy proxies to listens on its own port.
# optional mongo ports: built from mongos_servers above.
generate_config(SCRIPT_DIR,
- 'envoy_service_to_service_v2.template.yaml',
+ 'envoy_service_to_service.template.yaml',
'{}/envoy_service_to_service.yaml'.format(OUT_DIR),
internal_virtual_hosts=service_to_service_envoy_clusters,
external_virtual_hosts=external_virtual_hosts,
mongos_servers=mongos_servers)
-for google_ext in ['v2.yaml']:
- shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR)
-
-shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.v3.yaml'), OUT_DIR)
-shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.v3.yaml'), OUT_DIR)
+shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.yaml'), OUT_DIR)
+shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.yaml'), OUT_DIR)
+shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.yaml'), OUT_DIR)
diff --git a/configs/configgen.sh b/configs/configgen.sh
index 2ef145c4af75..d68db9d46784 100755
--- a/configs/configgen.sh
+++ b/configs/configgen.sh
@@ -9,16 +9,20 @@ shift
mkdir -p "$OUT_DIR/certs"
mkdir -p "$OUT_DIR/lib"
+mkdir -p "$OUT_DIR/protos"
"$CONFIGGEN" "$OUT_DIR"
for FILE in "$@"; do
case "$FILE" in
- *.pem)
+ *.pem|*.der)
cp "$FILE" "$OUT_DIR/certs"
;;
*.lua)
cp "$FILE" "$OUT_DIR/lib"
;;
+ *.pb)
+ cp "$FILE" "$OUT_DIR/protos"
+ ;;
*)
FILENAME="$(echo "$FILE" | sed -e 's/.*examples\///g')"
@@ -29,4 +33,4 @@ for FILE in "$@"; do
done
# tar is having issues with -C for some reason so just cd into OUT_DIR.
-(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem lib/*.lua)
+(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem certs/*.der protos/*.pb lib/*.lua)
diff --git a/configs/encapsulate_in_connect.v3.yaml b/configs/encapsulate_in_connect.yaml
similarity index 100%
rename from configs/encapsulate_in_connect.v3.yaml
rename to configs/encapsulate_in_connect.yaml
diff --git a/configs/envoy_double_proxy_v2.template.yaml b/configs/envoy_double_proxy.template.yaml
similarity index 82%
rename from configs/envoy_double_proxy_v2.template.yaml
rename to configs/envoy_double_proxy.template.yaml
index feb9f3e1f95f..aea9127c74f6 100644
--- a/configs/envoy_double_proxy_v2.template.yaml
+++ b/configs/envoy_double_proxy.template.yaml
@@ -11,7 +11,7 @@
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificates:
- certificate_chain:
@@ -29,7 +29,7 @@
filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
stat_prefix: router
route_config:
@@ -47,24 +47,23 @@
http_filters:
- name: envoy.filters.http.health_check
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck
+ "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck
pass_through_mode: false
headers:
- exact_match: /healthcheck
name: :path
- name: envoy.filters.http.buffer
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer
+ "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer
max_request_bytes: 5242880
- name: envoy.filters.http.router
typed_config: {}
{% if tracing %}
tracing:
- operation_name: INGRESS
provider:
name: envoy.tracers.lightstep
typed_config:
- "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig
+ "@type": type.googleapis.com/envoy.config.trace.v3.LightstepConfig
access_token_file: "/etc/envoy/lightstep_access_token"
collector_cluster: lightstep_saas
{% endif %}
@@ -89,9 +88,10 @@
runtime_key: access_log.access_error.duration
- traceable_filter: {}
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: /var/log/envoy/access_error.log
- format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n"
+ log_format:
+ text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n"
{% if proxy_proto %}
use_remote_address: true
{%endif -%}
@@ -141,7 +141,7 @@ static_resources:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
tls_certificates:
- certificate_chain:
@@ -151,7 +151,7 @@ static_resources:
validation_context:
trusted_ca:
filename: certs/cacert.pem
- match_subject_alt_names:
+ match_subject_alt_names:
exact: "front-proxy.yourcompany.net"
http2_protocol_options: {}
- name: lightstep_saas
@@ -172,18 +172,18 @@ static_resources:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
validation_context:
trusted_ca:
filename: certs/cacert.pem
- match_subject_alt_names:
+ match_subject_alt_names:
exact: "collector-grpc.lightstep.com"
flags_path: "/etc/envoy/flags"
stats_sinks:
- name: envoy.stat_sinks.statsd
typed_config:
- "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink
+ "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink
tcp_cluster_name: statsd
layered_runtime:
layers:
diff --git a/configs/envoy_front_proxy_v2.template.yaml b/configs/envoy_front_proxy.template.yaml
similarity index 83%
rename from configs/envoy_front_proxy_v2.template.yaml
rename to configs/envoy_front_proxy.template.yaml
index a9b9bc97f859..1dcb1e6f919f 100644
--- a/configs/envoy_front_proxy_v2.template.yaml
+++ b/configs/envoy_front_proxy.template.yaml
@@ -1,4 +1,4 @@
-{% import 'routing_helper_v2.template.yaml' as helper -%}
+{% import 'routing_helper.template.yaml' as helper -%}
{% macro router_file_content() -%}{% include kwargs['router_file'] -%}{% endmacro -%}
{% macro listener(protocol, address, port_value, proxy_proto, tls, tracing) -%}
name: not_required_for_static_listeners
@@ -12,7 +12,7 @@
- transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
alpn_protocols: h2,http/1.1
tls_certificates:
@@ -35,7 +35,7 @@
filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
stat_prefix: router
{% if proxy_proto -%}
@@ -43,18 +43,18 @@
{%endif-%}
stat_prefix: ingress_http
route_config:
- {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }}
+ {{ router_file_content(router_file='envoy_router.template.yaml')|indent(10) }}
http_filters:
- name: envoy.filters.http.health_check
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck
+ "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck
pass_through_mode: false
headers:
- name: ":path"
exact_match: "/healthcheck"
- name: envoy.filters.http.buffer
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer
+ "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer
max_request_bytes: 5242880
- name: envoy.filters.http.ratelimit
typed_config:
@@ -70,11 +70,10 @@
add_user_agent: true
{% if tracing %}
tracing:
- operation_name: INGRESS
provider:
name: envoy.tracers.lightstep
typed_config:
- "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig
+ "@type": type.googleapis.com/envoy.config.trace.v3.LightstepConfig
collector_cluster: lightstep_saas
access_token_file: "/etc/envoy/lightstep_access_token"
{% endif %}
@@ -99,9 +98,10 @@
runtime_key: access_log.access_error.duration
- traceable_filter: {}
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/var/log/envoy/access_error.log"
- format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n"
+ log_format:
+ text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n"
{% endmacro -%}
static_resources:
listeners:
diff --git a/configs/envoy_router_v2.template.yaml b/configs/envoy_router.template.yaml
similarity index 93%
rename from configs/envoy_router_v2.template.yaml
rename to configs/envoy_router.template.yaml
index 0d09269b6cab..338363af6c8c 100644
--- a/configs/envoy_router_v2.template.yaml
+++ b/configs/envoy_router.template.yaml
@@ -1,4 +1,4 @@
-{% import 'routing_helper_v2.template.yaml' as helper with context -%}
+{% import 'routing_helper.template.yaml' as helper with context -%}
name: local_route
virtual_hosts:
- name: www
diff --git a/configs/envoy_service_to_service_v2.template.yaml b/configs/envoy_service_to_service.template.yaml
similarity index 89%
rename from configs/envoy_service_to_service_v2.template.yaml
rename to configs/envoy_service_to_service.template.yaml
index 31386c59bbf3..9237d117f035 100644
--- a/configs/envoy_service_to_service_v2.template.yaml
+++ b/configs/envoy_service_to_service.template.yaml
@@ -1,5 +1,5 @@
-{% import 'routing_helper_v2.template.yaml' as helper -%}
-{% import 'access_log_format_helper_v2.template.yaml' as access_log_helper -%}
+{% import 'routing_helper.template.yaml' as helper -%}
+{% import 'access_log_format_helper.template.yaml' as access_log_helper -%}
{% macro ingress_listener(protocol, address, port_value) -%}
- address:
socket_address:
@@ -11,7 +11,7 @@
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
stat_prefix: ingress_http
route_config:
@@ -35,7 +35,7 @@
http_filters:
- name: envoy.filters.http.health_check
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck
+ "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck
pass_through_mode: true
headers:
- name: ":path"
@@ -43,7 +43,7 @@
cache_time: 2.5s
- name: envoy.filters.http.buffer
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer
+ "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer
max_request_bytes: 5242880
- name: envoy.filters.http.router
typed_config: {}
@@ -52,7 +52,7 @@
filter:
not_health_check_filter: {}
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/var/log/envoy/ingress_http.log"
{{ access_log_helper.ingress_full()|indent(10)}}
- name: envoy.access_loggers.file
@@ -81,7 +81,7 @@
runtime_key: access_log.access_error.duration
- not_health_check_filter: {}
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/var/log/envoy/ingress_http_error.log"
{{ access_log_helper.ingress_sampled_log()|indent(10)}}
- name: envoy.access_loggers.file
@@ -92,7 +92,7 @@
- runtime_filter:
runtime_key: access_log.ingress_http
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/var/log/envoy/ingress_http_sampled.log"
{{ access_log_helper.ingress_sampled_log()|indent(10)}}
common_http_protocol_options:
@@ -111,7 +111,7 @@ static_resources:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
stat_prefix: egress_http
route_config:
@@ -149,7 +149,7 @@ static_resources:
runtime_key: access_log.access_error.duration
- traceable_filter: {}
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/var/log/envoy/egress_http_error.log"
{{ access_log_helper.egress_error_log()|indent(10) }}
use_remote_address: true
@@ -177,7 +177,7 @@ static_resources:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
stat_prefix: egress_http
rds:
@@ -210,7 +210,7 @@ static_resources:
runtime_key: access_log.access_error.duration
- traceable_filter: {}
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/var/log/envoy/egress_http_error.log"
{{ access_log_helper.egress_error_log()|indent(10) }}
use_remote_address: true
@@ -239,7 +239,7 @@ static_resources:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
common_http_protocol_options:
idle_timeout: 840s
@@ -259,7 +259,7 @@ static_resources:
retry_policy:
retry_on: connect-failure
{% if host.get('host_rewrite', False) %}
- host_rewrite: "{{host['host_rewrite']}}"
+ host_rewrite_literal: "{{host['host_rewrite']}}"
{% endif %}
{% endfor %}
http_filters:
@@ -295,7 +295,7 @@ static_resources:
runtime_key: access_log.access_error.duration
{% endif %}
typed_config:
- "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog
+ "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log"
{% if mapping.get('is_amzn_service', False) -%}
{{ access_log_helper.egress_error_amazon_service()|indent(10) }}
@@ -315,12 +315,12 @@ static_resources:
- filters:
- name: envoy.filters.network.tcp_proxy
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy
+ "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
stat_prefix: mongo_{{ key }}
cluster: mongo_{{ key }}
- name: envoy.filters.network.mongo_proxy
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.mongo_proxy.v2.MongoProxy
+ "@type": type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy
stat_prefix: "{{ key }}"
access_log: "/var/log/envoy/mongo_{{ key }}.log"
{% if value.get('ratelimit', False) %}
@@ -346,7 +346,7 @@ static_resources:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
validation_context:
trusted_ca:
@@ -413,7 +413,7 @@ static_resources:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
sni: www.main_website.com
- name: local_service
connect_timeout: 0.25s
@@ -456,8 +456,12 @@ static_resources:
connect_timeout: 0.25s
type: STRICT_DNS
lb_policy: ROUND_ROBIN
+ http2_protocol_options:
+ connection_keepalive:
+ interval: 30s
+ timeout: 5s
load_assignment:
- cluster_name: local_service_grpc
+ cluster_name: rds
endpoints:
- lb_endpoints:
- endpoint:
@@ -501,7 +505,7 @@ static_resources:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
validation_context:
trusted_ca:
@@ -548,9 +552,8 @@ flags_path: "/etc/envoy/flags"
stats_sinks:
- name: envoy.stat_sinks.statsd
typed_config:
- "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink
+ "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink
tcp_cluster_name: statsd
-watchdog: {}
layered_runtime:
layers:
- name: root
diff --git a/configs/freebind/freebind.yaml b/configs/freebind/freebind.yaml
index 08214b8b044d..367e5ba3568a 100644
--- a/configs/freebind/freebind.yaml
+++ b/configs/freebind/freebind.yaml
@@ -17,7 +17,7 @@ static_resources:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
route_config:
name: local_route
diff --git a/configs/google_com_proxy.v2.yaml b/configs/google_com_proxy.yaml
similarity index 81%
rename from configs/google_com_proxy.v2.yaml
rename to configs/google_com_proxy.yaml
index 53c26efc7c90..32e79bb306a9 100644
--- a/configs/google_com_proxy.v2.yaml
+++ b/configs/google_com_proxy.yaml
@@ -17,7 +17,7 @@ static_resources:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
route_config:
name: local_route
@@ -28,13 +28,13 @@ static_resources:
- match:
prefix: "/"
route:
- host_rewrite: www.google.com
+ host_rewrite_literal: www.google.com
cluster: service_google
http_filters:
- name: envoy.filters.http.router
clusters:
- name: service_google
- connect_timeout: 0.25s
+ connect_timeout: 30s
type: LOGICAL_DNS
# Comment out the following line to test on v6 networks
dns_lookup_family: V4_ONLY
@@ -51,5 +51,5 @@ static_resources:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
sni: www.google.com
diff --git a/configs/original-dst-cluster/proxy_config.yaml b/configs/original-dst-cluster/proxy_config.yaml
index 7ac1ea020fdd..b2e925957cda 100644
--- a/configs/original-dst-cluster/proxy_config.yaml
+++ b/configs/original-dst-cluster/proxy_config.yaml
@@ -8,7 +8,7 @@ static_resources:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
route_config:
name: local_service
@@ -35,7 +35,6 @@ static_resources:
lb_policy: CLUSTER_PROVIDED
dns_lookup_family: V4_ONLY
cluster_manager: {}
-watchdog: {}
admin:
access_log_path: /tmp/admin_access.log
address:
diff --git a/configs/routing_helper_v2.template.yaml b/configs/routing_helper.template.yaml
similarity index 100%
rename from configs/routing_helper_v2.template.yaml
rename to configs/routing_helper.template.yaml
diff --git a/configs/terminate_connect.v3.yaml b/configs/terminate_connect.yaml
similarity index 100%
rename from configs/terminate_connect.v3.yaml
rename to configs/terminate_connect.yaml
diff --git a/configs/using_deprecated_config.v2.yaml b/configs/using_deprecated_config.yaml
similarity index 89%
rename from configs/using_deprecated_config.v2.yaml
rename to configs/using_deprecated_config.yaml
index 55ca2797acb9..a98e64f365b9 100644
--- a/configs/using_deprecated_config.v2.yaml
+++ b/configs/using_deprecated_config.yaml
@@ -17,7 +17,7 @@ static_resources:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
- "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
route_config:
name: local_route
@@ -54,7 +54,7 @@ static_resources:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
- "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
sni: www.google.com
tracing:
http:
diff --git a/docs/BUILD b/docs/BUILD
index ead7bddb9a7f..aad5c89f0b65 100644
--- a/docs/BUILD
+++ b/docs/BUILD
@@ -1,3 +1,32 @@
+load(
+ "//bazel:envoy_build_system.bzl",
+ "envoy_package",
+)
+
licenses(["notice"]) # Apache 2
exports_files(["protodoc_manifest.yaml"])
+
+envoy_package()
+
+filegroup(
+ name = "configs",
+ srcs = glob(
+ [
+ "root/**/*.yaml",
+ "root/**/*.pb",
+ ],
+ exclude = [
+ # TODO(phlax/windows-dev): figure out how to get this working on windows
+ # "Error: unable to read file: /etc/ssl/certs/ca-certificates.crt"
+ "root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml",
+ "root/intro/arch_overview/security/_include/ssl.yaml",
+ ],
+ ) + select({
+ "//bazel:windows_x86_64": [],
+ "//conditions:default": [
+ "root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml",
+ "root/intro/arch_overview/security/_include/ssl.yaml",
+ ],
+ }),
+)
diff --git a/docs/README.md b/docs/README.md
index b672f51c8a4f..5cd5444d670b 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,21 +1,50 @@
-# Developer-local docs build
+# Building documentation locally
+
+There are two methods to build the documentation, described below.
+
+In both cases, the generated output can be found in `generated/docs`.
+
+## Building in an existing Envoy development environment
+
+If you have an [existing Envoy development environment](https://github.com/envoyproxy/envoy/tree/master/bazel#quick-start-bazel-build-for-developers), you should already have the necessary dependencies and be able to build the documentation directly.
```bash
./docs/build.sh
```
-The output can be found in `generated/docs`. By default configuration examples are going to be validated during build.
-To disable validation, set `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`:
+By default, configuration examples are validated during the build. To disable validation,
+set the `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`:
```bash
SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh
```
+## Using the Docker build container to build the documentation
+
+If you *do not* have an existing development environment, you may wish to use the Docker build
+image that is used in continuous integration.
+
+This can be done as follows:
+
+```bash
+./ci/run_envoy_docker.sh 'docs/build.sh'
+```
+
+To use this method you will need a minimum of 4-5GB of free disk space to accommodate the build image.
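+
+The environment variables described above also work inside the container; for example, to
+skip configuration validation when building via Docker (a sketch, not a documented CI flow):
+
+```bash
+./ci/run_envoy_docker.sh 'SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh'
+```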
+
+# Creating a Pull Request with documentation changes
+
+When you create a Pull Request, the documentation is rendered by Azure Pipelines.
+
+To view the rendered documentation:
+1. Open the docs job in Azure Pipelines.
+2. Navigate to the "Upload Docs to GCS" log.
+3. Click on the link there.
# How the Envoy website and docs are updated
1. The docs are published to [docs/envoy/latest](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy/latest)
- on every commit to master. This process is handled by CircleCI with the
+ on every commit to master. This process is handled by Azure Pipelines with the
[`publish.sh`](https://github.com/envoyproxy/envoy/blob/master/docs/publish.sh) script.
2. The docs are published to [docs/envoy](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy)
diff --git a/docs/build.sh b/docs/build.sh
index c715f4d5b8f7..c3f182a739c3 100755
--- a/docs/build.sh
+++ b/docs/build.sh
@@ -7,15 +7,20 @@
set -e
+RELEASE_TAG_REGEX="^refs/tags/v.*"
+
+if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
+ DOCS_TAG="${AZP_BRANCH/refs\/tags\//}"
+fi
+
# We need to set ENVOY_DOCS_VERSION_STRING and ENVOY_DOCS_RELEASE_LEVEL for Sphinx.
# We also validate that the tag and version match at this point if needed.
-if [ -n "$CIRCLE_TAG" ]
-then
+if [[ -n "${DOCS_TAG}" ]]; then
# Check the git tag matches the version number in the VERSION file.
VERSION_NUMBER=$(cat VERSION)
- if [ "v${VERSION_NUMBER}" != "${CIRCLE_TAG}" ]; then
+ if [[ "v${VERSION_NUMBER}" != "${DOCS_TAG}" ]]; then
echo "Given git tag does not match the VERSION file content:"
- echo "${CIRCLE_TAG} vs $(cat VERSION)"
+ echo "${DOCS_TAG} vs $(cat VERSION)"
exit 1
fi
# Check the version_history.rst contains current release version.
@@ -23,9 +28,9 @@ then
|| (echo "Git tag not found in version_history/current.rst" && exit 1)
# Now that we know there is a match, we can use the tag.
- export ENVOY_DOCS_VERSION_STRING="tag-$CIRCLE_TAG"
+ export ENVOY_DOCS_VERSION_STRING="tag-${DOCS_TAG}"
export ENVOY_DOCS_RELEASE_LEVEL=tagged
- export ENVOY_BLOB_SHA="$CIRCLE_TAG"
+ export ENVOY_BLOB_SHA="${DOCS_TAG}"
else
BUILD_SHA=$(git rev-parse HEAD)
VERSION_NUM=$(cat VERSION)
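As a quick cross-check of the tag handling introduced above, here is a minimal Python sketch of the same derivation and validation. The function name `derive_docs_tag` is illustrative; the regex, prefix strip, and VERSION comparison mirror the script.

```python
import re
from typing import Optional

RELEASE_TAG_REGEX = re.compile(r"^refs/tags/v.*")

def derive_docs_tag(azp_branch: str, version_file: str) -> Optional[str]:
    """Mirror of the build.sh logic: return the docs tag for release
    builds, None otherwise; raise if the tag and VERSION file disagree."""
    if not RELEASE_TAG_REGEX.match(azp_branch):
        return None  # non-release build; build.sh falls through to the SHA path
    docs_tag = azp_branch.replace("refs/tags/", "", 1)
    if f"v{version_file.strip()}" != docs_tag:
        raise ValueError(f"{docs_tag} vs {version_file.strip()}")
    return docs_tag

assert derive_docs_tag("refs/tags/v1.16.0", "1.16.0\n") == "v1.16.0"
assert derive_docs_tag("refs/heads/master", "1.16.0\n") is None
```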
@@ -49,7 +54,7 @@ rm -rf "${GENERATED_RST_DIR}"
mkdir -p "${GENERATED_RST_DIR}"
source_venv "$BUILD_DIR"
-pip3 install -r "${SCRIPT_DIR}"/requirements.txt
+pip3 install --require-hashes -r "${SCRIPT_DIR}"/requirements.txt
# Clean up any stale files in the API tree output. Bazel remembers valid cached
# files still.
@@ -76,7 +81,7 @@ mkdir -p "${GENERATED_RST_DIR}"/intro/arch_overview/security
./docs/generate_extension_rst.py "${EXTENSION_DB_PATH}" "${GENERATED_RST_DIR}"/intro/arch_overview/security
# Generate RST for external dependency docs in intro/arch_overview/security.
-./docs/generate_external_dep_rst.py "${GENERATED_RST_DIR}"/intro/arch_overview/security
+PYTHONPATH=. ./docs/generate_external_dep_rst.py "${GENERATED_RST_DIR}"/intro/arch_overview/security
function generate_api_rst() {
local proto_target
@@ -139,7 +144,12 @@ cp -f "${CONFIGS_DIR}"/google-vrp/envoy-edge.yaml "${GENERATED_RST_DIR}"/configu
rsync -rav "${API_DIR}/diagrams" "${GENERATED_RST_DIR}/api-docs"
-rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${SCRIPT_DIR}"/_ext "${GENERATED_RST_DIR}"
+rsync -av \
+ "${SCRIPT_DIR}"/root/ \
+ "${SCRIPT_DIR}"/conf.py \
+ "${SCRIPT_DIR}"/redirects.txt \
+ "${SCRIPT_DIR}"/_ext \
+ "${GENERATED_RST_DIR}"
# To speed up validate_fragment invocations in validating_code_block
bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/config_validation:validate_fragment
diff --git a/docs/conf.py b/docs/conf.py
index 1eb5725b689b..796519e06a4a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -71,8 +71,8 @@ def setup(app):
sys.path.append(os.path.abspath("./_ext"))
extensions = [
- 'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig',
- 'validating_code_block'
+ 'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', 'sphinx_tabs.tabs',
+ 'sphinx_copybutton', 'validating_code_block', 'sphinxext.rediraffe'
]
extlinks = {
'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''),
@@ -88,6 +88,9 @@ def setup(app):
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
+copybutton_prompt_text = r"\$ |PS>"
+copybutton_prompt_is_regexp = True
+
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
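The `copybutton_prompt_text` pattern added above tells sphinx-copybutton which leading prompts to strip when a reader copies a snippet. A rough illustration of the effect (not copybutton's actual implementation):

```python
import re

# Same pattern as copybutton_prompt_text: a Unix "$ " prompt or a
# PowerShell "PS>" prompt at the start of a copied line.
prompt = re.compile(r"\$ |PS>")

for line in ("$ docs/build.sh", "PS> docs\\build.ps1", "plain output"):
    # copybutton drops the matched prompt so only the command is copied.
    print(prompt.sub("", line, count=1))
```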
@@ -179,6 +182,7 @@ def setup(app):
# documentation.
html_theme_options = {
'logo_only': True,
+ 'includehidden': False,
}
# Add any paths that contain custom themes here, relative to this directory.
@@ -271,3 +275,8 @@ def setup(app):
# Output file base name for HTML help builder.
htmlhelp_basename = 'envoydoc'
+
+# TODO(phlax): add redirect diff (`rediraffe_branch` setting)
+# - not sure how diffing will work with master merging in PRs - might need
+# to be injected dynamically, somehow
+rediraffe_redirects = "redirects.txt"
diff --git a/docs/generate_external_dep_rst.py b/docs/generate_external_dep_rst.py
index 8c0de67572c8..07488d20831e 100755
--- a/docs/generate_external_dep_rst.py
+++ b/docs/generate_external_dep_rst.py
@@ -7,16 +7,7 @@
import sys
import urllib.parse
-from importlib.util import spec_from_loader, module_from_spec
-from importlib.machinery import SourceFileLoader
-
-# bazel/repository_locations.bzl must have a .bzl suffix for Starlark import, so
-# we are forced to do this workaround.
-_repository_locations_spec = spec_from_loader(
- 'repository_locations',
- SourceFileLoader('repository_locations', 'bazel/repository_locations.bzl'))
-repository_locations = module_from_spec(_repository_locations_spec)
-_repository_locations_spec.loader.exec_module(repository_locations)
+from tools.dependency import utils as dep_utils
# Render a CSV table given a list of table headers, widths and list of rows
@@ -40,7 +31,7 @@ def RstLink(text, url):
# NIST CPE database search URL for a given CPE.
def NistCpeUrl(cpe):
encoded_cpe = urllib.parse.quote(cpe)
- return 'https://nvd.nist.gov/products/cpe/search/results?keyword=%s&status=FINAL&orderBy=CPEURI&namingFormat=2.3' % encoded_cpe
+ return f'https://nvd.nist.gov/vuln/search/results?form_type=Advanced&results_type=overview&query={encoded_cpe}&search_type=all'
# Render version strings human readable.
@@ -52,13 +43,56 @@ def RenderVersion(version):
return version
+def RenderTitle(title):
+ underline = '~' * len(title)
+ return f'\n{title}\n{underline}\n\n'
+
+
+# Determine the version link URL. If it's a GitHub repo, use some heuristics to
+# figure out a release tag link, or otherwise point at the GitHub tree for the
+# respective SHA. If it's not a GitHub repo, return the tarball download URL.
+def GetVersionUrl(metadata):
+ # Figure out if it's a GitHub repo.
+ github_repo = None
+ github_version = None
+ for url in metadata['urls']:
+ if url.startswith('https://github.com/'):
+ components = url.split('/')
+ github_repo = f'https://github.com/{components[3]}/{components[4]}'
+ if components[5] == 'archive':
+ # Only support .tar.gz, .zip today. Figure out the release tag from this
+ # filename.
+ if components[6].endswith('.tar.gz'):
+ github_version = components[6][:-len('.tar.gz')]
+ else:
+ assert (components[6].endswith('.zip'))
+ github_version = components[6][:-len('.zip')]
+ else:
+ # Release tag is a path component.
+ assert (components[5] == 'releases')
+ github_version = components[7]
+ break
+ # If not, direct download link for tarball
+ download_url = metadata['urls'][0]
+ if not github_repo:
+ return download_url
+ # If it's not a GH hash, it's a tagged release.
+ tagged_release = len(metadata['version']) != 40
+ if tagged_release:
+ # The GitHub version should look like the metadata version, but might have
+ # something like a "v" prefix.
+ return f'{github_repo}/releases/tag/{github_version}'
+ assert (metadata['version'] == github_version)
+ return f'{github_repo}/tree/{github_version}'
+
+
if __name__ == '__main__':
security_rst_root = sys.argv[1]
- Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe'])
- use_categories = defaultdict(list)
+ Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe', 'last_updated'])
+ use_categories = defaultdict(lambda: defaultdict(list))
# Bin rendered dependencies into per-use category lists.
- for k, v in repository_locations.DEPENDENCY_REPOSITORIES.items():
+ for k, v in dep_utils.RepositoryLocations().items():
cpe = v.get('cpe', '')
if cpe == 'N/A':
cpe = ''
@@ -67,17 +101,23 @@ def RenderVersion(version):
project_name = v['project_name']
project_url = v['project_url']
name = RstLink(project_name, project_url)
- version = RstLink(RenderVersion(v['version']), v['urls'][0])
- dep = Dep(name, project_name.lower(), version, cpe)
+ version = RstLink(RenderVersion(v['version']), GetVersionUrl(v))
+ last_updated = v['last_updated']
+ dep = Dep(name, project_name.lower(), version, cpe, last_updated)
for category in v['use_category']:
- use_categories[category].append(dep)
+ for ext in v.get('extensions', ['core']):
+ use_categories[category][ext].append(dep)
def CsvRow(dep):
- return [dep.name, dep.version, dep.cpe]
+ return [dep.name, dep.version, dep.last_updated, dep.cpe]
# Generate per-use category RST with CSV tables.
- for category, deps in use_categories.items():
- output_path = pathlib.Path(security_rst_root, f'external_dep_{category}.rst')
- content = CsvTable(['Name', 'Version', 'CPE'], [2, 1, 2],
- [CsvRow(dep) for dep in sorted(deps, key=lambda d: d.sort_name)])
+ for category, exts in use_categories.items():
+ content = ''
+ for ext_name, deps in sorted(exts.items()):
+ if ext_name != 'core':
+ content += RenderTitle(ext_name)
+ output_path = pathlib.Path(security_rst_root, f'external_dep_{category}.rst')
+ content += CsvTable(['Name', 'Version', 'Last updated', 'CPE'], [2, 1, 1, 2],
+ [CsvRow(dep) for dep in sorted(deps, key=lambda d: d.sort_name)])
output_path.write_text(content)
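To make the `GetVersionUrl` heuristics above concrete, here is a hypothetical metadata entry of the GitHub-archive shape it handles (the repo name and values are made up for illustration):

```python
# Hypothetical dependency metadata in the shape GetVersionUrl expects.
metadata = {
    'version': '1.2.3',
    'urls': ['https://github.com/example/project/archive/v1.2.3.tar.gz'],
}
# Splitting the URL on '/' gives:
#   ['https:', '', 'github.com', 'example', 'project', 'archive', 'v1.2.3.tar.gz']
# so github_repo is 'https://github.com/example/project' and, since the
# filename ends with '.tar.gz', github_version is 'v1.2.3'. len('1.2.3') != 40,
# so this is treated as a tagged release and GetVersionUrl returns:
#   'https://github.com/example/project/releases/tag/v1.2.3'
```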
diff --git a/docs/publish.sh b/docs/publish.sh
index 498a68d0f45e..11b75f1b77c9 100755
--- a/docs/publish.sh
+++ b/docs/publish.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-# This is run on every commit that CircleCI picks up. It assumes that docs have already been built
+# This is run on every commit that Azure Pipelines picks up. It assumes that docs have already been built
# via docs/build.sh. The push behavior differs depending on the nature of the commit:
# * Tag commit (e.g. v1.6.0): pushes docs to versioned location, e.g.
# https://www.envoyproxy.io/docs/envoy/v1.6.0/.
@@ -10,35 +10,36 @@
set -e
DOCS_DIR=generated/docs
-CHECKOUT_DIR=../envoy-docs
+CHECKOUT_DIR=envoy-docs
BUILD_SHA=$(git rev-parse HEAD)
-if [ -n "$CIRCLE_TAG" ]
-then
- PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/"$CIRCLE_TAG"
-elif [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ]
-then
- PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/latest
+MAIN_BRANCH="refs/heads/master"
+RELEASE_TAG_REGEX="^refs/tags/v.*"
+
+if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
+ PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/"${AZP_BRANCH/refs\/tags\//}"
+elif [[ "$AZP_BRANCH" == "${MAIN_BRANCH}" ]]; then
+ PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/latest
else
echo "Ignoring docs push"
exit 0
fi
+DOCS_MAIN_BRANCH="master"
+
echo 'cloning'
-git clone git@github.com:envoyproxy/envoyproxy.github.io "$CHECKOUT_DIR"
+git clone git@github.com:envoyproxy/envoyproxy.github.io "${CHECKOUT_DIR}" -b "${DOCS_MAIN_BRANCH}" --depth 1
-git -C "$CHECKOUT_DIR" fetch
-git -C "$CHECKOUT_DIR" checkout -B master origin/master
rm -fr "$PUBLISH_DIR"
mkdir -p "$PUBLISH_DIR"
cp -r "$DOCS_DIR"/* "$PUBLISH_DIR"
-cd "$CHECKOUT_DIR"
+cd "${CHECKOUT_DIR}"
-git config user.name "envoy-docs(travis)"
+git config user.name "envoy-docs(Azure Pipelines)"
git config user.email envoy-docs@users.noreply.github.com
-echo 'add'
+
+set -x
+
git add .
-echo 'commit'
git commit -m "docs envoy@$BUILD_SHA"
-echo 'push'
-git push origin master
+git push origin "${DOCS_MAIN_BRANCH}"
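The publishing destination above reduces to three cases; a minimal Python sketch of the mapping (`publish_dir` is an illustrative name):

```python
import re
from typing import Optional

RELEASE_TAG_REGEX = re.compile(r"^refs/tags/v.*")
MAIN_BRANCH = "refs/heads/master"

def publish_dir(azp_branch: str, checkout_dir: str = "envoy-docs") -> Optional[str]:
    """Mirror of publish.sh: a versioned directory for release tags,
    'latest' for master, and None (skip publishing) for anything else."""
    if RELEASE_TAG_REGEX.match(azp_branch):
        tag = azp_branch.replace("refs/tags/", "", 1)
        return f"{checkout_dir}/docs/envoy/{tag}"
    if azp_branch == MAIN_BRANCH:
        return f"{checkout_dir}/docs/envoy/latest"
    return None

assert publish_dir("refs/tags/v1.16.0") == "envoy-docs/docs/envoy/v1.16.0"
assert publish_dir("refs/heads/master") == "envoy-docs/docs/envoy/latest"
assert publish_dir("refs/heads/feature-x") is None
```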
diff --git a/docs/redirects.txt b/docs/redirects.txt
new file mode 100644
index 000000000000..87047ab3f3b5
--- /dev/null
+++ b/docs/redirects.txt
@@ -0,0 +1,11 @@
+intro/arch_overview/http/websocket.rst intro/arch_overview/http/upgrades.rst
+configuration/observability/access_log.rst intro/arch_overview/observability/access_logging.rst
+
+install/building.rst start/building.rst
+install/ref_configs.rst start/install/ref_configs.rst
+install/sandboxes/local_docker_build.rst start/install/sandboxes/local_docker_build.rst
+install/tools/config_load_check_tool.rst start/install/tools/config_load_check_tool.rst
+install/tools/route_table_check_tool.rst start/install/tools/route_table_check_tool.rst
+install/tools/schema_validator_check_tool.rst start/install/tools/schema_validator_check_tool.rst
+install/tools/tools.rst start/install/tools/tools.rst
+install/install.rst start/start.rst
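Each line of redirects.txt is a whitespace-separated `old-path new-path` pair consumed by sphinxext-rediraffe. A quick sanity check one might run over the file (illustrative, not part of the build):

```python
from pathlib import Path

def parse_redirects(path: str) -> dict:
    """Parse rediraffe-style 'old new' lines, skipping blank lines."""
    redirects = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        old, new = line.split()
        redirects[old] = new
    return redirects

redirects = parse_redirects("docs/redirects.txt")
# Redirect targets should not themselves be redirected again.
for old, new in redirects.items():
    assert new not in redirects, f"chained redirect: {old} -> {new}"
```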
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 584d3ba990b2..c8e98061b50e 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,17 +1,127 @@
-alabaster==0.7.12
-Babel==2.8.0
-docutils==0.16
-gitdb==4.0.5
-GitPython==3.1.7
-imagesize==1.2.0
-Jinja2==2.11.2
-MarkupSafe==1.1.1
-Pygments==2.6.1
-pytz==2020.1
-requests>=2.24.0
-six==1.15.0
-smmap==3.0.4
-snowballstemmer==2.0.0
-sphinx_rtd_theme==0.5.0
-Sphinx==3.2.1
-sphinxcontrib-httpdomain==1.7.0
+alabaster==0.7.12 \
+ --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
+ --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02
+Babel==2.8.0 \
+ --hash=sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38 \
+ --hash=sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4
+certifi==2020.6.20 \
+ --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
+ --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41
+chardet==3.0.4 \
+ --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
+ --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691
+docutils==0.16 \
+ --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
+ --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc
+gitdb==4.0.5 \
+ --hash=sha256:91f36bfb1ab7949b3b40e23736db18231bf7593edada2ba5c3a174a7b23657ac \
+ --hash=sha256:c9e1f2d0db7ddb9a704c2a0217be31214e91a4fe1dea1efad19ae42ba0c285c9
+GitPython==3.1.8 \
+ --hash=sha256:080bf8e2cf1a2b907634761c2eaefbe83b69930c94c66ad11b65a8252959f912 \
+ --hash=sha256:1858f4fd089abe92ae465f01d5aaaf55e937eca565fb2c1fce35a51b5f85c910
+idna==2.10 \
+ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
+ --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0
+imagesize==1.2.0 \
+ --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \
+ --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1
+Jinja2==2.11.2 \
+ --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \
+ --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035
+MarkupSafe==1.1.1 \
+ --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \
+ --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \
+ --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \
+ --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \
+ --hash=sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42 \
+ --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \
+ --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \
+ --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \
+ --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \
+ --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \
+ --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \
+ --hash=sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b \
+ --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \
+ --hash=sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15 \
+ --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \
+ --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \
+ --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \
+ --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \
+ --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \
+ --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \
+ --hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \
+ --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \
+ --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \
+ --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \
+ --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \
+ --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \
+ --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \
+ --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \
+ --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \
+ --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \
+ --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \
+ --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \
+ --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be
+packaging==20.4 \
+ --hash=sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8 \
+ --hash=sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181
+Pygments==2.7.1 \
+ --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
+ --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7
+pyparsing==2.4.7 \
+ --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \
+ --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b
+pytz==2020.1 \
+ --hash=sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed \
+ --hash=sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048
+requests==2.24.0 \
+ --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b \
+ --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898
+six==1.15.0 \
+ --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
+ --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced
+smmap==3.0.4 \
+ --hash=sha256:54c44c197c819d5ef1991799a7e30b662d1e520f2ac75c9efbeb54a742214cf4 \
+ --hash=sha256:9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24
+snowballstemmer==2.0.0 \
+ --hash=sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0 \
+ --hash=sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52
+Sphinx==3.2.1 \
+ --hash=sha256:321d6d9b16fa381a5306e5a0b76cd48ffbc588e6340059a729c6fdd66087e0e8 \
+ --hash=sha256:ce6fd7ff5b215af39e2fcd44d4a321f6694b4530b6f2b2109b64d120773faea0
+sphinx-copybutton==0.3.0 \
+ --hash=sha256:4becad3a1e7c50211f1477e34fd4b6d027680e1612f497cb5b88cf85bccddaaa \
+ --hash=sha256:4cd06afd0588aa43eba968bfc6105e1ec6546c50a51f880af1d89afaebc6fb58
+sphinx-rtd-theme==0.5.0 \
+ --hash=sha256:22c795ba2832a169ca301cd0a083f7a434e09c538c70beb42782c073651b707d \
+ --hash=sha256:373413d0f82425aaa28fb288009bf0d0964711d347763af2f1b65cafcb028c82
+sphinx-tabs==1.3.0 \
+ --hash=sha256:537857f91f1b371f7b45eb8ac83001618b3e3178c78df073d2cc4558a8e66ef5 \
+ --hash=sha256:54132c8a57aa19bba6e17fe26eb94ea9df531708ff3f509b119313b32d0d5aff
+sphinxcontrib-applehelp==1.0.2 \
+ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
+ --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-devhelp==1.0.2 \
+ --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \
+ --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4
+sphinxcontrib-htmlhelp==1.0.3 \
+ --hash=sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f \
+ --hash=sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b
+sphinxcontrib-httpdomain==1.7.0 \
+ --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \
+ --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335
+sphinxcontrib-jsmath==1.0.1 \
+ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
+ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
+sphinxcontrib-qthelp==1.0.3 \
+ --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \
+ --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6
+sphinxext-rediraffe==0.2.4 \
+ --hash=sha256:5428fb614d1fbc16964ba587aaa6b1c8ec92fd0b1d01bb6b369637446f43a27d \
+ --hash=sha256:13e6474342df6643723976a3429edfc5e811e9f48b9f832c9fb6bdd9fe53fd83
+sphinxcontrib-serializinghtml==1.1.4 \
+ --hash=sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc \
+ --hash=sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a
+urllib3==1.25.10 \
+ --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \
+ --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461
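With `--require-hashes` (see the build.sh change above), pip refuses any requirement without a pinned hash and verifies every downloaded artifact against the listed digests. Conceptually the check is a SHA-256 comparison, sketched below (illustration only; pip's actual implementation differs):

```python
import hashlib

def artifact_matches(artifact: bytes, pinned_sha256: set) -> bool:
    """True if the artifact's SHA-256 digest matches one of the
    '--hash=sha256:...' entries pinned for that requirement."""
    return hashlib.sha256(artifact).hexdigest() in pinned_sha256

# For alabaster==0.7.12 the pinned set would be its two digests above;
# any artifact hashing to something else aborts the install.
```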
diff --git a/docs/root/_include/ssl_stats.rst b/docs/root/_include/ssl_stats.rst
new file mode 100644
index 000000000000..93f9b247a67e
--- /dev/null
+++ b/docs/root/_include/ssl_stats.rst
@@ -0,0 +1,20 @@
+.. csv-table::
+ :header: Name, Type, Description
+ :widths: 1, 1, 2
+
+ connection_error, Counter, Total TLS connection errors not including failed certificate verifications
+ handshake, Counter, Total successful TLS connection handshakes
+ session_reused, Counter, Total successful TLS session resumptions
+ no_certificate, Counter, Total successful TLS connections with no client certificate
+ fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate
+ fail_verify_error, Counter, Total TLS connections that failed CA verification
+ fail_verify_san, Counter, Total TLS connections that failed SAN verification
+ fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification
+ ocsp_staple_failed, Counter, Total TLS connections that failed compliance with the OCSP policy
+ ocsp_staple_omitted, Counter, Total TLS connections that succeeded without stapling an OCSP response
+ ocsp_staple_responses, Counter, Total TLS connections where a valid OCSP response was available (irrespective of whether the client requested stapling)
+ ocsp_staple_requests, Counter, Total TLS connections where the client requested an OCSP staple
+ ciphers.<cipher>, Counter, Total successful TLS connections that used cipher <cipher>
+ curves.<curve>, Counter, Total successful TLS connections that used ECDHE curve <curve>
+ sigalgs.<sigalg>, Counter, Total successful TLS connections that used signature algorithm <sigalg>
+ versions.<version>, Counter, Total successful TLS connections that used protocol version <version>
diff --git a/docs/root/_static/css/envoy.css b/docs/root/_static/css/envoy.css
index c65a71f05262..8021e5df6f21 100644
--- a/docs/root/_static/css/envoy.css
+++ b/docs/root/_static/css/envoy.css
@@ -14,3 +14,12 @@ table.docutils div.line-block {
overflow-wrap: break-word;
max-width: 1000px;
}
+
+/* To style the API version label of a search result item */
+.api-version-label {
+ border-radius: 20%;
+ background-color: #c0c0c0;
+ color: #ffffff;
+ margin-left: 4px;
+ padding: 4px;
+}
diff --git a/docs/root/_static/searchtools.js b/docs/root/_static/searchtools.js
index bd46e53c3501..4c46c2de9a61 100644
--- a/docs/root/_static/searchtools.js
+++ b/docs/root/_static/searchtools.js
@@ -9,6 +9,9 @@
*
*/
+// Modified from https://raw.githubusercontent.com/sphinx-doc/sphinx/3.x/sphinx/themes/basic/static/searchtools.js
+// to add renderApiVersionLabel, which renders the API version for each search result item.
+
if (!Scorer) {
/**
* Simple result scoring code.
@@ -249,6 +252,16 @@ var Search = {
//Search.lastresults = results.slice(); // a copy
//console.info('search results:', Search.lastresults);
+ // renderApiVersionLabel renders API version for each search result item.
+ function renderApiVersionLabel(linkUrl) {
+ const filtered = linkUrl
+ .split("/")
+ .filter((part) => part.startsWith("api-v"));
+ return filtered.length === 1
+ ? ' <span class="api-version-label">' + filtered.pop() + "</span>"
+ : "";
+ }
+
// print the results
var resultCount = results.length;
function displayNextItem() {
@@ -281,6 +294,10 @@ var Search = {
.attr("href", linkUrl + highlightstring + item[2])
.html(item[1])
);
+ var apiVersion = renderApiVersionLabel(linkUrl);
+ if (apiVersion !== "") {
+ listItem.append(apiVersion);
+ }
if (item[3]) {
listItem.append($("<span> (" + item[3] + ")</span>"));
Search.output.append(listItem);
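The label is derived purely from the result's URL: any path component starting with `api-v` (e.g. `api-v3`) becomes the version badge, and URLs without exactly one such component get no badge. The same extraction in Python, for clarity (illustrative):

```python
def api_version_label(link_url: str) -> str:
    """Mirror of renderApiVersionLabel: return the single 'api-v*' path
    component, or an empty string when there isn't exactly one."""
    parts = [p for p in link_url.split("/") if p.startswith("api-v")]
    return parts[0] if len(parts) == 1 else ""

assert api_version_label("api-v3/config/bootstrap/v3/bootstrap.proto.html") == "api-v3"
assert api_version_label("intro/arch_overview/security/ssl.html") == ""
```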
diff --git a/docs/root/about_docs.rst b/docs/root/about_docs.rst
index 317639fc61f9..7bab46859642 100644
--- a/docs/root/about_docs.rst
+++ b/docs/root/about_docs.rst
@@ -16,3 +16,4 @@ The Envoy documentation is composed of a few major sections:
* :ref:`Extending Envoy `: Information on how to write custom filters for Envoy.
* :ref:`API reference `: Envoy API detailed reference.
* :ref:`Envoy FAQ `: Have questions? We have answers. Hopefully.
+* :ref:`Version history `: Per-version release notes.
diff --git a/docs/root/api-v2/service/service.rst b/docs/root/api-v2/service/service.rst
index 951c00d28972..e0357a1c2331 100644
--- a/docs/root/api-v2/service/service.rst
+++ b/docs/root/api-v2/service/service.rst
@@ -7,6 +7,7 @@ Services
accesslog/v2/*
load_stats/v2/*
+ auth/v2/*
discovery/v2/*
metrics/v2/*
ratelimit/v2/*
diff --git a/docs/root/api-v3/bootstrap/bootstrap.rst b/docs/root/api-v3/bootstrap/bootstrap.rst
index d2397a9bf2ac..51d7b817c66d 100644
--- a/docs/root/api-v3/bootstrap/bootstrap.rst
+++ b/docs/root/api-v3/bootstrap/bootstrap.rst
@@ -10,3 +10,4 @@ Bootstrap
../config/metrics/v3/metrics_service.proto
../config/overload/v3/overload.proto
../config/ratelimit/v3/rls.proto
+ ../extensions/wasm/v3/wasm.proto
diff --git a/docs/root/api-v3/config/wasm/wasm.rst b/docs/root/api-v3/config/wasm/wasm.rst
index efdb96212478..a2f03f3304bb 100644
--- a/docs/root/api-v3/config/wasm/wasm.rst
+++ b/docs/root/api-v3/config/wasm/wasm.rst
@@ -6,3 +6,4 @@ WASM
:maxdepth: 2
../../extensions/wasm/v3/*
+ ../../extensions/stat_sinks/wasm/v3/*
diff --git a/docs/root/api-v3/config/watchdog/watchdog.rst b/docs/root/api-v3/config/watchdog/watchdog.rst
index 60f284384d59..f5906b3390d3 100644
--- a/docs/root/api-v3/config/watchdog/watchdog.rst
+++ b/docs/root/api-v3/config/watchdog/watchdog.rst
@@ -6,3 +6,4 @@ Watchdog
:maxdepth: 2
../../extensions/watchdog/profile_action/v3alpha/*
+ ../../watchdog/v3alpha/*
diff --git a/docs/root/api/api_supported_versions.rst b/docs/root/api/api_supported_versions.rst
index 89a6cb181ea6..93b9bdeb45c3 100644
--- a/docs/root/api/api_supported_versions.rst
+++ b/docs/root/api/api_supported_versions.rst
@@ -8,7 +8,7 @@ multiple major API versions at any point in time. The following versions are cur
* :ref:`v2 xDS API ` (*deprecated*, end-of-life EOY 2020). This API will not
accept new features after the end of Q1 2020.
-* :ref:`v3 xDS API ` (*active*, end-of-life EOY 2021). Envoy developers and
+* :ref:`v3 xDS API ` (*active*, end-of-life unknown). Envoy developers and
operators are encouraged to be actively adopting and working with v3 xDS.
The following API versions are no longer supported by Envoy:
diff --git a/docs/root/configuration/best_practices/_include/edge.yaml b/docs/root/configuration/best_practices/_include/edge.yaml
new file mode 100644
index 000000000000..958a231610f9
--- /dev/null
+++ b/docs/root/configuration/best_practices/_include/edge.yaml
@@ -0,0 +1,102 @@
+overload_manager:
+ refresh_interval: 0.25s
+ resource_monitors:
+ - name: "envoy.resource_monitors.fixed_heap"
+ typed_config:
+ "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig
+ # TODO: Tune for your system.
+ max_heap_size_bytes: 2147483648 # 2 GiB
+ actions:
+ - name: "envoy.overload_actions.shrink_heap"
+ triggers:
+ - name: "envoy.resource_monitors.fixed_heap"
+ threshold:
+ value: 0.95
+ - name: "envoy.overload_actions.stop_accepting_requests"
+ triggers:
+ - name: "envoy.resource_monitors.fixed_heap"
+ threshold:
+ value: 0.98
+
+admin:
+ access_log_path: "/var/log/envoy_admin.log"
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 9090
+
+static_resources:
+ listeners:
+ - address:
+ socket_address:
+ address: 0.0.0.0
+ port_value: 443
+ listener_filters:
+ - name: "envoy.filters.listener.tls_inspector"
+ typed_config: {}
+ per_connection_buffer_limit_bytes: 32768 # 32 KiB
+ filter_chains:
+ - filter_chain_match:
+ server_names: ["example.com", "www.example.com"]
+ transport_socket:
+ name: envoy.transport_sockets.tls
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
+ common_tls_context:
+ tls_certificates:
+ - certificate_chain: { filename: "certs/servercert.pem" }
+ private_key: { filename: "certs/serverkey.pem" }
+ # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol.
+ # use_proxy_proto: true
+ filters:
+ - name: envoy.filters.network.http_connection_manager
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+ stat_prefix: ingress_http
+ use_remote_address: true
+ common_http_protocol_options:
+ idle_timeout: 3600s # 1 hour
+ headers_with_underscores_action: REJECT_REQUEST
+ http2_protocol_options:
+ max_concurrent_streams: 100
+ initial_stream_window_size: 65536 # 64 KiB
+ initial_connection_window_size: 1048576 # 1 MiB
+ stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests
+ request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests
+ route_config:
+ virtual_hosts:
+ - name: default
+ domains: "*"
+ routes:
+ - match: { prefix: "/" }
+ route:
+ cluster: service_foo
+ idle_timeout: 15s # must be disabled for long-lived and streaming requests
+ clusters:
+ name: service_foo
+ connect_timeout: 15s
+ per_connection_buffer_limit_bytes: 32768 # 32 KiB
+ load_assignment:
+ cluster_name: some_service
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 8080
+ http2_protocol_options:
+ initial_stream_window_size: 65536 # 64 KiB
+ initial_connection_window_size: 1048576 # 1 MiB
+
+layered_runtime:
+ layers:
+ - name: static_layer_0
+ static_layer:
+ envoy:
+ resource_limits:
+ listener:
+ example_listener_name:
+ connection_limit: 10000
+ overload:
+ global_downstream_max_connections: 50000
diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst
index fc717a5f9235..d61c4684c71a 100644
--- a/docs/root/configuration/best_practices/edge.rst
+++ b/docs/root/configuration/best_practices/edge.rst
@@ -30,107 +30,5 @@ HTTP proxies should additionally configure:
The following is a YAML example of the above recommendation (taken from the :ref:`Google VRP
` edge server configuration):
-.. code-block:: yaml
-
- overload_manager:
- refresh_interval: 0.25s
- resource_monitors:
- - name: "envoy.resource_monitors.fixed_heap"
- typed_config:
- "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig
- # TODO: Tune for your system.
- max_heap_size_bytes: 2147483648 # 2 GiB
- actions:
- - name: "envoy.overload_actions.shrink_heap"
- triggers:
- - name: "envoy.resource_monitors.fixed_heap"
- threshold:
- value: 0.95
- - name: "envoy.overload_actions.stop_accepting_requests"
- triggers:
- - name: "envoy.resource_monitors.fixed_heap"
- threshold:
- value: 0.98
-
- admin:
- access_log_path: "/var/log/envoy_admin.log"
- address:
- socket_address:
- address: 127.0.0.1
- port_value: 9090
-
- static_resources:
- listeners:
- - address:
- socket_address:
- address: 0.0.0.0
- port_value: 443
- listener_filters:
- - name: "envoy.filters.listener.tls_inspector"
- typed_config: {}
- per_connection_buffer_limit_bytes: 32768 # 32 KiB
- filter_chains:
- - filter_chain_match:
- server_names: ["example.com", "www.example.com"]
- transport_socket:
- name: envoy.transport_sockets.tls
- typed_config:
- "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
- common_tls_context:
- tls_certificates:
- - certificate_chain: { filename: "example_com_cert.pem" }
- private_key: { filename: "example_com_key.pem" }
- # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol.
- # use_proxy_proto: true
- filters:
- - name: envoy.filters.network.http_connection_manager
- typed_config:
- "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
- stat_prefix: ingress_http
- use_remote_address: true
- common_http_protocol_options:
- idle_timeout: 3600s # 1 hour
- headers_with_underscores_action: REJECT_REQUEST
- http2_protocol_options:
- max_concurrent_streams: 100
- initial_stream_window_size: 65536 # 64 KiB
- initial_connection_window_size: 1048576 # 1 MiB
- stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests
- request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests
- route_config:
- virtual_hosts:
- - name: default
- domains: "*"
- routes:
- - match: { prefix: "/" }
- route:
- cluster: service_foo
- idle_timeout: 15s # must be disabled for long-lived and streaming requests
- clusters:
- name: service_foo
- connect_timeout: 15s
- per_connection_buffer_limit_bytes: 32768 # 32 KiB
- load_assignment:
- cluster_name: some_service
- endpoints:
- - lb_endpoints:
- - endpoint:
- address:
- socket_address:
- address: 127.0.0.1
- port_value: 8080
- http2_protocol_options:
- initial_stream_window_size: 65536 # 64 KiB
- initial_connection_window_size: 1048576 # 1 MiB
-
- layered_runtime:
- layers:
- - name: static_layer_0
- static_layer:
- envoy:
- resource_limits:
- listener:
- example_listener_name:
- connection_limit: 10000
- overload:
- global_downstream_max_connections: 50000
+.. literalinclude:: _include/edge.yaml
+ :language: yaml
diff --git a/docs/root/configuration/http/http_conn_man/http_conn_man.rst b/docs/root/configuration/http/http_conn_man/http_conn_man.rst
index a726c3983a7a..d4faa90c267d 100644
--- a/docs/root/configuration/http/http_conn_man/http_conn_man.rst
+++ b/docs/root/configuration/http/http_conn_man/http_conn_man.rst
@@ -13,6 +13,7 @@ HTTP connection manager
headers
header_sanitizing
local_reply
+ response_code_details
stats
runtime
rds
diff --git a/docs/root/configuration/http/http_conn_man/local_reply.rst b/docs/root/configuration/http/http_conn_man/local_reply.rst
index 5b87d9e3ef5c..d7649f0f4eaa 100644
--- a/docs/root/configuration/http/http_conn_man/local_reply.rst
+++ b/docs/root/configuration/http/http_conn_man/local_reply.rst
@@ -49,6 +49,8 @@ The response body content type can be customized. If not specified, the content
Local reply format can be specified as :ref:`SubstitutionFormatString `. It supports :ref:`text_format ` and :ref:`json_format `.
+Optionally, the content-type can be modified further via the :ref:`content_type ` field. If not specified, the default content-type is `text/plain` for :ref:`text_format ` and `application/json` for :ref:`json_format `.
+
Example of a LocalReplyConfig with `body_format` field.
.. code-block::
@@ -63,7 +65,8 @@ Example of a LocalReplyConfig with `body_format` field.
runtime_key: key_b
status_code: 401
body_format_override:
- text_format: "%LOCAL_REPLY_BODY% %REQ(:path)%"
+ text_format: "%LOCAL_REPLY_BODY% %REQ(:path)%\n"
+ content_type: "text/html; charset=UTF-8"
- filter:
status_code_filter:
comparison:
diff --git a/docs/root/configuration/http/http_conn_man/response_code_details.rst b/docs/root/configuration/http/http_conn_man/response_code_details.rst
new file mode 100644
index 000000000000..350c0767f93f
--- /dev/null
+++ b/docs/root/configuration/http/http_conn_man/response_code_details.rst
@@ -0,0 +1,101 @@
+.. _config_http_conn_man_details:
+
+Response Code Details
+=====================
+
+If *%RESPONSE_CODE_DETAILS%* is configured via :ref:`access logging`,
+or :ref:`custom headers`, Envoy will communicate the detailed
+reason a given stream ended.
+This page lists the details sent by the HttpConnectionManager, Router filter, and codecs. It is not comprehensive, as
+other filters may send their own local replies with custom details.
+
+Below is the list of reasons the HttpConnectionManager or Router filter may send responses or reset streams.
+
+.. warning::
+ The following list is not guaranteed to be stable, since the details are subject to change.
+
+.. csv-table::
+ :header: Name, Description
+ :widths: 1, 2
+
+ absolute_path_rejected, The request was rejected due to using an absolute path on a route that does not support absolute paths.
+ admin_filter_response, The response was generated by the admin filter.
+ cluster_not_found, The request was rejected by the router filter because there was no cluster found for the selected route.
+ downstream_local_disconnect, The client connection was locally closed for an unspecified reason.
+ downstream_remote_disconnect, The client disconnected unexpectedly.
+ duration_timeout, The max connection duration was exceeded.
+ direct_response, A direct response was generated by the router filter.
+ filter_chain_not_found, The request was rejected due to no matching filter chain.
+ internal_redirect, The original stream was replaced with an internal redirect.
+ low_version, The HTTP/1.0 or HTTP/0.9 request was rejected due to HTTP/1.0 support not being configured.
+ maintenance_mode, The request was rejected by the router filter because the cluster was in maintenance mode.
+ max_duration_timeout, The per-stream max duration timeout was exceeded.
+ missing_host_header, The request was rejected due to a missing Host: or :authority field.
+ missing_path_rejected, The request was rejected due to a missing Path or :path header field.
+ no_healthy_upstream, The request was rejected by the router filter because there was no healthy upstream found.
+ overload, The request was rejected due to the Overload Manager reaching configured resource limits.
+ path_normalization_failed, "The request was rejected because path normalization was configured on and failed, probably due to an invalid path."
+ request_headers_failed_strict_check, The request was rejected due to x-envoy-* headers failing strict header validation.
+ request_overall_timeout, The per-stream total request timeout was exceeded.
+ request_payload_exceeded_retry_buffer_limit, Envoy is doing streaming proxying but too much data arrived while waiting to attempt a retry.
+ request_payload_too_large, Envoy is doing non-streaming proxying and the request payload exceeded configured limits.
+ response_payload_too_large, Envoy is doing non-streaming proxying and the response payload exceeded configured limits.
+ route_configuration_not_found, The request was rejected because there was no route configuration found.
+ route_not_found, The request was rejected because there was no route found.
+ stream_idle_timeout, The per-stream keepalive timeout was exceeded.
+ upgrade_failed, The request was rejected because it attempted an unsupported upgrade.
+ upstream_max_stream_duration_reached, The request was destroyed because it exceeded the configured max stream duration.
+ upstream_per_try_timeout, The final upstream try timed out.
+ upstream_reset_after_response_started{details}, The upstream connection was reset after a response was started. This may include further details about the cause of the disconnect.
+ upstream_reset_before_response_started{details}, The upstream connection was reset before a response was started. This may include further details about the cause of the disconnect.
+ upstream_response_timeout, The upstream response timed out.
+ via_upstream, The response code was set by the upstream.
+
+
+.. _config_http_conn_man_details_per_codec:
+
+Per codec details
+-----------------
+
+Each codec may send codec-specific details when encountering errors.
+
+Http1 details
+~~~~~~~~~~~~~
+
+All http1 details are rooted at *http1.*
+
+.. csv-table::
+ :header: Name, Description
+ :widths: 1, 2
+
+ http1.body_disallowed, A body was sent on a request where bodies are not allowed.
+ http1.codec_error, Some error was encountered in the http_parser internals.
+ http1.connection_header_rejected, The Connection header was malformed or overly long.
+ http1.content_length_and_chunked_not_allowed, A request was sent with both Transfer-Encoding: chunked and a Content-Length header when disallowed by configuration.
+ http1.content_length_not_allowed, A content length was sent on a response it was disallowed on.
+ http1.headers_too_large, The overall byte size of request headers was larger than the configured limits.
+ http1.invalid_characters, The headers contained illegal characters.
+ http1.invalid_transfer_encoding, The Transfer-Encoding header was not valid.
+ http1.invalid_url, The request URL was not valid.
+ http1.too_many_headers, Too many headers were sent with this request.
+ http1.transfer_encoding_not_allowed, A transfer encoding was sent on a response it was disallowed on.
+ http1.unexpected_underscore, An underscore was sent in a header key when disallowed by configuration.
+
+
+Http2 details
+~~~~~~~~~~~~~
+
+All http2 details are rooted at *http2.*
+
+.. csv-table::
+ :header: Name, Description
+ :widths: 1, 2
+
+ http2.inbound_empty_frames_flood, Envoy detected an inbound HTTP/2 frame flood.
+ http2.invalid.header.field, One of the HTTP/2 headers was invalid.
+ http2.outbound_frames_flood, Envoy detected an HTTP/2 frame flood from the server.
+ http2.too_many_headers, The number of headers (or trailers) exceeded the configured limits.
+ http2.unexpected_underscore, Envoy was configured to drop requests with header keys beginning with underscores.
+ http2.unknown.nghttp2.error, An unknown error was encountered by nghttp2.
+ http2.violation.of.messaging.rule, The stream was in violation of an HTTP/2 messaging rule.
diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst
index b8d4bf23591f..c6aa07f284d4 100644
--- a/docs/root/configuration/http/http_conn_man/stats.rst
+++ b/docs/root/configuration/http/http_conn_man/stats.rst
@@ -105,7 +105,9 @@ Each codec has the option of adding per-codec statistics. Both http1 and http2 h
Http1 codec statistics
~~~~~~~~~~~~~~~~~~~~~~
-All http1 statistics are rooted at *http1.*
+On the downstream side all http1 statistics are rooted at *http1.*
+
+On the upstream side all http1 statistics are rooted at *cluster.<name>.http1.*
.. csv-table::
:header: Name, Type, Description
@@ -119,7 +121,9 @@ All http1 statistics are rooted at *http1.*
Http2 codec statistics
~~~~~~~~~~~~~~~~~~~~~~
-All http2 statistics are rooted at *http2.*
+On the downstream side all http2 statistics are rooted at *http2.*
+
+On the upstream side all http2 statistics are rooted at *cluster.