diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 02b31478f4ce..4efd687e546c 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -10,217 +10,389 @@ trigger: # PR build config is manually overridden in Azure pipelines UI with different secrets pr: none -jobs: - - job: format - dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. - pool: - vmImage: "ubuntu-18.04" - steps: - - task: Cache@2 - inputs: - key: "format | ./WORKSPACE | **/*.bzl" - path: $(Build.StagingDirectory)/repository_cache - continueOnError: true - - - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Run check format scripts" - - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" - artifactName: format - condition: failed() - - - job: release - displayName: "Linux-x64 release" - dependsOn: ["format"] - # For master builds, continue even if format fails - condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) - timeoutInMinutes: 360 - pool: - vmImage: "ubuntu-18.04" - steps: - - template: bazel.yml - parameters: - ciTarget: bazel.release - - - job: release_arm64 - displayName: "Linux-arm64 release" - dependsOn: ["format"] - # For master builds, continue even if format fails +stages: + - stage: precheck + jobs: + - job: format + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. + pool: + vmImage: "ubuntu-18.04" + steps: + - task: Cache@2 + inputs: + key: "format | ./WORKSPACE | **/*.bzl" + path: $(Build.StagingDirectory)/repository_cache + continueOnError: true + + - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Run check format scripts" + + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" + artifactName: format + condition: failed() + + - job: docs + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. 
+ pool: + vmImage: "ubuntu-18.04" + steps: + - task: Cache@2 + inputs: + key: "docs | ./WORKSPACE | **/*.bzl" + path: $(Build.StagingDirectory)/repository_cache + continueOnError: true + + - script: ci/run_envoy_docker.sh 'ci/do_ci.sh docs' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Generate docs" + + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/docs docs' + displayName: "Upload Docs to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.SourcesDirectory)/generated/docs" + artifactName: docs + condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) + + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(DocsPublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(DocsPrivateKey)" + condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) + + - script: docs/publish.sh + displayName: "Publish to GitHub" + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + AZP_SHA1: $(Build.SourceVersion) + condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) + + - stage: sync + condition: and(succeeded(), eq(variables['PostSubmit'], true)) + dependsOn: [] + jobs: + - job: filter_example + dependsOn: [] + pool: + vmImage: "ubuntu-18.04" + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(FilterExamplePublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(FilterExamplePrivateKey)" + + - bash: ci/filter_example_mirror.sh + displayName: "Sync envoy-filter-example" + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + + - job: data_plane_api + dependsOn: [] + pool: + vmImage: "ubuntu-18.04" + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: 
"$(DataPlaneApiPublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(DataPlaneApiPrivateKey)" + + - bash: ci/api_mirror.sh + displayName: "Sync data-plane-api" + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + + - job: go_control_plane + dependsOn: [] + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(GoControlPlanePublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(GoControlPlanePrivateKey)" + + - bash: | + cp -a ~/.ssh $(Build.StagingDirectory)/ + ci/run_envoy_docker.sh 'ci/go_mirror.sh' + displayName: "Sync go-control-plane" + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + AZP_BRANCH: $(Build.SourceBranch) + + - stage: linux_x64 + dependsOn: ["precheck"] + # For master builds, continue even if precheck fails condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) - timeoutInMinutes: 360 - pool: "arm-large" - steps: - - template: bazel.yml - parameters: - managedAgent: false - ciTarget: bazel.release - rbe: false - artifactSuffix: ".arm64" - bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base" - - - job: bazel - displayName: "Linux-x64" - dependsOn: ["release"] - # For master builds, continue even if format fails + jobs: + - job: release + # For master builds, continue even if format fails + timeoutInMinutes: 360 + pool: + vmImage: "ubuntu-18.04" + steps: + - template: bazel.yml + parameters: + ciTarget: bazel.release + + - stage: linux_arm64 + dependsOn: ["precheck"] + # For master builds, continue even if precheck fails condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) - strategy: - maxParallel: 3 - matrix: - gcc: - CI_TARGET: "bazel.gcc" - clang_tidy: - CI_TARGET: "bazel.clang_tidy" - asan: - CI_TARGET: "bazel.asan" - tsan: - CI_TARGET: "bazel.tsan" - compile_time_options: - CI_TARGET: "bazel.compile_time_options" - timeoutInMinutes: 360 - pool: - vmImage: "ubuntu-18.04" - steps: - - template: bazel.yml - parameters: - ciTarget: $(CI_TARGET) - - - job: coverage - displayName: "Linux-x64" - dependsOn: ["release"] - timeoutInMinutes: 360 - pool: "x64-large" - strategy: - maxParallel: 2 - matrix: - coverage: - CI_TARGET: "coverage" - fuzz_coverage: - CI_TARGET: "fuzz_coverage" - steps: - - template: bazel.yml - parameters: - managedAgent: false - ciTarget: bazel.$(CI_TARGET) - rbe: false - # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces - bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" - - - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' - displayName: "Upload $(CI_TARGET) Report to GCS" - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - 
GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) - condition: always() - - - job: docker - displayName: "Linux multi-arch docker" - dependsOn: ["release", "release_arm64"] - pool: - vmImage: "ubuntu-18.04" - steps: - - bash: .azure-pipelines/cleanup.sh - displayName: "Removing tools from agent" - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "bazel.release" - itemPattern: "bazel.release/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "bazel.release.arm64" - itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - bash: | - set -e - mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 - mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 - ci/docker_ci.sh - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: $(Build.SourceBranch) - AZP_SHA1: $(Build.SourceVersion) - DOCKERHUB_USERNAME: $(DockerUsername) - DOCKERHUB_PASSWORD: $(DockerPassword) - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/build_images" - artifactName: docker - condition: always() - - - job: examples + jobs: + - job: release + timeoutInMinutes: 360 + pool: "arm-large" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.release + rbe: false + artifactSuffix: ".arm64" + bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base" + + - stage: check + dependsOn: ["linux_x64"] + jobs: + - job: bazel + displayName: "linux_x64" + dependsOn: [] + strategy: + maxParallel: 3 + matrix: + api: + CI_TARGET: "bazel.api" + gcc: + CI_TARGET: "bazel.gcc" + clang_tidy: + CI_TARGET: "bazel.clang_tidy" + asan: + CI_TARGET: "bazel.asan" + tsan: + CI_TARGET: "bazel.tsan" + compile_time_options: + CI_TARGET: "bazel.compile_time_options" + timeoutInMinutes: 360 + pool: + vmImage: "ubuntu-18.04" + steps: + - template: bazel.yml + parameters: + ciTarget: $(CI_TARGET) + + - job: coverage + displayName: "linux_x64" + dependsOn: [] + timeoutInMinutes: 360 + pool: "x64-large" + strategy: + maxParallel: 2 + matrix: + coverage: + CI_TARGET: "coverage" + fuzz_coverage: + CI_TARGET: "fuzz_coverage" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.$(CI_TARGET) + rbe: false + # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces + bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" + + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' + displayName: "Upload $(CI_TARGET) Report to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + condition: always() + + - stage: docker + dependsOn: ["linux_x64", "linux_arm64"] + jobs: + - job: docker + displayName: "linux multiarch" + pool: + vmImage: "ubuntu-18.04" + steps: + - bash: .azure-pipelines/cleanup.sh + displayName: "Removing tools from agent" + - bash: | + echo "disk space at beginning of build:" + df -h + displayName: "Check disk space at beginning" + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: 
"bazel.release" + itemPattern: "bazel.release/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release.arm64" + itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - bash: | + set -e + mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 + mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 + ci/docker_ci.sh + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + AZP_SHA1: $(Build.SourceVersion) + DOCKERHUB_USERNAME: $(DockerUsername) + DOCKERHUB_PASSWORD: $(DockerPassword) + - bash: | + echo "disk space at end of build:" + df -h + displayName: "Check disk space at end" + condition: always() + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/build_images" + artifactName: docker + condition: always() + + - stage: verify dependsOn: ["docker"] - displayName: "Verify examples run as documented" - pool: - vmImage: "ubuntu-18.04" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "docker" - itemPattern: "docker/envoy-docker-images.tar.xz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - bash: ./ci/do_ci.sh verify_examples - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - NO_BUILD_SETUP: 1 - - - job: macOS - dependsOn: ["format"] - timeoutInMinutes: 360 - pool: - vmImage: "macos-latest" - steps: - - script: ./ci/mac_ci_setup.sh - displayName: "Install dependencies" - - - script: ./ci/mac_ci_steps.sh - displayName: "Run Mac CI" - env: - BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - - - task: PublishTestResults@2 - inputs: - testResultsFiles: "**/bazel-testlogs/**/test.xml" - testRunTitle: "macOS" - condition: always() - - - script: ./ci/flaky_test/run_process_xml_mac.sh - displayName: "Process Test Results" - env: - TEST_TMPDIR: $(Build.SourcesDirectory) - SLACK_TOKEN: $(SLACK_TOKEN) - CI_TARGET: "MacOS" - REPO_URI: $(Build.Repository.Uri) - BUILD_URI: $(Build.BuildUri) - - - job: Windows - dependsOn: ["format"] - timeoutInMinutes: 360 - pool: - vmImage: "windows-latest" - steps: - - bash: ci/run_envoy_docker_windows.sh ci/windows_ci_steps.sh - displayName: "Run Windows CI" - env: - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + jobs: + - job: examples + pool: + vmImage: "ubuntu-18.04" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "docker" + itemPattern: "docker/envoy-docker-images.tar.xz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - bash: ./ci/do_ci.sh verify_examples + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + NO_BUILD_SETUP: 1 + + - stage: macos + dependsOn: ["precheck"] + jobs: + - job: test + timeoutInMinutes: 360 + pool: + vmImage: "macos-latest" + steps: + - script: ./ci/mac_ci_setup.sh + 
displayName: "Install dependencies" + + - script: ./ci/mac_ci_steps.sh + displayName: "Run Mac CI" + env: + BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + + - task: PublishTestResults@2 + inputs: + testResultsFiles: "**/bazel-testlogs/**/test.xml" + testRunTitle: "macOS" + condition: always() + + - script: ./ci/flaky_test/run_process_xml.sh + displayName: "Process Test Results" + env: + TEST_TMPDIR: $(Build.SourcesDirectory) + SLACK_TOKEN: $(SLACK_TOKEN) + CI_TARGET: "MacOS" + REPO_URI: $(Build.Repository.Uri) + BUILD_URI: $(Build.BuildUri) + + - stage: windows + dependsOn: ["precheck"] + jobs: + - job: release + timeoutInMinutes: 360 + pool: + vmImage: "windows-latest" + steps: + - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh + displayName: "Run Windows CI" + env: + ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)" + ENVOY_RBE: "true" + BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/envoy" + artifactName: windows.release + condition: always() + + - job: docker + dependsOn: ["release"] + timeoutInMinutes: 360 + pool: + vmImage: "windows-latest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "windows.release" + itemPattern: "windows.release/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - bash: | + set -e + # Convert to Unix-style path so tar doesn't think drive letter is a hostname + STAGING_DIR="/$(echo '$(Build.StagingDirectory)' | tr -d ':' | tr '\\' '/')" + mkdir -p windows/amd64 && tar zxf "${STAGING_DIR}/windows.release/envoy_binary.tar.gz" -C ./windows/amd64 + ci/docker_ci.sh + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + AZP_SHA1: $(Build.SourceVersion) + DOCKERHUB_USERNAME: $(DockerUsername) + DOCKERHUB_PASSWORD: $(DockerPassword) + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/build_images" + artifactName: docker_windows + condition: always() diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml index ab83156fbc47..dcef9acbef46 100644 --- a/.bazelci/presubmit.yml +++ b/.bazelci/presubmit.yml @@ -1,10 +1,5 @@ --- tasks: - gcc: - name: "GCC" - platform: ubuntu1804 - build_targets: - - "//source/exe:envoy-static" rbe: name: "RBE" platform: ubuntu1804 diff --git a/.bazelrc b/.bazelrc index d3326e1de006..16d8843d6a88 100644 --- a/.bazelrc +++ b/.bazelrc @@ -3,10 +3,10 @@ # Bazel doesn't need more than 200MB of memory for local build based on memory profiling: # https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling # The default JVM max heapsize is 1/4 of physical memory up to 32GB which could be large -# enough to consume all memory constrained by cgroup in large host, which is the case in CircleCI. +# enough to consume all memory constrained by cgroup in large host. # Limiting JVM heapsize here to let it do GC more when approaching the limit to # leave room for compiler/linker. 
-# The number 2G is choosed heuristically to both support in CircleCI and large enough for RBE. +# The number 2G is chosen heuristically to both support large VM and small VM with RBE. # Startup options cannot be selected via config. startup --host_jvm_args=-Xmx2g @@ -19,7 +19,8 @@ build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 build --javabase=@bazel_tools//tools/jdk:remote_jdk11 build --enable_platform_specific_config -# Enable position independent code, this option is not supported on Windows and default on on macOS. +# Enable position independent code (this is the default on macOS and Windows) +# (Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/421) build:linux --copt=-fPIC build:linux --cxxopt=-std=c++17 build:linux --conlyopt=-fexceptions @@ -35,9 +36,6 @@ build --action_env=CXX build --action_env=LLVM_CONFIG build --action_env=PATH -# Skip system ICU linking. -build --@com_googlesource_googleurl//build_config:system_icu=0 - # Common flags for sanitizers build:sanitizer --define tcmalloc=disabled build:sanitizer --linkopt -ldl @@ -112,7 +110,8 @@ build:libc++ --config=clang build:libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:libc++ --action_env=LDFLAGS=-stdlib=libc++ build:libc++ --action_env=BAZEL_CXXOPTS=-stdlib=libc++ -build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a:-lm +build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a +build:libc++ --action_env=BAZEL_LINKOPTS=-lm:-pthread build:libc++ --define force_libcpp=enabled # Optimize build for binary size reduction. @@ -141,7 +140,7 @@ build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage -build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" coverage:test-coverage --test_arg="-l trace" coverage:fuzz-coverage --config=plain-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh @@ -230,7 +229,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -275,6 +274,8 @@ build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. 
build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 +build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link +build:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link # Compile database generation config build:compdb --build_tag_filters=-nocompdb @@ -285,27 +286,27 @@ build:windows --define signal_trace=disabled build:windows --define hot_restart=disabled build:windows --define tcmalloc=disabled build:windows --define manual_stamp=manual_stamp +build:windows --cxxopt="/std:c++17" -# Should not be required after upstream fix to bazel, -# and already a no-op to linux/macos builds -# see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 +# TODO(wrowe,sunjayBhatia): Resolve bugs upstream in curl and rules_foreign_cc +# See issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 build:windows --copt="-DCARES_STATICLIB" build:windows --copt="-DNGHTTP2_STATICLIB" build:windows --copt="-DCURL_STATICLIB" -build:windows --cxxopt="/std:c++17" -# Required to work around build defects on Windows MSVC cl -# Unguarded gcc pragmas in quiche are not recognized by MSVC -build:msvc-cl --copt="/wd4068" -# Allows 'nodiscard' function return values to be discarded -build:msvc-cl --copt="/wd4834" -# Allows inline functions to be undefined -build:msvc-cl --copt="/wd4506" -build:msvc-cl --copt="-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING" +# Override any clang preference if building msvc-cl +# Drop the determinism feature (-DDATE etc are a no-op in msvc-cl) +build:msvc-cl --action_env=USE_CLANG_CL="" +build:msvc-cl --define clang_cl=0 +build:msvc-cl --features=-determinism + +# Windows build behaviors when using clang-cl +build:clang-cl --action_env=USE_CLANG_CL=1 +build:clang-cl --define clang_cl=1 # Required to work around Windows clang-cl build defects # Ignore conflicting definitions of _WIN32_WINNT -# Overriding __TIME__ etc is problematic (and is actually an invalid no-op) +# Override determinism flags (DATE etc) is valid on clang-cl compiler build:clang-cl --copt="-Wno-macro-redefined" build:clang-cl --copt="-Wno-builtin-macro-redefined" build:clang-cl --action_env=USE_CLANG_CL=1 diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 2ee082e1f343..000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,71 +0,0 @@ -version: 2.1 - -executors: - ubuntu-build: - description: "A regular build executor based on ubuntu image" - docker: - # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - - image: envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0 - resource_class: xlarge - working_directory: /source - -jobs: - api: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: ci/do_circle_ci.sh bazel.api - - add_ssh_keys: - fingerprints: - - "fb:f3:fe:be:1c:b2:ec:b6:25:f9:7b:a6:87:54:02:8c" - - run: ci/api_mirror.sh - - store_artifacts: - path: /build/envoy/generated - destination: / - - go_control_plane_mirror: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: ci/do_circle_ci.sh bazel.api - - add_ssh_keys: - fingerprints: - - "9d:3b:fe:7c:09:3b:ce:a9:6a:de:de:41:fb:6b:52:62" - - run: ci/go_mirror.sh - - filter_example_mirror: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git 
caching is likely broken - - checkout - - add_ssh_keys: - fingerprints: - - "f6:f9:df:90:9c:4b:5f:9c:f4:69:fd:42:94:ff:88:24" - - run: ci/filter_example_mirror.sh - - docs: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: ci/do_circle_ci.sh docs - - add_ssh_keys: - fingerprints: - - "44:c7:a1:9e:f4:9e:a5:33:11:f1:0e:79:e1:55:c9:04" - - run: docs/publish.sh - - store_artifacts: - path: generated/docs - -workflows: - version: 2 - all: - jobs: - - api - - go_control_plane_mirror - - filter_example_mirror - - docs: - filters: - tags: - only: /^v.*/ diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index bd2530543f4c..21f934c44944 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/envoy-ci/envoy-build:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0 +FROM gcr.io/envoy-ci/envoy-build:b480535e8423b5fd7c102fd30c92f4785519e33a ARG USERNAME=vscode ARG USER_UID=501 diff --git a/.devcontainer/README.md b/.devcontainer/README.md index 1cd314d2e4e0..f8119f9c5f6a 100644 --- a/.devcontainer/README.md +++ b/.devcontainer/README.md @@ -13,6 +13,8 @@ This task is needed to run everytime after: - Changing a BUILD file that add/remove files from a target, changes dependencies - Changing API proto files +There are additional tools for VS Code located in [`tools/vscode`](../tools/vscode) directory. + ## Advanced Usages ### Using Remote Build Execution diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 462b00ee78d0..97c37be6a676 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -38,6 +38,7 @@ "zxh404.vscode-proto3", "bazelbuild.vscode-bazel", "llvm-vs-code-extensions.vscode-clangd", + "vadimcn.vscode-lldb", "webfreak.debug", "ms-python.python" ] diff --git a/.gitignore b/.gitignore index a030c858c372..ae1f29656b59 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,4 @@ CMakeLists.txt cmake-build-debug /linux bazel.output.txt +*~ diff --git a/CODEOWNERS b/CODEOWNERS index 1edcf7c68c1e..9696b370c4c7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -23,6 +23,8 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan # thrift_proxy extension /*/extensions/filters/network/thrift_proxy @zuercher @rgs1 +# cdn_loop extension +/*/extensions/filters/http/cdn_loop @justin-mp @penguingao @alyssawilk # compressor used by http compression filters /*/extensions/filters/http/common/compressor @gsagula @rojkov @dio /*/extensions/filters/http/compressor @rojkov @dio @@ -78,8 +80,16 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan # attribute context /*/extensions/filters/common/expr @kyessenov @yangminzhu @lizan +# webassembly access logger extensions +/*/extensions/access_loggers/wasm @PiotrSikora @lizan +# webassembly bootstrap extensions +/*/extensions/bootstrap/wasm @PiotrSikora @lizan +# webassembly http extensions +/*/extensions/filters/http/wasm @PiotrSikora @lizan +# webassembly network extensions +/*/extensions/filters/network/wasm @PiotrSikora @lizan # webassembly common extension -/*/extensions/common/wasm @jplevyak @PiotrSikora @lizan +/*/extensions/common/wasm @PiotrSikora @lizan # common matcher /*/extensions/common/matcher @mattklein123 @yangminzhu # common crypto extension @@ -105,6 +115,8 @@ extensions/filters/common/original_src @snowp @klarose 
/*/extensions/stat_sinks/dog_statsd @taiki45 @jmarantz /*/extensions/stat_sinks/hystrix @trabetti @jmarantz /*/extensions/stat_sinks/metrics_service @ramaraochavali @jmarantz +# webassembly stat-sink extensions +/*/extensions/stat_sinks/wasm @PiotrSikora @lizan /*/extensions/resource_monitors/injected_resource @eziskind @htuch /*/extensions/resource_monitors/common @eziskind @htuch /*/extensions/resource_monitors/fixed_heap @eziskind @htuch @@ -129,7 +141,7 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/compression/gzip @junr03 @rojkov /*/extensions/filters/http/decompressor @rojkov @dio # Watchdog Extensions -/*/extensions/watchdog/profile_action @kbaichoo @htuch +/*/extensions/watchdog/profile_action @kbaichoo @antoniovicente # Core upstream code extensions/upstreams/http @alyssawilk @snowp @mattklein123 extensions/upstreams/http/http @alyssawilk @snowp @mattklein123 @@ -137,3 +149,6 @@ extensions/upstreams/http/tcp @alyssawilk @mattklein123 extensions/upstreams/http/default @alyssawilk @snowp @mattklein123 # OAuth2 extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp +# HTTP Local Rate Limit +/*/extensions/filters/http/local_ratelimit @rgs1 @mattklein123 +/*/extensions/filters/common/local_ratelimit @mattklein123 @rgs1 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1bef2955c288..bafe92bb2d8a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -69,7 +69,7 @@ versioning guidelines: cause a configuration load failure, unless the feature in question is explicitly overridden in [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features) - config ([example](configs/using_deprecated_config.v2.yaml)). Finally, following the deprecation + config ([example](configs/using_deprecated_config.yaml)). Finally, following the deprecation of the API major version where the field was first marked deprecated, the entire implementation code will be removed from the Envoy implementation. * This policy means that organizations deploying master should have some time to get ready for diff --git a/DEPENDENCY_POLICY.md b/DEPENDENCY_POLICY.md index 50aad88708aa..0944ad59030b 100644 --- a/DEPENDENCY_POLICY.md +++ b/DEPENDENCY_POLICY.md @@ -21,12 +21,14 @@ An example entry for the `nghttp2` dependency is: ```python com_github_nghttp2_nghttp2 = dict( project_name = "Nghttp2", + project_desc = "Implementation of HTTP/2 and its header compression ...", project_url = "https://nghttp2.org", version = "1.41.0", sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8", strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], use_category = ["dataplane"], + last_updated = "2020-06-02", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), ``` @@ -40,14 +42,16 @@ Dependency declarations must: `{dash_version}`. * Versions should prefer release versions over master branch GitHub SHA tarballs. A comment is necessary if the latter is used. This comment should contain the reason that a non-release - version is being used and the YYYY-MM-DD when the last update was performed. + version is being used. * Provide accurate entries for `use_category`. Please think carefully about whether there are data or control plane implications of the dependency. +* Reflect the date (YYYY-MM-DD) at which they were last updated in the `last_updated` field. This + date is preferably the date at which the PR is created. 
* CPEs are compulsory for all dependencies that are not purely build/test. [CPEs](https://en.wikipedia.org/wiki/Common_Platform_Enumeration) provide metadata that allow us to correlate with related CVEs in dashboards and other tooling, and also provide a machine - consumable join key. You can consult the latest [CPE - dictionary](https://nvd.nist.gov/products/cpe) to find a CPE for a dependency.`"N/A"` should only + consumable join key. You can consult [CPE + search](https://nvd.nist.gov/products/cpe/search) to find a CPE for a dependency.`"N/A"` should only be used if no CPE for the project is available in the CPE database. CPEs should be _versionless_ with a `:*` suffix, since the version can be computed from `version`. @@ -93,6 +97,33 @@ basis: Where possible, we prefer the latest release version for external dependencies, rather than master branch GitHub SHA tarballs. +## Dependency shepherds + +Sign-off from the [dependency +shepherds](https://github.com/orgs/envoyproxy/teams/dependency-shepherds) is +required for every PR that modifies external dependencies. The shepherds will +look to see that the policy in this document is enforced and that metadata is +kept up-to-date. + +## Dependency patches + +Occasionally it is necessary to introduce an Envoy-side patch to a dependency in a `.patch` file. +These are typically applied in [bazel/repositories.bzl](bazel/repositories.bzl). Our policy on this +is as follows: + +* Patch files impede dependency updates. They are expedient at creation time but are a maintenance + penalty. They reduce the velocity and increase the effort of upgrades in response to security + vulnerabilities in external dependencies. + +* No patch will be accepted without a sincere and sustained effort to upstream the patch to the + dependency's canonical repository. + +* There should exist a plan-of-record, filed as an issue in Envoy or the upstream GitHub tracking + elimination of the patch. + +* Every patch must have comments at its point-of-use in [bazel/repositories.bzl](bazel/repositories.bzl) + providing a rationale and detailing the tracking issue. + ## Policy exceptions The following dependencies are exempt from the policy: diff --git a/DEVELOPER.md b/DEVELOPER.md index 465644c0e02c..6786925fa7e8 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -1,6 +1,7 @@ # Developer documentation -Envoy is built using the Bazel build system. CircleCI builds, tests, and runs coverage against all pull requests and the master branch. +Envoy is built using the Bazel build system. Our CI on Azure Pipelines builds, tests, and runs coverage against +all pull requests and the master branch. To get started building Envoy locally, see the [Bazel quick start](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers). To run tests, there are Bazel [targets](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#testing-envoy-with-bazel) for Google Test. 
@@ -10,7 +11,7 @@ If you plan to contribute to Envoy, you may find it useful to install the Envoy Below is a list of additional documentation to aid the development process: -- [General build and installation documentation](https://www.envoyproxy.io/docs/envoy/latest/install/install) +- [General build and installation documentation](https://www.envoyproxy.io/docs/envoy/latest/start/start) - [Building and testing Envoy with Bazel](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md) @@ -35,4 +36,3 @@ And some documents on components of Envoy architecture: - [Envoy flow control](https://github.com/envoyproxy/envoy/blob/master/source/docs/flow_control.md) - [Envoy's subset load balancer](https://github.com/envoyproxy/envoy/blob/master/source/docs/subset_load_balancer.md) - diff --git a/EXTENSION_POLICY.md b/EXTENSION_POLICY.md index 0063a2a2139c..1c4a059b748c 100644 --- a/EXTENSION_POLICY.md +++ b/EXTENSION_POLICY.md @@ -59,6 +59,19 @@ In the event that the Extension PR author is a sponsoring maintainer and no othe is available, another maintainer may be enlisted to perform a minimal review for style and common C++ anti-patterns. The Extension PR must still be approved by a non-maintainer reviewer. +## Wasm extensions + +Wasm extensions are not allowed in the main envoyproxy/envoy repository unless +part of the Wasm implementation validation. The rationale for this policy: +* Wasm extensions should not depend upon Envoy implementation specifics as + they exist behind a version independent ABI. Hence, there is little value in + qualifying Wasm extensions in the main repository. +* Wasm extensions introduce extensive dependencies via crates, etc. We would + prefer to keep the envoyproxy/envoy repository dependencies minimal, easy + to reason about and maintain. +* We do not implement any core extensions in Wasm and do not plan to in the + medium term. + ## Extension stability and security posture Every extension is expected to be tagged with a `status` and `security_posture` in its diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 923559eea7ad..767605eda220 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -87,6 +87,7 @@ or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.co * Remove the "Pending" tags and add dates to the top of the [release notes for this version](docs/root/version_history/current.rst). * Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to "1.6.0". + * Update the [RELEASES](RELEASES.md) doc with the relevant dates. * Get a review and merge. * Wait for tests to pass on [master](https://dev.azure.com/cncf/envoy/_build). * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should @@ -137,10 +138,7 @@ Deprecated ---------- ``` * Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh`) - to file tracking issues for code which can be removed. -* Run the deprecate_features.py script (e.g. `sh tools/deprecate_features/deprecate_features.sh`) - to make the last release's deprecated features fatal-by-default. Submit the resultant PR and send - an email to envoy-announce. + to file tracking issues for runtime guarded code which can be removed. * Check source/common/runtime/runtime_features.cc and see if any runtime guards in disabled_runtime_features should be reassessed, and ping on the relevant issues. @@ -186,7 +184,7 @@ build confidence in consistent application of the API guidelines to PRs. 
Adding new [extensions](REPO_LAYOUT.md#sourceextensions-layout) has a dedicated policy. Please see [this](./EXTENSION_POLICY.md) document for more information. -# Exernal dependency policy +# External dependency policy Adding new external dependencies has a dedicated policy. Please see [this](DEPENDENCY_POLICY.md) document for more information. diff --git a/OWNERS.md b/OWNERS.md index 6c3c85d64fa7..4adc81048c59 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -37,12 +37,13 @@ routing PRs, questions, etc. to the right place. * Lua, access logging, and general miscellany. * Joshua Marantz ([jmarantz](https://github.com/jmarantz)) (jmarantz@google.com) * Stats, abseil, scalability, and performance. +* Antonio Vicente ([antoniovicente](https://github.com/antoniovicente)) (avd@google.com) + * Event management, security, performance, data plane. # Envoy security team * All maintainers * Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com) -* Antonio Vicente ([antoniovicente](https://github.com/antoniovicente)) (avd@google.com) * Tony Allen ([tonya11en](https://github.com/tonya11en)) (tallen@lyft.com) # Emeritus maintainers @@ -67,3 +68,5 @@ matter expert reviews. Feel free to loop them in as needed. * Redis, Python, configuration/operational questions. * Yuchen Dai ([lambdai](https://github.com/lambdai)) (lambdai@google.com) * v2 xDS, listeners, filter chain discovery service. +* Michael Payne ([moderation](https://github.com/moderation)) (m@m17e.org) + * External dependencies, Envoy's supply chain and documentation. diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index 9b3d5cd043ba..aa8c5e750247 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -70,6 +70,16 @@ current version. Please include any relevant links. Each release note should be relevant subsystem in **alphabetical order** (see existing examples as a guide) and include links to relevant parts of the documentation. Thank you! Please write in N/A if there are no release notes. +### Platform Specific Features + +If this change involves any platform specific features (e.g. utilizing OS-specific socket options) +or only implements new features for a limited set of platforms (e.g. Linux amd64 only), please +include an explanation that addresses the reasoning behind this. Please also open a new tracking +issue for each platform this change is not implemented on (and link them in the PR) to enable +maintainers and contributors to triage. Reviewers will look for the change to avoid +`#ifdef ` and rather prefer feature guards to not enable the change on a given platform +using the build system. 
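As an aside on the "Platform Specific Features" guidance above: the kind of build-system feature guard that reviewers prefer over `#ifdef` can be sketched with plain Bazel `config_setting`/`select()`. All target and file names below are hypothetical, and Envoy's own build macros (e.g. `envoy_cc_library`) add further wrapping, so treat this only as an illustration of the idea rather than the project's prescribed pattern.

```python
# Hypothetical BUILD sketch: compile the real implementation only on Linux
# x86-64 and fall back to a stub elsewhere, instead of guarding the C++ code
# with #ifdef.
config_setting(
    name = "linux_x86_64",
    constraint_values = [
        "@platforms//os:linux",
        "@platforms//cpu:x86_64",
    ],
)

cc_library(
    name = "platform_feature",
    hdrs = ["platform_feature.h"],
    srcs = select({
        ":linux_x86_64": ["platform_feature_linux.cc"],
        "//conditions:default": ["platform_feature_stub.cc"],
    }),
)
```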
+ ### Runtime guard If this PR has a user-visible behavioral change, or otherwise falls under the diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 5a1545aacd7a..366209eed929 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -18,6 +18,7 @@ Risk Level: Testing: Docs Changes: Release Notes: +Platform Specific Features: [Optional Runtime guard:] [Optional Fixes #Issue] [Optional Deprecated:] diff --git a/README.md b/README.md index 290119f82e23..03c0aa0432d6 100644 --- a/README.md +++ b/README.md @@ -10,15 +10,14 @@ involved and how Envoy plays a role, read the CNCF [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266) [![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/11?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=11&branchName=master) -[![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/envoy.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:envoy) -[![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/) +[![Jenkins](https://powerci.osuosl.org/buildStatus/icon?job=build-envoy-static-master&subject=ppc64le%20build)](https://powerci.osuosl.org/job/build-envoy-static-master/) ## Documentation * [Official documentation](https://www.envoyproxy.io/) * [FAQ](https://www.envoyproxy.io/docs/envoy/latest/faq/overview) -* [Unofficial Chinese documentation](https://github.com/servicemesher/envoy/) +* [Unofficial Chinese documentation](https://www.servicemesher.com/envoy/) * Watch [a video overview of Envoy](https://www.youtube.com/watch?v=RVZX4CwKhGE) ([transcript](https://www.microservices.com/talks/lyfts-envoy-monolith-service-mesh-matt-klein/)) to find out more about the origin story and design philosophy of Envoy diff --git a/RELEASES.md b/RELEASES.md index 3ca3f28c376c..0a58aa22c4c2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -64,7 +64,7 @@ deadline of 3 weeks. | 1.13.0 | 2019/12/31 | 2020/01/20 | +20 days | 2021/01/20 | | 1.14.0 | 2020/03/31 | 2020/04/08 | +8 days | 2021/04/08 | | 1.15.0 | 2020/06/30 | 2020/07/07 | +7 days | 2021/07/07 | -| 1.16.0 | 2020/09/30 | | | | +| 1.16.0 | 2020/09/30 | 2020/10/08 | +8 days | 2021/10/08 | | 1.17.0 | 2020/12/31 | | | | diff --git a/REPO_LAYOUT.md b/REPO_LAYOUT.md index cd87e015ac5b..e4f2452a1417 100644 --- a/REPO_LAYOUT.md +++ b/REPO_LAYOUT.md @@ -4,7 +4,8 @@ This is a high level overview of how the repository is laid out to both aid in c as well as to clearly specify how extensions are added to the repository. The top level directories are: -* [.circleci/](.circleci/): Configuration for [CircleCI](https://circleci.com/gh/envoyproxy). +* [.azure-pipelines/](.azure-pipelines/): Configuration for +[Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/). * [api/](api/): Envoy data plane API. * [bazel/](bazel/): Configuration for Envoy's use of [Bazel](https://bazel.build/). * [ci/](ci/): Scripts used both during CI as well as to build Docker containers. 
diff --git a/STYLE.md b/STYLE.md index 7965f90f7236..ee2deadf170b 100644 --- a/STYLE.md +++ b/STYLE.md @@ -1,7 +1,7 @@ # C++ coding style * The Envoy source code is formatted using clang-format. Thus all white spaces, etc. - issues are taken care of automatically. The CircleCI tests will automatically check + issues are taken care of automatically. The Azure Pipelines will automatically check the code format and fail. There are make targets that can both check the format (check_format) as well as fix the code format for you (fix_format). Errors in .clang-tidy are enforced while other warnings are suggestions. Note that code and @@ -105,17 +105,18 @@ A few general notes on our error handling philosophy: * All error code returns should be checked. -* At a very high level, our philosophy is that errors that are *likely* to happen should be - gracefully handled. Examples of likely errors include any type of network error, disk IO error, - bad data returned by an API call, bad data read from runtime files, etc. Errors that are - *unlikely* to happen should lead to process death, under the assumption that the additional burden - of defensive coding and testing is not an effective use of time for an error that should not happen - given proper system setup. Examples of these types of errors include not being able to open the shared - memory region, an invalid initial JSON config read from disk, system calls that should not fail - assuming correct parameters (which should be validated via tests), etc. Examples of system calls - that should not fail when passed valid parameters include most usages of `setsockopt()`, - `getsockopt()`, the kernel returning a valid `sockaddr` after a successful call to `accept()`, - `pthread_create()`, `pthread_join()`, etc. +* At a very high level, our philosophy is that errors should be handled gracefully when caused by: + - Untrusted network traffic OR + - Raised by the Envoy process environment and are *likely* to happen +* Examples of likely environnmental errors include any type of network error, disk IO error, bad + data returned by an API call, bad data read from runtime files, etc. Errors in the Envoy + environment that are *unlikely* to happen after process initialization, should lead to process + death, under the assumption that the additional burden of defensive coding and testing is not an + effective use of time for an error that should not happen given proper system setup. Examples of + these types of errors include not being able to open the shared memory region, system calls that + should not fail assuming correct parameters (which should be validated via tests), etc. Examples + of system calls that should not fail when passed valid parameters include the kernel returning a + valid `sockaddr` after a successful call to `accept()`, `pthread_create()`, `pthread_join()`, etc. * OOM events (both memory and FDs) are considered fatal crashing errors. 
An OOM error should never silently be ignored and should crash the process either via the C++ allocation error exception, an explicit `RELEASE_ASSERT` following a third party library call, or an obvious crash on a subsequent diff --git a/VERSION b/VERSION index 1f0d2f335194..ee8855caa4a7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.16.0-dev +1.17.0-dev diff --git a/api/BUILD b/api/BUILD index 4aad4899a847..345732128a0d 100644 --- a/api/BUILD +++ b/api/BUILD @@ -171,6 +171,7 @@ proto_library( "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", "//envoy/extensions/filters/http/cache/v3alpha:pkg", + "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", @@ -189,6 +190,7 @@ proto_library( "//envoy/extensions/filters/http/health_check/v3:pkg", "//envoy/extensions/filters/http/ip_tagging/v3:pkg", "//envoy/extensions/filters/http/jwt_authn/v3:pkg", + "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", @@ -235,6 +237,7 @@ proto_library( "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", + "//envoy/extensions/stat_sinks/wasm/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", @@ -268,6 +271,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", + "//envoy/watchdog/v3alpha:pkg", ], ) diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md index 773248f2e2ea..01ba39b500b8 100644 --- a/api/CONTRIBUTING.md +++ b/api/CONTRIBUTING.md @@ -50,11 +50,11 @@ generated RST files are also viewable in `generated/rst`. Note also that the generated documentation can be viewed in CI: -1. Open docs job in CircleCI. -2. Navigate to "artifacts" tab. -3. Expand files and click on `index.html`. +1. Open docs job in Azure Pipelines. +2. Navigate to "Upload Docs to GCS" log. +3. Click on the link there. -If you do not see an artifacts tab this is a bug in CircleCI. Try logging out and logging back in. +If you do not see "Upload Docs to GCS" or it is failing, that means the docs are not built correctly. ### Documentation guidelines diff --git a/api/bazel/envoy_http_archive.bzl b/api/bazel/envoy_http_archive.bzl index 13b98f770619..15fd65b2af27 100644 --- a/api/bazel/envoy_http_archive.bzl +++ b/api/bazel/envoy_http_archive.bzl @@ -10,8 +10,7 @@ def envoy_http_archive(name, locations, **kwargs): # This repository has already been defined, probably because the user # wants to override the version. Do nothing. return - loc_key = kwargs.pop("repository_key", name) - location = locations[loc_key] + location = locations[name] # HTTP tarball at a given URL. Add a BUILD file if requested. 
http_archive( diff --git a/api/bazel/external_deps.bzl b/api/bazel/external_deps.bzl new file mode 100644 index 000000000000..588879c4bd0a --- /dev/null +++ b/api/bazel/external_deps.bzl @@ -0,0 +1,140 @@ +load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locations_spec") + +# Envoy dependencies may be annotated with the following attributes: +DEPENDENCY_ANNOTATIONS = [ + # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID + # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See + # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements + # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned. + # This attribute is optional for components with use categories listed in the + # USE_CATEGORIES_WITH_CPE_OPTIONAL + "cpe", + + # A list of extensions when 'use_category' contains 'dataplane_ext' or 'observability_ext'. + "extensions", + + # Additional dependencies loaded transitively via this dependency that are not tracked in + # Envoy (see the external dependency at the given version for information). + "implied_untracked_deps", + + # When the dependency was last updated in Envoy. + "last_updated", + + # Project metadata. + "project_desc", + "project_name", + "project_url", + + # List of the categories describing how the dependency is being used. This attribute is used + # for automatic tracking of security posture of Envoy's dependencies. + # Possible values are documented in the USE_CATEGORIES list below. + # This attribute is mandatory for each dependecy. + "use_category", + + # The dependency version. This may be either a tagged release (preferred) + # or git SHA (as an exception when no release tagged version is suitable). + "version", +] + +# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed +# to be declared. +USE_CATEGORIES = [ + # This dependency is used in API protos. + "api", + # This dependency is used in build process. + "build", + # This dependency is used to process xDS requests. + "controlplane", + # This dependency is used in processing downstream or upstream requests (core). + "dataplane_core", + # This dependency is used in processing downstream or upstream requests (extensions). + "dataplane_ext", + # This dependecy is used for logging, metrics or tracing (core). It may process unstrusted input. + "observability_core", + # This dependecy is used for logging, metrics or tracing (extensions). It may process unstrusted input. + "observability_ext", + # This dependency does not handle untrusted data and is used for various utility purposes. + "other", + # This dependency is used only in tests. + "test_only", + # Documentation generation + "docs", + # Developer tools (not used in build or docs) + "devtools", +] + +# Components with these use categories are not required to specify the 'cpe' +# and 'last_updated' annotation. +USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] + +def _fail_missing_attribute(attr, key): + fail("The '%s' attribute must be defined for external dependecy " % attr + key) + +# Method for verifying content of the repository location specifications. +# +# We also remove repository metadata attributes so that further consumers, e.g. +# http_archive, are not confused by them. 
+def load_repository_locations(repository_locations_spec): + locations = {} + for key, location in load_repository_locations_spec(repository_locations_spec).items(): + mutable_location = dict(location) + locations[key] = mutable_location + + if "sha256" not in location or len(location["sha256"]) == 0: + _fail_missing_attribute("sha256", key) + + if "project_name" not in location: + _fail_missing_attribute("project_name", key) + + if "project_desc" not in location: + _fail_missing_attribute("project_desc", key) + + if "project_url" not in location: + _fail_missing_attribute("project_url", key) + project_url = location["project_url"] + if not project_url.startswith("https://") and not project_url.startswith("http://"): + fail("project_url must start with https:// or http://: " + project_url) + + if "version" not in location: + _fail_missing_attribute("version", key) + + if "use_category" not in location: + _fail_missing_attribute("use_category", key) + use_category = location["use_category"] + + if "dataplane_ext" in use_category or "observability_ext" in use_category: + if "extensions" not in location: + _fail_missing_attribute("extensions", key) + + if "last_updated" not in location: + _fail_missing_attribute("last_updated", key) + last_updated = location["last_updated"] + + # Starlark doesn't have regexes. + if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-": + fail("last_updated must match YYYY-DD-MM: " + last_updated) + + if "cpe" in location: + cpe = location["cpe"] + + # Starlark doesn't have regexes. + cpe_components = len(cpe.split(":")) + + # We allow cpe:2.3:a:foo:*:* and cpe:2.3.:a:foo:bar:* only. + cpe_components_valid = (cpe_components == 6) + cpe_matches = (cpe == "N/A" or (cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and cpe_components_valid)) + if not cpe_matches: + fail("CPE must match cpe:2.3:a:::*: " + cpe) + elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]: + _fail_missing_attribute("cpe", key) + + for category in location["use_category"]: + if category not in USE_CATEGORIES: + fail("Unknown use_category value '" + category + "' for dependecy " + key) + + # Remove any extra annotations that we add, so that we don't confuse http_archive etc. + for annotation in DEPENDENCY_ANNOTATIONS: + if annotation in mutable_location: + mutable_location.pop(annotation) + + return locations diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index a64e733cf74a..a12a0ea98b3a 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -1,40 +1,43 @@ load(":envoy_http_archive.bzl", "envoy_http_archive") -load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") +load(":external_deps.bzl", "load_repository_locations") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") -def api_dependencies(): +REPOSITORY_LOCATIONS = load_repository_locations(REPOSITORY_LOCATIONS_SPEC) + +# Use this macro to reference any HTTP archive from bazel/repository_locations.bzl. 
+def external_http_archive(name, **kwargs): envoy_http_archive( - "bazel_skylib", + name, locations = REPOSITORY_LOCATIONS, + **kwargs ) - envoy_http_archive( - "com_envoyproxy_protoc_gen_validate", - locations = REPOSITORY_LOCATIONS, + +def api_dependencies(): + external_http_archive( + name = "bazel_skylib", ) - envoy_http_archive( + external_http_archive( + name = "com_envoyproxy_protoc_gen_validate", + ) + external_http_archive( name = "com_google_googleapis", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "com_github_cncf_udpa", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "prometheus_metrics_model", - locations = REPOSITORY_LOCATIONS, build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT, ) - envoy_http_archive( + external_http_archive( name = "opencensus_proto", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "rules_proto", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "com_github_openzipkin_zipkinapi", - locations = REPOSITORY_LOCATIONS, build_file_content = ZIPKINAPI_BUILD_CONTENT, ) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 2f0fdc723dbb..bdcf31e867d2 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -1,66 +1,91 @@ -BAZEL_SKYLIB_RELEASE = "1.0.3" -BAZEL_SKYLIB_SHA256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c" - -OPENCENSUS_PROTO_RELEASE = "0.3.0" -OPENCENSUS_PROTO_SHA256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0" - -PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 -PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" - -GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 -GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" - -PROMETHEUS_GIT_SHA = "60555c9708c786597e6b07bf846d0dc5c2a46f54" # Jun 23, 2020 -PROMETHEUS_SHA = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e" - -UDPA_RELEASE = "0.0.1" -UDPA_SHA256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8" - -ZIPKINAPI_RELEASE = "0.2.2" -ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" - -RULES_PROTO_GIT_SHA = "40298556293ae502c66579620a7ce867d5f57311" # Aug 17, 2020 -RULES_PROTO_SHA256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5" - -REPOSITORY_LOCATIONS = dict( +# This should match the schema defined in external_deps.bzl. 
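With the refactor above, adding an API-level dependency becomes a two-step edit: describe the archive once in repository_locations.bzl, then pull it in by name through external_http_archive(), which looks the location up in REPOSITORY_LOCATIONS and forwards any extra keyword arguments (such as build_file_content) to envoy_http_archive. A rough Starlark sketch of the consuming side, using hypothetical names that are not part of this change:

```python
# Starlark sketch of a hypothetical consumer file (illustrative only).
load("@envoy_api//bazel:repositories.bzl", "external_http_archive")

# Placeholder BUILD file content for an archive that needs one.
_EXAMPLE_BUILD_CONTENT = """
filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])
"""

def example_dependencies():
    # The name must be a key in REPOSITORY_LOCATIONS_SPEC; the location (minus the
    # metadata annotations) is resolved from REPOSITORY_LOCATIONS automatically.
    external_http_archive(name = "com_example_httplib")
    external_http_archive(
        name = "com_example_metrics_model",
        build_file_content = _EXAMPLE_BUILD_CONTENT,
    )
```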
+REPOSITORY_LOCATIONS_SPEC = dict( bazel_skylib = dict( - sha256 = BAZEL_SKYLIB_SHA256, - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib-" + BAZEL_SKYLIB_RELEASE + ".tar.gz"], + project_name = "bazel-skylib", + project_desc = "Common useful functions and rules for Bazel", + project_url = "https://github.com/bazelbuild/bazel-skylib", + version = "1.0.3", + sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], + last_updated = "2020-08-27", + use_category = ["api"], ), com_envoyproxy_protoc_gen_validate = dict( - sha256 = PGV_SHA256, - strip_prefix = "protoc-gen-validate-" + PGV_GIT_SHA, - urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"], + project_name = "protoc-gen-validate (PGV)", + project_desc = "protoc plugin to generate polyglot message validators", + project_url = "https://github.com/envoyproxy/protoc-gen-validate", + version = "278964a8052f96a2f514add0298098f63fb7f47f", + sha256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8", + strip_prefix = "protoc-gen-validate-{version}", + urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz"], + last_updated = "2020-06-09", + use_category = ["api"], + ), + com_github_cncf_udpa = dict( + project_name = "Universal Data Plane API", + project_desc = "Universal Data Plane API Working Group (UDPA-WG)", + project_url = "https://github.com/cncf/udpa", + version = "0.0.1", + sha256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8", + strip_prefix = "udpa-{version}", + urls = ["https://github.com/cncf/udpa/archive/v{version}.tar.gz"], + last_updated = "2020-09-23", + use_category = ["api"], + ), + com_github_openzipkin_zipkinapi = dict( + project_name = "Zipkin API", + project_desc = "Zipkin's language independent model and HTTP Api Definitions", + project_url = "https://github.com/openzipkin/zipkin-api", + version = "0.2.2", + sha256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b", + strip_prefix = "zipkin-api-{version}", + urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"], + last_updated = "2020-09-23", + use_category = ["api"], ), com_google_googleapis = dict( # TODO(dio): Consider writing a Starlark macro for importing Google API proto. 
- sha256 = GOOGLEAPIS_SHA, - strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, - urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"], + project_name = "Google APIs", + project_desc = "Public interface definitions of Google APIs", + project_url = "https://github.com/googleapis/googleapis", + version = "82944da21578a53b74e547774cf62ed31a05b841", + sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405", + strip_prefix = "googleapis-{version}", + urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"], + last_updated = "2019-12-02", + use_category = ["api"], ), - com_github_cncf_udpa = dict( - sha256 = UDPA_SHA256, - strip_prefix = "udpa-" + UDPA_RELEASE, - urls = ["https://github.com/cncf/udpa/archive/v" + UDPA_RELEASE + ".tar.gz"], + opencensus_proto = dict( + project_name = "OpenCensus Proto", + project_desc = "Language Independent Interface Types For OpenCensus", + project_url = "https://github.com/census-instrumentation/opencensus-proto", + version = "0.3.0", + sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0", + strip_prefix = "opencensus-proto-{version}/src", + urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"], + last_updated = "2020-06-20", + use_category = ["api"], ), prometheus_metrics_model = dict( - sha256 = PROMETHEUS_SHA, - strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA, - urls = ["https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz"], - ), - opencensus_proto = dict( - sha256 = OPENCENSUS_PROTO_SHA256, - strip_prefix = "opencensus-proto-" + OPENCENSUS_PROTO_RELEASE + "/src", - urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v" + OPENCENSUS_PROTO_RELEASE + ".tar.gz"], + project_name = "Prometheus client model", + project_desc = "Data model artifacts for Prometheus", + project_url = "https://github.com/prometheus/client_model", + version = "60555c9708c786597e6b07bf846d0dc5c2a46f54", + sha256 = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e", + strip_prefix = "client_model-{version}", + urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], + last_updated = "2020-06-23", + use_category = ["api"], ), rules_proto = dict( - sha256 = RULES_PROTO_SHA256, - strip_prefix = "rules_proto-" + RULES_PROTO_GIT_SHA + "", - urls = ["https://github.com/bazelbuild/rules_proto/archive/" + RULES_PROTO_GIT_SHA + ".tar.gz"], - ), - com_github_openzipkin_zipkinapi = dict( - sha256 = ZIPKINAPI_SHA256, - strip_prefix = "zipkin-api-" + ZIPKINAPI_RELEASE, - urls = ["https://github.com/openzipkin/zipkin-api/archive/" + ZIPKINAPI_RELEASE + ".tar.gz"], + project_name = "Protobuf Rules for Bazel", + project_desc = "Protocol buffer rules for Bazel", + project_url = "https://github.com/bazelbuild/rules_proto", + version = "40298556293ae502c66579620a7ce867d5f57311", + sha256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5", + strip_prefix = "rules_proto-{version}", + urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], + last_updated = "2020-08-17", + use_category = ["api"], ), ) diff --git a/api/bazel/repository_locations_utils.bzl b/api/bazel/repository_locations_utils.bzl new file mode 100644 index 000000000000..3b984e1bc580 --- /dev/null +++ b/api/bazel/repository_locations_utils.bzl @@ -0,0 
+1,20 @@ +def _format_version(s, version): + return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_")) + +# Generate a "repository location specification" from raw repository +# specification. The information should match the format required by +# external_deps.bzl. This function mostly does interpolation of {version} in +# the repository info fields. This code should be capable of running in both +# Python and Starlark. +def load_repository_locations_spec(repository_locations_spec): + locations = {} + for key, location in repository_locations_spec.items(): + mutable_location = dict(location) + locations[key] = mutable_location + + # Fixup with version information. + if "version" in location: + if "strip_prefix" in location: + mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"]) + mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]] + return locations diff --git a/api/envoy/admin/v3/BUILD b/api/envoy/admin/v3/BUILD index 4163de8e0aba..38eadcb09fea 100644 --- a/api/envoy/admin/v3/BUILD +++ b/api/envoy/admin/v3/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/admin/v2alpha:pkg", "//envoy/annotations:pkg", "//envoy/config/bootstrap/v3:pkg", + "//envoy/config/cluster/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "//envoy/type/v3:pkg", diff --git a/api/envoy/admin/v3/certs.proto b/api/envoy/admin/v3/certs.proto index 158c8aead28f..5580bb5ef17d 100644 --- a/api/envoy/admin/v3/certs.proto +++ b/api/envoy/admin/v3/certs.proto @@ -34,11 +34,19 @@ message Certificate { repeated CertificateDetails cert_chain = 2; } -// [#next-free-field: 7] +// [#next-free-field: 8] message CertificateDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CertificateDetails"; + message OcspDetails { + // Indicates the time from which the OCSP response is valid. + google.protobuf.Timestamp valid_from = 1; + + // Indicates the time at which the OCSP response expires. + google.protobuf.Timestamp expiration = 2; + } + // Path of the certificate. string path = 1; @@ -56,6 +64,9 @@ message CertificateDetails { // Indicates the time at which the certificate expires. google.protobuf.Timestamp expiration_time = 6; + + // Details related to the OCSP response associated with this certificate, if any. + OcspDetails ocsp_details = 7; } message SubjectAlternateName { diff --git a/api/envoy/admin/v3/clusters.proto b/api/envoy/admin/v3/clusters.proto index fc05c8a10de2..8eeaec20becc 100644 --- a/api/envoy/admin/v3/clusters.proto +++ b/api/envoy/admin/v3/clusters.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.admin.v3; import "envoy/admin/v3/metrics.proto"; +import "envoy/config/cluster/v3/circuit_breaker.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/health_check.proto"; @@ -28,7 +29,7 @@ message Clusters { } // Details an individual cluster's current status. -// [#next-free-field: 6] +// [#next-free-field: 7] message ClusterStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus"; @@ -76,6 +77,9 @@ message ClusterStatus { // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent local_origin_success_rate_ejection_threshold = 5; + + // :ref:`Circuit breaking ` settings of the cluster. 
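The {version} placeholders used throughout REPOSITORY_LOCATIONS_SPEC are expanded by _format_version() in the new repository_locations_utils.bzl above. A plain-Python illustration of that interpolation follows; the first template is the opencensus URL from this change, the second is invented purely to show the dash and underscore variants.

```python
# Plain-Python mirror of _format_version() from repository_locations_utils.bzl.
def format_version(s, version):
    return s.format(
        version = version,
        dash_version = version.replace(".", "-"),
        underscore_version = version.replace(".", "_"),
    )

opencensus_url = "https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"
print(format_version(opencensus_url, "0.3.0"))
# https://github.com/census-instrumentation/opencensus-proto/archive/v0.3.0.tar.gz

# Invented template showing the dash/underscore variants.
print(format_version("pkg-{underscore_version}/rel-{dash_version}", "1.2.3"))
# pkg-1_2_3/rel-1-2-3
```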
+ config.cluster.v3.CircuitBreakers circuit_breakers = 6; } // Current state of a particular host. diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index b91303f3d8fe..5e3765a8586f 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.admin.v3; +import "envoy/config/core/v3/base.proto"; + import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; @@ -17,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Proto representation of the value returned by /server_info, containing // server version/server status information. -// [#next-free-field: 7] +// [#next-free-field: 8] message ServerInfo { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ServerInfo"; @@ -52,9 +54,12 @@ message ServerInfo { // Command line options the server is currently running with. CommandLineOptions command_line_options = 6; + + // Populated node identity of this server. + config.core.v3.Node node = 7; } -// [#next-free-field: 35] +// [#next-free-field: 37] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -179,4 +184,10 @@ message CommandLineOptions { // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; + + // See :option:`--socket-path` for details. + string socket_path = 35; + + // See :option:`--socket-mode` for details. + uint32 socket_mode = 36; } diff --git a/api/envoy/admin/v3/tap.proto b/api/envoy/admin/v3/tap.proto index ca7ab4405a9b..934170b2deea 100644 --- a/api/envoy/admin/v3/tap.proto +++ b/api/envoy/admin/v3/tap.proto @@ -21,7 +21,7 @@ message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; // The tap configuration to load. config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/admin/v4alpha/BUILD b/api/envoy/admin/v4alpha/BUILD index f2cb1a2a70c0..28f1e7d8c821 100644 --- a/api/envoy/admin/v4alpha/BUILD +++ b/api/envoy/admin/v4alpha/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/admin/v3:pkg", "//envoy/annotations:pkg", "//envoy/config/bootstrap/v4alpha:pkg", + "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/tap/v4alpha:pkg", "//envoy/type/v3:pkg", diff --git a/api/envoy/admin/v4alpha/certs.proto b/api/envoy/admin/v4alpha/certs.proto index 585b09bccf4c..0dd868f71fa6 100644 --- a/api/envoy/admin/v4alpha/certs.proto +++ b/api/envoy/admin/v4alpha/certs.proto @@ -34,10 +34,21 @@ message Certificate { repeated CertificateDetails cert_chain = 2; } -// [#next-free-field: 7] +// [#next-free-field: 8] message CertificateDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails"; + message OcspDetails { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.CertificateDetails.OcspDetails"; + + // Indicates the time from which the OCSP response is valid. + google.protobuf.Timestamp valid_from = 1; + + // Indicates the time at which the OCSP response expires. + google.protobuf.Timestamp expiration = 2; + } + // Path of the certificate. 
string path = 1; @@ -55,6 +66,9 @@ message CertificateDetails { // Indicates the time at which the certificate expires. google.protobuf.Timestamp expiration_time = 6; + + // Details related to the OCSP response associated with this certificate, if any. + OcspDetails ocsp_details = 7; } message SubjectAlternateName { diff --git a/api/envoy/admin/v4alpha/clusters.proto b/api/envoy/admin/v4alpha/clusters.proto index 9056262cae86..10d920976930 100644 --- a/api/envoy/admin/v4alpha/clusters.proto +++ b/api/envoy/admin/v4alpha/clusters.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.admin.v4alpha; import "envoy/admin/v4alpha/metrics.proto"; +import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/health_check.proto"; @@ -28,7 +29,7 @@ message Clusters { } // Details an individual cluster's current status. -// [#next-free-field: 6] +// [#next-free-field: 7] message ClusterStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus"; @@ -76,6 +77,9 @@ message ClusterStatus { // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent local_origin_success_rate_ejection_threshold = 5; + + // :ref:`Circuit breaking ` settings of the cluster. + config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6; } // Current state of a particular host. diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto index 3f3570af0111..6f56978d49fe 100644 --- a/api/envoy/admin/v4alpha/server_info.proto +++ b/api/envoy/admin/v4alpha/server_info.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.admin.v4alpha; +import "envoy/config/core/v4alpha/base.proto"; + import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; @@ -17,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // Proto representation of the value returned by /server_info, containing // server version/server status information. -// [#next-free-field: 7] +// [#next-free-field: 8] message ServerInfo { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo"; @@ -52,9 +54,12 @@ message ServerInfo { // Command line options the server is currently running with. CommandLineOptions command_line_options = 6; + + // Populated node identity of this server. + config.core.v4alpha.Node node = 7; } -// [#next-free-field: 35] +// [#next-free-field: 37] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -178,4 +183,10 @@ message CommandLineOptions { // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; + + // See :option:`--socket-path` for details. + string socket_path = 35; + + // See :option:`--socket-mode` for details. + uint32 socket_mode = 36; } diff --git a/api/envoy/admin/v4alpha/tap.proto b/api/envoy/admin/v4alpha/tap.proto index 039dfcfeb812..e89259380418 100644 --- a/api/envoy/admin/v4alpha/tap.proto +++ b/api/envoy/admin/v4alpha/tap.proto @@ -21,7 +21,7 @@ message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. 
- string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; // The tap configuration to load. config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/api/v2/cluster.proto b/api/envoy/api/v2/cluster.proto index d1a50fbdb91e..fab95f71b763 100644 --- a/api/envoy/api/v2/cluster.proto +++ b/api/envoy/api/v2/cluster.proto @@ -352,6 +352,10 @@ message Cluster { // This header isn't sanitized by default, so enabling this feature allows HTTP clients to // route traffic to arbitrary hosts and/or ports, which may have serious security // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. bool use_http_header = 1; } @@ -677,10 +681,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated core.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. diff --git a/api/envoy/api/v2/route/route_components.proto b/api/envoy/api/v2/route/route_components.proto index 339c7bcbc53a..c1e84a5618a7 100644 --- a/api/envoy/api/v2/route/route_components.proto +++ b/api/envoy/api/v2/route/route_components.proto @@ -756,6 +756,10 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string cluster_header = 2 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; @@ -866,6 +870,10 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string auto_host_rewrite_header = 29 [ (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, (udpa.annotations.field_migrate).rename = "host_rewrite_header" diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index e9d815aafcea..d85c6af8294e 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -164,7 +164,7 @@ message RuntimeFilter { // Runtime key to get an optional overridden numerator for use in the // *percent_sampled* field. If found in runtime, this value will replace the // default numerator. - string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; // The default sampling percentage. If not specified, defaults to 0% with // denominator of 100. 
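A mechanical change repeated throughout this diff swaps min_bytes: 1 for min_len: 1 in protoc-gen-validate string rules. Assuming the usual PGV semantics, min_len counts Unicode characters while min_bytes counts UTF-8 bytes, so for a minimum of one the two constraints accept exactly the same strings, which is what makes the substitution safe. A tiny illustration:

```python
# Assumed PGV semantics: min_len counts characters, min_bytes counts UTF-8 bytes.
s = "é"                        # one character encoded as two UTF-8 bytes
print(len(s))                  # 1 -> satisfies min_len: 1
print(len(s.encode("utf-8")))  # 2 -> satisfies min_bytes: 1
# Both rules reject only the empty string when the minimum is 1, so replacing
# min_bytes: 1 with min_len: 1 does not change which configurations validate.
```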
@@ -254,6 +254,7 @@ message ResponseFlagFilter { in: "UMSDR" in: "RFCF" in: "NFCF" + in: "DT" } } }]; diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto index bd4bcd48c4b4..0714b614c41d 100644 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -164,7 +164,7 @@ message RuntimeFilter { // Runtime key to get an optional overridden numerator for use in the // *percent_sampled* field. If found in runtime, this value will replace the // default numerator. - string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; // The default sampling percentage. If not specified, defaults to 0% with // denominator of 100. @@ -253,6 +253,7 @@ message ResponseFlagFilter { in: "UMSDR" in: "RFCF" in: "NFCF" + in: "DT" } } }]; diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index da88dce786ae..30c276f24276 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -169,6 +169,9 @@ message Bootstrap { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 20; } diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index a1e981fcbdda..a9a0290b297c 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -40,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 27] +// [#next-free-field: 28] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -176,7 +176,13 @@ message Bootstrap { }]; // Optional watchdog configuration. - Watchdog watchdog = 8; + // This is for a single watchdog configuration for the entire system. + // Deprecated in favor of *watchdogs* which has finer granularity. + Watchdog watchdog = 8 [deprecated = true]; + + // Optional watchdogs configuration. + // This is used for specifying different watchdogs for the different subsystems. + Watchdogs watchdogs = 27; // Configuration for an external tracing provider. // @@ -228,6 +234,9 @@ message Bootstrap { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 20; // Specifies optional bootstrap extensions to be instantiated at startup time. @@ -336,6 +345,17 @@ message ClusterManager { core.v3.ApiConfigSource load_stats_config = 4; } +// Allows you to specify different watchdog configs for different subsystems. +// This allows finer tuned policies for the watchdog. If a subsystem is omitted +// the default values for that system will be used. +message Watchdogs { + // Watchdog for the main thread. + Watchdog main_thread_watchdog = 1; + + // Watchdog for the worker threads. 
+ Watchdog worker_watchdog = 2; +} + // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. @@ -486,7 +506,7 @@ message RuntimeLayer { // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof layer_specifier { option (validate.required) = true; diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index 989ecd30ddc4..ef10dead9706 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -38,7 +38,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 27] +// [#next-free-field: 28] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -98,9 +98,9 @@ message Bootstrap { core.v4alpha.ApiConfigSource ads_config = 3; } - reserved 10, 11, 9; + reserved 10, 11, 8, 9; - reserved "runtime", "tracing"; + reserved "runtime", "watchdog", "tracing"; // Node identity to present to the management server and for instance // identification purposes (e.g. in generated headers). @@ -173,8 +173,9 @@ message Bootstrap { gte {nanos: 1000000} }]; - // Optional watchdog configuration. - Watchdog watchdog = 8; + // Optional watchdogs configuration. + // This is used for specifying different watchdogs for the different subsystems. + Watchdogs watchdogs = 27; // Configuration for the runtime configuration provider. If not // specified, a “null” provider will be used which will result in all defaults @@ -219,6 +220,9 @@ message Bootstrap { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 20; // Specifies optional bootstrap extensions to be instantiated at startup time. @@ -327,6 +331,20 @@ message ClusterManager { core.v4alpha.ApiConfigSource load_stats_config = 4; } +// Allows you to specify different watchdog configs for different subsystems. +// This allows finer tuned policies for the watchdog. If a subsystem is omitted +// the default values for that system will be used. +message Watchdogs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Watchdogs"; + + // Watchdog for the main thread. + Watchdog main_thread_watchdog = 1; + + // Watchdog for the worker threads. + Watchdog worker_watchdog = 2; +} + // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. @@ -481,7 +499,7 @@ message RuntimeLayer { // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. 
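The bootstrap now deprecates the single watchdog field in favor of the Watchdogs message above, which lets the main thread and the workers run under different budgets. Below is a sketch of the corresponding bootstrap fragment, written as a Python dict mirroring the YAML structure; the miss_timeout/kill_timeout keys are assumed from the existing Watchdog message and the durations are illustrative only.

```python
# Bootstrap fragment (dict mirroring YAML). miss_timeout/kill_timeout are assumed
# from the existing Watchdog message; the durations are illustrative.
bootstrap_fragment = {
    "watchdogs": {
        "main_thread_watchdog": {
            "miss_timeout": "0.2s",
            "kill_timeout": "0s",  # assumption: 0 leaves the kill action disabled
        },
        "worker_watchdog": {
            "miss_timeout": "0.5s",  # workers can be given a looser budget
        },
    },
    # The old top-level "watchdog" field still parses but is now deprecated.
}
```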
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof layer_specifier { option (validate.required) = true; diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 3571ccf9abbd..8e039a1f16fe 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -170,7 +170,7 @@ message Cluster { "envoy.api.v2.Cluster.CustomClusterType"; // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. @@ -436,6 +436,10 @@ message Cluster { // This header isn't sanitized by default, so enabling this feature allows HTTP clients to // route traffic to arbitrary hosts and/or ports, which may have serious security // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. bool use_http_header = 1; } @@ -612,7 +616,32 @@ message Cluster { // // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can // harm latency more than the prefetching helps. - google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + + // Indicates how many many streams (rounded up) can be anticipated across a cluster for each + // stream, useful for low QPS services. This is currently supported for a subset of + // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). + // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // cluster, doing best effort predictions of what upstream would be picked next and + // pre-establishing a connection. + // + // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // cluster, one to the second on the assumption there will be a follow-up stream. + // + // Prefetching will be limited to one prefetch per configured upstream in the cluster. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight, so during warm up and in steady state if a connection + // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // connection establishment. + // + // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, + // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. + google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } reserved 12, 15, 7, 11, 35; @@ -675,7 +704,7 @@ message Cluster { // :ref:`statistics ` if :ref:`alt_stat_name // ` is not provided. // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. 
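The prefetch settings above now distinguish a per-upstream ratio from the new cluster-wide predictive ratio, with Envoy prefetching the larger of the two predicted needs for each upstream. A sketch of a cluster fragment exercising both follows (Python dict mirroring the YAML; the enclosing prefetch_policy field name is not visible in this hunk and is an assumption).

```python
# Cluster fragment (dict mirroring YAML). "prefetch_policy" as the enclosing field
# name is an assumption; the two ratio fields come from the hunk above.
cluster_fragment = {
    "name": "service_backend",
    "lb_policy": "ROUND_ROBIN",
    "prefetch_policy": {
        # Keep ~1.5 connections per expected in-flight stream on the chosen
        # upstream (both ratios are constrained to [1.0, 3.0]).
        "per_upstream_prefetch_ratio": 1.5,
        # Additionally anticipate ~2 streams across the cluster, pre-establishing
        # connections to the next predicted upstreams (round robin / random only).
        "predictive_prefetch_ratio": 2.0,
    },
}
# Effective behavior: Envoy prefetches max(per-upstream, predictive) per upstream.
```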
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An optional alternative to the cluster name to be used while emitting stats. // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be @@ -804,10 +833,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated core.v3.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. diff --git a/api/envoy/config/cluster/v3/filter.proto b/api/envoy/config/cluster/v3/filter.proto index af3116ec26eb..74f4a1137dab 100644 --- a/api/envoy/config/cluster/v3/filter.proto +++ b/api/envoy/config/cluster/v3/filter.proto @@ -21,7 +21,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 9b7536836365..0ad15668e6cf 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -172,7 +172,7 @@ message Cluster { "envoy.config.cluster.v3.Cluster.CustomClusterType"; // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. @@ -442,6 +442,10 @@ message Cluster { // This header isn't sanitized by default, so enabling this feature allows HTTP clients to // route traffic to arbitrary hosts and/or ports, which may have serious security // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. bool use_http_header = 1; } @@ -622,7 +626,32 @@ message Cluster { // // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can // harm latency more than the prefetching helps. - google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + + // Indicates how many many streams (rounded up) can be anticipated across a cluster for each + // stream, useful for low QPS services. This is currently supported for a subset of + // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). 
+ // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // cluster, doing best effort predictions of what upstream would be picked next and + // pre-establishing a connection. + // + // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // cluster, one to the second on the assumption there will be a follow-up stream. + // + // Prefetching will be limited to one prefetch per configured upstream in the cluster. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight, so during warm up and in steady state if a connection + // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // connection establishment. + // + // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, + // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. + google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } reserved 12, 15, 7, 11, 35, 47; @@ -685,7 +714,7 @@ message Cluster { // :ref:`statistics ` if :ref:`alt_stat_name // ` is not provided. // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An optional alternative to the cluster name to be used while emitting stats. // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be @@ -814,10 +843,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated core.v4alpha.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. diff --git a/api/envoy/config/cluster/v4alpha/filter.proto b/api/envoy/config/cluster/v4alpha/filter.proto index eb825fdeb6d5..5a4a4facbd81 100644 --- a/api/envoy/config/cluster/v4alpha/filter.proto +++ b/api/envoy/config/cluster/v4alpha/filter.proto @@ -21,7 +21,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. 
diff --git a/api/envoy/config/core/v3/address.proto b/api/envoy/config/core/v3/address.proto index 5102c2d57591..8228450eb93c 100644 --- a/api/envoy/config/core/v3/address.proto +++ b/api/envoy/config/core/v3/address.proto @@ -24,12 +24,24 @@ message Pipe { // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; // The mode for the Pipe. Not applicable for abstract sockets. uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; } +// [#not-implemented-hide:] The address represents an envoy internal listener. +// TODO(lambdai): Make this address available for listener and endpoint. +// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. +message EnvoyInternalAddress { + oneof address_name_specifier { + option (validate.required) = true; + + // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. + string server_listener_name = 1; + } +} + // [#next-free-field: 7] message SocketAddress { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress"; @@ -52,7 +64,7 @@ message SocketAddress { // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_bytes: 1}]; + string address = 2 [(validate.rules).string = {min_len: 1}]; oneof port_specifier { option (validate.required) = true; @@ -129,6 +141,9 @@ message Address { SocketAddress socket_address = 1; Pipe pipe = 2; + + // [#not-implemented-hide:] + EnvoyInternalAddress envoy_internal_address = 3; } } @@ -138,7 +153,7 @@ message CidrRange { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange"; // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Length of prefix, e.g. 0, 32. google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index 15a17b49384d..4d7d69fae70b 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -237,7 +237,16 @@ message RuntimeUInt32 { uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; +} + +// Runtime derived percentage with a default when not specified. +message RuntimePercent { + // Default value if runtime value is not available. + type.v3.Percent default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived double with a default when not specified. @@ -248,7 +257,7 @@ message RuntimeDouble { double default_value = 1; // Runtime key to get value for comparison. This value is used if defined. 
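The new RuntimePercent message follows the same pattern as RuntimeUInt32 and RuntimeDouble: a default envoy.type.v3.Percent plus a runtime key that overrides it when defined. A minimal fragment is sketched below (Python dict mirroring the YAML; the runtime key is hypothetical and the field that embeds RuntimePercent is left abstract).

```python
# RuntimePercent fragment: runtime_key, when defined, overrides default_value.
runtime_percent_fragment = {
    "default_value": {"value": 5.0},             # envoy.type.v3.Percent
    "runtime_key": "my_feature.sample_percent",  # hypothetical runtime key
}
```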
- string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived bool with a default when not specified. @@ -262,7 +271,7 @@ message RuntimeFeatureFlag { // Runtime key to get value for comparison. This value is used if defined. The boolean value must // be represented via its // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Header name/value pair. @@ -272,7 +281,7 @@ message HeaderValue { // Header name. string key = 1 [(validate.rules).string = - {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Header value. // @@ -312,13 +321,13 @@ message DataSource { option (validate.required) = true; // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; + string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; + string inline_string = 3 [(validate.rules).string = {min_len: 1}]; } } @@ -345,7 +354,7 @@ message RemoteDataSource { HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; + string sha256 = 2 [(validate.rules).string = {min_len: 1}]; // Retry policy for fetching remote data. RetryPolicy retry_policy = 3; @@ -379,7 +388,7 @@ message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. diff --git a/api/envoy/config/core/v3/grpc_method_list.proto b/api/envoy/config/core/v3/grpc_method_list.proto index 800d7b5332a0..e79ec24e0201 100644 --- a/api/envoy/config/core/v3/grpc_method_list.proto +++ b/api/envoy/config/core/v3/grpc_method_list.proto @@ -22,7 +22,7 @@ message GrpcMethodList { "envoy.api.v2.core.GrpcMethodList.Service"; // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The names of the gRPC methods in this service. repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto index 967c694d2bc4..e3730d017410 100644 --- a/api/envoy/config/core/v3/grpc_service.proto +++ b/api/envoy/config/core/v3/grpc_service.proto @@ -35,13 +35,13 @@ message GrpcService { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`transport_socket // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. 
// Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. string authority = 2 [(validate.rules).string = - {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] @@ -160,10 +160,10 @@ message GrpcService { // The path of subject token, a security token that represents the // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; // The path of actor token, a security token that represents the identity // of the acting party. The acting party is authorized to use the @@ -230,7 +230,7 @@ message GrpcService { // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string target_uri = 1 [(validate.rules).string = {min_len: 1}]; ChannelCredentials channel_credentials = 2; @@ -247,7 +247,7 @@ message GrpcService { // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel @@ -286,8 +286,10 @@ message GrpcService { // request. google.protobuf.Duration timeout = 3; - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + // Additional metadata to include in streams initiated to the GrpcService. This can be used for + // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to + // be injected. For more information, including details on header value syntax, see the + // documentation on :ref:`custom request headers + // `. repeated HeaderValue initial_metadata = 5; } diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto index c6b4acfa937a..ccd473969846 100644 --- a/api/envoy/config/core/v3/health_check.proto +++ b/api/envoy/config/core/v3/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 24] +// [#next-free-field: 25] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -67,7 +67,7 @@ message HealthCheck { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_bytes: 1}]; + string text = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; @@ -91,9 +91,8 @@ message HealthCheck { // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. 
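The expanded initial_metadata comment now points at the custom request headers documentation for value syntax; the field itself remains a list of HeaderValue pairs attached to every stream opened to the gRPC service. A small fragment showing the shape (Python dict mirroring the YAML; the cluster name and header are illustrative):

```python
# GrpcService fragment with ad hoc metadata injected on every stream.
grpc_service_fragment = {
    "envoy_grpc": {"cluster_name": "ext_authz_cluster"},  # hypothetical cluster
    "timeout": "1s",
    "initial_metadata": [
        {"key": "x-foo-bar", "value": "baz-key"},  # the example from the comment
    ],
}
```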
- string path = 2 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string path = 2 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -187,7 +186,7 @@ message HealthCheck { reserved "config"; // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. @@ -285,6 +284,21 @@ message HealthCheck { // The default value for "no traffic interval" is 60 seconds. google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + // The "no traffic healthy interval" is a special health check interval that + // is used for hosts that are currently passing active health checking + // (including new hosts) when the cluster has received no traffic. + // + // This is useful for when we want to send frequent health checks with + // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once + // a host in the cluster is marked as healthy. + // + // Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. + // + // If no_traffic_healthy_interval is not set, it will default to the + // no traffic interval and send that interval regardless of health state. + google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. diff --git a/api/envoy/config/core/v3/http_uri.proto b/api/envoy/config/core/v3/http_uri.proto index 42bcd4f61572..5d1fc239e07e 100644 --- a/api/envoy/config/core/v3/http_uri.proto +++ b/api/envoy/config/core/v3/http_uri.proto @@ -27,7 +27,7 @@ message HttpUri { // // uri: https://www.googleapis.com/oauth2/v1/certs // - string uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string uri = 1 [(validate.rules).string = {min_len: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or @@ -45,7 +45,7 @@ message HttpUri { // // cluster: jwks_cluster // - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
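The new no_traffic_healthy_interval lets an idle cluster health-check aggressively until a host first passes, then back off without waiting for real traffic to arrive. The sketch below shows how the three intervals relate (Python dict mirroring the YAML; durations and thresholds are illustrative, and the surrounding HealthCheck fields are assumed from the existing message).

```python
# HealthCheck fragment: how the three intervals interact (values illustrative).
health_check_fragment = {
    "timeout": "1s",
    "interval": "5s",                      # used once the cluster is routing traffic
    "no_traffic_interval": "2s",           # idle cluster, host not yet proven healthy
    "no_traffic_healthy_interval": "60s",  # idle cluster, host already passing checks
    "unhealthy_threshold": 3,
    "healthy_threshold": 2,
    "http_health_check": {"path": "/healthcheck"},
}
```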
diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 3e20f3b449ae..17a6955d6851 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v3; +import "envoy/type/v3/percent.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -177,7 +179,27 @@ message Http1ProtocolOptions { google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; } -// [#next-free-field: 15] +message KeepaliveSettings { + // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + google.protobuf.Duration interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // How long to wait for a response to a keepalive PING. If a response is not received within this + // time period, the connection will be aborted. + google.protobuf.Duration timeout = 2 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // A random jitter amount as a percentage of interval that will be added to each interval. + // A value of zero means there will be no jitter. + // The default value is 15%. + type.v3.Percent interval_jitter = 3; +} + +// [#next-free-field: 16] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http2ProtocolOptions"; @@ -345,6 +367,10 @@ message Http2ProtocolOptions { // `_ for // standardized identifiers. repeated SettingsParameter custom_settings_parameters = 13; + + // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + // does not respond within the configured timeout, the connection will be aborted. + KeepaliveSettings connection_keepalive = 15; } // [#not-implemented-hide:] diff --git a/api/envoy/config/core/v3/substitution_format_string.proto b/api/envoy/config/core/v3/substitution_format_string.proto index 6c129707b2e2..10d99b878bdd 100644 --- a/api/envoy/config/core/v3/substitution_format_string.proto +++ b/api/envoy/config/core/v3/substitution_format_string.proto @@ -23,17 +23,20 @@ message SubstitutionFormatString { // Specify a format with command operators to form a text string. // Its details is described in :ref:`format string`. // - // .. code-block:: + // For example, setting ``text_format`` like below, // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // The following plain text will be created: + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // .. code-block:: + // generates plain text similar to: // - // upstream connect error:204:path=/foo + // .. code-block:: text // - string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + // upstream connect error:503:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_len: 1}]; // Specify a format with command operators to form a JSON string. // Its details is described in :ref:`format dictionary`. @@ -41,11 +44,12 @@ message SubstitutionFormatString { // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). // See the documentation for a specific command operator for details. // - // .. code-block:: + // .. 
validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" // // The following JSON object would be created: // @@ -65,4 +69,15 @@ message SubstitutionFormatString { // empty string, so that empty values are omitted entirely. // * for ``json_format`` the keys with null values are omitted in the output structure. bool omit_empty_values = 3; + + // Specify a *content_type* field. + // If this field is not set then ``text/plain`` is used for *text_format* and + // ``application/json`` is used for *json_format*. + // + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // content_type: "text/html; charset=UTF-8" + // + string content_type = 4; } diff --git a/api/envoy/config/core/v4alpha/address.proto b/api/envoy/config/core/v4alpha/address.proto index ffade4bed75b..6ae82359504e 100644 --- a/api/envoy/config/core/v4alpha/address.proto +++ b/api/envoy/config/core/v4alpha/address.proto @@ -24,12 +24,27 @@ message Pipe { // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; // The mode for the Pipe. Not applicable for abstract sockets. uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; } +// [#not-implemented-hide:] The address represents an envoy internal listener. +// TODO(lambdai): Make this address available for listener and endpoint. +// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. +message EnvoyInternalAddress { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.EnvoyInternalAddress"; + + oneof address_name_specifier { + option (validate.required) = true; + + // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. + string server_listener_name = 1; + } +} + // [#next-free-field: 7] message SocketAddress { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress"; @@ -52,7 +67,7 @@ message SocketAddress { // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_bytes: 1}]; + string address = 2 [(validate.rules).string = {min_len: 1}]; oneof port_specifier { option (validate.required) = true; @@ -129,6 +144,9 @@ message Address { SocketAddress socket_address = 1; Pipe pipe = 2; + + // [#not-implemented-hide:] + EnvoyInternalAddress envoy_internal_address = 3; } } @@ -138,7 +156,7 @@ message CidrRange { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange"; // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Length of prefix, e.g. 0, 32. 
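The KeepaliveSettings message added to protocol.proto above drives outbound HTTP/2 PING frames: one PING per interval, the connection aborted if no acknowledgement arrives within timeout, and interval_jitter (default 15%) spreading the probes. A sketch of the knob as it would sit under Http2ProtocolOptions (Python dict mirroring the YAML; where the options block attaches, e.g. on a cluster or listener, is outside this hunk):

```python
# Http2ProtocolOptions fragment enabling connection-level keepalive PINGs.
http2_options_fragment = {
    "connection_keepalive": {
        "interval": "30s",                   # send a PING every ~30s (illustrative)
        "timeout": "5s",                     # abort the connection if no ACK within 5s
        "interval_jitter": {"value": 15.0},  # jitter added to each interval, in percent
    },
}
```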
google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto index b13b4e89bfd1..dc1104a219b7 100644 --- a/api/envoy/config/core/v4alpha/base.proto +++ b/api/envoy/config/core/v4alpha/base.proto @@ -229,7 +229,19 @@ message RuntimeUInt32 { uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; +} + +// Runtime derived percentage with a default when not specified. +message RuntimePercent { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RuntimePercent"; + + // Default value if runtime value is not available. + type.v3.Percent default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived double with a default when not specified. @@ -240,7 +252,7 @@ message RuntimeDouble { double default_value = 1; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived bool with a default when not specified. @@ -254,7 +266,7 @@ message RuntimeFeatureFlag { // Runtime key to get value for comparison. This value is used if defined. The boolean value must // be represented via its // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Header name/value pair. @@ -264,7 +276,7 @@ message HeaderValue { // Header name. string key = 1 [(validate.rules).string = - {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Header value. // @@ -304,13 +316,13 @@ message DataSource { option (validate.required) = true; // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; + string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; + string inline_string = 3 [(validate.rules).string = {min_len: 1}]; } } @@ -337,7 +349,7 @@ message RemoteDataSource { HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; + string sha256 = 2 [(validate.rules).string = {min_len: 1}]; // Retry policy for fetching remote data. RetryPolicy retry_policy = 3; @@ -373,7 +385,7 @@ message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. 
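For illustration, the new ``connection_keepalive`` message introduced above could be wired into a cluster's HTTP/2 options roughly as in the sketch below. Only the ``interval``, ``timeout`` and ``interval_jitter`` field names come from this change; the ``http2_protocol_options`` placement and all values are assumptions for the example.

.. code-block:: yaml

   # Sketch only: enable HTTP/2 keepalive PINGs on upstream connections.
   http2_protocol_options:
     connection_keepalive:
       # Send a PING every 30s (must be at least 1ms per the validation rule above).
       interval: 30s
       # Abort the connection if no PING response arrives within 5s.
       timeout: 5s
       # Add up to 15% random jitter to each interval (the documented default).
       interval_jitter:
         value: 15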
diff --git a/api/envoy/config/core/v4alpha/grpc_method_list.proto b/api/envoy/config/core/v4alpha/grpc_method_list.proto index a4a7be077b27..371ea32c10f3 100644 --- a/api/envoy/config/core/v4alpha/grpc_method_list.proto +++ b/api/envoy/config/core/v4alpha/grpc_method_list.proto @@ -23,7 +23,7 @@ message GrpcMethodList { "envoy.config.core.v3.GrpcMethodList.Service"; // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The names of the gRPC methods in this service. repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto index 51f11fa1f346..9ea35b456470 100644 --- a/api/envoy/config/core/v4alpha/grpc_service.proto +++ b/api/envoy/config/core/v4alpha/grpc_service.proto @@ -35,13 +35,13 @@ message GrpcService { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`transport_socket // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. string authority = 2 [(validate.rules).string = - {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] @@ -160,10 +160,10 @@ message GrpcService { // The path of subject token, a security token that represents the // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; // The path of actor token, a security token that represents the identity // of the acting party. The acting party is authorized to use the @@ -236,7 +236,7 @@ message GrpcService { // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string target_uri = 1 [(validate.rules).string = {min_len: 1}]; ChannelCredentials channel_credentials = 2; @@ -253,7 +253,7 @@ message GrpcService { // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel @@ -292,8 +292,10 @@ message GrpcService { // request. google.protobuf.Duration timeout = 3; - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. 
+ // Additional metadata to include in streams initiated to the GrpcService. This can be used for + // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to + // be injected. For more information, including details on header value syntax, see the + // documentation on :ref:`custom request headers + // `. repeated HeaderValue initial_metadata = 5; } diff --git a/api/envoy/config/core/v4alpha/health_check.proto b/api/envoy/config/core/v4alpha/health_check.proto index 39badc334b01..2761b856a3d7 100644 --- a/api/envoy/config/core/v4alpha/health_check.proto +++ b/api/envoy/config/core/v4alpha/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 24] +// [#next-free-field: 25] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; @@ -67,7 +67,7 @@ message HealthCheck { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_bytes: 1}]; + string text = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; @@ -91,9 +91,8 @@ message HealthCheck { // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. - string path = 2 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string path = 2 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -187,7 +186,7 @@ message HealthCheck { reserved "config"; // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. @@ -285,6 +284,21 @@ message HealthCheck { // The default value for "no traffic interval" is 60 seconds. google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + // The "no traffic healthy interval" is a special health check interval that + // is used for hosts that are currently passing active health checking + // (including new hosts) when the cluster has received no traffic. + // + // This is useful for when we want to send frequent health checks with + // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once + // a host in the cluster is marked as healthy. + // + // Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. + // + // If no_traffic_healthy_interval is not set, it will default to the + // no traffic interval and send that interval regardless of health state. + google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. 
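A hedged sketch of how the new ``no_traffic_healthy_interval`` might be combined with the existing intervals; the values are illustrative and only the field names are taken from the message above.

.. code-block:: yaml

   # Sketch only: probe idle clusters frequently until a host passes, then back off.
   health_checks:
   - timeout: 1s
     interval: 5s                      # interval once the cluster is routing traffic
     no_traffic_interval: 10s          # idle cluster, host not yet known healthy
     no_traffic_healthy_interval: 60s  # idle cluster, host already passing checks
     unhealthy_threshold: 3
     healthy_threshold: 2
     http_health_check:
       path: /healthcheck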
diff --git a/api/envoy/config/core/v4alpha/http_uri.proto b/api/envoy/config/core/v4alpha/http_uri.proto index e88a9aa7d7df..ae1c0c9a3d4e 100644 --- a/api/envoy/config/core/v4alpha/http_uri.proto +++ b/api/envoy/config/core/v4alpha/http_uri.proto @@ -27,7 +27,7 @@ message HttpUri { // // uri: https://www.googleapis.com/oauth2/v1/certs // - string uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string uri = 1 [(validate.rules).string = {min_len: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or @@ -45,7 +45,7 @@ message HttpUri { // // cluster: jwks_cluster // - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto index 19e5de6d8b1a..807488cef49d 100644 --- a/api/envoy/config/core/v4alpha/protocol.proto +++ b/api/envoy/config/core/v4alpha/protocol.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v4alpha; +import "envoy/type/v3/percent.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -177,7 +179,30 @@ message Http1ProtocolOptions { google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; } -// [#next-free-field: 15] +message KeepaliveSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.KeepaliveSettings"; + + // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + google.protobuf.Duration interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // How long to wait for a response to a keepalive PING. If a response is not received within this + // time period, the connection will be aborted. + google.protobuf.Duration timeout = 2 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // A random jitter amount as a percentage of interval that will be added to each interval. + // A value of zero means there will be no jitter. + // The default value is 15%. + type.v3.Percent interval_jitter = 3; +} + +// [#next-free-field: 16] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions"; @@ -335,6 +360,10 @@ message Http2ProtocolOptions { // `_ for // standardized identifiers. repeated SettingsParameter custom_settings_parameters = 13; + + // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + // does not respond within the configured timeout, the connection will be aborted. + KeepaliveSettings connection_keepalive = 15; } // [#not-implemented-hide:] diff --git a/api/envoy/config/core/v4alpha/substitution_format_string.proto b/api/envoy/config/core/v4alpha/substitution_format_string.proto index ffff2fe3e754..e996bcbc0cf6 100644 --- a/api/envoy/config/core/v4alpha/substitution_format_string.proto +++ b/api/envoy/config/core/v4alpha/substitution_format_string.proto @@ -27,17 +27,20 @@ message SubstitutionFormatString { // Specify a format with command operators to form a text string. // Its details is described in :ref:`format string`. // - // .. 
code-block:: + // For example, setting ``text_format`` like below, // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // The following plain text will be created: + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // .. code-block:: + // generates plain text similar to: // - // upstream connect error:204:path=/foo + // .. code-block:: text // - string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + // upstream connect error:503:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_len: 1}]; // Specify a format with command operators to form a JSON string. // Its details is described in :ref:`format dictionary`. @@ -45,11 +48,12 @@ message SubstitutionFormatString { // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). // See the documentation for a specific command operator for details. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" // // The following JSON object would be created: // @@ -69,4 +73,15 @@ message SubstitutionFormatString { // empty string, so that empty values are omitted entirely. // * for ``json_format`` the keys with null values are omitted in the output structure. bool omit_empty_values = 3; + + // Specify a *content_type* field. + // If this field is not set then ``text/plain`` is used for *text_format* and + // ``application/json`` is used for *json_format*. + // + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // content_type: "text/html; charset=UTF-8" + // + string content_type = 4; } diff --git a/api/envoy/config/endpoint/v3/endpoint.proto b/api/envoy/config/endpoint/v3/endpoint.proto index e58c327156cf..214ce6c20883 100644 --- a/api/envoy/config/endpoint/v3/endpoint.proto +++ b/api/envoy/config/endpoint/v3/endpoint.proto @@ -46,7 +46,7 @@ message ClusterLoadAssignment { "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_bytes: 1}]; + string category = 1 [(validate.rules).string = {min_len: 1}]; // Percentage of traffic that should be dropped for the category. type.v3.FractionalPercent drop_percentage = 2; @@ -105,7 +105,7 @@ message ClusterLoadAssignment { // ` value if specified // in the cluster :ref:`EdsClusterConfig // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // List of endpoints to load balance to. repeated LocalityLbEndpoints endpoints = 2; diff --git a/api/envoy/config/endpoint/v3/load_report.proto b/api/envoy/config/endpoint/v3/load_report.proto index 3f067737ec25..7140ca05afc7 100644 --- a/api/envoy/config/endpoint/v3/load_report.proto +++ b/api/envoy/config/endpoint/v3/load_report.proto @@ -129,14 +129,14 @@ message ClusterStats { "envoy.api.v2.endpoint.ClusterStats.DroppedRequests"; // Identifier for the policy specifying the drop. 
- string category = 1 [(validate.rules).string = {min_bytes: 1}]; + string category = 1 [(validate.rules).string = {min_len: 1}]; // Total number of deliberately dropped requests for the category. uint64 dropped_count = 2; } // The name of the cluster. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // The eds_cluster_config service_name of the cluster. // It's possible that two clusters send the same service_name to EDS, diff --git a/api/envoy/config/filter/http/cache/v2alpha/cache.proto b/api/envoy/config/filter/http/cache/v2alpha/cache.proto index d08b5462fd88..98035c05d45a 100644 --- a/api/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/api/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -48,17 +48,14 @@ message CacheConfig { // Config specific to the cache storage implementation. google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - // [#not-implemented-hide:] - // - // - // List of allowed *Vary* headers. + // List of matching rules that defines allowed *Vary* headers. // // The *vary* response header holds a list of header names that affect the // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't in + // response's *vary* header mentions any header names that aren't matched by any rules in // *allowed_vary_headers*, that response will not be cached. // // During lookup, *allowed_vary_headers* controls what request headers will be diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index 29aa8380191b..436bb6bf4616 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -51,6 +51,10 @@ message PerRouteConfig { // :ref:`HCM host rewrite header ` // given that the value set here would be used for DNS lookups whereas the value set in the HCM // would be used for host header forwarding which is not the desired outcome. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string auto_host_rewrite_header = 2 [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; } diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 06b13acb2f63..c05032df21a4 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -586,6 +586,10 @@ message ScopedRoutes { } // The name of the header field to extract the value from. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The element separator (e.g., ';' separates 'a;b;c;d'). 
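Since *allowed_vary_headers* is now described as a list of matching rules rather than bare header names, a configuration fragment would plausibly look like the sketch below; the ``exact`` matcher syntax is an assumption about the underlying string-matcher type and is not stated in this diff.

.. code-block:: yaml

   # Sketch only: cache responses whose Vary header names only these headers.
   allowed_vary_headers:
   - exact: accept
   - exact: accept-encoding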
diff --git a/api/envoy/config/grpc_credential/v3/aws_iam.proto b/api/envoy/config/grpc_credential/v3/aws_iam.proto index eeb5d93ec689..e2e9c7da4833 100644 --- a/api/envoy/config/grpc_credential/v3/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v3/aws_iam.proto @@ -24,7 +24,7 @@ message AwsIamConfig { // of the Grpc endpoint. // // Example: appmesh - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 1 [(validate.rules).string = {min_len: 1}]; // The `region `_ hosting the Grpc // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index 88e8ae4ad5b1..aac1166f49fd 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -33,10 +33,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Listener list collections. Entries are *Listener* resources or references. // [#not-implemented-hide:] message ListenerCollection { - udpa.core.v1.CollectionEntry entries = 1; + repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -116,6 +116,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto index 8a22fbc97f5f..3ecfc7932b56 100644 --- a/api/envoy/config/listener/v3/listener_components.proto +++ b/api/envoy/config/listener/v3/listener_components.proto @@ -32,7 +32,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. @@ -65,6 +65,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. 
+// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // @@ -303,7 +315,7 @@ message ListenerFilter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto index 753f6d733cc0..fbc65d0880f3 100644 --- a/api/envoy/config/listener/v4alpha/listener.proto +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -36,10 +36,10 @@ message ListenerCollection { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.ListenerCollection"; - udpa.core.v1.CollectionEntry entries = 1; + repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -119,6 +119,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto index 61babe8e622f..0c75f92b4027 100644 --- a/api/envoy/config/listener/v4alpha/listener_components.proto +++ b/api/envoy/config/listener/v4alpha/listener_components.proto @@ -32,7 +32,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. @@ -65,6 +65,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. 
+// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // @@ -307,7 +319,7 @@ message ListenerFilter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index c6113bf5a5d3..62afcf56e4e7 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -201,7 +201,7 @@ message TagSpecifier { // // { // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\.((.+?)\.)" + // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted @@ -218,11 +218,11 @@ message TagSpecifier { // [ // { // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\.((.*?)\.)" + // "regex": "^http\\.((.*?)\\.)" // } // ] // diff --git a/api/envoy/config/metrics/v3/stats.proto b/api/envoy/config/metrics/v3/stats.proto index 275db1f6457a..be0cbb9dab92 100644 --- a/api/envoy/config/metrics/v3/stats.proto +++ b/api/envoy/config/metrics/v3/stats.proto @@ -244,7 +244,7 @@ message TagSpecifier { // // { // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\.((.+?)\.)" + // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted @@ -261,11 +261,11 @@ message TagSpecifier { // [ // { // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\.((.*?)\.)" + // "regex": "^http\\.((.*?)\\.)" // } // ] // diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto index 6265118cf9b8..bd37875c0bf9 100644 --- a/api/envoy/config/metrics/v4alpha/stats.proto +++ b/api/envoy/config/metrics/v4alpha/stats.proto @@ -244,7 +244,7 @@ message TagSpecifier { // // { // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\.((.+?)\.)" + // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted @@ -261,11 +261,11 @@ message TagSpecifier { // [ // { // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\.((.*?)\.)" + // "regex": "^http\\.((.*?)\\.)" // } // ] // diff --git a/api/envoy/config/overload/v3/overload.proto b/api/envoy/config/overload/v3/overload.proto index 061783a04b77..ac1d444b629a 100644 --- a/api/envoy/config/overload/v3/overload.proto +++ b/api/envoy/config/overload/v3/overload.proto @@ -37,7 +37,7 @@ message ResourceMonitor { // ` // * :ref:`envoy.resource_monitors.injected_resource // ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 
1}]; // Configuration for the resource monitor being instantiated. oneof config_type { @@ -69,7 +69,7 @@ message Trigger { "envoy.config.overload.v2alpha.Trigger"; // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof trigger_oneof { option (validate.required) = true; @@ -87,7 +87,7 @@ message OverloadAction { // The name of the overload action. This is just a well-known string that listeners can // use for registering callbacks. Custom overload actions should be named using reverse // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A set of triggers for this action. The state of the action is the maximum // state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 93fb6b05911d..595fde141e6c 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -60,7 +60,7 @@ message VirtualHost { // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. @@ -113,7 +113,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of HTTP headers that should be added to each response @@ -128,7 +128,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Indicates that the virtual host has a CORS policy. @@ -263,7 +263,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each request // matching this route. repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a set of headers that will be added to responses to requests @@ -278,7 +278,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. 
repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Presence of the object defines whether the connection manager's tracing configuration @@ -311,7 +311,7 @@ message WeightedCluster { // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -338,7 +338,9 @@ message WeightedCluster { // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`. - repeated string request_headers_to_remove = 9; + repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Specifies a list of headers to be added to responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`. @@ -352,7 +354,9 @@ message WeightedCluster { // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`. - repeated string response_headers_to_remove = 6; + repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as @@ -440,7 +444,7 @@ message RouteMatch { // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style // upgrades. // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, - // where CONNECT requests may have a path, the path matchers will work if + // where Extended CONNECT requests may have a path, the path matchers will work if // there is a path present. // Note that CONNECT support is currently considered alpha in Envoy. // [#comment:TODO(htuch): Replace the above comment with an alpha tag. @@ -545,7 +549,7 @@ message CorsPolicy { core.v3.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 36] +// [#next-free-field: 37] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -587,7 +591,7 @@ message RouteAction { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified, all requests to the target cluster will be mirrored. // @@ -616,9 +620,8 @@ message RouteAction { // The name of the request header that will be used to obtain the hash // key. If the request header is not present, no hash will be produced. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If specified, the request header value will be rewritten and used // to produce the hash key. 
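For the header hash policy shown above, a minimal route-level sketch could look like the following; the cluster name and header name are placeholders, and only ``header_name`` is defined by the message in this diff.

.. code-block:: yaml

   # Sketch only: session affinity keyed on a request header for hash-based load balancing.
   route:
     cluster: backend            # placeholder cluster name
     hash_policy:
     - header:
         header_name: x-user-id  # header whose value is used to derive the hash key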
@@ -646,7 +649,7 @@ message RouteAction { // The name of the cookie that will be used to obtain the hash key. If the // cookie is not present and ttl below is not set, no hash will be // produced. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // If specified, a cookie with the TTL will be generated if the cookie is // not present. If the TTL is present and zero, the generated cookie will @@ -673,7 +676,7 @@ message RouteAction { // The name of the URL query parameter that will be used to obtain the hash // key. If the parameter is not present, no hash will be produced. Query // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; } message FilterState { @@ -683,7 +686,7 @@ message RouteAction { // The name of the Object in the per-request filterState, which is an // Envoy::Http::Hashable object. If there is no data associated with the key, // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } oneof policy_specifier { @@ -760,6 +763,32 @@ message RouteAction { ConnectConfig connect_config = 3; } + message MaxStreamDuration { + // Specifies the maximum duration allowed for streams on the route. If not specified, the value + // from the :ref:`max_stream_duration + // ` field in + // :ref:`HttpConnectionManager.common_http_protocol_options + // ` + // is used. If this field is set explicitly to zero, any + // HttpConnectionManager max_stream_duration timeout will be disabled for + // this route. + google.protobuf.Duration max_stream_duration = 1; + + // If present, and the request contains a `grpc-timeout header + // `_, use that value as the + // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. + // If set to 0, the `grpc-timeout` header is used without modification. + google.protobuf.Duration grpc_timeout_header_max = 2; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by + // subtracting the provided duration from the header. This is useful for allowing Envoy to set + // its global timeout to be less than that of the deadline imposed by the calling client, which + // makes it more likely that Envoy will handle the timeout instead of having the call canceled + // by the client. If, after applying the offset, the resulting timeout is zero or negative, + // the stream will timeout immediately. + google.protobuf.Duration grpc_timeout_header_offset = 3; + } + reserved 12, 18, 19, 16, 22, 21, 10; reserved "request_mirror_policy"; @@ -769,7 +798,7 @@ message RouteAction { // Indicates the upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -780,8 +809,12 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -888,6 +921,10 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string host_rewrite_header = 29 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; @@ -968,7 +1005,9 @@ message RouteAction { // limits. By default, if the route configured rate limits, the virtual host // :ref:`rate_limits ` are not applied to the // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; + // + // This field is deprecated. Please use :ref:`vh_rate_limits ` + google.protobuf.BoolValue include_vh_rate_limits = 14 [deprecated = true]; // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to @@ -987,6 +1026,7 @@ message RouteAction { // Indicates that the route has a CORS policy. CorsPolicy cors = 17; + // Deprecated by :ref:`grpc_timeout_header_max ` // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, // or its default value (infinity) instead of @@ -1006,8 +1046,9 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23; + google.protobuf.Duration max_grpc_timeout = 23 [deprecated = true]; + // Deprecated by :ref:`grpc_timeout_header_offset `. // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting // the provided duration from the header. This is useful in allowing Envoy to set its global // timeout to be less than that of the deadline imposed by the calling client, which makes it more @@ -1015,7 +1056,7 @@ message RouteAction { // The offset will only be applied if the provided grpc_timeout is greater than the offset. This // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning // infinity). - google.protobuf.Duration grpc_timeout_offset = 28; + google.protobuf.Duration grpc_timeout_offset = 28 [deprecated = true]; repeated UpgradeConfig upgrade_configs = 25; @@ -1047,6 +1088,9 @@ message RouteAction { // it'll take precedence over the virtual host level hedge policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). HedgePolicy hedge_policy = 27; + + // Specifies the maximum stream duration for this route. + MaxStreamDuration max_stream_duration = 36; } // HTTP retry :ref:`architecture overview `. 
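The new per-route *max_stream_duration* settings, which supersede the deprecated *max_grpc_timeout* and *grpc_timeout_offset* fields, could be configured along these lines; field names follow the ``MaxStreamDuration`` message above and all durations are illustrative.

.. code-block:: yaml

   # Sketch only: cap stream lifetime and bound client-supplied gRPC deadlines.
   route:
     cluster: backend                   # placeholder cluster name
     max_stream_duration:
       max_stream_duration: 30s         # hard cap for streams on this route
       grpc_timeout_header_max: 30s     # honor grpc-timeout, but never above 30s
       grpc_timeout_header_offset: 1s   # subtract 1s so Envoy times out before the client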
@@ -1067,7 +1111,7 @@ message RetryPolicy { reserved "config"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1082,7 +1126,7 @@ message RetryPolicy { reserved "config"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1110,9 +1154,15 @@ message RetryPolicy { } message ResetHeader { + // The name of the reset header. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + // The format of the reset header. ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; } @@ -1378,7 +1428,7 @@ message Decorator { // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. - string operation = 1 [(validate.rules).string = {min_bytes: 1}]; + string operation = 1 [(validate.rules).string = {min_len: 1}]; // Whether the decorated details should be propagated to the other party. The default is true. google.protobuf.BoolValue propagate = 2; @@ -1453,14 +1503,14 @@ message VirtualCluster { // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(validate.rules).string = {min_len: 1}]; } // Global rate limiting :ref:`architecture overview `. message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1511,12 +1561,11 @@ message RateLimit { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; // If set to true, Envoy skips the descriptor while calling rate limiting service // when header is not present in the request. By default it skips calling the @@ -1545,7 +1594,7 @@ message RateLimit { "envoy.api.v2.route.RateLimit.Action.GenericKey"; // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // An optional key to use in the descriptor entry. If not set it defaults // to 'generic_key' as the descriptor key. @@ -1562,7 +1611,7 @@ message RateLimit { "envoy.api.v2.route.RateLimit.Action.HeaderValueMatch"; // The value to use in the descriptor entry. 
- string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a @@ -1578,14 +1627,18 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. @@ -1596,6 +1649,35 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1618,7 +1700,14 @@ message RateLimit { HeaderValueMatch header_value_match = 6; // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`metadata ` field + DynamicMetaData dynamic_metadata = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Rate limit on metadata. + MetaData metadata = 8; } } @@ -1701,7 +1790,7 @@ message HeaderMatcher { // Specifies the name of the header in the request. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { @@ -1736,7 +1825,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + string prefix_match = 9 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. 
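A hedged sketch of the new *metadata* rate limit action that replaces *dynamic_metadata*; the ``metadata_key`` structure (a ``key`` plus ``path`` segments) is assumed from ``type.metadata.v3.MetadataKey``, and the filter namespace and keys are placeholders.

.. code-block:: yaml

   # Sketch only: build a descriptor entry from dynamic metadata written by another filter.
   rate_limits:
   - actions:
     - metadata:
         descriptor_key: client_tier            # key used in the descriptor entry
         default_value: unknown                 # emitted when the metadata value is absent
         source: DYNAMIC                        # or ROUTE_ENTRY for route entry metadata
         metadata_key:
           key: envoy.filters.http.ext_authz    # placeholder metadata namespace
           path:
           - key: tier                          # placeholder path segment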
@@ -1744,7 +1833,7 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; + string suffix_match = 10 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on whether the header value contains // the given value or not. @@ -1753,7 +1842,7 @@ message HeaderMatcher { // Examples: // // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - string contains_match = 12 [(validate.rules).string = {min_bytes: 1}]; + string contains_match = 12 [(validate.rules).string = {min_len: 1}]; } // If specified, the match result will be inverted before checking. Defaults to false. @@ -1778,7 +1867,7 @@ message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; + string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. diff --git a/api/envoy/config/route/v3/scoped_route.proto b/api/envoy/config/route/v3/scoped_route.proto index d6611b0b1d06..b7e3aa66e07f 100644 --- a/api/envoy/config/route/v3/scoped_route.proto +++ b/api/envoy/config/route/v3/scoped_route.proto @@ -108,12 +108,12 @@ message ScopedRouteConfiguration { bool on_demand = 4; // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` associated // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; // The key to match against. Key key = 3 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 1c510a4be725..0bf0b493e956 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -59,7 +59,7 @@ message VirtualHost { // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. @@ -112,7 +112,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of HTTP headers that should be added to each response @@ -127,7 +127,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. 
repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Indicates that the virtual host has a CORS policy. @@ -262,7 +262,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each request // matching this route. repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a set of headers that will be added to responses to requests @@ -277,7 +277,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Presence of the object defines whether the connection manager's tracing configuration @@ -311,7 +311,7 @@ message WeightedCluster { // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -338,7 +338,9 @@ message WeightedCluster { // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. - repeated string request_headers_to_remove = 9; + repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Specifies a list of headers to be added to responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. @@ -352,7 +354,9 @@ message WeightedCluster { // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. - repeated string response_headers_to_remove = 6; + repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as @@ -442,7 +446,7 @@ message RouteMatch { // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style // upgrades. // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, - // where CONNECT requests may have a path, the path matchers will work if + // where Extended CONNECT requests may have a path, the path matchers will work if // there is a path present. // Note that CONNECT support is currently considered alpha in Envoy. // [#comment:TODO(htuch): Replace the above comment with an alpha tag. 
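Pulling together the header match variants touched above (prefix, suffix and contains matching all require non-empty values), a route match sketch might be the following; the path prefix and header values are illustrative only.

.. code-block:: yaml

   # Sketch only: combine several header match styles on one route.
   match:
     prefix: /api
     headers:
     - name: x-canary
       prefix_match: "canary-"     # matches values beginning with "canary-"
     - name: user-agent
       contains_match: "Mobile"    # matches values containing "Mobile"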
@@ -547,7 +551,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 36] +// [#next-free-field: 37] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -580,7 +584,7 @@ message RouteAction { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified, all requests to the target cluster will be mirrored. // @@ -609,9 +613,8 @@ message RouteAction { // The name of the request header that will be used to obtain the hash // key. If the request header is not present, no hash will be produced. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If specified, the request header value will be rewritten and used // to produce the hash key. @@ -639,7 +642,7 @@ message RouteAction { // The name of the cookie that will be used to obtain the hash key. If the // cookie is not present and ttl below is not set, no hash will be // produced. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // If specified, a cookie with the TTL will be generated if the cookie is // not present. If the TTL is present and zero, the generated cookie will @@ -666,7 +669,7 @@ message RouteAction { // The name of the URL query parameter that will be used to obtain the hash // key. If the parameter is not present, no hash will be produced. Query // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; } message FilterState { @@ -676,7 +679,7 @@ message RouteAction { // The name of the Object in the per-request filterState, which is an // Envoy::Http::Hashable object. If there is no data associated with the key, // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } oneof policy_specifier { @@ -756,16 +759,46 @@ message RouteAction { ConnectConfig connect_config = 3; } - reserved 12, 18, 19, 16, 22, 21, 10, 26, 31; + message MaxStreamDuration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.MaxStreamDuration"; + + // Specifies the maximum duration allowed for streams on the route. If not specified, the value + // from the :ref:`max_stream_duration + // ` field in + // :ref:`HttpConnectionManager.common_http_protocol_options + // ` + // is used. If this field is set explicitly to zero, any + // HttpConnectionManager max_stream_duration timeout will be disabled for + // this route. + google.protobuf.Duration max_stream_duration = 1; + + // If present, and the request contains a `grpc-timeout header + // `_, use that value as the + // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. + // If set to 0, the `grpc-timeout` header is used without modification. 
+ google.protobuf.Duration grpc_timeout_header_max = 2; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by + // subtracting the provided duration from the header. This is useful for allowing Envoy to set + // its global timeout to be less than that of the deadline imposed by the calling client, which + // makes it more likely that Envoy will handle the timeout instead of having the call canceled + // by the client. If, after applying the offset, the resulting timeout is zero or negative, + // the stream will timeout immediately. + google.protobuf.Duration grpc_timeout_header_offset = 3; + } + + reserved 12, 18, 19, 16, 22, 21, 10, 14, 23, 28, 26, 31; - reserved "request_mirror_policy", "internal_redirect_action", "max_internal_redirects"; + reserved "request_mirror_policy", "include_vh_rate_limits", "max_grpc_timeout", + "grpc_timeout_offset", "internal_redirect_action", "max_internal_redirects"; oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -776,8 +809,12 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -884,6 +921,10 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string host_rewrite_header = 29 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; @@ -960,12 +1001,6 @@ message RouteAction { // route. repeated RateLimit rate_limits = 13; - // Specifies if the rate limit filter should include the virtual host rate - // limits. By default, if the route configured rate limits, the virtual host - // :ref:`rate_limits ` are not applied to the - // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; - // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to // route the request. The method of combination is deterministic such that @@ -983,36 +1018,6 @@ message RouteAction { // Indicates that the route has a CORS policy. CorsPolicy cors = 17; - // If present, and the request is a gRPC request, use the - // `grpc-timeout header `_, - // or its default value (infinity) instead of - // :ref:`timeout `, but limit the applied timeout - // to the maximum value specified here. If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. 
If not configured at all, the `grpc-timeout` header is not used - // and gRPC requests time out like any other requests using - // :ref:`timeout ` or its default. - // This can be used to prevent unexpected upstream request timeouts due to potentially long - // time gaps between gRPC request and response in gRPC streaming mode. - // - // .. note:: - // - // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - // precedence over `grpc-timeout header `_, when - // both are present. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). - google.protobuf.Duration grpc_timeout_offset = 28; - repeated UpgradeConfig upgrade_configs = 25; // If present, Envoy will try to follow an upstream redirect response instead of proxying the @@ -1025,6 +1030,9 @@ message RouteAction { // it'll take precedence over the virtual host level hedge policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). HedgePolicy hedge_policy = 27; + + // Specifies the maximum stream duration for this route. + MaxStreamDuration max_stream_duration = 36; } // HTTP retry :ref:`architecture overview `. @@ -1045,7 +1053,7 @@ message RetryPolicy { reserved "config"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1060,7 +1068,7 @@ message RetryPolicy { reserved "config"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1091,9 +1099,15 @@ message RetryPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RetryPolicy.ResetHeader"; + // The name of the reset header. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + // The format of the reset header. ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; } @@ -1362,7 +1376,7 @@ message Decorator { // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. - string operation = 1 [(validate.rules).string = {min_bytes: 1}]; + string operation = 1 [(validate.rules).string = {min_len: 1}]; // Whether the decorated details should be propagated to the other party. The default is true. 
google.protobuf.BoolValue propagate = 2; @@ -1438,14 +1452,14 @@ message VirtualCluster { // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(validate.rules).string = {min_len: 1}]; } // Global rate limiting :ref:`architecture overview `. message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1496,12 +1510,11 @@ message RateLimit { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; // If set to true, Envoy skips the descriptor while calling rate limiting service // when header is not present in the request. By default it skips calling the @@ -1530,7 +1543,7 @@ message RateLimit { "envoy.config.route.v3.RateLimit.Action.GenericKey"; // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // An optional key to use in the descriptor entry. If not set it defaults // to 'generic_key' as the descriptor key. @@ -1547,7 +1560,7 @@ message RateLimit { "envoy.config.route.v3.RateLimit.Action.HeaderValueMatch"; // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a @@ -1563,17 +1576,21 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. 
@@ -1584,6 +1601,42 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.MetaData"; + + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + + reserved 7; + + reserved "dynamic_metadata"; + oneof action_specifier { option (validate.required) = true; @@ -1605,8 +1658,8 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; - // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // Rate limit on metadata. + MetaData metadata = 8; } } @@ -1696,7 +1749,7 @@ message HeaderMatcher { // Specifies the name of the header in the request. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { @@ -1731,7 +1784,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + string prefix_match = 9 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1739,7 +1792,7 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; + string suffix_match = 10 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on whether the header value contains // the given value or not. @@ -1748,7 +1801,7 @@ message HeaderMatcher { // Examples: // // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - string contains_match = 12 [(validate.rules).string = {min_bytes: 1}]; + string contains_match = 12 [(validate.rules).string = {min_len: 1}]; } // If specified, the match result will be inverted before checking. Defaults to false. @@ -1773,7 +1826,7 @@ message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; + string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. 
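Two of the additions above, the *MaxStreamDuration* message and the new *metadata* rate limit action, can be combined on a single route. A hedged sketch: the cluster name, durations, and descriptor values are illustrative, the *request_headers*/*generic_key* oneof field names are not shown in this diff, and the *metadata_key* shape (a key plus path segments) is assumed from the *type.metadata.v3.MetadataKey* API:

.. code-block:: yaml

  route:
    cluster: grpc_backend
    max_stream_duration:
      max_stream_duration: 30s
      grpc_timeout_header_max: 10s
      grpc_timeout_header_offset: 0.1s
    rate_limits:
    - actions:
      - request_headers:
          header_name: x-user-id
          descriptor_key: user_id
      - generic_key:
          descriptor_value: premium
          # descriptor_key is optional and defaults to "generic_key"
      - metadata:
          descriptor_key: tenant
          source: DYNAMIC
          default_value: unknown
          metadata_key:
            key: envoy.lb
            path:
            - key: tenant_id

With *grpc_timeout_header_max* set, a client-supplied `grpc-timeout` is honored but capped at 10s, and *grpc_timeout_header_offset* then subtracts 100ms so Envoy is likely to time out before the caller does.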
diff --git a/api/envoy/config/route/v4alpha/scoped_route.proto b/api/envoy/config/route/v4alpha/scoped_route.proto index 33fc756a60a4..0704ceacbbac 100644 --- a/api/envoy/config/route/v4alpha/scoped_route.proto +++ b/api/envoy/config/route/v4alpha/scoped_route.proto @@ -108,12 +108,12 @@ message ScopedRouteConfiguration { bool on_demand = 4; // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v4alpha.DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; // The key to match against. Key key = 3 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto index 42783115f871..a8324a6ebc1a 100644 --- a/api/envoy/config/tap/v3/common.proto +++ b/api/envoy/config/tap/v3/common.proto @@ -261,7 +261,7 @@ message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto index 8366187fd1bf..fbee12d7f99d 100644 --- a/api/envoy/config/tap/v4alpha/common.proto +++ b/api/envoy/config/tap/v4alpha/common.proto @@ -259,7 +259,7 @@ message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC diff --git a/api/envoy/config/trace/v3/datadog.proto b/api/envoy/config/trace/v3/datadog.proto index f1fe3e666125..c101ab2f03c9 100644 --- a/api/envoy/config/trace/v3/datadog.proto +++ b/api/envoy/config/trace/v3/datadog.proto @@ -22,8 +22,8 @@ message DatadogConfig { "envoy.config.trace.v2.DatadogConfig"; // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The name used for the service when traces are generated by envoy. - string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 2 [(validate.rules).string = {min_len: 1}]; } diff --git a/api/envoy/config/trace/v3/dynamic_ot.proto b/api/envoy/config/trace/v3/dynamic_ot.proto index fb372da8c52a..c28106871542 100644 --- a/api/envoy/config/trace/v3/dynamic_ot.proto +++ b/api/envoy/config/trace/v3/dynamic_ot.proto @@ -28,7 +28,7 @@ message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. 
- string library = 1 [(validate.rules).string = {min_bytes: 1}]; + string library = 1 [(validate.rules).string = {min_len: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. diff --git a/api/envoy/config/trace/v3/http_tracer.proto b/api/envoy/config/trace/v3/http_tracer.proto index 2a87a28db25e..33adea18a4d6 100644 --- a/api/envoy/config/trace/v3/http_tracer.proto +++ b/api/envoy/config/trace/v3/http_tracer.proto @@ -52,7 +52,7 @@ message Tracing { // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. // See the trace drivers for examples: diff --git a/api/envoy/config/trace/v3/lightstep.proto b/api/envoy/config/trace/v3/lightstep.proto index 0e0b60b5bddb..0b7be7c4e609 100644 --- a/api/envoy/config/trace/v3/lightstep.proto +++ b/api/envoy/config/trace/v3/lightstep.proto @@ -38,11 +38,11 @@ message LightstepConfig { } // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // File containing the access token to the `LightStep // `_ API. - string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + string access_token_file = 2 [(validate.rules).string = {min_len: 1}]; // Propagation modes to use by LightStep's tracer. repeated PropagationMode propagation_modes = 3 diff --git a/api/envoy/config/trace/v3/zipkin.proto b/api/envoy/config/trace/v3/zipkin.proto index 5c5349cdf155..ee4e4d9b7898 100644 --- a/api/envoy/config/trace/v3/zipkin.proto +++ b/api/envoy/config/trace/v3/zipkin.proto @@ -49,12 +49,12 @@ message ZipkinConfig { // The cluster manager cluster that hosts the Zipkin collectors. Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster // resources `. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When // using a standard Zipkin installation, the API endpoint is typically // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new // trace instance. The default value is false, which will result in a 64 bit trace id being used. diff --git a/api/envoy/config/trace/v4alpha/http_tracer.proto b/api/envoy/config/trace/v4alpha/http_tracer.proto index 663886a97bb4..ea918ec2bff5 100644 --- a/api/envoy/config/trace/v4alpha/http_tracer.proto +++ b/api/envoy/config/trace/v4alpha/http_tracer.proto @@ -52,7 +52,7 @@ message Tracing { // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. 
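Several tracer configs in these hunks now use *min_len* for their required strings. As a usage sketch for one of them, assuming the bootstrap *tracing.http* wrapper and the usual *type.googleapis.com* type URL (both outside this diff), a Datadog driver could be wired as:

.. code-block:: yaml

  tracing:
    http:
      name: envoy.tracers.datadog
      typed_config:
        "@type": type.googleapis.com/envoy.config.trace.v3.DatadogConfig
        collector_cluster: datadog_agent
        service_name: front-envoy

Both *collector_cluster* and *service_name* must be non-empty, so omitting either fails validation.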
// See the trace drivers for examples: diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto index c16b5be1ff0e..af7edab5836a 100644 --- a/api/envoy/data/accesslog/v3/accesslog.proto +++ b/api/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 23] +// [#next-free-field: 24] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -272,6 +272,9 @@ message ResponseFlags { // Indicates that a filter configuration is not available. bool no_filter_config_found = 22; + + // Indicates that request or connection exceeded the downstream connection duration. + bool duration_timeout = 23; } // Properties of a negotiated TLS connection. diff --git a/api/envoy/data/cluster/v3/outlier_detection_event.proto b/api/envoy/data/cluster/v3/outlier_detection_event.proto index ae1ad4c94d17..f87cd1582b09 100644 --- a/api/envoy/data/cluster/v3/outlier_detection_event.proto +++ b/api/envoy/data/cluster/v3/outlier_detection_event.proto @@ -88,10 +88,10 @@ message OutlierDetectionEvent { google.protobuf.UInt64Value secs_since_last_action = 3; // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 4 [(validate.rules).string = {min_len: 1}]; // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; + string upstream_url = 5 [(validate.rules).string = {min_len: 1}]; // The action that took place. Action action = 6 [(validate.rules).enum = {defined_only: true}]; diff --git a/api/envoy/data/core/v3/health_check_event.proto b/api/envoy/data/core/v3/health_check_event.proto index 88b195b92b3d..2b0f9d888f46 100644 --- a/api/envoy/data/core/v3/health_check_event.proto +++ b/api/envoy/data/core/v3/health_check_event.proto @@ -40,7 +40,7 @@ message HealthCheckEvent { config.core.v3.Address host = 2; - string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 3 [(validate.rules).string = {min_len: 1}]; oneof event { option (validate.required) = true; diff --git a/api/envoy/data/dns/v3/dns_table.proto b/api/envoy/data/dns/v3/dns_table.proto index 354ad69fca66..4398403b7ed0 100644 --- a/api/envoy/data/dns/v3/dns_table.proto +++ b/api/envoy/data/dns/v3/dns_table.proto @@ -86,7 +86,8 @@ message DnsTable { // This message defines a service selection record returned for a service query in a domain message DnsService { // The name of the service without the protocol or domain name - string service_name = 1; + string service_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The service protocol. This can be specified as a string or the numeric value of the protocol DnsServiceProtocol protocol = 2; diff --git a/api/envoy/data/dns/v4alpha/dns_table.proto b/api/envoy/data/dns/v4alpha/dns_table.proto index 140ca4489c20..f142cfa7bf8c 100644 --- a/api/envoy/data/dns/v4alpha/dns_table.proto +++ b/api/envoy/data/dns/v4alpha/dns_table.proto @@ -95,7 +95,8 @@ message DnsTable { "envoy.data.dns.v3.DnsTable.DnsService"; // The name of the service without the protocol or domain name - string service_name = 1; + string service_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The service protocol. 
This can be specified as a string or the numeric value of the protocol DnsServiceProtocol protocol = 2; diff --git a/api/envoy/extensions/access_loggers/file/v3/file.proto b/api/envoy/extensions/access_loggers/file/v3/file.proto index de33623c207f..f17a2e7f4ca9 100644 --- a/api/envoy/extensions/access_loggers/file/v3/file.proto +++ b/api/envoy/extensions/access_loggers/file/v3/file.proto @@ -27,7 +27,7 @@ message FileAccessLog { "envoy.config.accesslog.v2.FileAccessLog"; // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; oneof access_log_format { // Access log :ref:`format string`. diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto index fa2ec9a50495..03d138585d23 100644 --- a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto +++ b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto @@ -31,7 +31,7 @@ message FileAccessLog { reserved "format", "json_format", "typed_json_format"; // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; oneof access_log_format { // Configuration to form access log data and format. diff --git a/api/envoy/extensions/access_loggers/grpc/v3/als.proto b/api/envoy/extensions/access_loggers/grpc/v3/als.proto index 4996a877a9c6..968dfbeec016 100644 --- a/api/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/api/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -62,7 +62,7 @@ message CommonGrpcAccessLogConfig { // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier // `. This allows the // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string log_name = 1 [(validate.rules).string = {min_len: 1}]; // The gRPC service for the access log service. config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto index cd9db5906436..413743a203f0 100644 --- a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto +++ b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -12,9 +12,12 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [[#not-implemented-hide:] +// [#protodoc-title: Wasm access log] +// [#extension: envoy.access_loggers.wasm] + // Custom configuration for an :ref:`AccessLog ` -// that calls into a WASM VM. +// that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm* +// AccessLog. 
message WasmAccessLog { envoy.extensions.wasm.v3.PluginConfig config = 1; } diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 79cd583486ac..5579cc16bd97 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -37,7 +37,7 @@ message DnsCacheConfig { // configurations with the same name *must* otherwise have the same settings when referenced // from different configuration components. Configuration will fail to load if this is not // the case. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The DNS lookup family to use during resolution. // @@ -98,5 +98,8 @@ message DnsCacheConfig { // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 8; } diff --git a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 9255deb4b64d..30efa6026218 100644 --- a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -69,10 +69,10 @@ message RateLimitDescriptor { "envoy.api.v2.ratelimit.RateLimitDescriptor.Entry"; // Descriptor key. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; // Descriptor value. - string value = 2 [(validate.rules).string = {min_bytes: 1}]; + string value = 2 [(validate.rules).string = {min_len: 1}]; } // Override rate limit to apply to this descriptor instead of the limit diff --git a/api/envoy/extensions/common/tap/v3/common.proto b/api/envoy/extensions/common/tap/v3/common.proto index 68e80dad76b4..aa7ae8264757 100644 --- a/api/envoy/extensions/common/tap/v3/common.proto +++ b/api/envoy/extensions/common/tap/v3/common.proto @@ -64,5 +64,5 @@ message AdminConfig { // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; } diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto index 536f13d049c3..efa7744e357f 100644 --- a/api/envoy/extensions/common/tap/v4alpha/common.proto +++ b/api/envoy/extensions/common/tap/v4alpha/common.proto @@ -65,5 +65,5 @@ message AdminConfig { // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure.
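Tying the *DnsCacheConfig* note above to a concrete config: a hedged sketch, assuming the *dns_cache_config* embedding used by the dynamic forward proxy extensions and the *V4_ONLY* lookup-family value, neither of which is part of this diff:

.. code-block:: yaml

  dns_cache_config:
    name: dynamic_forward_proxy_cache
    dns_lookup_family: V4_ONLY
    # Rejected at startup if envoy.restart_features.use_apple_api_for_dns_lookups is true,
    # since Apple's DNS API only supports UDP.
    use_tcp_for_dns_lookups: true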
- string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; } diff --git a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 8dd851f4020a..c524e022e859 100644 --- a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -51,10 +51,11 @@ message GradientControllerConfig { "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig." "MinimumRTTCalculationParams"; - // The time interval between recalculating the minimum request round-trip time. + // The time interval between recalculating the minimum request round-trip time. Has to be + // positive. google.protobuf.Duration interval = 1 [(validate.rules).duration = { required: true - gt {} + gte {nanos: 1000000} }]; // The number of requests to aggregate/sample during the minRTT recalculation window before diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto index 6f01c88885f4..c77d93762099 100644 --- a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto +++ b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto @@ -23,6 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Admission Control] // [#extension: envoy.filters.http.admission_control] +// [#next-free-field: 6] message AdmissionControl { // Default method of specifying what constitutes a successful request. All status codes that // indicate a successful request must be explicitly specified if not relying on the default @@ -75,16 +76,23 @@ message AdmissionControl { } // The sliding time window over which the success rate is calculated. The window is rounded to the - // nearest second. Defaults to 120s. + // nearest second. Defaults to 30s. google.protobuf.Duration sampling_window = 3; // Rejection probability is defined by the formula:: // - // max(0, (rq_count - aggression_coefficient * rq_success_count) / (rq_count + 1)) + // max(0, (rq_count - rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression) // - // The coefficient dictates how aggressively the admission controller will throttle requests as - // the success rate drops. Lower values will cause throttling to kick in at higher success rates - // and result in more aggressive throttling. Any values less than 1.0, will be set to 1.0. If the - // message is unspecified, the coefficient is 2.0. - config.core.v3.RuntimeDouble aggression_coefficient = 4; + // The aggression dictates how heavily the admission controller will throttle requests upon SR + // dropping at or below the threshold. A value of 1 will result in a linear increase in + // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. If the + // message is unspecified, the aggression is 1.0. See `the admission control documentation + // `_ + // for a diagram illustrating this. + config.core.v3.RuntimeDouble aggression = 4; + + // Dictates the success rate at which the rejection probability is non-zero. As success rate drops + // below this threshold, rejection probability will increase. 
Any success rate above the threshold + results in a rejection probability of 0. Defaults to 95%. + config.core.v3.RuntimePercent sr_threshold = 5; } diff --git a/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto index b80bc1b82108..6a516b430028 100644 --- a/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto +++ b/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto @@ -25,13 +25,13 @@ message AwsRequestSigning { // of the HTTP endpoint. // // Example: s3 - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 1 [(validate.rules).string = {min_len: 1}]; // The `region `_ hosting the HTTP // endpoint. // // Example: us-west-2 - string region = 2 [(validate.rules).string = {min_bytes: 1}]; + string region = 2 [(validate.rules).string = {min_len: 1}]; // Indicates that before signing headers, the host header will be swapped with // this value. If not set or empty, the original host header value diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto index f78b1d24ac2c..9260abe94a96 100644 --- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -52,17 +52,14 @@ message CacheConfig { // Config specific to the cache storage implementation. google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - // [#not-implemented-hide:] - // - // - // List of allowed *Vary* headers. + // List of matching rules that define allowed *Vary* headers. // // The *vary* response header holds a list of header names that affect the // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't in + // response's *vary* header mentions any header names that aren't matched by any rules in // *allowed_vary_headers*, that response will not be cached. // // During lookup, *allowed_vary_headers* controls what request headers will be diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto index 19921edb0310..ad9bb4c639a4 100644 --- a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -52,17 +52,14 @@ message CacheConfig { // Config specific to the cache storage implementation. google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - // [#not-implemented-hide:] - // - // - // List of allowed *Vary* headers. + // List of matching rules that define allowed *Vary* headers. // // The *vary* response header holds a list of header names that affect the // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't in + // response's *vary* header mentions any header names that aren't matched by any rules in // *allowed_vary_headers*, that response will not be cached.
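Returning to the reworked admission control fields above (*sampling_window*, *aggression*, *sr_threshold*), here is a hedged filter sketch; the type URL, the *success_criteria* field name, and the runtime keys are assumptions, and the RuntimeDouble/RuntimePercent shapes follow the core runtime types:

.. code-block:: yaml

  - name: envoy.filters.http.admission_control
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl
      success_criteria: {}   # assumed field name; empty uses the default success codes
      sampling_window: 30s
      aggression:
        default_value: 1.5
        runtime_key: admission_control.aggression
      sr_threshold:
        default_value:
          value: 95.0
        runtime_key: admission_control.sr_threshold

With *sr_threshold* at 95% and *aggression* above 1, rejection probability stays at zero until the sampled success rate drops below 95% and then ramps up more quickly than the linear (aggression = 1) case.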
// // During lookup, *allowed_vary_headers* controls what request headers will be diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto new file mode 100644 index 000000000000..7952f9b3d448 --- /dev/null +++ b/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.cdn_loop.v3alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha"; +option java_outer_classname = "CdnLoopProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP CDN-Loop Filter] +// [#extension: envoy.filters.http.cdn_loop] + +// CDN-Loop Header filter config. See the :ref:`configuration overview +// ` for more information. +message CdnLoopConfig { + // The CDN identifier to use for loop checks and to append to the + // CDN-Loop header. + // + // RFC 8586 calls this the cdn-id. The cdn-id can either be a + // pseudonym or hostname the CDN is in control of. + // + // cdn_id must not be empty. + string cdn_id = 1 [(validate.rules).string = {min_len: 1}]; + + // The maximum allowed count of cdn_id in the downstream CDN-Loop + // request header. + // + // The default of 0 means a request can transit the CdnLoopFilter + // once. A value of 1 means that a request can transit the + // CdnLoopFilter twice and so on. + uint32 max_allowed_occurrences = 2; +} diff --git a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto index b8a2525dbf54..70dd21a324b3 100644 --- a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto +++ b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto @@ -55,6 +55,10 @@ message PerRouteConfig { // :ref:`HCM host rewrite header ` // given that the value set here would be used for DNS lookups whereas the value set in the HCM // would be used for host header forwarding which is not the desired outcome. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
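The new *CdnLoopConfig* above is self-contained, so a full filter entry is easy to sketch; the cdn-id value is a placeholder and the type URL follows the usual *type.googleapis.com* pattern rather than anything stated in this diff:

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.cdn_loop
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig
      cdn_id: my-cdn.example.com
      # 0 (the default) allows one transit; 1 would allow two, and so on.
      max_allowed_occurrences: 0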
string host_rewrite_header = 2; } } diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index d9264ca66b66..395258802f56 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_uri.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; @@ -23,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 13] +// [#next-free-field: 15] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -103,6 +104,10 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. + type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14; + // Specifies whether to deny the requests, when the filter is disabled. // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to determine whether to deny request for @@ -117,6 +122,23 @@ message ExtAuthz { // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 10; + + // Optional additional prefix to use when emitting statistics. This allows to distinguish + // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example: + // + // .. code-block:: yaml + // + // http_filters: + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. + // + string stat_prefix = 13; } // Configuration for buffering the request data. @@ -134,6 +156,13 @@ message BufferSettings { // The authorization request will be dispatched and no 413 HTTP error will be returned by the // filter. bool allow_partial_message = 2; + + // If true, the body sent to the external authorization service is set with raw bytes, it sets + // the :ref:`raw_body` + // field of HTTP request attribute context. Otherwise, :ref:` + // body` will be filled + // with UTF-8 string request body. + bool pack_as_bytes = 3; } // HttpService is used for raw HTTP communication between the filter and the authorization service. @@ -243,11 +272,7 @@ message ExtAuthzPerRoute { } } -// Extra settings for the check request. You can use this to provide extra context for the -// external authorization server on specific virtual hosts \ routes. 
For example, adding a context -// extension on the virtual host level can give the ext-authz server information on what virtual -// host is used without needing to parse the host header. If CheckSettings is specified in multiple -// per-filter-configs, they will be merged in order, and the result will be used. +// Extra settings for the check request. message CheckSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.CheckSettings"; @@ -255,6 +280,12 @@ message CheckSettings { // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` // + // You can use this to provide extra context for the external authorization server on specific + // virtual hosts/routes. For example, adding a context extension on the virtual host level can + // give the ext-authz server information on what virtual host is used without needing to parse the + // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged + // in order, and the result will be used. + // // Merge semantics for this field are such that keys from more specific configs override. // // .. note:: @@ -262,4 +293,8 @@ message CheckSettings { // These settings are only applied to a filter configured with a // :ref:`grpc_service`. map context_extensions = 1; + + // When set to true, disable the configured :ref:`with_request_body + // ` for a route. + bool disable_request_body_buffering = 2; } diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index 05ced9299258..ec8854f5d1be 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/http_status.proto"; @@ -23,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 13] +// [#next-free-field: 15] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; @@ -103,6 +104,10 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. + type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14; + // Specifies whether to deny the requests, when the filter is disabled. // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to determine whether to deny request for @@ -117,6 +122,23 @@ message ExtAuthz { // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 10; + + // Optional additional prefix to use when emitting statistics. 
This allows to distinguish + // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example: + // + // .. code-block:: yaml + // + // http_filters: + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. + // + string stat_prefix = 13; } // Configuration for buffering the request data. @@ -134,6 +156,13 @@ message BufferSettings { // The authorization request will be dispatched and no 413 HTTP error will be returned by the // filter. bool allow_partial_message = 2; + + // If true, the body sent to the external authorization service is set with raw bytes, it sets + // the :ref:`raw_body` + // field of HTTP request attribute context. Otherwise, :ref:` + // body` will be filled + // with UTF-8 string request body. + bool pack_as_bytes = 3; } // HttpService is used for raw HTTP communication between the filter and the authorization service. @@ -243,11 +272,7 @@ message ExtAuthzPerRoute { } } -// Extra settings for the check request. You can use this to provide extra context for the -// external authorization server on specific virtual hosts \ routes. For example, adding a context -// extension on the virtual host level can give the ext-authz server information on what virtual -// host is used without needing to parse the host header. If CheckSettings is specified in multiple -// per-filter-configs, they will be merged in order, and the result will be used. +// Extra settings for the check request. message CheckSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; @@ -255,6 +280,12 @@ message CheckSettings { // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` // + // You can use this to provide extra context for the external authorization server on specific + // virtual hosts/routes. For example, adding a context extension on the virtual host level can + // give the ext-authz server information on what virtual host is used without needing to parse the + // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged + // in order, and the result will be used. + // // Merge semantics for this field are such that keys from more specific configs override. // // .. note:: @@ -262,4 +293,8 @@ message CheckSettings { // These settings are only applied to a filter configured with a // :ref:`grpc_service`. map context_extensions = 1; + + // When set to true, disable the configured :ref:`with_request_body + // ` for a route. + bool disable_request_body_buffering = 2; } diff --git a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto index 85d7cbe1cecd..b2c4ad2ee681 100644 --- a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto @@ -23,7 +23,7 @@ message FilterConfig { // The content-type to pass to the upstream when the gRPC bridge filter is applied. 
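Pulling the new *ext_authz* body-buffering knob together with the *with_request_body* setting referenced above: a hedged sketch in which the *max_request_bytes* field name, the gRPC service wiring, and the cluster name are assumptions, not part of this change:

.. code-block:: yaml

  - name: envoy.filters.http.ext_authz
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
      stat_prefix: waf
      grpc_service:
        envoy_grpc:
          cluster_name: ext_authz_service   # assumed cluster name
      with_request_body:
        max_request_bytes: 8192             # assumed field name for the buffering limit
        allow_partial_message: true
        pack_as_bytes: true                 # populate raw_body instead of the UTF-8 body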
// The filter will also validate that the upstream responds with the same content type. - string content_type = 1 [(validate.rules).string = {min_bytes: 1}]; + string content_type = 1 [(validate.rules).string = {min_len: 1}]; // If true, Envoy will assume that the upstream doesn't understand gRPC frames and // strip the gRPC frame from the request, and add it back in to the response. This will diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index ace7c535069a..5e399790a7ec 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -56,7 +56,7 @@ message Config { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; // The value to pair with the given key. // diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index 0d7c814584dc..5b06f1e78556 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -55,7 +55,7 @@ message Config { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; oneof value_type { // The value to pair with the given key. diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 39fe6187f64f..5588961bf512 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -62,7 +62,7 @@ message JwtProvider { // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // - string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + string issuer = 1 [(validate.rules).string = {min_len: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, @@ -220,7 +220,7 @@ message JwtHeader { // The HTTP header name. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the @@ -414,7 +414,7 @@ message FilterStateRule { "envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule"; // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. 
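As a usage sketch for the *JwtHeader* fields above (matching the "Authorization: Bearer " example in the comment), assuming the *providers* map and *from_headers* list from the JwtAuthentication API, which are not shown in this diff:

.. code-block:: yaml

  providers:
    example_provider:
      issuer: https://securetoken.google.com
      from_headers:
      - name: Authorization
        value_prefix: "Bearer "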
diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto index 302cf7253dde..12d4fa5fe1d3 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -62,7 +62,7 @@ message JwtProvider { // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // - string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + string issuer = 1 [(validate.rules).string = {min_len: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, @@ -220,7 +220,7 @@ message JwtHeader { // The HTTP header name. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the @@ -414,7 +414,7 @@ message FilterStateRule { "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. diff --git a/api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD b/api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD new file mode 100644 index 000000000000..ad2fc9a9a84f --- /dev/null +++ b/api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto b/api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto new file mode 100644 index 000000000000..94f21edd3eed --- /dev/null +++ b/api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.local_ratelimit.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/v3/http_status.proto"; +import "envoy/type/v3/token_bucket.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3"; +option java_outer_classname = "LocalRateLimitProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Local Rate limit] +// Local Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.http.local_ratelimit] + +// [#next-free-field: 7] +message LocalRateLimit { + // The human readable prefix to use when emitting stats. 
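+  //
+  // For example, a minimal configuration sketch (the bucket sizes, runtime keys and other values
+  // below are illustrative assumptions, not defaults) that emits stats under the
+  // ``http_local_rate_limiter`` prefix:
+  //
+  // .. code-block:: yaml
+  //
+  //   name: envoy.filters.http.local_ratelimit
+  //   typed_config:
+  //     "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit
+  //     stat_prefix: http_local_rate_limiter
+  //     status:
+  //       code: TooManyRequests
+  //     token_bucket:
+  //       max_tokens: 1000
+  //       tokens_per_fill: 1000
+  //       fill_interval: 60s
+  //     filter_enabled:
+  //       runtime_key: local_rate_limit_enabled
+  //       default_value:
+  //         numerator: 100
+  //         denominator: HUNDRED
+  //     filter_enforced:
+  //       runtime_key: local_rate_limit_enforced
+  //       default_value:
+  //         numerator: 100
+  //         denominator: HUNDRED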
+ string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // This field allows for a custom HTTP response status code to be returned to the downstream client when + // the request has been rate limited. + // Defaults to 429 (TooManyRequests). + // + // .. note:: + // If this is set to < 400, 429 will be used instead. + type.v3.HttpStatus status = 2; + + // The token bucket configuration to use for rate limiting requests that are processed by this + // filter. Each request processed by the filter consumes a single token. If the token is available, + // the request will be allowed. If no tokens are available, the request will receive the configured + // rate limit status. + // + // .. note:: + // It's fine for the token bucket to be unset for the global configuration since the rate limit + // can be applied at the virtual host or route level. Thus, the token bucket must be set + // for the per-route configuration; otherwise, the config will be rejected. + // + // .. note:: + // When using per-route configuration, the bucket becomes unique to that route. + // + // .. note:: + // In the current implementation the token bucket's :ref:`fill_interval + // ` must be >= 50ms to avoid overly aggressive + // refills. + type.v3.TokenBucket token_bucket = 3; + + // If set, this will enable -- but not necessarily enforce -- the rate limit for the given + // fraction of requests. + // Defaults to 0% of requests for safety. + config.core.v3.RuntimeFractionalPercent filter_enabled = 4; + + // If set, this will enforce the rate limit decisions for the given fraction of requests. + // + // Note: this only applies to the fraction of enabled requests. + // + // Defaults to 0% of requests for safety. + config.core.v3.RuntimeFractionalPercent filter_enforced = 5; + + // Specifies a list of HTTP headers that should be added to each response for requests that + // have been rate limited. + repeated config.core.v3.HeaderValueOption response_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 10}]; +} diff --git a/api/envoy/extensions/filters/http/lua/v3/lua.proto b/api/envoy/extensions/filters/http/lua/v3/lua.proto index fc348c2365cd..1636c01ab1c7 100644 --- a/api/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/api/envoy/extensions/filters/http/lua/v3/lua.proto @@ -25,7 +25,7 @@ message Lua { // further loads code from disk if desired. Note that if JSON configuration is used, the code must // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. - string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; + string inline_code = 1 [(validate.rules).string = {min_len: 1}]; // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute // `. The Lua source codes can be diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto index 53678996de6c..e4be64167ed2 100644 --- a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto +++ b/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto @@ -26,7 +26,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message OAuth2Credentials { // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server.
- string client_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string client_id = 1 [(validate.rules).string = {min_len: 1}]; // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. transport_sockets.tls.v3.SdsSecretConfig token_secret = 2 @@ -50,7 +50,7 @@ message OAuth2Config { config.core.v3.HttpUri token_endpoint = 1; // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Credentials used for OAuth. OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; @@ -60,7 +60,7 @@ message OAuth2Config { // documentation on :ref:`custom request headers `. // // This URI should not contain any query parameters. - string redirect_uri = 4 [(validate.rules).string = {min_bytes: 1}]; + string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. type.matcher.v3.PathMatcher redirect_path_matcher = 5 diff --git a/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto index 547a3060e16b..ee51e1f96099 100644 --- a/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto +++ b/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto @@ -29,7 +29,7 @@ message OAuth2Credentials { "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials"; // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. - string client_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string client_id = 1 [(validate.rules).string = {min_len: 1}]; // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2 @@ -56,7 +56,7 @@ message OAuth2Config { config.core.v4alpha.HttpUri token_endpoint = 1; // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Credentials used for OAuth. OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; @@ -66,7 +66,7 @@ message OAuth2Config { // documentation on :ref:`custom request headers `. // // This URI should not contain any query parameters. - string redirect_uri = 4 [(validate.rules).string = {min_bytes: 1}]; + string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5 diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 781fddc1939c..bc58e7f9b2e1 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. 
// [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 9] +// [#next-free-field: 10] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; @@ -34,7 +34,7 @@ message RateLimit { } // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string = {min_bytes: 1}]; + string domain = 1 [(validate.rules).string = {min_len: 1}]; // Specifies the rate limit configurations to be applied with the same // stage number. If not set, the default stage number is 0. @@ -60,7 +60,6 @@ message RateLimit { // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. - // Defaults to false. bool failure_mode_deny = 5; // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead @@ -99,4 +98,25 @@ message RateLimit { // Disabled by default. XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 [(validate.rules).enum = {defined_only: true}]; + + // Disables emitting the :ref:`x-envoy-ratelimited` header + // in case of rate limiting (i.e. 429 responses). + // Having this header not present potentially makes the request retriable. + bool disable_x_envoy_ratelimited_header = 9; +} + +message RateLimitPerRoute { + enum VhRateLimitsOptions { + // Use the virtual host rate limits unless the route has a rate limit policy. + OVERRIDE = 0; + + // Use the virtual host rate limits even if the route has a rate limit policy. + INCLUDE = 1; + + // Ignore the virtual host rate limits even if the route does not have a rate limit policy. + IGNORE = 2; + } + + // Specifies if the rate limit filter should include the virtual host rate limits. + VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/extensions/filters/http/squash/v3/squash.proto b/api/envoy/extensions/filters/http/squash/v3/squash.proto index 0ea335a414fa..f9bc9cceceb9 100644 --- a/api/envoy/extensions/filters/http/squash/v3/squash.proto +++ b/api/envoy/extensions/filters/http/squash/v3/squash.proto @@ -24,7 +24,7 @@ message Squash { "envoy.config.filter.http.squash.v2.Squash"; // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // When the filter requests the Squash server to create a DebugAttachment, it will use this // structure as template for the body of the request. It can contain reference to environment diff --git a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto index a812992a5b84..55eba141f45f 100644 --- a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto +++ b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -13,7 +13,10 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [[#not-implemented-hide:] +// [#protodoc-title: Wasm] +// [#extension: envoy.filters.http.wasm] +// Wasm :ref:`configuration overview `. + message Wasm { // General Plugin configuration. 
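+  //
+  // A minimal configuration sketch for this filter (the plugin name, the v8 runtime and the local
+  // module path below are illustrative assumptions, not defaults):
+  //
+  // .. code-block:: yaml
+  //
+  //   name: envoy.filters.http.wasm
+  //   typed_config:
+  //     "@type": type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm
+  //     config:
+  //       name: my_plugin
+  //       vm_config:
+  //         runtime: envoy.wasm.runtime.v8
+  //         code:
+  //           local:
+  //             filename: /etc/envoy/plugin.wasm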
envoy.extensions.wasm.v3.PluginConfig config = 1; diff --git a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 8fd0c63d0c82..fb8047d391e9 100644 --- a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -24,7 +24,7 @@ message ProxyProtocol { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; } // A Rule defines what metadata to apply when a header is present or missing. diff --git a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index b3af267a77ad..2ed14c7f0e23 100644 --- a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -30,11 +30,11 @@ message ClientSSLAuth { // of principals. The service must support the expected :ref:`REST API // `. string auth_api_cluster = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; // Time in milliseconds between principal refreshes from the // authentication service. Default is 60000 (60s). The actual fetch time diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto index 749708880d71..646f053ca9b6 100644 --- a/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto @@ -37,7 +37,7 @@ message DubboProxy { "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy"; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Configure the protocol used. ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; @@ -62,7 +62,7 @@ message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto index 4894c7693fd7..30499c27f6f0 100644 --- a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto @@ -37,7 +37,7 @@ message DubboProxy { "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; // The human readable prefix to use when emitting statistics. 
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Configure the protocol used. ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; @@ -62,7 +62,7 @@ message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/api/envoy/extensions/filters/network/ext_authz/v3/BUILD b/api/envoy/extensions/filters/network/ext_authz/v3/BUILD index a4e298b42619..a5c5b57b7227 100644 --- a/api/envoy/extensions/filters/network/ext_authz/v3/BUILD +++ b/api/envoy/extensions/filters/network/ext_authz/v3/BUILD @@ -8,6 +8,7 @@ api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/ext_authz/v2:pkg", + "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index 50161f1cb92b..78f4167ccc33 100644 --- a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -4,6 +4,7 @@ package envoy.extensions.filters.network.ext_authz.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -23,13 +24,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. -// [#next-free-field: 6] +// [#next-free-field: 7] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The external authorization gRPC service configuration. // The default timeout is set to 200ms by this filter. @@ -51,4 +52,8 @@ message ExtAuthz { // version of Check{Request,Response} used on the wire. config.core.v3.ApiVersion transport_api_version = 5 [(validate.rules).enum = {defined_only: true}]; + + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. + type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6; } diff --git a/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD b/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD new file mode 100644 index 000000000000..6d146b1c64d1 --- /dev/null +++ b/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/network/ext_authz/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto new file mode 100644 index 000000000000..f877a3ed8502 --- /dev/null +++ b/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.ext_authz.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha"; +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Network External Authorization ] +// The network layer external authorization service configuration +// :ref:`configuration overview `. +// [#extension: envoy.filters.network.ext_authz] + +// External Authorization filter calls out to an external service over the +// gRPC Authorization API defined by +// :ref:`CheckRequest `. +// A failed check will cause this filter to close the TCP connection. +// [#next-free-field: 7] +message ExtAuthz { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.ext_authz.v3.ExtAuthz"; + + // The prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // The external authorization gRPC service configuration. + // The default timeout is set to 200ms by this filter. + config.core.v4alpha.GrpcService grpc_service = 2; + + // The filter's behaviour in case the external authorization service does + // not respond back. When it is set to true, Envoy will also allow traffic in case of + // communication failure between authorization service and the proxy. + // Defaults to false. + bool failure_mode_allow = 3; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 4; + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of Check{Request,Response} used on the wire. + config.core.v4alpha.ApiVersion transport_api_version = 5 + [(validate.rules).enum = {defined_only: true}]; + + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. 
+ type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6; +} diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 68c5c8cad2a3..a4c115c68da0 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -252,7 +252,7 @@ message HttpConnectionManager { // The human readable prefix to use when emitting statistics for the // connection manager. See the :ref:`statistics documentation ` for // more information. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; @@ -571,27 +571,29 @@ message LocalReplyConfig { // The configuration to form response body from the :ref:`command operators ` // and to specify response content type as one of: plain/text or application/json. // - // Example one: plain/text body_format. + // Example one: "plain/text" ``body_format``. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // The following response body in `plain/text` format will be generated for a request with + // The following response body in "plain/text" format will be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. // - // .. code-block:: + // .. code-block:: text // // upstream connect error:503:path=/foo // - // Example two: application/json body_format. + // Example two: "application/json" ``body_format``. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% - // path: $REQ(:path)% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // path: "%REQ(:path)%" // // The following response body in "application/json" format would be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. @@ -716,14 +718,18 @@ message ScopedRoutes { // If an element contains no separator, the whole element is parsed as key and the // fragment value is an empty string. // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_bytes: 1}]; + string separator = 1 [(validate.rules).string = {min_len: 1}]; // The key to match on. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; } // The name of the header field to extract the value from. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string name = 1 [(validate.rules).string = {min_len: 1}]; // The element separator (e.g., ';' separates 'a;b;c;d'). // Default: empty string. This causes the entirety of the header field to be extracted. @@ -757,7 +763,7 @@ message ScopedRoutes { } // The name assigned to the scoped routing configuration. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The algorithm to use for constructing a scope key for each request. ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; @@ -807,15 +813,15 @@ message HttpFilter { // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. oneof config_type { + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. google.protobuf.Any typed_config = 4; // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // In case of a failure and without the default configuration, the HTTP listener responds with code 500. // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). config.core.v3.ExtensionConfigSource config_discovery = 5; } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 9db92927ebbe..ceb7f4a65a1f 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -251,7 +251,7 @@ message HttpConnectionManager { // The human readable prefix to use when emitting statistics for the // connection manager. See the :ref:`statistics documentation ` for // more information. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; @@ -573,27 +573,29 @@ message LocalReplyConfig { // The configuration to form response body from the :ref:`command operators ` // and to specify response content type as one of: plain/text or application/json. // - // Example one: plain/text body_format. + // Example one: "plain/text" ``body_format``. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // The following response body in `plain/text` format will be generated for a request with + // The following response body in "plain/text" format will be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. // - // .. code-block:: + // .. code-block:: text // // upstream connect error:503:path=/foo // - // Example two: application/json body_format. + // Example two: "application/json" ``body_format``. // - // .. code-block:: + // .. 
validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% - // path: $REQ(:path)% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // path: "%REQ(:path)%" // // The following response body in "application/json" format would be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. @@ -721,14 +723,18 @@ message ScopedRoutes { // If an element contains no separator, the whole element is parsed as key and the // fragment value is an empty string. // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_bytes: 1}]; + string separator = 1 [(validate.rules).string = {min_len: 1}]; // The key to match on. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; } // The name of the header field to extract the value from. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string name = 1 [(validate.rules).string = {min_len: 1}]; // The element separator (e.g., ';' separates 'a;b;c;d'). // Default: empty string. This causes the entirety of the header field to be extracted. @@ -762,7 +768,7 @@ message ScopedRoutes { } // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The algorithm to use for constructing a scope key for each request. ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; @@ -813,15 +819,15 @@ message HttpFilter { // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. oneof config_type { + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. google.protobuf.Any typed_config = 4; // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // In case of a failure and without the default configuration, the HTTP listener responds with code 500. // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). 
config.core.v4alpha.ExtensionConfigSource config_discovery = 5; } diff --git a/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto index 497e688f4c3d..0fac07427d0c 100644 --- a/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ b/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto @@ -20,5 +20,5 @@ message KafkaBroker { "envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker"; // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; } diff --git a/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto b/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto index 027bc0e3fc98..37eb8c62d0e2 100644 --- a/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto +++ b/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto @@ -24,7 +24,7 @@ message LocalRateLimit { // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The token bucket configuration to use for rate limiting connections that are processed by the // filter's filter chain. Each incoming connection processed by the filter consumes a single diff --git a/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto b/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto index 7bd17600d145..ebdfb6f2fcc0 100644 --- a/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto +++ b/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto @@ -17,13 +17,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // MongoDB :ref:`configuration overview `. // [#extension: envoy.filters.network.mongo_proxy] +// [#next-free-field: 6] message MongoProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.mongo_proxy.v2.MongoProxy"; // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The optional path to use for writing Mongo access logs. If not access log // path is specified no access logs will be written. Note that access log is @@ -39,4 +40,9 @@ message MongoProxy { // Flag to specify whether :ref:`dynamic metadata // ` should be emitted. Defaults to false. bool emit_dynamic_metadata = 4; + + // List of commands to emit metrics for. Defaults to "delete", "insert", and "update". + // Note that metrics will not be emitted for "find" commands, since those are considered + // queries, and metrics for those are emitted under a dedicated "query" namespace. + repeated string commands = 5; } diff --git a/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto index 663449b27035..9dfdb14d3f11 100644 --- a/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto +++ b/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto @@ -21,7 +21,7 @@ message MySQLProxy { // The human readable prefix to use when emitting :ref:`statistics // `. 
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. // If the access log field is empty, access logs will not be written. diff --git a/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto index b92d3cee2541..2fcdda846b6a 100644 --- a/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto @@ -26,10 +26,10 @@ message RateLimit { "envoy.config.filter.network.rate_limit.v2.RateLimit"; // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string = {min_bytes: 1}]; + string domain = 2 [(validate.rules).string = {min_len: 1}]; // The rate limit descriptor list to use in the rate limit service request. repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3 diff --git a/api/envoy/extensions/filters/network/rbac/v3/rbac.proto b/api/envoy/extensions/filters/network/rbac/v3/rbac.proto index e62f7b4c419e..6b8d3b0181b9 100644 --- a/api/envoy/extensions/filters/network/rbac/v3/rbac.proto +++ b/api/envoy/extensions/filters/network/rbac/v3/rbac.proto @@ -46,7 +46,7 @@ message RBAC { config.rbac.v3.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in diff --git a/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto b/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto index 8452a89822c1..a1508997df62 100644 --- a/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto +++ b/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto @@ -46,7 +46,7 @@ message RBAC { config.rbac.v4alpha.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 740095ac5120..4e1a8099fc37 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -140,7 +140,7 @@ message RedisProxy { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified or the runtime key is not present, all requests to the target cluster // will be mirrored. @@ -162,7 +162,7 @@ message RedisProxy { bool remove_prefix = 2; // Upstream cluster to forward the command to. 
- string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 3 [(validate.rules).string = {min_len: 1}]; // Indicates that the route has a request mirroring policy. repeated RequestMirrorPolicy request_mirror_policy = 4; @@ -213,7 +213,7 @@ message RedisProxy { reserved "cluster"; // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Network settings for the connection pool to the upstream clusters. ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto index ee77ab909592..e29a3d10af0a 100644 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message RocketmqProxy { // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is specified in this property. RouteConfiguration route_config = 2; diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto index 5fe5d33ffacf..899debcbde7b 100644 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto @@ -48,7 +48,7 @@ message RouteMatch { message RouteAction { // Indicates the upstream cluster to which the request should be routed. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. config.core.v3.Metadata metadata_match = 2; diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto index a765734e66db..cbc66fcd9979 100644 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto @@ -25,7 +25,7 @@ message RocketmqProxy { "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is specified in this property. RouteConfiguration route_config = 2; diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto index 995e8bcb05e3..0925afef833d 100644 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto @@ -60,7 +60,7 @@ message RouteAction { "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; // Indicates the upstream cluster to which the request should be routed. 
- string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. config.core.v4alpha.Metadata metadata_match = 2; diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto index 3d305cadcf40..cbe7581588b9 100644 --- a/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ b/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] -// [#next-free-field: 13] +// [#next-free-field: 14] message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy"; @@ -39,7 +39,7 @@ message TcpProxy { "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the @@ -67,7 +67,7 @@ message TcpProxy { "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig"; // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + string hostname = 1 [(validate.rules).string = {min_len: 1}]; } reserved 6; @@ -76,7 +76,7 @@ message TcpProxy { // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof cluster_specifier { option (validate.required) = true; @@ -134,4 +134,10 @@ message TcpProxy { // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload // will be proxied upstream as per usual. TunnelingConfig tunneling_config = 12; + + // The maximum duration of a connection. The duration is defined as the period since a connection + // was established. If not set, there is no max duration. When max_downstream_connection_duration + // is reached the connection will be closed. Duration must be at least 1ms. + google.protobuf.Duration max_downstream_connection_duration = 13 + [(validate.rules).duration = {gte {nanos: 1000000}}]; } diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto index 1857f2abcd4e..9a2f395176b1 100644 --- a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto +++ b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] -// [#next-free-field: 13] +// [#next-free-field: 14] message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; @@ -39,7 +39,7 @@ message TcpProxy { "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the @@ -67,7 +67,7 @@ message TcpProxy { "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + string hostname = 1 [(validate.rules).string = {min_len: 1}]; } reserved 6; @@ -76,7 +76,7 @@ message TcpProxy { // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof cluster_specifier { option (validate.required) = true; @@ -134,4 +134,10 @@ message TcpProxy { // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload // will be proxied upstream as per usual. TunnelingConfig tunneling_config = 12; + + // The maximum duration of a connection. The duration is defined as the period since a connection + // was established. If not set, there is no max duration. When max_downstream_connection_duration + // is reached the connection will be closed. Duration must be at least 1ms. + google.protobuf.Duration max_downstream_connection_duration = 13 + [(validate.rules).duration = {gte {nanos: 1000000}}]; } diff --git a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto index 4fc3289ae33d..c93b4d1e8e5a 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto @@ -25,7 +25,7 @@ message RateLimit { "envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit"; // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_bytes: 1}]; + string domain = 1 [(validate.rules).string = {min_len: 1}]; // Specifies the rate limit configuration stage. Each configured rate limit filter performs a // rate limit check using descriptors configured in the diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index b7afc4f0b803..f00b0e6983d1 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -91,7 +91,7 @@ message RouteAction { // Indicates a single upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -103,9 +103,8 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. 
- string cluster_header = 6 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string cluster_header = 6 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in @@ -138,7 +137,7 @@ message WeightedCluster { "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto index 74c71afb5424..2b9863e91ffa 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto @@ -72,7 +72,7 @@ message ThriftProxy { ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is static and is specified in this property. RouteConfiguration route_config = 4; @@ -99,7 +99,7 @@ message ThriftFilter { // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto index 374cc131ddf8..b73a78c4f2cc 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -91,7 +91,7 @@ message RouteAction { // Indicates a single upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -103,9 +103,8 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string cluster_header = 6 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in @@ -138,7 +137,7 @@ message WeightedCluster { "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto index 6bf055da3ce6..b75d0e39eaf2 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto @@ -72,7 +72,7 @@ message ThriftProxy { ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is static and is specified in this property. RouteConfiguration route_config = 4; @@ -99,7 +99,7 @@ message ThriftFilter { // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. diff --git a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto index 131582762b59..0c1ac6af440e 100644 --- a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto +++ b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -13,7 +13,10 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [[#not-implemented-hide:] +// [#protodoc-title: Wasm] +// [#extension: envoy.filters.network.wasm] +// Wasm :ref:`configuration overview `. + message Wasm { // General Plugin configuration. envoy.extensions.wasm.v3.PluginConfig config = 1; diff --git a/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto b/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto index a90f777d79ec..eb2c202c58f1 100644 --- a/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto +++ b/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto @@ -23,7 +23,7 @@ message ZooKeeperProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. // If the access log field is empty, access logs will not be written. diff --git a/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto index d3f6123548f8..1e986434f777 100644 --- a/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto +++ b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto @@ -35,13 +35,13 @@ message UdpProxyConfig { } // The stat prefix used when emitting UDP proxy filter stats. 
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; // The upstream cluster to connect to. - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by diff --git a/api/envoy/extensions/stat_sinks/wasm/v3/BUILD b/api/envoy/extensions/stat_sinks/wasm/v3/BUILD new file mode 100644 index 000000000000..c37174bdefc4 --- /dev/null +++ b/api/envoy/extensions/stat_sinks/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto b/api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto new file mode 100644 index 000000000000..3fc5dae91795 --- /dev/null +++ b/api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.extensions.stat_sinks.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Wasm] +// Wasm :ref:`configuration overview `. +// [#extension: envoy.stat_sinks.wasm] + +message Wasm { + // General Plugin configuration. + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto b/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto index 94359ce837bf..f41c8added21 100644 --- a/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto +++ b/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto @@ -20,8 +20,8 @@ message DatadogConfig { "envoy.config.trace.v3.DatadogConfig"; // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The name used for the service when traces are generated by envoy. - string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 2 [(validate.rules).string = {min_len: 1}]; } diff --git a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto b/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto index d311304a3ddf..21455a974d3b 100644 --- a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto +++ b/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto @@ -25,7 +25,7 @@ message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. - string library = 1 [(validate.rules).string = {min_bytes: 1}]; + string library = 1 [(validate.rules).string = {min_len: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. 
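The new envoy.extensions.stat_sinks.wasm.v3.Wasm message above simply wraps the shared Wasm PluginConfig for use as a stats sink. A hedged sketch of a bootstrap entry using it, with the sink name taken from the [#extension: envoy.stat_sinks.wasm] annotation; the plugin name, runtime identifier, and file path are assumptions for illustration only.

    stats_sinks:
    - name: envoy.stat_sinks.wasm
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.stat_sinks.wasm.v3.Wasm
        config:
          name: stats_plugin                 # illustrative plugin name
          vm_config:                         # the oneof field is named vm_config after this change
            runtime: envoy.wasm.runtime.v8   # illustrative runtime identifier
            code:
              local:
                filename: /etc/envoy/stats_plugin.wasm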
diff --git a/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto b/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto index 93ea47ba6a10..d7e306754dc9 100644 --- a/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto +++ b/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto @@ -35,11 +35,11 @@ message LightstepConfig { } // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // File containing the access token to the `LightStep // `_ API. - string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + string access_token_file = 2 [(validate.rules).string = {min_len: 1}]; // Propagation modes to use by LightStep's tracer. repeated PropagationMode propagation_modes = 3 diff --git a/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto b/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto index 3abbcad2de15..a6974fcc0ea6 100644 --- a/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto +++ b/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto @@ -47,12 +47,12 @@ message ZipkinConfig { // The cluster manager cluster that hosts the Zipkin collectors. Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster // resources `. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When // using a standard Zipkin installation, the API endpoint is typically // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new // trace instance. The default value is false, which will result in a 64 bit trace id being used. diff --git a/api/envoy/extensions/transport_sockets/alts/v3/alts.proto b/api/envoy/extensions/transport_sockets/alts/v3/alts.proto index 6c001be1c746..85a8c66d0c0e 100644 --- a/api/envoy/extensions/transport_sockets/alts/v3/alts.proto +++ b/api/envoy/extensions/transport_sockets/alts/v3/alts.proto @@ -22,7 +22,7 @@ message Alts { // The location of a handshaker service, this is usually 169.254.169.254:8080 // on GCE. - string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}]; + string handshaker_service = 1 [(validate.rules).string = {min_len: 1}]; // The acceptable service accounts from peer, peers not in the list will be rejected in the // handshake validation step. If empty, no validation will be performed. 
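For the ALTS transport socket touched above, a minimal sketch of how the handshaker_service field is typically set, using the GCE address mentioned in the field comment; whether this is attached to a cluster or a listener depends on the deployment.

    transport_socket:
      name: envoy.transport_sockets.alts
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.transport_sockets.alts.v3.Alts
        handshaker_service: "169.254.169.254:8080"   # usual location on GCE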
diff --git a/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto index c6c2ee9798d6..687226574d29 100644 --- a/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto +++ b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto @@ -6,7 +6,6 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; @@ -16,9 +15,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Upstream Proxy Protocol] // [#extension: envoy.transport_sockets.upstream_proxy_protocol] -// [#not-implemented-hide:] + // Configuration for PROXY protocol socket message ProxyProtocolUpstreamTransport { + // The PROXY protocol settings config.core.v3.ProxyProtocolConfig config = 1; // The underlying transport socket being wrapped. diff --git a/api/envoy/extensions/transport_sockets/tls/v3/common.proto b/api/envoy/extensions/transport_sockets/tls/v3/common.proto index 5eab3c1060b5..a7b9360d248a 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -120,7 +120,7 @@ message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string provider_name = 1 [(validate.rules).string = {min_len: 1}]; // Private key method provider specific configuration. oneof config_type { @@ -151,7 +151,9 @@ message TlsCertificate { // TLS private key is not password encrypted. config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - // [#not-implemented-hide:] + // The OCSP response to be stapled with this certificate during the handshake. + // The response must be DER-encoded and may only be provided via ``filename`` or + // ``inline_bytes``. The response may pertain to only one certificate. config.core.v3.DataSource ocsp_staple = 4; // [#not-implemented-hide:] @@ -205,7 +207,7 @@ message CertificateValidationContext { ACCEPT_UNTRUSTED = 1; } - reserved 4; + reserved 4, 5; reserved "verify_subject_alt_name"; @@ -264,7 +266,7 @@ message CertificateValidationContext { // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. @@ -293,7 +295,7 @@ message CertificateValidationContext { // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative name matchers. 
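Stepping back to the upstream PROXY protocol transport socket un-hidden earlier in this span, a hedged sketch of wrapping a cluster's transport socket with it, assuming a raw_buffer inner socket; the cluster name and the choice of PROXY protocol version are illustrative.

    clusters:
    - name: upstream_service
      transport_socket:
        name: envoy.transport_sockets.upstream_proxy_protocol
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.transport_sockets.proxy_protocol.v3.ProxyProtocolUpstreamTransport
          config:
            version: V2                                 # the PROXY protocol settings being applied
          transport_socket:
            name: envoy.transport_sockets.raw_buffer    # the underlying transport socket being wrapped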
Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified matches. @@ -315,9 +317,6 @@ message CertificateValidationContext { // `. repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - // [#not-implemented-hide:] Must present signed certificate time-stamp. google.protobuf.BoolValue require_signed_certificate_timestamp = 6; diff --git a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto index 80c68a56f5ce..f25370c3c9f6 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -12,6 +12,7 @@ import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "SecretProto"; @@ -33,7 +34,10 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + string name = 1 [ + (validate.rules).string = {min_len: 1}, + (udpa.annotations.field_migrate).oneof_promotion = "name_specifier" + ]; // Resource locator for SDS. This is mutually exclusive to *name*. // [#not-implemented-hide:] diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto index f746f3d2f1cf..e11b2691978c 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -54,11 +54,33 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 8] +// [#next-free-field: 9] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.DownstreamTlsContext"; + enum OcspStaplePolicy { + // OCSP responses are optional. If an OCSP response is absent + // or expired, the associated certificate will be used for + // connections without an OCSP staple. + LENIENT_STAPLING = 0; + + // OCSP responses are optional. If an OCSP response is absent, + // the associated certificate will be used without an + // OCSP staple. If a response is provided but is expired, + // the associated certificate will not be used for + // subsequent connections. If no suitable certificate is found, + // the connection is rejected. + STRICT_STAPLING = 1; + + // OCSP responses are required. Configuration will fail if + // a certificate is provided without an OCSP response. If a + // response expires, the associated certificate will not be + // used for connections. If no suitable certificate is found, the + // connection is rejected. + MUST_STAPLE = 2; + } + // Common TLS context settings.
CommonTlsContext common_tls_context = 1; @@ -96,6 +118,11 @@ message DownstreamTlsContext { lt {seconds: 4294967296} gte {} }]; + + // Config for whether to use certificates if they do not have + // an accompanying OCSP response or if the response expires at runtime. + // Defaults to LENIENT_STAPLING + OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; } // TLS context shared by both client and server TLS contexts. @@ -108,7 +135,7 @@ message CommonTlsContext { message CertificateProvider { // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Provider specific config. // Note: an implementation is expected to dedup multiple instances of the same config diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index 589dd17b543a..3608f93ffe30 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -121,7 +121,7 @@ message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string provider_name = 1 [(validate.rules).string = {min_len: 1}]; // Private key method provider specific configuration. oneof config_type { @@ -153,7 +153,9 @@ message TlsCertificate { // TLS private key is not password encrypted. config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - // [#not-implemented-hide:] + // The OCSP response to be stapled with this certificate during the handshake. + // The response must be DER-encoded and may only be provided via ``filename`` or + // ``inline_bytes``. The response may pertain to only one certificate. config.core.v4alpha.DataSource ocsp_staple = 4; // [#not-implemented-hide:] @@ -207,7 +209,7 @@ message CertificateValidationContext { ACCEPT_UNTRUSTED = 1; } - reserved 4; + reserved 4, 5; reserved "verify_subject_alt_name"; @@ -266,7 +268,7 @@ message CertificateValidationContext { // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. @@ -295,7 +297,7 @@ message CertificateValidationContext { // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative name matchers. Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified matches. @@ -317,9 +319,6 @@ message CertificateValidationContext { // `. 
repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - // [#not-implemented-hide:] Must present signed certificate time-stamp. google.protobuf.BoolValue require_signed_certificate_timestamp = 6; diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto index 11306f21415a..9848eaadef0b 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -11,6 +11,7 @@ import "udpa/core/v1/resource_locator.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; option java_outer_classname = "SecretProto"; @@ -35,7 +36,7 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Resource locator for SDS. This is mutually exclusive to *name*. // [#not-implemented-hide:] diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto index 44963f687073..6a49cb352ec4 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -53,11 +53,33 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 8] +// [#next-free-field: 9] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + enum OcspStaplePolicy { + // OCSP responses are optional. If an OCSP response is absent + // or expired, the associated certificate will be used for + // connections without an OCSP staple. + LENIENT_STAPLING = 0; + + // OCSP responses are optional. If an OCSP response is absent, + // the associated certificate will be used without an + // OCSP staple. If a response is provided but is expired, + // the associated certificate will not be used for + // subsequent connections. If no suitable certificate is found, + // the connection is rejected. + STRICT_STAPLING = 1; + + // OCSP responses are required. Configuration will fail if + // a certificate is provided without an OCSP response. If a + // response expires, the associated certificate will not be + // used for connections. If no suitable certificate is found, the + // connection is rejected. + MUST_STAPLE = 2; + } + // Common TLS context settings. CommonTlsContext common_tls_context = 1; @@ -95,6 +117,11 @@ message DownstreamTlsContext { lt {seconds: 4294967296} gte {} }]; + + // Config for whether to use certificates if they do not have + // an accompanying OCSP response or if the response expires at runtime. + // Defaults to LENIENT_STAPLING + OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; } // TLS context shared by both client and server TLS contexts.
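Taken together, the ocsp_staple field on TlsCertificate and the new ocsp_staple_policy on DownstreamTlsContext let a listener serve stapled OCSP responses. A minimal sketch of the relevant transport socket portion of a listener; the file paths and the choice of STRICT_STAPLING are illustrative assumptions, and LENIENT_STAPLING remains the default when the field is unset.

    transport_socket:
      name: envoy.transport_sockets.tls
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
        ocsp_staple_policy: STRICT_STAPLING
        common_tls_context:
          tls_certificates:
          - certificate_chain: {filename: /etc/envoy/certs/server.crt}
            private_key: {filename: /etc/envoy/certs/server.key}
            # DER-encoded OCSP response covering this single certificate.
            ocsp_staple: {filename: /etc/envoy/certs/server.ocsp.der}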
@@ -111,7 +138,7 @@ message CommonTlsContext { // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Provider specific config. // Note: an implementation is expected to dedup multiple instances of the same config diff --git a/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto index c6b02364aa2d..44e207172c9b 100644 --- a/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto +++ b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto @@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.generic.v3; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; -option java_outer_classname = "GenericConnectionPoolProto"; +option java_outer_classname = "GenericConnectionPoolProtoOuterClass"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto index e4c2d6ff9b84..8318f3c666d9 100644 --- a/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto +++ b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto @@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.http.v3; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; -option java_outer_classname = "HttpConnectionPoolProto"; +option java_outer_classname = "HttpConnectionPoolProtoOuterClass"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto index 5bc8734cb3f7..7c1d633432e9 100644 --- a/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto +++ b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto @@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.tcp.v3; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; -option java_outer_classname = "TcpConnectionPoolProto"; +option java_outer_classname = "TcpConnectionPoolProtoOuterClass"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/api/envoy/extensions/wasm/v3/wasm.proto b/api/envoy/extensions/wasm/v3/wasm.proto index 26f458214466..b42fb75a0bf7 100644 --- a/api/envoy/extensions/wasm/v3/wasm.proto +++ b/api/envoy/extensions/wasm/v3/wasm.proto @@ -16,8 +16,8 @@ option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] +// [#extension: envoy.bootstrap.wasm] -// [[#not-implemented-hide:] // Configuration for a Wasm VM. // [#next-free-field: 7] message VmConfig { @@ -29,7 +29,7 @@ message VmConfig { string vm_id = 1; // The Wasm runtime type (either "v8" or "null" for code compiled into Envoy). 
- string runtime = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime = 2 [(validate.rules).string = {min_len: 1}]; // The Wasm code that Envoy will execute. config.core.v3.AsyncDataSource code = 3; @@ -51,7 +51,6 @@ message VmConfig { bool nack_on_code_cache_miss = 6; } -// [[#not-implemented-hide:] // Base Configuration for Wasm Plugins e.g. filters and services. // [#next-free-field: 6] message PluginConfig { @@ -66,9 +65,9 @@ message PluginConfig { string root_id = 2; // Configuration for finding or starting VM. - oneof vm_config { - VmConfig inline_vm_config = 3; - // In the future add referential VM configurations. + oneof vm { + VmConfig vm_config = 3; + // TODO: add referential VM configurations. } // Filter/service configuration used to configure or reconfigure a plugin @@ -86,7 +85,6 @@ message PluginConfig { bool fail_open = 5; } -// [[#not-implemented-hide:] // WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService // ` This opaque configuration will be used to create a Wasm Service. message WasmService { diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto index 02636d0fb25f..fb2369089151 100644 --- a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto +++ b/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto @@ -23,7 +23,7 @@ message ProfileActionConfig { google.protobuf.Duration profile_duration = 1; // File path to the directory to output profiles. - string profile_path = 2 [(validate.rules).string = {min_bytes: 1}]; + string profile_path = 2 [(validate.rules).string = {min_len: 1}]; // Limits the max number of profiles that can be generated by this action // over its lifetime to avoid filling the disk. diff --git a/api/envoy/service/accesslog/v3/als.proto b/api/envoy/service/accesslog/v3/als.proto index 3f5e37325cc5..5421c2304918 100644 --- a/api/envoy/service/accesslog/v3/als.proto +++ b/api/envoy/service/accesslog/v3/als.proto @@ -50,7 +50,7 @@ message StreamAccessLogsMessage { // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. - string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string log_name = 2 [(validate.rules).string = {min_len: 1}]; } // Wrapper for batches of HTTP access log entries. diff --git a/api/envoy/service/accesslog/v4alpha/als.proto b/api/envoy/service/accesslog/v4alpha/als.proto index 4edb5eade0f2..e7e96583fd2c 100644 --- a/api/envoy/service/accesslog/v4alpha/als.proto +++ b/api/envoy/service/accesslog/v4alpha/als.proto @@ -50,7 +50,7 @@ message StreamAccessLogsMessage { // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. - string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string log_name = 2 [(validate.rules).string = {min_len: 1}]; } // Wrapper for batches of HTTP access log entries. diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto index 0f580fe7dc34..7dbfd3556968 100644 --- a/api/envoy/service/auth/v2/external_auth.proto +++ b/api/envoy/service/auth/v2/external_auth.proto @@ -43,7 +43,8 @@ message DeniedHttpResponse { type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers - // to the downstream client. + // to the downstream client. 
Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. repeated api.v2.core.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data @@ -55,9 +56,10 @@ message DeniedHttpResponse { message OkHttpResponse { // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`, - // the filter will append the correspondent header value to the matched request header. Note that - // by Leaving `append` as false, the filter will either add a new header, or override an existing + // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. By setting the `append` field to `true`, + // the filter will append the correspondent header value to the matched request header. + // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated api.v2.core.HeaderValueOption headers = 2; } diff --git a/api/envoy/service/auth/v3/attribute_context.proto b/api/envoy/service/auth/v3/attribute_context.proto index 3c4fe0af665e..cdf3ee9f96e4 100644 --- a/api/envoy/service/auth/v3/attribute_context.proto +++ b/api/envoy/service/auth/v3/attribute_context.proto @@ -97,7 +97,7 @@ message AttributeContext { // This message defines attributes for an HTTP request. // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 12] + // [#next-free-field: 13] message HttpRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.AttributeContext.HttpRequest"; @@ -145,6 +145,12 @@ message AttributeContext { // The HTTP request body. string body = 11; + + // The HTTP request body in bytes. This is used instead of + // :ref:`body ` when + // :ref:`pack_as_bytes ` + // is set to true. + bytes raw_body = 12; } // The source of a network activity, such as starting a TCP connection. diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto index 317d83abe485..9e2bf8fccd5b 100644 --- a/api/envoy/service/auth/v3/external_auth.proto +++ b/api/envoy/service/auth/v3/external_auth.proto @@ -50,7 +50,8 @@ message DeniedHttpResponse { type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers - // to the downstream client. + // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. repeated config.core.v3.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data @@ -59,18 +60,37 @@ message DeniedHttpResponse { } // HTTP attributes for an OK response. +// [#next-free-field: 6] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`, - // the filter will append the correspondent header value to the matched request header. 
Note that - // by Leaving `append` as false, the filter will either add a new header, or override an existing + // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. By setting the `append` field to `true`, + // the filter will append the correspondent header value to the matched request header. + // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v3.HeaderValueOption headers = 2; + // HTTP entity headers to remove from the original request before dispatching + // it to the upstream. This allows the authorization service to act on auth + // related headers (like `Authorization`), process them, and consume them. + // Under this model, the upstream will either receive the request (if it's + // authorized) or not receive it (if it's not), but will not see headers + // containing authorization credentials. + // + // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as + // the header `Host`, may not be removed as that would make the request + // malformed. If mentioned in `headers_to_remove` these special headers will + // be ignored. + // + // When using the HTTP service this must instead be set by the HTTP + // authorization service as a comma separated list like so: + // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. + repeated string headers_to_remove = 5; + // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata // `. Until it is removed, // setting this field overrides :ref:`CheckResponse.dynamic_metadata diff --git a/api/envoy/service/auth/v4alpha/attribute_context.proto b/api/envoy/service/auth/v4alpha/attribute_context.proto index 24f728c7adef..a1bf9c9c62cb 100644 --- a/api/envoy/service/auth/v4alpha/attribute_context.proto +++ b/api/envoy/service/auth/v4alpha/attribute_context.proto @@ -97,7 +97,7 @@ message AttributeContext { // This message defines attributes for an HTTP request. // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 12] + // [#next-free-field: 13] message HttpRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.AttributeContext.HttpRequest"; @@ -145,6 +145,12 @@ message AttributeContext { // The HTTP request body. string body = 11; + + // The HTTP request body in bytes. This is used instead of + // :ref:`body ` when + // :ref:`pack_as_bytes ` + // is set to true. + bytes raw_body = 12; } // The source of a network activity, such as starting a TCP connection. diff --git a/api/envoy/service/auth/v4alpha/external_auth.proto b/api/envoy/service/auth/v4alpha/external_auth.proto index cca67e6ecce9..06ccecec15da 100644 --- a/api/envoy/service/auth/v4alpha/external_auth.proto +++ b/api/envoy/service/auth/v4alpha/external_auth.proto @@ -50,7 +50,8 @@ message DeniedHttpResponse { type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers - // to the downstream client. + // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. repeated config.core.v4alpha.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data @@ -59,6 +60,7 @@ message DeniedHttpResponse { } // HTTP attributes for an OK response. 
+// [#next-free-field: 6] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.OkHttpResponse"; @@ -69,11 +71,29 @@ message OkHttpResponse { // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`, - // the filter will append the correspondent header value to the matched request header. Note that - // by Leaving `append` as false, the filter will either add a new header, or override an existing + // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. By setting the `append` field to `true`, + // the filter will append the correspondent header value to the matched request header. + // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v4alpha.HeaderValueOption headers = 2; + + // HTTP entity headers to remove from the original request before dispatching + // it to the upstream. This allows the authorization service to act on auth + // related headers (like `Authorization`), process them, and consume them. + // Under this model, the upstream will either receive the request (if it's + // authorized) or not receive it (if it's not), but will not see headers + // containing authorization credentials. + // + // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as + // the header `Host`, may not be removed as that would make the request + // malformed. If mentioned in `headers_to_remove` these special headers will + // be ignored. + // + // When using the HTTP service this must instead be set by the HTTP + // authorization service as a comma separated list like so: + // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. + repeated string headers_to_remove = 5; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/api/envoy/service/extension/v3/config_discovery.proto b/api/envoy/service/extension/v3/config_discovery.proto index 652355b707e3..d0b703312346 100644 --- a/api/envoy/service/extension/v3/config_discovery.proto +++ b/api/envoy/service/extension/v3/config_discovery.proto @@ -16,7 +16,7 @@ option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: ExtensionConfigDS] +// [#protodoc-title: Extension Config Discovery Service (ECDS)] // Return extension configurations. service ExtensionConfigDiscoveryService { diff --git a/api/envoy/service/runtime/v3/rtds.proto b/api/envoy/service/runtime/v3/rtds.proto index b12844233883..796b6fac24e6 100644 --- a/api/envoy/service/runtime/v3/rtds.proto +++ b/api/envoy/service/runtime/v3/rtds.proto @@ -52,7 +52,7 @@ message Runtime { // Runtime resource name. This makes the Runtime a self-describing xDS // resource. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; google.protobuf.Struct layer = 2; } diff --git a/api/envoy/service/status/v3/csds.proto b/api/envoy/service/status/v3/csds.proto index 23f1352bf489..8e81dcdd2bff 100644 --- a/api/envoy/service/status/v3/csds.proto +++ b/api/envoy/service/status/v3/csds.proto @@ -9,6 +9,7 @@ import "envoy/type/matcher/v3/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -21,9 +22,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client Status Discovery Service (CSDS)] // CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. In the -// future, it can potentially be used as an interface to get the current -// state directly from the client. +// an xDS-compliant client from the management server's point of view. It can +// also be used to get the current xDS states directly from the client. service ClientStatusDiscoveryService { rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { } @@ -34,7 +34,7 @@ service ClientStatusDiscoveryService { } } -// Status of a config. +// Status of a config from a management server view. enum ConfigStatus { // Status info is not available/unknown. UNKNOWN = 0; @@ -49,10 +49,30 @@ enum ConfigStatus { // ACK/NACK. STALE = 3; - // Management server has sent the config to client but received NACK. + // Management server has sent the config to client but received NACK. The + // attached config dump will be the latest config (the rejected one), since + // it is the persisted version in the management server. ERROR = 4; } +// Config status from a client-side view. +enum ClientConfigStatus { + // Config status is not available/unknown. + CLIENT_UNKNOWN = 0; + + // Client requested the config but hasn't received any config from management + // server yet. + CLIENT_REQUESTED = 1; + + // Client received the config and replied with ACK. + CLIENT_ACKED = 2; + + // Client received the config and replied with NACK. Notably, the attached + // config dump is not the NACKed version, but the most recent accepted one. If + // no config is accepted yet, the attached config dump will be empty. + CLIENT_NACKED = 3; +} + // Request for client status of clients identified by a list of NodeMatchers. message ClientStatusRequest { option (udpa.annotations.versioning).previous_message_type = @@ -67,12 +87,20 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 7] +// [#next-free-field: 8] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.PerXdsConfig"; - ConfigStatus status = 1; + // Config status generated by management servers. Will not be present if the + // CSDS server is an xDS client. + ConfigStatus status = 1 [(udpa.annotations.field_migrate).oneof_promotion = "status_config"]; + + // Client config status is populated by xDS clients. Will not be present if + // the CSDS server is an xDS server. No matter what the client config status + // is, xDS clients should always dump the most recent accepted xDS config. 
+ ClientConfigStatus client_status = 7 + [(udpa.annotations.field_migrate).oneof_promotion = "status_config"]; oneof per_xds_config { admin.v3.ListenersConfigDump listener_config = 2; diff --git a/api/envoy/service/status/v4alpha/csds.proto b/api/envoy/service/status/v4alpha/csds.proto index 37758954cadb..e1556de8b913 100644 --- a/api/envoy/service/status/v4alpha/csds.proto +++ b/api/envoy/service/status/v4alpha/csds.proto @@ -21,9 +21,8 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Client Status Discovery Service (CSDS)] // CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. In the -// future, it can potentially be used as an interface to get the current -// state directly from the client. +// an xDS-compliant client from the management server's point of view. It can +// also be used to get the current xDS states directly from the client. service ClientStatusDiscoveryService { rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { } @@ -34,7 +33,7 @@ service ClientStatusDiscoveryService { } } -// Status of a config. +// Status of a config from a management server view. enum ConfigStatus { // Status info is not available/unknown. UNKNOWN = 0; @@ -49,10 +48,30 @@ enum ConfigStatus { // ACK/NACK. STALE = 3; - // Management server has sent the config to client but received NACK. + // Management server has sent the config to client but received NACK. The + // attached config dump will be the latest config (the rejected one), since + // it is the persisted version in the management server. ERROR = 4; } +// Config status from a client-side view. +enum ClientConfigStatus { + // Config status is not available/unknown. + CLIENT_UNKNOWN = 0; + + // Client requested the config but hasn't received any config from management + // server yet. + CLIENT_REQUESTED = 1; + + // Client received the config and replied with ACK. + CLIENT_ACKED = 2; + + // Client received the config and replied with NACK. Notably, the attached + // config dump is not the NACKed version, but the most recent accepted one. If + // no config is accepted yet, the attached config dump will be empty. + CLIENT_NACKED = 3; +} + // Request for client status of clients identified by a list of NodeMatchers. message ClientStatusRequest { option (udpa.annotations.versioning).previous_message_type = @@ -67,12 +86,21 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 7] +// [#next-free-field: 8] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.PerXdsConfig"; - ConfigStatus status = 1; + oneof status_config { + // Config status generated by management servers. Will not be present if the + // CSDS server is an xDS client. + ConfigStatus status = 1; + + // Client config status is populated by xDS clients. Will not be present if + // the CSDS server is an xDS server. No matter what the client config status + // is, xDS clients should always dump the most recent accepted xDS config. 
+ ClientConfigStatus client_status = 7; + } oneof per_xds_config { admin.v4alpha.ListenersConfigDump listener_config = 2; diff --git a/api/envoy/service/tap/v2alpha/BUILD b/api/envoy/service/tap/v2alpha/BUILD index 267aeaa0efab..8e0561a169c5 100644 --- a/api/envoy/service/tap/v2alpha/BUILD +++ b/api/envoy/service/tap/v2alpha/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ - "//envoy/api/v2:pkg", "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "//envoy/data/tap/v2alpha:pkg", diff --git a/api/envoy/service/tap/v2alpha/tapds.proto b/api/envoy/service/tap/v2alpha/tapds.proto deleted file mode 100644 index 81b9cb0e447b..000000000000 --- a/api/envoy/service/tap/v2alpha/tapds.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v2alpha; - -import "envoy/api/v2/discovery.proto"; -import "envoy/service/tap/v2alpha/common.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; -option java_outer_classname = "TapdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap discovery service] - -// [#not-implemented-hide:] Tap discovery service. -service TapDiscoveryService { - rpc StreamTapConfigs(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaTapConfigs(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchTapConfigs(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:tap_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name -// The filter TapDS config references this name. -message TapResource { - // The name of the tap configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Tap config to apply - TapConfig config = 2; -} diff --git a/api/envoy/service/tap/v3/BUILD b/api/envoy/service/tap/v3/BUILD index 0aa82fa145be..5ee1ce553f48 100644 --- a/api/envoy/service/tap/v3/BUILD +++ b/api/envoy/service/tap/v3/BUILD @@ -8,9 +8,7 @@ api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/tap/v3:pkg", "//envoy/data/tap/v3:pkg", - "//envoy/service/discovery/v3:pkg", "//envoy/service/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/service/tap/v3/tapds.proto b/api/envoy/service/tap/v3/tapds.proto deleted file mode 100644 index 51393d6e14c7..000000000000 --- a/api/envoy/service/tap/v3/tapds.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v3; - -import "envoy/config/tap/v3/common.proto"; -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v3"; -option java_outer_classname = "TapdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap discovery service] - -// [#not-implemented-hide:] Tap discovery service. 
-service TapDiscoveryService { - rpc StreamTapConfigs(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaTapConfigs(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchTapConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:tap_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name -// The filter TapDS config references this name. -message TapResource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.TapResource"; - - // The name of the tap configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Tap config to apply - config.tap.v3.TapConfig config = 2; -} diff --git a/api/envoy/service/tap/v4alpha/BUILD b/api/envoy/service/tap/v4alpha/BUILD index 8e407d4f61e3..cb89a6907d9a 100644 --- a/api/envoy/service/tap/v4alpha/BUILD +++ b/api/envoy/service/tap/v4alpha/BUILD @@ -8,9 +8,7 @@ api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v4alpha:pkg", "//envoy/data/tap/v3:pkg", - "//envoy/service/discovery/v4alpha:pkg", "//envoy/service/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/service/tap/v4alpha/tapds.proto b/api/envoy/service/tap/v4alpha/tapds.proto deleted file mode 100644 index a041beea2697..000000000000 --- a/api/envoy/service/tap/v4alpha/tapds.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v4alpha; - -import "envoy/config/tap/v4alpha/common.proto"; -import "envoy/service/discovery/v4alpha/discovery.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; -option java_outer_classname = "TapdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap discovery service] - -// [#not-implemented-hide:] Tap discovery service. -service TapDiscoveryService { - rpc StreamTapConfigs(stream discovery.v4alpha.DiscoveryRequest) - returns (stream discovery.v4alpha.DiscoveryResponse) { - } - - rpc DeltaTapConfigs(stream discovery.v4alpha.DeltaDiscoveryRequest) - returns (stream discovery.v4alpha.DeltaDiscoveryResponse) { - } - - rpc FetchTapConfigs(discovery.v4alpha.DiscoveryRequest) - returns (discovery.v4alpha.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:tap_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name -// The filter TapDS config references this name. -message TapResource { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.TapResource"; - - // The name of the tap configuration. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Tap config to apply - config.tap.v4alpha.TapConfig config = 2; -} diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto index 2cbc602564c5..ed58d04adb02 100644 --- a/api/envoy/type/matcher/metadata.proto +++ b/api/envoy/type/matcher/metadata.proto @@ -83,12 +83,12 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index b23c0bff3075..6c499235bbe2 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -48,7 +48,7 @@ message RegexMatcher { } // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_bytes: 1}]; + string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto index 431043e00ec1..499eaf21775f 100644 --- a/api/envoy/type/matcher/string.proto +++ b/api/envoy/type/matcher/string.proto @@ -34,7 +34,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -42,7 +42,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. // The regex grammar is defined `here diff --git a/api/envoy/type/matcher/struct.proto b/api/envoy/type/matcher/struct.proto index f65b1d121845..10d4672e0622 100644 --- a/api/envoy/type/matcher/struct.proto +++ b/api/envoy/type/matcher/struct.proto @@ -72,7 +72,7 @@ message StructMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } diff --git a/api/envoy/type/matcher/v3/metadata.proto b/api/envoy/type/matcher/v3/metadata.proto index 65ec4f47ffff..a7184ee98050 100644 --- a/api/envoy/type/matcher/v3/metadata.proto +++ b/api/envoy/type/matcher/v3/metadata.proto @@ -89,12 +89,12 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. 
- string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/api/envoy/type/matcher/v3/regex.proto b/api/envoy/type/matcher/v3/regex.proto index 6087c6f90fad..f5913c460c46 100644 --- a/api/envoy/type/matcher/v3/regex.proto +++ b/api/envoy/type/matcher/v3/regex.proto @@ -54,7 +54,7 @@ message RegexMatcher { } // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_bytes: 1}]; + string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/api/envoy/type/matcher/v3/string.proto b/api/envoy/type/matcher/v3/string.proto index d453d43d3f85..7a91b58f3bc6 100644 --- a/api/envoy/type/matcher/v3/string.proto +++ b/api/envoy/type/matcher/v3/string.proto @@ -41,7 +41,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -49,7 +49,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; @@ -60,7 +60,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_bytes: 1}]; + string contains = 7 [(validate.rules).string = {min_len: 1}]; } // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no diff --git a/api/envoy/type/matcher/v3/struct.proto b/api/envoy/type/matcher/v3/struct.proto index b88d7b11bc2a..c753d07a5c0a 100644 --- a/api/envoy/type/matcher/v3/struct.proto +++ b/api/envoy/type/matcher/v3/struct.proto @@ -78,7 +78,7 @@ message StructMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } diff --git a/api/envoy/type/matcher/v4alpha/metadata.proto b/api/envoy/type/matcher/v4alpha/metadata.proto index 8abe14e7b667..35af650391ff 100644 --- a/api/envoy/type/matcher/v4alpha/metadata.proto +++ b/api/envoy/type/matcher/v4alpha/metadata.proto @@ -90,12 +90,12 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. 
repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/api/envoy/type/matcher/v4alpha/regex.proto b/api/envoy/type/matcher/v4alpha/regex.proto index 087c5e3f7292..537635ec87d0 100644 --- a/api/envoy/type/matcher/v4alpha/regex.proto +++ b/api/envoy/type/matcher/v4alpha/regex.proto @@ -49,7 +49,7 @@ message RegexMatcher { } // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_bytes: 1}]; + string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/api/envoy/type/matcher/v4alpha/string.proto b/api/envoy/type/matcher/v4alpha/string.proto index fc17946fe3b5..1bc0118ced9b 100644 --- a/api/envoy/type/matcher/v4alpha/string.proto +++ b/api/envoy/type/matcher/v4alpha/string.proto @@ -42,7 +42,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -50,7 +50,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; @@ -61,7 +61,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_bytes: 1}]; + string contains = 7 [(validate.rules).string = {min_len: 1}]; } // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no diff --git a/api/envoy/type/matcher/v4alpha/struct.proto b/api/envoy/type/matcher/v4alpha/struct.proto index 643cc5a47570..328ac555bd81 100644 --- a/api/envoy/type/matcher/v4alpha/struct.proto +++ b/api/envoy/type/matcher/v4alpha/struct.proto @@ -79,7 +79,7 @@ message StructMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } diff --git a/api/envoy/type/metadata/v3/metadata.proto b/api/envoy/type/metadata/v3/metadata.proto index ddcce6882057..b971d8debbe5 100644 --- a/api/envoy/type/metadata/v3/metadata.proto +++ b/api/envoy/type/metadata/v3/metadata.proto @@ -49,13 +49,13 @@ message MetadataKey { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The key name of Metadata to retrieve the Struct from the metadata. // Typically, it represents a builtin subsystem or custom extension. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. It can be a prefix or a full path, // e.g. 
``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, diff --git a/api/envoy/type/tracing/v3/custom_tag.proto b/api/envoy/type/tracing/v3/custom_tag.proto index 42518ead59d1..bcebe5779ba1 100644 --- a/api/envoy/type/tracing/v3/custom_tag.proto +++ b/api/envoy/type/tracing/v3/custom_tag.proto @@ -26,7 +26,7 @@ message CustomTag { "envoy.type.tracing.v2.CustomTag.Literal"; // Static literal value to populate the tag value. - string value = 1 [(validate.rules).string = {min_bytes: 1}]; + string value = 1 [(validate.rules).string = {min_len: 1}]; } // Environment type custom tag with environment name and default value. @@ -35,7 +35,7 @@ message CustomTag { "envoy.type.tracing.v2.CustomTag.Environment"; // Environment variable name to obtain the value to populate the tag value. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When the environment variable is not found, // the tag value will be populated with this default value if specified, @@ -50,7 +50,7 @@ message CustomTag { // Header name to obtain the value to populate the tag value. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // When the header does not exist, // the tag value will be populated with this default value if specified, @@ -80,7 +80,7 @@ message CustomTag { } // Used to populate the tag name. - string tag = 1 [(validate.rules).string = {min_bytes: 1}]; + string tag = 1 [(validate.rules).string = {min_len: 1}]; // Used to specify what kind of custom tag. oneof type { diff --git a/api/envoy/watchdog/v3alpha/BUILD b/api/envoy/watchdog/v3alpha/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/watchdog/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/watchdog/v3alpha/README.md b/api/envoy/watchdog/v3alpha/README.md new file mode 100644 index 000000000000..c8433b9c05b5 --- /dev/null +++ b/api/envoy/watchdog/v3alpha/README.md @@ -0,0 +1,2 @@ +This contains watchdog actions that are part of core Envoy, and therefore cannot +be in the extensions directory. diff --git a/api/envoy/watchdog/v3alpha/abort_action.proto b/api/envoy/watchdog/v3alpha/abort_action.proto new file mode 100644 index 000000000000..3f47fddaa77e --- /dev/null +++ b/api/envoy/watchdog/v3alpha/abort_action.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.watchdog.v3alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; +option java_outer_classname = "AbortActionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.] + +// A GuardDogAction that will terminate the process by killing the +// stuck thread. 
This would allow easier access to the call stack of the stuck +// thread since we would run signal handlers on that thread. By default +// this will be registered to run as the last watchdog action on KILL and +// MULTIKILL events if those are enabled. +message AbortActionConfig { + // How long to wait for the thread to respond to the thread kill function + // before killing the process from this action. This is a blocking action. + // By default this is 5 seconds. + google.protobuf.Duration wait_duration = 1; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index c93b1f7d84c5..2e0a1cd4997d 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -54,6 +54,7 @@ proto_library( "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", "//envoy/extensions/filters/http/cache/v3alpha:pkg", + "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", @@ -72,6 +73,7 @@ proto_library( "//envoy/extensions/filters/http/health_check/v3:pkg", "//envoy/extensions/filters/http/ip_tagging/v3:pkg", "//envoy/extensions/filters/http/jwt_authn/v3:pkg", + "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", @@ -118,6 +120,7 @@ proto_library( "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", + "//envoy/extensions/stat_sinks/wasm/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", @@ -151,6 +154,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", + "//envoy/watchdog/v3alpha:pkg", ], ) diff --git a/api/xds_protocol.rst b/api/xds_protocol.rst index c7cbe6f03bcc..641d30794006 100644 --- a/api/xds_protocol.rst +++ b/api/xds_protocol.rst @@ -695,10 +695,16 @@ An example minimal ``bootstrap.yaml`` fragment for ADS configuration is: address: port_value: lb_policy: ROUND_ROBIN - http2_protocol_options: {} + # It is recommended to configure either HTTP/2 or TCP keepalives in order to detect + # connection issues, and allow Envoy to reconnect. TCP keepalive is less expensive, but + # may be inadequate if there is a TCP proxy between Envoy and the management server. + # HTTP/2 keepalive is slightly more expensive, but may detect issues through more types + # of intermediate proxies. + http2_protocol_options: + connection_keepalive: + interval: 30s + timeout: 5s upstream_connection_options: - # configure a TCP keep-alive to detect and reconnect to the admin - # server in the event of a TCP socket disconnection tcp_keepalive: ... 
admin: diff --git a/bazel/BUILD b/bazel/BUILD index 0cdca5a06d7f..d03b931018a3 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -84,6 +84,33 @@ config_setting( }, ) +config_setting( + name = "clang_cl_opt_build", + values = { + "cpu": "x64_windows", + "define": "clang_cl=1", + "compilation_mode": "opt", + }, +) + +config_setting( + name = "clang_cl_dbg_build", + values = { + "cpu": "x64_windows", + "define": "clang_cl=1", + "compilation_mode": "dbg", + }, +) + +config_setting( + name = "clang_cl_fastbuild_build", + values = { + "cpu": "x64_windows", + "define": "clang_cl=1", + "compilation_mode": "fastbuild", + }, +) + config_setting( name = "opt_build", values = {"compilation_mode": "opt"}, @@ -155,6 +182,37 @@ config_setting( values = {"define": "tcmalloc=debug"}, ) +config_setting( + name = "gperftools_tcmalloc", + values = {"define": "tcmalloc=gperftools"}, +) + +# As select() can't be nested we need these specialized settings to avoid ambiguity when choosing +# tcmalloc's flavor for x86_64 builds. +config_setting( + name = "disable_tcmalloc_on_linux_x86_64", + values = { + "define": "tcmalloc=disabled", + "cpu": "k8", + }, +) + +config_setting( + name = "gperftools_tcmalloc_on_linux_x86_64", + values = { + "define": "tcmalloc=gperftools", + "cpu": "k8", + }, +) + +config_setting( + name = "debug_tcmalloc_on_linux_x86_64", + values = { + "define": "tcmalloc=debug", + "cpu": "k8", + }, +) + config_setting( name = "disable_signal_trace", values = {"define": "signal_trace=disabled"}, @@ -256,6 +314,27 @@ config_setting( values = {"define": "quiche=enabled"}, ) +# TODO: consider converting WAVM VM support to an extension (https://github.com/envoyproxy/envoy/issues/12574) +config_setting( + name = "wasm_all", + values = {"define": "wasm=enabled"}, +) + +config_setting( + name = "wasm_wavm", + values = {"define": "wasm=wavm"}, +) + +config_setting( + name = "wasm_v8", + values = {"define": "wasm=v8"}, +) + +config_setting( + name = "wasm_none", + values = {"define": "wasm=disabled"}, +) + # Alias pointing to the selected version of BoringSSL: # - BoringSSL FIPS from @boringssl_fips//:ssl, # - non-FIPS BoringSSL from @boringssl//:ssl. diff --git a/bazel/README.md b/bazel/README.md index 951c76dc53e4..b8bb300a2a77 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -29,10 +29,10 @@ to find the right version of Bazel and set the version to `USE_BAZEL_VERSION` en ## Production environments To build Envoy with Bazel in a production environment, where the [Envoy -dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements) are typically +dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#requirements) are typically independently sourced, the following steps should be followed: -1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements). +1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#requirements). 1. `bazel build -c opt //source/exe:envoy-static` from the repository root. ## Quick start Bazel build for developers @@ -127,6 +127,11 @@ for how to update or override dependencies. startup --output_base=C:/_eb ``` + Bazel also creates file symlinks when building Envoy. It's strongly recommended to enable file symlink support + using [Bazel's instructions](https://docs.bazel.build/versions/master/windows.html#enable-symlink-support). 
+ For other common issues, see the + [Using Bazel on Windows](https://docs.bazel.build/versions/master/windows.html) page. + [python3](https://www.python.org/downloads/): Specifically, the Windows-native flavor distributed by python.org. The POSIX flavor available via MSYS2, the Windows Store flavor and other distributions will not work. Add a symlink for `python3.exe` pointing to the installed `python.exe` for Envoy scripts @@ -145,7 +150,8 @@ for how to update or override dependencies. package. Earlier versions of VC++ Build Tools/Visual Studio are not recommended or supported. If installed in a non-standard filesystem location, be sure to set the `BAZEL_VC` environment variable to the path of the VC++ package to allow Bazel to find your installation of VC++. NOTE: ensure that - the `link.exe` that resolves on your PATH is from VC++ Build Tools and not `/usr/bin/link.exe` from MSYS2. + the `link.exe` that resolves on your PATH is from VC++ Build Tools and not `/usr/bin/link.exe` from MSYS2, + which is determined by their relative ordering in your PATH. ``` set BAZEL_VC=%USERPROFILE%\VSBT2019\VC set PATH=%PATH%;%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64 @@ -160,10 +166,11 @@ for how to update or override dependencies. set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja ``` - [MSYS2 shell](https://msys2.github.io/): Set the `BAZEL_SH` environment variable to the path - of the installed MSYS2 `bash.exe` executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment - variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell - behaves as expected. + [MSYS2 shell](https://msys2.github.io/): Install to a path with no spaces, e.g. C:\msys32. + + Set the `BAZEL_SH` environment variable to the path of the installed MSYS2 `bash.exe` + executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment variable to a value + of `*` is often advisable to ensure argument parsing in the MSYS2 shell behaves as expected. ``` set PATH=%PATH%;%USERPROFILE%\msys64\usr\bin set BAZEL_SH=%USERPROFILE%\msys64\usr\bin\bash.exe @@ -181,7 +188,7 @@ for how to update or override dependencies. The TMPDIR path and MSYS2 `mktemp` command are used frequently by the `rules_foreign_cc` component of Bazel as well as Envoy's test scripts, causing problems if not set to a path accessible to both Windows and msys commands. [Note the `ci/windows_ci_steps.sh` script - which builds envoy and run tests in CI) creates this symlink automatically.] + which builds envoy and run tests in CI creates this symlink automatically.] In the MSYS2 shell, install additional packages via pacman: ``` @@ -212,7 +219,8 @@ for how to update or override dependencies. in your shell for buildifier to work. 1. `go get -u github.com/bazelbuild/buildtools/buildozer` to install buildozer. You may need to set `BUILDOZER_BIN` to `$GOPATH/bin/buildozer` in your shell for buildozer to work. -1. `bazel build //source/exe:envoy-static` from the Envoy source directory. +1. `bazel build //source/exe:envoy-static` from the Envoy source directory. Add `-c opt` for an optimized release build or + `-c dbg` for an unoptimized, fully instrumented debugging build. 
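Putting the quick-start steps above together, a typical developer invocation looks like the following (a minimal sketch; it assumes Bazel and the toolchain prerequisites described earlier in this section are already installed, and that commands are run from the Envoy source directory):

```
# Default fastbuild binary.
bazel build //source/exe:envoy-static

# Optimized release build.
bazel build -c opt //source/exe:envoy-static

# Unoptimized, fully instrumented debugging build.
bazel build -c dbg //source/exe:envoy-static
```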
## Building Envoy with the CI Docker image @@ -228,7 +236,7 @@ From a Windows host with Docker installed, the Windows containers feature enable MSYS2 or Git bash), run: ``` -./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh' +./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh' ``` See also the [documentation](https://github.com/envoyproxy/envoy/tree/master/ci) for developer use of the @@ -599,7 +607,8 @@ The following optional features can be disabled on the Bazel build command-line: * Google C++ gRPC client with `--define google_grpc=disabled` * Backtracing on signals with `--define signal_trace=disabled` * Active stream state dump on signals with `--define signal_trace=disabled` or `--define disable_object_dump_on_signal_trace=disabled` -* tcmalloc with `--define tcmalloc=disabled` +* tcmalloc with `--define tcmalloc=disabled`. Also you can choose Gperftools' implementation of + tcmalloc with `--define tcmalloc=gperftools` which is the default for non-x86 builds. * deprecated features with `--define deprecated_features=disabled` @@ -618,7 +627,8 @@ The following optional features can be enabled on the Bazel build command-line: `--define log_debug_assert_in_release=enabled`. The default behavior is to compile debug assertions out of release builds so that the condition is not evaluated. This option has no effect in debug builds. * memory-debugging (scribbling over memory after allocation and before freeing) with - `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant mode BoringSSL. + `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant mode BoringSSL and + tcmalloc is built from the sources of Gperftools. * Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with `--define path_normalization_by_default=true`. Note this still could be disable by explicit xDS config. * Manual stamping via VersionInfo with `--define manual_stamp=manual_stamp`. diff --git a/bazel/coverage/collect_cc_coverage.sh b/bazel/coverage/collect_cc_coverage.sh index 53926e5cb6af..f0b85c5c0ddb 100755 --- a/bazel/coverage/collect_cc_coverage.sh +++ b/bazel/coverage/collect_cc_coverage.sh @@ -41,6 +41,8 @@ # gcda or profraw) and uses either lcov or gcov to get the coverage data. # The coverage data is placed in $COVERAGE_OUTPUT_FILE. +read -ra COVERAGE_GCOV_OPTIONS <<< "${COVERAGE_GCOV_OPTIONS:-}" + # Checks if clang llvm coverage should be used instead of lcov. function uses_llvm() { if stat "${COVERAGE_DIR}"/*.profraw >/dev/null 2>&1; then @@ -68,24 +70,24 @@ function init_gcov() { # $COVERAGE_DIR. # Writes the collected coverage into the given output file. 
function llvm_coverage() { - local output_file="${1}"; shift + local output_file="${1}" object_file object_files object_param=() + shift export LLVM_PROFILE_FILE="${COVERAGE_DIR}/%h-%p-%m.profraw" "${COVERAGE_GCOV_PATH}" merge -output "${output_file}.data" \ "${COVERAGE_DIR}"/*.profraw - local object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \ + object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \ | grep ELF | grep -v "LSB core" | sed 's,:.*,,')" - - local object_param="" + for object_file in ${object_files}; do - object_param+=" -object ${object_file}" + object_param+=(-object "${object_file}") done llvm-cov export -instr-profile "${output_file}.data" -format=lcov \ -ignore-filename-regex='.*external/.+' \ -ignore-filename-regex='/tmp/.+' \ - ${object_param} | sed 's#/proc/self/cwd/##' > "${output_file}" + "${object_param[@]}" | sed 's#/proc/self/cwd/##' > "${output_file}" } # Generates a code coverage report in gcov intermediate text format by invoking @@ -97,17 +99,15 @@ function llvm_coverage() { # - output_file The location of the file where the generated code coverage # report is written. function gcov_coverage() { - local output_file="${1}"; shift - - # We'll save the standard output of each the gcov command in this log. - local gcov_log="$output_file.gcov.log" + local gcda gcno_path line output_file="${1}" + shift # Copy .gcno files next to their corresponding .gcda files in $COVERAGE_DIR # because gcov expects them to be in the same directory. while read -r line; do if [[ ${line: -4} == "gcno" ]]; then gcno_path=${line} - local gcda="${COVERAGE_DIR}/$(dirname ${gcno_path})/$(basename ${gcno_path} .gcno).gcda" + gcda="${COVERAGE_DIR}/$(dirname "${gcno_path}")/$(basename "${gcno_path}" .gcno).gcda" # If the gcda file was not found we skip generating coverage from the gcno # file. if [[ -f "$gcda" ]]; then @@ -115,7 +115,7 @@ function gcov_coverage() { # We overcome this by copying the gcno to $COVERAGE_DIR where the gcda # files are expected to be. if [ ! -f "${COVERAGE_DIR}/${gcno_path}" ]; then - mkdir -p "${COVERAGE_DIR}/$(dirname ${gcno_path})" + mkdir -p "${COVERAGE_DIR}/$(dirname "${gcno_path}")" cp "$ROOT/${gcno_path}" "${COVERAGE_DIR}/${gcno_path}" fi # Invoke gcov to generate a code coverage report with the flags: @@ -134,12 +134,12 @@ function gcov_coverage() { # Don't generate branch coverage (-b) because of a gcov issue that # segfaults when both -i and -b are used (see # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84879). - "${GCOV}" -i $COVERAGE_GCOV_OPTIONS -o "$(dirname ${gcda})" "${gcda}" + "${GCOV}" -i "${COVERAGE_GCOV_OPTIONS[@]}" -o "$(dirname "${gcda}")" "${gcda}" # Append all .gcov files in the current directory to the output file. - cat *.gcov >> "$output_file" + cat ./*.gcov >> "$output_file" # Delete the .gcov files. - rm *.gcov + rm ./*.gcov fi fi done < "${COVERAGE_MANIFEST}" diff --git a/bazel/coverage/fuzz_coverage_wrapper.sh b/bazel/coverage/fuzz_coverage_wrapper.sh index 42d705f7f134..f185b7d37083 100755 --- a/bazel/coverage/fuzz_coverage_wrapper.sh +++ b/bazel/coverage/fuzz_coverage_wrapper.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -x +set -ex TEST_BINARY=$1 shift @@ -9,9 +9,14 @@ shift rm -rf fuzz_corpus mkdir -p fuzz_corpus/seed_corpus -cp -r $@ fuzz_corpus/seed_corpus +cp -r "$@" fuzz_corpus/seed_corpus # TODO(asraa): When fuzz targets are stable, remove error suppression and run coverage while fuzzing. 
-LLVM_PROFILE_FILE= ${TEST_BINARY} fuzz_corpus -seed=${FUZZ_CORPUS_SEED:-1} -max_total_time=${FUZZ_CORPUS_TIME:-60} -max_len=2048 -rss_limit_mb=8192 || true +LLVM_PROFILE_FILE='' ${TEST_BINARY} fuzz_corpus -seed="${FUZZ_CORPUS_SEED:-1}" -max_total_time="${FUZZ_CORPUS_TIME:-60}" -max_len=2048 -rss_limit_mb=8192 -timeout=30 || : -${TEST_BINARY} fuzz_corpus -rss_limit_mb=8192 -runs=0 +# Passing files instead of a directory will run fuzzing as a regression test. +# TODO(asraa): Remove manual `|| :`, but this shouldn't be necessary. +_CORPUS="$(find fuzz_corpus -type f)" +while read -r line; do CORPUS+=("$line"); done \ + <<< "$_CORPUS" +${TEST_BINARY} "${CORPUS[@]}" -rss_limit_mb=8192 || : diff --git a/bazel/crates.bzl b/bazel/crates.bzl new file mode 100644 index 000000000000..d4373143ddd4 --- /dev/null +++ b/bazel/crates.bzl @@ -0,0 +1,113 @@ +""" +cargo-raze crate workspace functions + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") + +def _new_http_archive(name, **kwargs): + if not native.existing_rule(name): + http_archive(name = name, **kwargs) + +def _new_git_repository(name, **kwargs): + if not native.existing_rule(name): + new_git_repository(name = name, **kwargs) + +def raze_fetch_remote_crates(): + _new_http_archive( + name = "raze__ahash__0_3_8", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/ahash/ahash-0.3.8.crate", + type = "tar.gz", + strip_prefix = "ahash-0.3.8", + build_file = Label("//bazel/external/cargo/remote:ahash-0.3.8.BUILD"), + ) + + _new_http_archive( + name = "raze__autocfg__1_0_0", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/autocfg/autocfg-1.0.0.crate", + type = "tar.gz", + strip_prefix = "autocfg-1.0.0", + build_file = Label("//bazel/external/cargo/remote:autocfg-1.0.0.BUILD"), + ) + + _new_http_archive( + name = "raze__cfg_if__0_1_10", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/cfg-if/cfg-if-0.1.10.crate", + type = "tar.gz", + strip_prefix = "cfg-if-0.1.10", + build_file = Label("//bazel/external/cargo/remote:cfg-if-0.1.10.BUILD"), + ) + + _new_http_archive( + name = "raze__hashbrown__0_7_2", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/hashbrown/hashbrown-0.7.2.crate", + type = "tar.gz", + strip_prefix = "hashbrown-0.7.2", + build_file = Label("//bazel/external/cargo/remote:hashbrown-0.7.2.BUILD"), + ) + + _new_http_archive( + name = "raze__libc__0_2_74", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/libc/libc-0.2.74.crate", + type = "tar.gz", + strip_prefix = "libc-0.2.74", + build_file = Label("//bazel/external/cargo/remote:libc-0.2.74.BUILD"), + ) + + _new_http_archive( + name = "raze__log__0_4_11", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/log/log-0.4.11.crate", + type = "tar.gz", + strip_prefix = "log-0.4.11", + build_file = Label("//bazel/external/cargo/remote:log-0.4.11.BUILD"), + ) + + _new_http_archive( + name = "raze__memory_units__0_4_0", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/memory_units/memory_units-0.4.0.crate", + type = "tar.gz", + strip_prefix = "memory_units-0.4.0", + build_file = Label("//bazel/external/cargo/remote:memory_units-0.4.0.BUILD"), + ) + + _new_http_archive( + name = "raze__proxy_wasm__0_1_2", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/proxy-wasm/proxy-wasm-0.1.2.crate", + type = "tar.gz", + strip_prefix = "proxy-wasm-0.1.2", + build_file = 
Label("//bazel/external/cargo/remote:proxy-wasm-0.1.2.BUILD"), + ) + + _new_http_archive( + name = "raze__wee_alloc__0_4_5", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/wee_alloc/wee_alloc-0.4.5.crate", + type = "tar.gz", + strip_prefix = "wee_alloc-0.4.5", + build_file = Label("//bazel/external/cargo/remote:wee_alloc-0.4.5.BUILD"), + ) + + _new_http_archive( + name = "raze__winapi__0_3_9", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi/winapi-0.3.9.crate", + type = "tar.gz", + strip_prefix = "winapi-0.3.9", + build_file = Label("//bazel/external/cargo/remote:winapi-0.3.9.BUILD"), + ) + + _new_http_archive( + name = "raze__winapi_i686_pc_windows_gnu__0_4_0", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-i686-pc-windows-gnu/winapi-i686-pc-windows-gnu-0.4.0.crate", + type = "tar.gz", + strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0", + build_file = Label("//bazel/external/cargo/remote:winapi-i686-pc-windows-gnu-0.4.0.BUILD"), + ) + + _new_http_archive( + name = "raze__winapi_x86_64_pc_windows_gnu__0_4_0", + url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-x86_64-pc-windows-gnu/winapi-x86_64-pc-windows-gnu-0.4.0.crate", + type = "tar.gz", + strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0", + build_file = Label("//bazel/external/cargo/remote:winapi-x86_64-pc-windows-gnu-0.4.0.BUILD"), + ) diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 92c837a4f06a..dc02f5056cd0 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -5,6 +5,8 @@ load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies") load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository") +load("@io_bazel_rules_rust//rust:repositories.bzl", "rust_repositories") +load("@io_bazel_rules_rust//:workspace.bzl", "bazel_version") load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install = "pip_install") load("@configs_pip3//:requirements.bzl", configs_pip_install = "pip_install") load("@headersplit_pip3//:requirements.bzl", headersplit_pip_install = "pip_install") @@ -23,8 +25,10 @@ def envoy_dependency_imports(go_version = GO_VERSION): rbe_toolchains_config() gazelle_dependencies() apple_rules_dependencies() + rust_repositories() + bazel_version(name = "bazel_version") upb_bazel_version_repository(name = "upb_bazel_version") - antlr_dependencies(471) + antlr_dependencies(472) custom_exec_properties( name = "envoy_large_machine_exec_property", @@ -33,6 +37,7 @@ def envoy_dependency_imports(go_version = GO_VERSION): }, ) + # These dependencies, like most of the Go in this repository, exist only for the API. 
go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", @@ -40,14 +45,12 @@ def envoy_dependency_imports(go_version = GO_VERSION): sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=", version = "v1.29.1", ) - go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", sum = "h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=", version = "v0.0.0-20190813141303-74dc4d7220e7", ) - go_repository( name = "org_golang_x_text", importpath = "golang.org/x/text", diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl index 16adfb38a439..d966bdf5a104 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -60,8 +60,13 @@ def _envoy_linkopts(): "-pagezero_size 10000", "-image_base 100000000", ], + "@envoy//bazel:clang_cl_opt_build": [ + "-DEFAULTLIB:ws2_32.lib", + "-DEFAULTLIB:iphlpapi.lib", + "-DEBUG:FULL", + "-WX", + ], "@envoy//bazel:windows_x86_64": [ - "-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:ws2_32.lib", "-DEFAULTLIB:iphlpapi.lib", "-WX", diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index bdeb501e3068..e95329095dca 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -19,6 +19,10 @@ load( _envoy_select_google_grpc = "envoy_select_google_grpc", _envoy_select_hot_restart = "envoy_select_hot_restart", _envoy_select_new_codecs_in_integration_tests = "envoy_select_new_codecs_in_integration_tests", + _envoy_select_wasm = "envoy_select_wasm", + _envoy_select_wasm_all_v8_wavm_none = "envoy_select_wasm_all_v8_wavm_none", + _envoy_select_wasm_v8 = "envoy_select_wasm_v8", + _envoy_select_wasm_wavm = "envoy_select_wasm_wavm", ) load( ":envoy_test.bzl", @@ -176,6 +180,10 @@ def envoy_google_grpc_external_deps(): envoy_select_boringssl = _envoy_select_boringssl envoy_select_google_grpc = _envoy_select_google_grpc envoy_select_hot_restart = _envoy_select_hot_restart +envoy_select_wasm = _envoy_select_wasm +envoy_select_wasm_all_v8_wavm_none = _envoy_select_wasm_all_v8_wavm_none +envoy_select_wasm_wavm = _envoy_select_wasm_wavm +envoy_select_wasm_v8 = _envoy_select_wasm_v8 envoy_select_new_codecs_in_integration_tests = _envoy_select_new_codecs_in_integration_tests # Binary wrappers (from envoy_binary.bzl) diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 10b3448c00ee..96ca7a504f9f 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -32,9 +32,17 @@ def envoy_copts(repository, test = False): "-DNOMCX", "-DNOIME", "-DNOCRYPT", - # this is to silence the incorrect MSVC compiler warning when trying to convert between - # std::optional data types while conversions between primitive types are producing no error + # Ignore unguarded gcc pragmas in quiche (unrecognized by MSVC) + # TODO(wrowe,sunjayBhatia): Drop this change when fixed in bazel/external/quiche.genrule_cmd + "-wd4068", + # Silence incorrect MSVC compiler warnings when converting between std::optional + # data types (while conversions between primitive types are producing no error) "-wd4244", + # Allow inline functions to be undefined + "-wd4506", + # Allow 'nodiscard' function return values to be discarded + # TODO(wrowe,sunjayBhatia): Drop this option when all causes are fixed + "-wd4834", ] return select({ @@ -48,8 +56,11 @@ def envoy_copts(repository, test = False): repository + "//bazel:windows_opt_build": [], repository + "//bazel:windows_fastbuild_build": [], repository + "//bazel:windows_dbg_build": [], + repository + "//bazel:clang_cl_opt_build": [] if test else ["-Z7", "-fstandalone-debug"], 
+ repository + "//bazel:clang_cl_fastbuild_build": ["-fno-standalone-debug"], + repository + "//bazel:clang_cl_dbg_build": ["-fstandalone-debug"], }) + select({ - repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++2a-extensions"], + repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++2a-extensions", "-Wrange-loop-analysis"], repository + "//bazel:gcc_build": ["-Wno-maybe-uninitialized"], "//conditions:default": [], }) + select({ @@ -57,10 +68,13 @@ def envoy_copts(repository, test = False): "//conditions:default": [], }) + select({ repository + "//bazel:disable_tcmalloc": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"], - "//conditions:default": ["-DTCMALLOC"], - }) + select({ - repository + "//bazel:debug_tcmalloc": ["-DENVOY_MEMORY_DEBUG_ENABLED=1"], - "//conditions:default": [], + repository + "//bazel:disable_tcmalloc_on_linux_x86_64": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"], + repository + "//bazel:gperftools_tcmalloc": ["-DGPERFTOOLS_TCMALLOC"], + repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": ["-DGPERFTOOLS_TCMALLOC"], + repository + "//bazel:debug_tcmalloc": ["-DENVOY_MEMORY_DEBUG_ENABLED=1", "-DGPERFTOOLS_TCMALLOC"], + repository + "//bazel:debug_tcmalloc_on_linux_x86_64": ["-DENVOY_MEMORY_DEBUG_ENABLED=1", "-DGPERFTOOLS_TCMALLOC"], + repository + "//bazel:linux_x86_64": ["-DTCMALLOC"], + "//conditions:default": ["-DGPERFTOOLS_TCMALLOC"], }) + select({ repository + "//bazel:disable_signal_trace": [], "//conditions:default": ["-DENVOY_HANDLE_SIGNALS"], @@ -115,6 +129,12 @@ def envoy_stdlib_deps(): def tcmalloc_external_dep(repository): return select({ repository + "//bazel:disable_tcmalloc": None, + repository + "//bazel:disable_tcmalloc_on_linux_x86_64": None, + repository + "//bazel:debug_tcmalloc": envoy_external_dep_path("gperftools"), + repository + "//bazel:debug_tcmalloc_on_linux_x86_64": envoy_external_dep_path("gperftools"), + repository + "//bazel:gperftools_tcmalloc": envoy_external_dep_path("gperftools"), + repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": envoy_external_dep_path("gperftools"), + repository + "//bazel:linux_x86_64": envoy_external_dep_path("tcmalloc"), "//conditions:default": envoy_external_dep_path("gperftools"), }) diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 471c8b72eec7..5eb90df500c0 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -20,6 +20,12 @@ load( def tcmalloc_external_deps(repository): return select({ repository + "//bazel:disable_tcmalloc": [], + repository + "//bazel:disable_tcmalloc_on_linux_x86_64": [], + repository + "//bazel:debug_tcmalloc": [envoy_external_dep_path("gperftools")], + repository + "//bazel:debug_tcmalloc_on_linux_x86_64": [envoy_external_dep_path("gperftools")], + repository + "//bazel:gperftools_tcmalloc": [envoy_external_dep_path("gperftools")], + repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": [envoy_external_dep_path("gperftools")], + repository + "//bazel:linux_x86_64": [envoy_external_dep_path("tcmalloc")], "//conditions:default": [envoy_external_dep_path("gperftools")], }) @@ -98,7 +104,8 @@ def envoy_cc_library( tags = [], deps = [], strip_include_prefix = None, - textual_hdrs = None): + textual_hdrs = None, + defines = []): if tcmalloc_dep: deps += tcmalloc_external_deps(repository) @@ -123,6 +130,7 @@ def envoy_cc_library( alwayslink = 1, linkstatic = envoy_linkstatic(), strip_include_prefix = strip_include_prefix, + defines = defines, ) # 
Intended for usage by external consumers. This allows them to disambiguate diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl index 107ad2a21bde..5a33e4da515d 100644 --- a/bazel/envoy_select.bzl +++ b/bazel/envoy_select.bzl @@ -32,6 +32,36 @@ def envoy_select_hot_restart(xs, repository = ""): "//conditions:default": xs, }) +# Selects the given values depending on the WASM runtimes enabled in the current build. +def envoy_select_wasm(xs): + return select({ + "@envoy//bazel:wasm_none": [], + "//conditions:default": xs, + }) + +def envoy_select_wasm_v8(xs): + return select({ + "@envoy//bazel:wasm_wavm": [], + "@envoy//bazel:wasm_none": [], + "//conditions:default": xs, + }) + +def envoy_select_wasm_wavm(xs): + return select({ + "@envoy//bazel:wasm_all": xs, + "@envoy//bazel:wasm_wavm": xs, + "//conditions:default": [], + }) + +def envoy_select_wasm_all_v8_wavm_none(xs1, xs2, xs3, xs4): + return select({ + "@envoy//bazel:wasm_all": xs1, + "@envoy//bazel:wasm_v8": xs2, + "@envoy//bazel:wasm_wavm": xs3, + "@envoy//bazel:wasm_none": xs4, + "//conditions:default": xs2, + }) + # Select the given values if use legacy codecs in test is on in the current build. def envoy_select_new_codecs_in_integration_tests(xs, repository = ""): return select({ diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 454733300dd8..1d5720149428 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -29,6 +29,7 @@ def _envoy_cc_test_infrastructure_library( tags = [], include_prefix = None, copts = [], + alwayslink = 1, **kargs): # Add implicit tcmalloc external dependency(if available) in order to enable CPU and heap profiling in tests. deps += tcmalloc_external_deps(repository) @@ -44,7 +45,7 @@ def _envoy_cc_test_infrastructure_library( ], tags = tags, include_prefix = include_prefix, - alwayslink = 1, + alwayslink = alwayslink, linkstatic = envoy_linkstatic(), **kargs ) @@ -58,7 +59,6 @@ def _envoy_test_linkopts(): "-image_base 100000000", ], "@envoy//bazel:windows_x86_64": [ - "-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:ws2_32.lib", "-DEFAULTLIB:iphlpapi.lib", "-WX", @@ -205,6 +205,7 @@ def envoy_cc_test_library( tags = [], include_prefix = None, copts = [], + alwayslink = 1, **kargs): deps = deps + [ repository + "//test/test_common:printers_includes", @@ -222,6 +223,7 @@ def envoy_cc_test_library( include_prefix, copts, visibility = ["//visibility:public"], + alwayslink = alwayslink, **kargs ) diff --git a/bazel/external/cargo/BUILD b/bazel/external/cargo/BUILD new file mode 100644 index 000000000000..e216296d130d --- /dev/null +++ b/bazel/external/cargo/BUILD @@ -0,0 +1,23 @@ +""" +cargo-raze workspace build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +package(default_visibility = ["//visibility:public"]) + +licenses([ + "notice", # See individual crates for specific licenses +]) + +alias( + name = "log", + actual = "@raze__log__0_4_11//:log", + tags = ["cargo-raze"], +) + +alias( + name = "proxy_wasm", + actual = "@raze__proxy_wasm__0_1_2//:proxy_wasm", + tags = ["cargo-raze"], +) diff --git a/bazel/external/cargo/remote/BUILD b/bazel/external/cargo/remote/BUILD new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/bazel/external/cargo/remote/ahash-0.3.8.BUILD b/bazel/external/cargo/remote/ahash-0.3.8.BUILD new file mode 100644 index 000000000000..a34e9e1685cf --- /dev/null +++ b/bazel/external/cargo/remote/ahash-0.3.8.BUILD @@ -0,0 +1,46 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! 
Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # MIT from expression "MIT OR Apache-2.0" +]) + +# Unsupported target "ahash" with type "bench" omitted + +rust_library( + name = "ahash", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2018", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.3.8", + deps = [ + ], +) + +# Unsupported target "bench" with type "test" omitted +# Unsupported target "map" with type "bench" omitted +# Unsupported target "map_tests" with type "test" omitted +# Unsupported target "nopanic" with type "test" omitted diff --git a/bazel/external/cargo/remote/autocfg-1.0.0.BUILD b/bazel/external/cargo/remote/autocfg-1.0.0.BUILD new file mode 100644 index 000000000000..9f51a3e4cd37 --- /dev/null +++ b/bazel/external/cargo/remote/autocfg-1.0.0.BUILD @@ -0,0 +1,45 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # Apache-2.0 from expression "Apache-2.0 OR MIT" +]) + +rust_library( + name = "autocfg", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "1.0.0", + deps = [ + ], +) + +# Unsupported target "integers" with type "example" omitted +# Unsupported target "paths" with type "example" omitted +# Unsupported target "rustflags" with type "test" omitted +# Unsupported target "traits" with type "example" omitted +# Unsupported target "versions" with type "example" omitted diff --git a/bazel/external/cargo/remote/cfg-if-0.1.10.BUILD b/bazel/external/cargo/remote/cfg-if-0.1.10.BUILD new file mode 100644 index 000000000000..b36c1413e5b0 --- /dev/null +++ b/bazel/external/cargo/remote/cfg-if-0.1.10.BUILD @@ -0,0 +1,41 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. 
+ "//visibility:public", +]) + +licenses([ + "notice", # MIT from expression "MIT OR Apache-2.0" +]) + +rust_library( + name = "cfg_if", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2018", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.1.10", + deps = [ + ], +) + +# Unsupported target "xcrate" with type "test" omitted diff --git a/bazel/external/cargo/remote/hashbrown-0.7.2.BUILD b/bazel/external/cargo/remote/hashbrown-0.7.2.BUILD new file mode 100644 index 000000000000..54276e05010e --- /dev/null +++ b/bazel/external/cargo/remote/hashbrown-0.7.2.BUILD @@ -0,0 +1,50 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # Apache-2.0 from expression "Apache-2.0 OR MIT" +]) + +# Unsupported target "bench" with type "bench" omitted +# Unsupported target "build-script-build" with type "custom-build" omitted + +rust_library( + name = "hashbrown", + srcs = glob(["**/*.rs"]), + crate_features = [ + "ahash", + "inline-more", + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2018", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.7.2", + deps = [ + "@raze__ahash__0_3_8//:ahash", + ], +) + +# Unsupported target "hasher" with type "test" omitted +# Unsupported target "rayon" with type "test" omitted +# Unsupported target "serde" with type "test" omitted +# Unsupported target "set" with type "test" omitted diff --git a/bazel/external/cargo/remote/libc-0.2.74.BUILD b/bazel/external/cargo/remote/libc-0.2.74.BUILD new file mode 100644 index 000000000000..76a2773d1a4c --- /dev/null +++ b/bazel/external/cargo/remote/libc-0.2.74.BUILD @@ -0,0 +1,42 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # MIT from expression "MIT OR Apache-2.0" +]) + +# Unsupported target "build-script-build" with type "custom-build" omitted +# Unsupported target "const_fn" with type "test" omitted + +rust_library( + name = "libc", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.2.74", + deps = [ + ], +) diff --git a/bazel/external/cargo/remote/log-0.4.11.BUILD b/bazel/external/cargo/remote/log-0.4.11.BUILD new file mode 100644 index 000000000000..9596e2448ecc --- /dev/null +++ b/bazel/external/cargo/remote/log-0.4.11.BUILD @@ -0,0 +1,46 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. 
+ # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # MIT from expression "MIT OR Apache-2.0" +]) + +# Unsupported target "build-script-build" with type "custom-build" omitted +# Unsupported target "filters" with type "test" omitted + +rust_library( + name = "log", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + "--cfg=atomic_cas", + ], + tags = ["cargo-raze"], + version = "0.4.11", + deps = [ + "@raze__cfg_if__0_1_10//:cfg_if", + ], +) + +# Unsupported target "macros" with type "test" omitted diff --git a/bazel/external/cargo/remote/memory_units-0.4.0.BUILD b/bazel/external/cargo/remote/memory_units-0.4.0.BUILD new file mode 100644 index 000000000000..c5c3c3987128 --- /dev/null +++ b/bazel/external/cargo/remote/memory_units-0.4.0.BUILD @@ -0,0 +1,39 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "reciprocal", # MPL-2.0 from expression "MPL-2.0" +]) + +rust_library( + name = "memory_units", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.4.0", + deps = [ + ], +) diff --git a/bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD b/bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD new file mode 100644 index 000000000000..2f9895fea7fa --- /dev/null +++ b/bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD @@ -0,0 +1,47 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # Apache-2.0 from expression "Apache-2.0" +]) + +# Unsupported target "hello_world" with type "example" omitted +# Unsupported target "http_auth_random" with type "example" omitted +# Unsupported target "http_body" with type "example" omitted +# Unsupported target "http_headers" with type "example" omitted + +rust_library( + name = "proxy_wasm", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2018", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.1.2", + deps = [ + "@raze__hashbrown__0_7_2//:hashbrown", + "@raze__log__0_4_11//:log", + "@raze__wee_alloc__0_4_5//:wee_alloc", + ], +) diff --git a/bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD b/bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD new file mode 100644 index 000000000000..ab49873603cd --- /dev/null +++ b/bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD @@ -0,0 +1,46 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! 
Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "reciprocal", # MPL-2.0 from expression "MPL-2.0" +]) + +# Unsupported target "build-script-build" with type "custom-build" omitted + +rust_library( + name = "wee_alloc", + srcs = glob(["**/*.rs"]), + crate_features = [ + "default", + "size_classes", + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.4.5", + deps = [ + "@raze__cfg_if__0_1_10//:cfg_if", + "@raze__libc__0_2_74//:libc", + "@raze__memory_units__0_4_0//:memory_units", + ], +) diff --git a/bazel/external/cargo/remote/winapi-0.3.9.BUILD b/bazel/external/cargo/remote/winapi-0.3.9.BUILD new file mode 100644 index 000000000000..2495dd1d900e --- /dev/null +++ b/bazel/external/cargo/remote/winapi-0.3.9.BUILD @@ -0,0 +1,44 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # MIT from expression "MIT OR Apache-2.0" +]) + +# Unsupported target "build-script-build" with type "custom-build" omitted + +rust_library( + name = "winapi", + srcs = glob(["**/*.rs"]), + crate_features = [ + "memoryapi", + "synchapi", + "winbase", + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.3.9", + deps = [ + ], +) diff --git a/bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD b/bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD new file mode 100644 index 000000000000..d6c1545143fe --- /dev/null +++ b/bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD @@ -0,0 +1,41 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. 
+ "//visibility:public", +]) + +licenses([ + "notice", # MIT from expression "MIT OR Apache-2.0" +]) + +# Unsupported target "build-script-build" with type "custom-build" omitted + +rust_library( + name = "winapi_i686_pc_windows_gnu", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.4.0", + deps = [ + ], +) diff --git a/bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD b/bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD new file mode 100644 index 000000000000..e666870dbd05 --- /dev/null +++ b/bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD @@ -0,0 +1,41 @@ +""" +cargo-raze crate build file. + +DO NOT EDIT! Replaced on runs of cargo-raze +""" + +load( + "@io_bazel_rules_rust//rust:rust.bzl", + "rust_library", +) + +package(default_visibility = [ + # Public for visibility by "@raze__crate__version//" targets. + # + # Prefer access through "//bazel/external/cargo", which limits external + # visibility to explicit Cargo.toml dependencies. + "//visibility:public", +]) + +licenses([ + "notice", # MIT from expression "MIT OR Apache-2.0" +]) + +# Unsupported target "build-script-build" with type "custom-build" omitted + +rust_library( + name = "winapi_x86_64_pc_windows_gnu", + srcs = glob(["**/*.rs"]), + crate_features = [ + ], + crate_root = "src/lib.rs", + crate_type = "lib", + edition = "2015", + rustc_flags = [ + "--cap-lints=allow", + ], + tags = ["cargo-raze"], + version = "0.4.0", + deps = [ + ], +) diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch deleted file mode 100644 index fb33ca4475fb..000000000000 --- a/bazel/external/googleurl.patch +++ /dev/null @@ -1,52 +0,0 @@ -# TODO(dio): Consider to remove this patch when we have the ability to compile the project using -# clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974. - -diff --git a/base/compiler_specific.h b/base/compiler_specific.h -index 0cd36dc..8c4cbd4 100644 ---- a/base/compiler_specific.h -+++ b/base/compiler_specific.h -@@ -7,10 +7,6 @@ - - #include "build/build_config.h" - --#if defined(COMPILER_MSVC) && !defined(__clang__) --#error "Only clang-cl is supported on Windows, see https://crbug.com/988071" --#endif -- - // Annotate a variable indicating it's ok if the variable is not used. - // (Typically used to silence a compiler warning when the assignment - // is important for some other reason.) -@@ -55,8 +51,12 @@ - // prevent code folding, see gurl_base::debug::Alias. - // Use like: - // void NOT_TAIL_CALLED FooBar(); --#if defined(__clang__) && __has_attribute(not_tail_called) -+#if defined(__clang__) -+#if defined(__has_attribute) -+#if __has_attribute(not_tail_called) - #define NOT_TAIL_CALLED __attribute__((not_tail_called)) -+#endif -+#endif - #else - #define NOT_TAIL_CALLED - #endif -@@ -226,7 +226,9 @@ - #endif - #endif - --#if defined(__clang__) && __has_attribute(uninitialized) -+#if defined(__clang__) -+#if defined(__has_attribute) -+#if __has_attribute(uninitialized) - // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for - // the specified variable. - // Library-wide alternative is -@@ -257,6 +259,8 @@ - // E.g. platform, bot, benchmark or test name in patch description or next to - // the attribute. 
- #define STACK_UNINITIALIZED __attribute__((uninitialized)) -+#endif -+#endif - #else - #define STACK_UNINITIALIZED - #endif diff --git a/bazel/external/icuuc.BUILD b/bazel/external/icuuc.BUILD deleted file mode 100644 index 305d0db952b1..000000000000 --- a/bazel/external/icuuc.BUILD +++ /dev/null @@ -1,55 +0,0 @@ -load("@rules_cc//cc:defs.bzl", "cc_library") - -licenses(["notice"]) # Apache 2 - -exports_files(["LICENSE"]) - -icuuc_copts = [ - "-DU_STATIC_IMPLEMENTATION", - "-DU_COMMON_IMPLEMENTATION", - "-DU_HAVE_STD_ATOMICS", -] + select({ - "@envoy//bazel:apple": [ - "-Wno-shorten-64-to-32", - "-Wno-unused-variable", - ], - "@envoy//bazel:windows_x86_64": [ - "/utf-8", - "/DLOCALE_ALLOW_NEUTRAL_NAMES=0", - ], - # TODO(dio): Add "@envoy//bazel:android" when we have it. - # "@envoy//bazel:android": [ - # "-fdata-sections", - # "-DU_HAVE_NL_LANGINFO_CODESET=0", - # "-Wno-deprecated-declarations", - # ], - "//conditions:default": [], -}) - -cc_library( - name = "headers", - hdrs = glob(["source/common/unicode/*.h"]), - includes = ["source/common"], - visibility = ["//visibility:public"], -) - -cc_library( - name = "common", - hdrs = glob(["source/common/unicode/*.h"]), - includes = ["source/common"], - visibility = ["//visibility:public"], - deps = [":icuuc"], -) - -cc_library( - name = "icuuc", - srcs = glob([ - "source/common/*.c", - "source/common/*.cpp", - "source/stubdata/*.cpp", - ]), - hdrs = glob(["source/common/*.h"]), - copts = icuuc_copts, - visibility = ["//visibility:private"], - deps = [":headers"], -) diff --git a/bazel/external/proxy_wasm_cpp_host.BUILD b/bazel/external/proxy_wasm_cpp_host.BUILD index 4cb87cf98ec1..1b3f0829d7b2 100644 --- a/bazel/external/proxy_wasm_cpp_host.BUILD +++ b/bazel/external/proxy_wasm_cpp_host.BUILD @@ -1,4 +1,10 @@ load("@rules_cc//cc:defs.bzl", "cc_library") +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_select_wasm_all_v8_wavm_none", + "envoy_select_wasm_v8", + "envoy_select_wasm_wavm", +) licenses(["notice"]) # Apache 2 @@ -14,14 +20,44 @@ cc_library( cc_library( name = "lib", - srcs = glob( - [ - "src/**/*.h", - "src/**/*.cc", - ], - exclude = ["src/**/wavm*"], + # Note that the select cannot appear in the glob. + srcs = envoy_select_wasm_all_v8_wavm_none( + glob( + [ + "src/**/*.h", + "src/**/*.cc", + ], + ), + glob( + [ + "src/**/*.h", + "src/**/*.cc", + ], + exclude = ["src/wavm/*"], + ), + glob( + [ + "src/**/*.h", + "src/**/*.cc", + ], + exclude = ["src/v8/*"], + ), + glob( + [ + "src/**/*.h", + "src/**/*.cc", + ], + exclude = [ + "src/wavm/*", + "src/v8/*", + ], + ), ), - copts = ["-std=c++14"], + copts = envoy_select_wasm_wavm([ + '-DWAVM_API=""', + "-Wno-non-virtual-dtor", + "-Wno-old-style-cast", + ]), deps = [ ":include", "//external:abseil_flat_hash_map", @@ -29,9 +65,12 @@ cc_library( "//external:abseil_strings", "//external:protobuf", "//external:ssl", - "//external:wee8", "//external:zlib", "@proxy_wasm_cpp_sdk//:api_lib", "@proxy_wasm_cpp_sdk//:common_lib", - ], + ] + envoy_select_wasm_wavm([ + "@envoy//bazel/foreign_cc:wavm", + ]) + envoy_select_wasm_v8([ + "//external:wee8", + ]), ) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index b641e9d59e84..7541909aa191 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -53,23 +53,21 @@ genrule( # These options are only used to suppress errors in brought-in QUICHE tests. # Use #pragma GCC diagnostic ignored in integration code to suppress these errors. 
+quiche_common_copts = [ + "-Wno-unused-function", + # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. + "-Wno-invalid-offsetof", + "-Wno-range-loop-analysis", +] + quiche_copts = select({ - "@envoy//bazel:windows_x86_64": [], - "//conditions:default": [ - # Remove these after upstream fix. - "-Wno-unused-parameter", - "-Wno-unused-function", - "-Wno-return-type", - "-Wno-unknown-warning-option", - "-Wno-deprecated-copy", - "-Wno-ignored-qualifiers", - "-Wno-sign-compare", - "-Wno-inconsistent-missing-override", - # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. - "-Wno-invalid-offsetof", - # to suppress errors re: size_t vs. int comparisons + # Ignore unguarded #pragma GCC statements in QUICHE sources + "@envoy//bazel:windows_x86_64": ["-wd4068"], + # Remove these after upstream fix. + "@envoy//bazel:gcc_build": [ "-Wno-sign-compare", - ], + ] + quiche_common_copts, + "//conditions:default": quiche_common_copts, }) test_suite( @@ -2247,6 +2245,12 @@ envoy_cc_library( "quiche/quic/core/frames/quic_window_update_frame.h", ], copts = quiche_copts, + # TODO: Work around initializer in anonymous union in fastbuild build. + # Remove this after upstream fix. + defines = select({ + "@envoy//bazel:windows_x86_64": ["QUIC_FRAME_DEBUG=0"], + "//conditions:default": [], + }), repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], @@ -3534,6 +3538,23 @@ envoy_cc_test_library( ], ) +envoy_cc_library( + name = "quic_test_tools_flow_controller_peer_lib", + srcs = [ + "quiche/quic/test_tools/quic_flow_controller_peer.cc", + ], + hdrs = [ + "quiche/quic/test_tools/quic_flow_controller_peer.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_packets_lib", + ":quic_core_session_lib", + ], +) + envoy_cc_test_library( name = "quic_test_tools_framer_peer_lib", srcs = ["quiche/quic/test_tools/quic_framer_peer.cc"], @@ -3667,6 +3688,7 @@ envoy_cc_test_library( ":quic_core_session_lib", ":quic_core_stream_send_buffer_lib", ":quic_platform_base", + ":quic_test_tools_flow_controller_peer_lib", ":quic_test_tools_stream_send_buffer_peer_lib", ], ) @@ -3831,7 +3853,6 @@ envoy_cc_library( hdrs = [ "quiche/common/platform/api/quiche_arraysize.h", "quiche/common/platform/api/quiche_logging.h", - "quiche/common/platform/api/quiche_map_util.h", "quiche/common/platform/api/quiche_optional.h", "quiche/common/platform/api/quiche_ptr_util.h", "quiche/common/platform/api/quiche_str_cat.h", diff --git a/bazel/external/wee8.BUILD b/bazel/external/wee8.BUILD index b61f95748672..3a62ecd9ebf4 100644 --- a/bazel/external/wee8.BUILD +++ b/bazel/external/wee8.BUILD @@ -13,6 +13,10 @@ cc_library( "wee8/include/v8-version.h", "wee8/third_party/wasm-api/wasm.hh", ], + copts = [ + "-Wno-range-loop-analysis", + ], + defines = ["ENVOY_WASM_V8"], includes = [ "wee8/include", "wee8/third_party", diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd index 8cb0e24c5f49..d8cbd1981a64 100644 --- a/bazel/external/wee8.genrule_cmd +++ b/bazel/external/wee8.genrule_cmd @@ -19,7 +19,7 @@ pushd $$ROOT/wee8 rm -rf out/wee8 # Export compiler configuration. 
-export CXXFLAGS="$${CXXFLAGS-} -Wno-deprecated-copy -Wno-unknown-warning-option" +export CXXFLAGS="$${CXXFLAGS-} -Wno-sign-compare -Wno-deprecated-copy -Wno-unknown-warning-option -Wno-range-loop-analysis" if [[ ( `uname` == "Darwin" && $${CXX-} == "" ) || $${CXX-} == *"clang"* ]]; then export IS_CLANG=true export CC=$${CC:-clang} diff --git a/bazel/external/wee8.patch b/bazel/external/wee8.patch index ad1c20b6c00b..cce3eecde614 100644 --- a/bazel/external/wee8.patch +++ b/bazel/external/wee8.patch @@ -34,7 +34,7 @@ #endif --- wee8/build/config/sanitizers/sanitizers.gni +++ wee8/build/config/sanitizers/sanitizers.gni -@@ -147,7 +147,7 @@ if (!is_a_target_toolchain) { +@@ -150,7 +150,7 @@ if (!is_a_target_toolchain) { # standard system libraries. We have instrumented system libraries for msan, # which requires them to prevent false positives. # TODO(thakis): Maybe remove this variable. @@ -43,7 +43,7 @@ # Whether we are doing a fuzzer build. Normally this should be checked instead # of checking "use_libfuzzer || use_afl" because often developers forget to -@@ -195,8 +195,7 @@ assert(!using_sanitizer || is_clang, +@@ -198,8 +198,7 @@ assert(!using_sanitizer || is_clang, assert(!is_cfi || is_clang, "is_cfi requires setting is_clang = true in 'gn args'") diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 6357444b4d0b..c2a214747107 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -67,16 +67,13 @@ configure_make( # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, - "//bazel:windows_dbg_build": {"WINDOWS_DBG_BUILD": "debug"}, "//conditions:default": {}, }), lib_source = "@com_github_moonjit_moonjit//:all", make_commands = [], out_include_dir = "include/moonjit-2.2", - static_libraries = select({ - "//bazel:windows_x86_64": ["lua51.lib"], - "//conditions:default": ["libluajit-5.1.a"], - }), + static_libraries = ["libluajit-5.1.a"], + tags = ["skip_on_windows"], ) envoy_cmake_external( @@ -115,8 +112,9 @@ envoy_cmake_external( "CMAKE_USE_GSSAPI": "off", "HTTP_ONLY": "on", "CMAKE_INSTALL_LIBDIR": "lib", - # Explicitly enable Unix sockets and disable crypto for Windows - "USE_UNIX_SOCKETS": "on", + # Explicitly enable Unix sockets, once afunix.h is correctly detected + # "USE_UNIX_SOCKETS": "on", + # Explicitly disable "Windows" crypto for Windows "CURL_DISABLE_CRYPTO_AUTH": "on", # C-Ares. "ENABLE_ARES": "on", @@ -137,7 +135,6 @@ envoy_cmake_external( "ZLIB_LIBRARY": "$EXT_BUILD_DEPS/zlib", "ZLIB_INCLUDE_DIR": "$EXT_BUILD_DEPS/zlib/include", "CMAKE_CXX_COMPILER_FORCED": "on", - "CMAKE_C_FLAGS_BAZEL": "-fPIC", }, defines = ["CURL_STATICLIB"], generate_crosstool_file = True, @@ -149,8 +146,8 @@ envoy_cmake_external( deps = [ ":ares", ":nghttp2", - ":zlib", "//external:ssl", + "//external:zlib", ], ) @@ -192,6 +189,109 @@ envoy_cmake_external( }), ) +envoy_cmake_external( + name = "llvm", + cache_entries = { + # Disable both: BUILD and INCLUDE, since some of the INCLUDE + # targets build code instead of only generating build files. 
+ "LLVM_BUILD_DOCS": "off", + "LLVM_INCLUDE_DOCS": "off", + "LLVM_BUILD_EXAMPLES": "off", + "LLVM_INCLUDE_EXAMPLES": "off", + "LLVM_BUILD_RUNTIME": "off", + "LLVM_BUILD_RUNTIMES": "off", + "LLVM_INCLUDE_RUNTIMES": "off", + "LLVM_BUILD_TESTS": "off", + "LLVM_INCLUDE_TESTS": "off", + "LLVM_BUILD_TOOLS": "off", + "LLVM_INCLUDE_TOOLS": "off", + "LLVM_BUILD_UTILS": "off", + "LLVM_INCLUDE_UTILS": "off", + "LLVM_ENABLE_LIBEDIT": "off", + "LLVM_ENABLE_LIBXML2": "off", + "LLVM_ENABLE_TERMINFO": "off", + "LLVM_ENABLE_ZLIB": "off", + "LLVM_TARGETS_TO_BUILD": "X86", + "CMAKE_CXX_COMPILER_FORCED": "on", + # Workaround for the issue with statically linked libstdc++ + # using -l:libstdc++.a. + "CMAKE_CXX_FLAGS": "-lstdc++", + }, + env_vars = { + # Workaround for the -DDEBUG flag added in fastbuild on macOS, + # which conflicts with DEBUG macro used in LLVM. + "CFLAGS": "-UDEBUG", + "CXXFLAGS": "-UDEBUG", + "ASMFLAGS": "-UDEBUG", + }, + lib_source = "@org_llvm_llvm//:all", + static_libraries = select({ + "//conditions:default": [ + # Order from llvm-config --libnames. + "libLLVMLTO.a", + "libLLVMPasses.a", + "libLLVMObjCARCOpts.a", + "libLLVMSymbolize.a", + "libLLVMDebugInfoPDB.a", + "libLLVMDebugInfoDWARF.a", + "libLLVMFuzzMutate.a", + "libLLVMTableGen.a", + "libLLVMDlltoolDriver.a", + "libLLVMLineEditor.a", + "libLLVMOrcJIT.a", + "libLLVMCoverage.a", + "libLLVMMIRParser.a", + "libLLVMObjectYAML.a", + "libLLVMLibDriver.a", + "libLLVMOption.a", + "libLLVMWindowsManifest.a", + "libLLVMX86Disassembler.a", + "libLLVMX86AsmParser.a", + "libLLVMX86CodeGen.a", + "libLLVMGlobalISel.a", + "libLLVMSelectionDAG.a", + "libLLVMAsmPrinter.a", + "libLLVMDebugInfoCodeView.a", + "libLLVMDebugInfoMSF.a", + "libLLVMX86Desc.a", + "libLLVMMCDisassembler.a", + "libLLVMX86Info.a", + "libLLVMX86Utils.a", + "libLLVMMCJIT.a", + "libLLVMInterpreter.a", + "libLLVMExecutionEngine.a", + "libLLVMRuntimeDyld.a", + "libLLVMCodeGen.a", + "libLLVMTarget.a", + "libLLVMCoroutines.a", + "libLLVMipo.a", + "libLLVMInstrumentation.a", + "libLLVMVectorize.a", + "libLLVMScalarOpts.a", + "libLLVMLinker.a", + "libLLVMIRReader.a", + "libLLVMAsmParser.a", + "libLLVMInstCombine.a", + "libLLVMTransformUtils.a", + "libLLVMBitWriter.a", + "libLLVMAnalysis.a", + "libLLVMProfileData.a", + "libLLVMObject.a", + "libLLVMMCParser.a", + "libLLVMMC.a", + "libLLVMBitReader.a", + "libLLVMBitstreamReader.a", + "libLLVMCore.a", + "libLLVMBinaryFormat.a", + "libLLVMSupport.a", + "libLLVMDemangle.a", + "libLLVMRemarks.a", + "libLLVMCFGuard.a", + "libLLVMTextAPI.a", + ], + }), +) + envoy_cmake_external( name = "nghttp2", cache_entries = { @@ -211,10 +311,38 @@ envoy_cmake_external( }), ) +envoy_cmake_external( + name = "wavm", + binaries = ["wavm"], + cache_entries = { + "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm", + "WAVM_ENABLE_STATIC_LINKING": "on", + "WAVM_ENABLE_RELEASE_ASSERTS": "on", + "WAVM_ENABLE_UNWIND": "no", + # Workaround for the issue with statically linked libstdc++ + # using -l:libstdc++.a. + "CMAKE_CXX_FLAGS": "-lstdc++ -Wno-unused-command-line-argument", + }, + defines = ["ENVOY_WASM_WAVM"], + env_vars = { + # Workaround for the -DDEBUG flag added in fastbuild on macOS, + # which conflicts with DEBUG macro used in LLVM. 
+ "CFLAGS": "-UDEBUG", + "CXXFLAGS": "-UDEBUG", + "ASMFLAGS": "-UDEBUG", + }, + lib_source = "@com_github_wavm_wavm//:all", + static_libraries = select({ + "//conditions:default": [ + "libWAVM.a", + ], + }), + deps = [":llvm"], +) + envoy_cmake_external( name = "zlib", cache_entries = { - "BUILD_SHARED_LIBS": "off", "CMAKE_CXX_COMPILER_FORCED": "on", "CMAKE_C_COMPILER_FORCED": "on", "SKIP_BUILD_EXAMPLES": "on", diff --git a/bazel/foreign_cc/curl-revert-cmake-minreqver.patch b/bazel/foreign_cc/curl-revert-cmake-minreqver.patch deleted file mode 100644 index 78ba60fdb34b..000000000000 --- a/bazel/foreign_cc/curl-revert-cmake-minreqver.patch +++ /dev/null @@ -1,17 +0,0 @@ -# Curl 7.69.1 introduces a range-bound cmake revisions between 3.0 and 3.16 -# but this causes the Win32 build to be broken (and is unwise as cmake -# has already released 3.17) -diff --git a/CMakeLists.txt b/CMakeLists.txt -index b13616fc7..8b6d77542 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -38,8 +38,7 @@ - # To check: - # (From Daniel Stenberg) The cmake build selected to run gcc with -fPIC on my box while the plain configure script did not. - # (From Daniel Stenberg) The gcc command line use neither -g nor any -O options. As a developer, I also treasure our configure scripts's --enable-debug option that sets a long range of "picky" compiler options. --cmake_minimum_required(VERSION 3.0...3.16 FATAL_ERROR) -- -+cmake_minimum_required(VERSION 3.0 FATAL_ERROR) - set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}") - include(Utilities) - include(Macros) diff --git a/bazel/foreign_cc/curl.patch b/bazel/foreign_cc/curl.patch new file mode 100644 index 000000000000..7c2a7bc129e0 --- /dev/null +++ b/bazel/foreign_cc/curl.patch @@ -0,0 +1,29 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index ec1cfa782..0c5a72f00 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -42,0 +42,5 @@ ++# revert CMake bug triggered by curl's defined max CMake policy version, see https://gitlab.kitware.com/cmake/cmake/-/issues/21288 ++if(POLICY CMP0091) ++ cmake_policy(SET CMP0091 OLD) ++endif() ++ +@@ -249,3 +254,6 @@ +- set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") +- set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /MT") +- set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /MTd") ++ foreach(build_suffix "" _DEBUG _RELEASE _MINSIZEREL _RELWITHDEBINFO) ++ set(flags_var CMAKE_C_FLAGS${build_suffix}) ++ if("${${flags_var}}" MATCHES "/MD") ++ string(REGEX REPLACE "/MD" "/MT" ${flags_var} "${${flags_var}}") ++ endif() ++ endforeach() +diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt +index 911c9096d..ba6af1bf1 100644 +--- a/lib/CMakeLists.txt ++++ b/lib/CMakeLists.txt +@@ -91,4 +91,0 @@ add_library( +-if(MSVC AND NOT BUILD_SHARED_LIBS) +- set_target_properties(${LIB_NAME} PROPERTIES STATIC_LIBRARY_FLAGS ${CMAKE_EXE_LINKER_FLAGS}) +-endif() +- diff --git a/bazel/foreign_cc/llvm.patch b/bazel/foreign_cc/llvm.patch new file mode 100644 index 000000000000..cd02f2842401 --- /dev/null +++ b/bazel/foreign_cc/llvm.patch @@ -0,0 +1,25 @@ +# Workaround for Envoy's CMAKE_BUILD_TYPE=Bazel. 
+--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -247,7 +247,7 @@ + string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE) + + if (CMAKE_BUILD_TYPE AND +- NOT uppercase_CMAKE_BUILD_TYPE MATCHES "^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL)$") ++ NOT uppercase_CMAKE_BUILD_TYPE MATCHES "^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL|BAZEL)$") + message(FATAL_ERROR "Invalid value for CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") + endif() + +# Workaround for a missing -fuse-ld flag in CXXFLAGS, which results in +# different linkers being used during configure and compilation phases. +--- a/cmake/modules/HandleLLVMOptions.cmake ++++ b/cmake/modules/HandleLLVMOptions.cmake +@@ -718,8 +718,6 @@ endif() + if (UNIX AND CMAKE_GENERATOR STREQUAL "Ninja") + include(CheckLinkerFlag) + check_linker_flag("-Wl,--color-diagnostics" LINKER_SUPPORTS_COLOR_DIAGNOSTICS) +- append_if(LINKER_SUPPORTS_COLOR_DIAGNOSTICS "-Wl,--color-diagnostics" +- CMAKE_EXE_LINKER_FLAGS CMAKE_MODULE_LINKER_FLAGS CMAKE_SHARED_LINKER_FLAGS) + endif() + + # Add flags for add_dead_strip(). diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch index b454b7dfd149..c0fb0da819fd 100644 --- a/bazel/foreign_cc/luajit.patch +++ b/bazel/foreign_cc/luajit.patch @@ -1,5 +1,5 @@ diff --git a/src/Makefile b/src/Makefile -index f56465d..5d91fa7 100644 +index e65b55e..f0a61dd 100644 --- a/src/Makefile +++ b/src/Makefile @@ -27,7 +27,7 @@ NODOTABIVER= 51 @@ -33,96 +33,96 @@ index f56465d..5d91fa7 100644 # # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. #XCFLAGS+= -DLUAJIT_DISABLE_JIT -@@ -111,7 +111,7 @@ XCFLAGS= - #XCFLAGS+= -DLUAJIT_NUMMODE=2 - # - # Enable GC64 mode for x64. --#XCFLAGS+= -DLUAJIT_ENABLE_GC64 -+XCFLAGS+= -DLUAJIT_ENABLE_GC64 - # - ############################################################################## - -@@ -587,7 +587,7 @@ endif - +@@ -591,7 +591,7 @@ endif + Q= @ E= @echo -#Q= +Q= #E= @: - - ############################################################################## -EOF ---- a/src/msvcbuild.bat 2020-08-13 18:42:05.667354300 +0000 -+++ b/src/msvcbuild.bat 2020-08-13 19:03:25.092297900 +0000 -@@ -14,7 +14,7 @@ - @if not defined INCLUDE goto :FAIL - @setlocal --@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline -+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT - @set LJLINK=link /nologo - @set LJMT=mt /nologo - @set LJLIB=lib /nologo /nodefaultlib -@@ -25,7 +25,7 @@ - @set LJLIBNAME=lua51.lib - @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c - --%LJCOMPILE% host\minilua.c -+%LJCOMPILE% /O2 host\minilua.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:minilua.exe minilua.obj - @if errorlevel 1 goto :BAD -@@ -48,7 +48,7 @@ - minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% - @if errorlevel 1 goto :BAD - --%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c -+%LJCOMPILE% /O2 /I "." 
/I %DASMDIR% host\buildvm*.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:buildvm.exe buildvm*.obj - @if errorlevel 1 goto :BAD -@@ -72,24 +72,35 @@ - - @if "%1" neq "debug" goto :NODEBUG - @shift --@set LJCOMPILE=%LJCOMPILE% /Zi -+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 - @set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no -+@set LJCRTDBG=d -+@goto :ENDDEBUG - :NODEBUG -+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 -+@set LJLINK=%LJLINK% /release /incremental:no -+@set LJCRTDBG= -+:ENDDEBUG - @if "%1"=="amalg" goto :AMALGDLL - @if "%1"=="static" goto :STATIC --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :STATIC -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% - %LJCOMPILE% lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :AMALGDLL --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj - @if errorlevel 1 goto :BAD + ############################################################################## +diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat +index ae035dc..0e7eac9 100644 +--- a/src/msvcbuild.bat ++++ b/src/msvcbuild.bat +@@ -13,9 +13,7 @@ + @if not defined INCLUDE goto :FAIL + + @setlocal +-@rem Add more debug flags here, e.g. DEBUGCFLAGS=/DLUA_USE_APICHECK +-@set DEBUGCFLAGS= +-@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline ++@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT + @set LJLINK=link /nologo + @set LJMT=mt /nologo + @set LJLIB=lib /nologo /nodefaultlib +@@ -24,10 +22,9 @@ + @set DASC=vm_x64.dasc + @set LJDLLNAME=lua51.dll + @set LJLIBNAME=lua51.lib +-@set BUILDTYPE=release + @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c + +-%LJCOMPILE% host\minilua.c ++%LJCOMPILE% /O2 host\minilua.c + @if errorlevel 1 goto :BAD + %LJLINK% /out:minilua.exe minilua.obj + @if errorlevel 1 goto :BAD +@@ -51,7 +48,7 @@ if exist minilua.exe.manifest^ + minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% + @if errorlevel 1 goto :BAD + +-%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c ++%LJCOMPILE% /O2 /I "." 
/I %DASMDIR% host\buildvm*.c + @if errorlevel 1 goto :BAD + %LJLINK% /out:buildvm.exe buildvm*.obj + @if errorlevel 1 goto :BAD +@@ -75,26 +72,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c + + @if "%1" neq "debug" goto :NODEBUG + @shift +-@set BUILDTYPE=debug +-@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS% +-@set LJLINK=%LJLINK% /opt:ref /opt:icf /incremental:no ++@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 ++@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no ++@set LJCRTDBG=d ++@goto :ENDDEBUG + :NODEBUG +-@set LJLINK=%LJLINK% /%BUILDTYPE% ++@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 ++@set LJLINK=%LJLINK% /release /incremental:no ++@set LJCRTDBG= ++:ENDDEBUG + @if "%1"=="amalg" goto :AMALGDLL + @if "%1"=="static" goto :STATIC +-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c ++@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% ++%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c + @if errorlevel 1 goto :BAD + %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj + @if errorlevel 1 goto :BAD + @goto :MTDLL + :STATIC ++@shift ++@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% + %LJCOMPILE% lj_*.c lib_*.c + @if errorlevel 1 goto :BAD + %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj + @if errorlevel 1 goto :BAD + @goto :MTDLL + :AMALGDLL +-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c ++@shift ++@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% ++%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c + @if errorlevel 1 goto :BAD + %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj + @if errorlevel 1 goto :BAD diff --git a/build.py b/build.py new file mode 100755 -index 0000000..9c71271 +index 0000000..3eb74ff --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ @@ -168,7 +168,7 @@ index 0000000..9c71271 + dst_dir = os.getcwd() + "/luajit" + shutil.copytree(src_dir, os.path.basename(src_dir)) + os.chdir(os.path.basename(src_dir) + "/src") -+ os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') ++ os.system('msvcbuild.bat ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') + os.makedirs(dst_dir + "/lib", exist_ok=True) + shutil.copy("lua51.lib", dst_dir + "/lib") + os.makedirs(dst_dir + "/include/luajit-2.1", exist_ok=True) diff --git a/bazel/foreign_cc/moonjit.patch b/bazel/foreign_cc/moonjit.patch index 99ac22fb04fe..5bb745875132 100644 --- a/bazel/foreign_cc/moonjit.patch +++ b/bazel/foreign_cc/moonjit.patch @@ -3,7 +3,7 @@ new file mode 100644 index 00000000..dab3606c --- /dev/null +++ b/build.py -@@ -0,0 +1,56 @@ +@@ -0,0 +1,39 @@ +#!/usr/bin/env python3 + +import argparse @@ -41,24 +41,7 @@ index 00000000..dab3606c + + os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + -+def win_main(): -+ src_dir = os.path.dirname(os.path.realpath(__file__)) -+ dst_dir = os.getcwd() + "/moonjit" -+ shutil.copytree(src_dir, os.path.basename(src_dir)) -+ os.chdir(os.path.basename(src_dir) + "/src") -+ os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') -+ os.makedirs(dst_dir + "/lib", exist_ok=True) -+ shutil.copy("lua51.lib", dst_dir + "/lib") -+ os.makedirs(dst_dir + "/include/moonjit-2.2", exist_ok=True) -+ for header in ["lauxlib.h", "luaconf.h", "lua.h", "lua.hpp", "luajit.h", "lualib.h"]: -+ shutil.copy(header, dst_dir + "/include/moonjit-2.2") -+ os.makedirs(dst_dir + "/bin", exist_ok=True) -+ shutil.copy("luajit.exe", dst_dir + "/bin") -+ -+if os.name == 'nt': -+ win_main() -+else: -+ main() ++main() + diff --git a/src/Makefile b/src/Makefile index dad9aeec..e10b3118 100644 @@ -104,78 +87,3 @@ index dad9aeec..e10b3118 100644 #E= @: 
############################################################################## -diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat -index c2d2c212..71f24422 100644 ---- a/src/msvcbuild.bat -+++ b/src/msvcbuild.bat -@@ -15,7 +15,7 @@ - @setlocal - @rem Add more debug flags here, e.g. DEBUGCFLAGS=/DLUA_USE_APICHECK - @set DEBUGCFLAGS= --@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline -+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT - @set LJLINK=link /nologo - @set LJMT=mt /nologo - @set LJLIB=lib /nologo /nodefaultlib -@@ -24,10 +24,9 @@ - @set DASC=vm_x86.dasc - @set LJDLLNAME=lua51.dll - @set LJLIBNAME=lua51.lib --@set BUILDTYPE=release - @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_utf8.c - --%LJCOMPILE% host\minilua.c -+%LJCOMPILE% /O2 host\minilua.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:minilua.exe minilua.obj - @if errorlevel 1 goto :BAD -@@ -50,7 +49,7 @@ if exist minilua.exe.manifest^ - minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% - @if errorlevel 1 goto :BAD - --%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c -+%LJCOMPILE% /O2 /I "." /I %DASMDIR% host\buildvm*.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:buildvm.exe buildvm*.obj - @if errorlevel 1 goto :BAD -@@ -74,25 +73,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c - - @if "%1" neq "debug" goto :NODEBUG - @shift --@set BUILDTYPE=debug --@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS% -+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 -+@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no -+@set LJCRTDBG=d -+@goto :ENDDEBUG - :NODEBUG --@set LJLINK=%LJLINK% /%BUILDTYPE% -+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 -+@set LJLINK=%LJLINK% /release /incremental:no -+@set LJCRTDBG= -+:ENDDEBUG - @if "%1"=="amalg" goto :AMALGDLL - @if "%1"=="static" goto :STATIC --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :STATIC -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% - %LJCOMPILE% lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :AMALGDLL --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj - @if errorlevel 1 goto :BAD diff --git a/bazel/gen_sh_test_runner.sh b/bazel/gen_sh_test_runner.sh index 8e33707db49b..5665ce914814 100755 --- a/bazel/gen_sh_test_runner.sh +++ b/bazel/gen_sh_test_runner.sh @@ -14,7 +14,7 @@ TEST_NAME="${RAW_TEST_NAME//./_}" EXEC_ARGS="\"$1\"" shift -for a in $@ +for a in "$@" do EXEC_ARGS="${EXEC_ARGS}, \"$a\"" done diff --git a/bazel/get_workspace_status b/bazel/get_workspace_status index 82bb7593c7f3..4cfce22fd2d2 100755 --- a/bazel/get_workspace_status +++ b/bazel/get_workspace_status @@ -29,21 +29,14 @@ then fi # The code below presents an implementation that works for git repository -git_rev=$(git rev-parse HEAD) -if [[ $? 
!= 0 ]]; -then - exit 1 -fi +git_rev=$(git rev-parse HEAD) || exit 1 echo "BUILD_SCM_REVISION ${git_rev}" echo "STABLE_BUILD_SCM_REVISION ${git_rev}" # Check whether there are any uncommitted changes -git diff-index --quiet HEAD -- -if [[ $? == 0 ]]; -then - tree_status="Clean" -else +tree_status="Clean" +git diff-index --quiet HEAD -- || { tree_status="Modified" -fi +} echo "BUILD_SCM_STATUS ${tree_status}" echo "STABLE_BUILD_SCM_STATUS ${tree_status}" diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index ce30752e1e94..5047a52141f0 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,9 +1,10 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":dev_binding.bzl", "envoy_dev_binding") load(":genrule_repository.bzl", "genrule_repository") load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive") -load(":repository_locations.bzl", "DEPENDENCY_ANNOTATIONS", "DEPENDENCY_REPOSITORIES", "USE_CATEGORIES", "USE_CATEGORIES_WITH_CPE_OPTIONAL") +load("@envoy_api//bazel:external_deps.bzl", "load_repository_locations") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") +load(":crates.bzl", "raze_fetch_remote_crates") PPC_SKIP_TARGETS = ["envoy.filters.http.lua"] @@ -16,70 +17,29 @@ WINDOWS_SKIP_TARGETS = [ # Make all contents of an external repository accessible under a filegroup. Used for external HTTP # archives, e.g. cares. -BUILD_ALL_CONTENT = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])""" - -def _fail_missing_attribute(attr, key): - fail("The '%s' attribute must be defined for external dependecy " % attr + key) - -# Method for verifying content of the DEPENDENCY_REPOSITORIES defined in bazel/repository_locations.bzl -# Verification is here so that bazel/repository_locations.bzl can be loaded into other tools written in Python, -# and as such needs to be free of bazel specific constructs. -# -# We also remove the attributes for further consumption in this file, since rules such as http_archive -# don't recognize them. -def _repository_locations(): - locations = {} - for key, location in DEPENDENCY_REPOSITORIES.items(): - mutable_location = dict(location) - locations[key] = mutable_location - - if "sha256" not in location or len(location["sha256"]) == 0: - _fail_missing_attribute("sha256", key) - - if "project_name" not in location: - _fail_missing_attribute("project_name", key) - mutable_location.pop("project_name") - - if "project_url" not in location: - _fail_missing_attribute("project_url", key) - mutable_location.pop("project_url") - - if "version" not in location: - _fail_missing_attribute("version", key) - mutable_location.pop("version") - - if "use_category" not in location: - _fail_missing_attribute("use_category", key) - mutable_location.pop("use_category") - - if "cpe" in location: - mutable_location.pop("cpe") - elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]: - _fail_missing_attribute("cpe", key) - - for category in location["use_category"]: - if category not in USE_CATEGORIES: - fail("Unknown use_category value '" + category + "' for dependecy " + key) - - return locations - -REPOSITORY_LOCATIONS = _repository_locations() - -# To initialize http_archive REPOSITORY_LOCATIONS dictionaries must be stripped of annotations. -# See repository_locations.bzl for the list of annotation attributes. 
-def _get_location(dependency): - stripped = dict(REPOSITORY_LOCATIONS[dependency]) - for attribute in DEPENDENCY_ANNOTATIONS: - stripped.pop(attribute, None) - return stripped - -def _repository_impl(name, **kwargs): +def _build_all_content(exclude = []): + return """filegroup(name = "all", srcs = glob(["**"], exclude={}), visibility = ["//visibility:public"])""".format(repr(exclude)) + +BUILD_ALL_CONTENT = _build_all_content() + +REPOSITORY_LOCATIONS = load_repository_locations(REPOSITORY_LOCATIONS_SPEC) + +# Use this macro to reference any HTTP archive from bazel/repository_locations.bzl. +def external_http_archive(name, **kwargs): envoy_http_archive( name, locations = REPOSITORY_LOCATIONS, **kwargs ) +# Use this macro to reference any genrule_repository sourced from bazel/repository_locations.bzl. +def external_genrule_repository(name, **kwargs): + location = REPOSITORY_LOCATIONS[name] + genrule_repository( + name = name, + **dict(location, **kwargs) + ) + def _default_envoy_build_config_impl(ctx): ctx.file("WORKSPACE", "") ctx.file("BUILD.bazel", "") @@ -95,26 +55,26 @@ _default_envoy_build_config = repository_rule( # Python dependencies. def _python_deps(): # TODO(htuch): convert these to pip3_import. - _repository_impl( + external_http_archive( name = "com_github_twitter_common_lang", build_file = "@envoy//bazel/external:twitter_common_lang.BUILD", ) - _repository_impl( + external_http_archive( name = "com_github_twitter_common_rpc", build_file = "@envoy//bazel/external:twitter_common_rpc.BUILD", ) - _repository_impl( + external_http_archive( name = "com_github_twitter_common_finagle_thrift", build_file = "@envoy//bazel/external:twitter_common_finagle_thrift.BUILD", ) - _repository_impl( + external_http_archive( name = "six", build_file = "@com_google_protobuf//third_party:six.BUILD", ) # Bazel native C++ dependencies. For the dependencies that doesn't provide autoconf/automake builds. def _cc_deps(): - _repository_impl("grpc_httpjson_transcoding") + external_http_archive("grpc_httpjson_transcoding") native.bind( name = "path_matcher", actual = "@grpc_httpjson_transcoding//src:path_matcher", @@ -128,13 +88,17 @@ def _go_deps(skip_targets): # Keep the skip_targets check around until Istio Proxy has stopped using # it to exclude the Go rules. if "io_bazel_rules_go" not in skip_targets: - _repository_impl( + external_http_archive( name = "io_bazel_rules_go", # TODO(wrowe, sunjayBhatia): remove when Windows RBE supports batch file invocation patch_args = ["-p1"], patches = ["@envoy//bazel:rules_go.patch"], ) - _repository_impl("bazel_gazelle") + external_http_archive("bazel_gazelle") + +def _rust_deps(): + external_http_archive("io_bazel_rules_rust") + raze_fetch_remote_crates() def envoy_dependencies(skip_targets = []): # Setup Envoy developer tools. 
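The hunk above routes all plain HTTP archives through external_http_archive, which forwards to envoy_http_archive from @envoy_api//bazel:envoy_http_archive.bzl together with the pre-validated REPOSITORY_LOCATIONS dict. That wrapper is not shown in this diff; roughly, it can be expected to behave like the hedged sketch below (an approximation, not the actual implementation): skip repositories the consuming workspace has already defined and merge caller overrides such as build_file or patches into the pinned location.

# Rough sketch only; the real envoy_http_archive lives in @envoy_api and may differ.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

def envoy_http_archive(name, locations, **kwargs):
    if native.existing_rule(name):
        # Respect a repository that the enclosing workspace has already declared.
        return
    location = dict(locations[name])  # urls, sha256, strip_prefix, ... (metadata already stripped)
    location.update(kwargs)           # caller overrides: build_file, patches, patch_args, ...
    http_archive(name = name, **location)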
@@ -172,6 +136,7 @@ def envoy_dependencies(skip_targets = []): _com_github_google_benchmark() _com_github_google_jwt_verify() _com_github_google_libprotobuf_mutator() + _com_github_google_tcmalloc() _com_github_gperftools_gperftools() _com_github_grpc_grpc() _com_github_jbeder_yaml_cpp() @@ -198,14 +163,13 @@ def envoy_dependencies(skip_targets = []): _proxy_wasm_cpp_sdk() _proxy_wasm_cpp_host() _emscripten_toolchain() - _repository_impl("com_googlesource_code_re2") + external_http_archive("com_googlesource_code_re2") _com_google_cel_cpp() - _repository_impl("com_github_google_flatbuffers") - _repository_impl("bazel_toolchains") - _repository_impl("bazel_compdb") - _repository_impl("envoy_build_tools") - _repository_impl("rules_cc") - _org_unicode_icuuc() + external_http_archive("com_github_google_flatbuffers") + external_http_archive("bazel_toolchains") + external_http_archive("bazel_compdb") + external_http_archive("envoy_build_tools") + external_http_archive("rules_cc") # Unconditional, since we use this only for compiler-agnostic fuzzing utils. _org_llvm_releases_compiler_rt() @@ -213,8 +177,12 @@ def envoy_dependencies(skip_targets = []): _python_deps() _cc_deps() _go_deps(skip_targets) + _rust_deps() _kafka_deps() + _org_llvm_llvm() + _com_github_wavm_wavm() + switched_rules_by_language( name = "com_google_googleapis_imports", cc = True, @@ -230,25 +198,22 @@ def envoy_dependencies(skip_targets = []): ) def _boringssl(): - _repository_impl( + external_http_archive( name = "boringssl", patch_args = ["-p1"], patches = ["@envoy//bazel:boringssl_static.patch"], ) def _boringssl_fips(): - location = REPOSITORY_LOCATIONS["boringssl_fips"] - genrule_repository( + external_genrule_repository( name = "boringssl_fips", - urls = location["urls"], - sha256 = location["sha256"], genrule_cmd_file = "@envoy//bazel/external:boringssl_fips.genrule_cmd", build_file = "@envoy//bazel/external:boringssl_fips.BUILD", patches = ["@envoy//bazel/external:boringssl_fips.patch"], ) def _com_github_circonus_labs_libcircllhist(): - _repository_impl( + external_http_archive( name = "com_github_circonus_labs_libcircllhist", build_file = "@envoy//bazel/external:libcircllhist.BUILD", ) @@ -258,11 +223,9 @@ def _com_github_circonus_labs_libcircllhist(): ) def _com_github_c_ares_c_ares(): - location = _get_location("com_github_c_ares_c_ares") - http_archive( + external_http_archive( name = "com_github_c_ares_c_ares", build_file_content = BUILD_ALL_CONTENT, - **location ) native.bind( name = "ares", @@ -270,7 +233,7 @@ def _com_github_c_ares_c_ares(): ) def _com_github_cyan4973_xxhash(): - _repository_impl( + external_http_archive( name = "com_github_cyan4973_xxhash", build_file = "@envoy//bazel/external:xxhash.BUILD", ) @@ -280,7 +243,7 @@ def _com_github_cyan4973_xxhash(): ) def _com_github_envoyproxy_sqlparser(): - _repository_impl( + external_http_archive( name = "com_github_envoyproxy_sqlparser", build_file = "@envoy//bazel/external:sqlparser.BUILD", ) @@ -290,7 +253,7 @@ def _com_github_envoyproxy_sqlparser(): ) def _com_github_mirror_tclap(): - _repository_impl( + external_http_archive( name = "com_github_mirror_tclap", build_file = "@envoy//bazel/external:tclap.BUILD", patch_args = ["-p1"], @@ -306,7 +269,7 @@ def _com_github_mirror_tclap(): ) def _com_github_fmtlib_fmt(): - _repository_impl( + external_http_archive( name = "com_github_fmtlib_fmt", build_file = "@envoy//bazel/external:fmtlib.BUILD", ) @@ -316,7 +279,7 @@ def _com_github_fmtlib_fmt(): ) def _com_github_gabime_spdlog(): - _repository_impl( + 
external_http_archive( name = "com_github_gabime_spdlog", build_file = "@envoy//bazel/external:spdlog.BUILD", ) @@ -326,10 +289,8 @@ def _com_github_gabime_spdlog(): ) def _com_github_google_benchmark(): - location = _get_location("com_github_google_benchmark") - http_archive( + external_http_archive( name = "com_github_google_benchmark", - **location ) native.bind( name = "benchmark", @@ -337,13 +298,13 @@ def _com_github_google_benchmark(): ) def _com_github_google_libprotobuf_mutator(): - _repository_impl( + external_http_archive( name = "com_github_google_libprotobuf_mutator", build_file = "@envoy//bazel/external:libprotobuf_mutator.BUILD", ) def _com_github_jbeder_yaml_cpp(): - _repository_impl( + external_http_archive( name = "com_github_jbeder_yaml_cpp", ) native.bind( @@ -352,11 +313,9 @@ def _com_github_jbeder_yaml_cpp(): ) def _com_github_libevent_libevent(): - location = _get_location("com_github_libevent_libevent") - http_archive( + external_http_archive( name = "com_github_libevent_libevent", build_file_content = BUILD_ALL_CONTENT, - **location ) native.bind( name = "event", @@ -364,7 +323,7 @@ def _com_github_libevent_libevent(): ) def _net_zlib(): - _repository_impl( + external_http_archive( name = "net_zlib", build_file_content = BUILD_ALL_CONTENT, patch_args = ["-p1"], @@ -383,16 +342,19 @@ def _net_zlib(): ) def _com_github_zlib_ng_zlib_ng(): - _repository_impl( + external_http_archive( name = "com_github_zlib_ng_zlib_ng", build_file_content = BUILD_ALL_CONTENT, ) def _com_google_cel_cpp(): - _repository_impl("com_google_cel_cpp") - _repository_impl("rules_antlr") - location = _get_location("antlr4_runtimes") - http_archive( + external_http_archive("com_google_cel_cpp") + external_http_archive("rules_antlr") + + # Parser dependencies + # TODO: upgrade this when cel is upgraded to use the latest version + external_http_archive(name = "rules_antlr") + external_http_archive( name = "antlr4_runtimes", build_file_content = """ package(default_visibility = ["//visibility:public"]) @@ -406,12 +368,10 @@ cc_library( patch_args = ["-p1"], # Patches ASAN violation of initialization fiasco patches = ["@envoy//bazel:antlr.patch"], - **location ) def _com_github_nghttp2_nghttp2(): - location = _get_location("com_github_nghttp2_nghttp2") - http_archive( + external_http_archive( name = "com_github_nghttp2_nghttp2", build_file_content = BUILD_ALL_CONTENT, patch_args = ["-p1"], @@ -420,7 +380,6 @@ def _com_github_nghttp2_nghttp2(): # https://github.com/nghttp2/nghttp2/pull/1395 # https://github.com/envoyproxy/envoy/pull/8572#discussion_r334067786 patches = ["@envoy//bazel/foreign_cc:nghttp2.patch"], - **location ) native.bind( name = "nghttp2", @@ -428,7 +387,7 @@ def _com_github_nghttp2_nghttp2(): ) def _io_opentracing_cpp(): - _repository_impl( + external_http_archive( name = "io_opentracing_cpp", patch_args = ["-p1"], # Workaround for LSAN false positive in https://github.com/envoyproxy/envoy/issues/7647 @@ -440,15 +399,15 @@ def _io_opentracing_cpp(): ) def _com_lightstep_tracer_cpp(): - _repository_impl("com_lightstep_tracer_cpp") + external_http_archive("com_lightstep_tracer_cpp") native.bind( name = "lightstep", actual = "@com_lightstep_tracer_cpp//:manual_tracer_lib", ) def _com_github_datadog_dd_opentracing_cpp(): - _repository_impl("com_github_datadog_dd_opentracing_cpp") - _repository_impl( + external_http_archive("com_github_datadog_dd_opentracing_cpp") + external_http_archive( name = "com_github_msgpack_msgpack_c", build_file = 
"@com_github_datadog_dd_opentracing_cpp//:bazel/external/msgpack.BUILD", ) @@ -458,7 +417,7 @@ def _com_github_datadog_dd_opentracing_cpp(): ) def _com_github_tencent_rapidjson(): - _repository_impl( + external_http_archive( name = "com_github_tencent_rapidjson", build_file = "@envoy//bazel/external:rapidjson.BUILD", ) @@ -468,7 +427,7 @@ def _com_github_tencent_rapidjson(): ) def _com_github_nodejs_http_parser(): - _repository_impl( + external_http_archive( name = "com_github_nodejs_http_parser", build_file = "@envoy//bazel/external:http-parser.BUILD", ) @@ -478,7 +437,7 @@ def _com_github_nodejs_http_parser(): ) def _com_google_googletest(): - _repository_impl("com_google_googletest") + external_http_archive("com_google_googletest") native.bind( name = "googletest", actual = "@com_google_googletest//:gtest", @@ -489,7 +448,7 @@ def _com_google_googletest(): # pull in more bits of abseil as needed, and is now the preferred # method for pure Bazel deps. def _com_google_absl(): - _repository_impl("com_google_absl") + external_http_archive("com_google_absl") native.bind( name = "abseil_any", actual = "@com_google_absl//absl/types:any", @@ -592,8 +551,8 @@ def _com_google_absl(): ) def _com_google_protobuf(): - _repository_impl("rules_python") - _repository_impl( + external_http_archive("rules_python") + external_http_archive( "com_google_protobuf", patches = ["@envoy//bazel:protobuf.patch"], patch_args = ["-p1"], @@ -624,10 +583,8 @@ def _com_google_protobuf(): ) def _io_opencensus_cpp(): - location = _get_location("io_opencensus_cpp") - http_archive( + external_http_archive( name = "io_opencensus_cpp", - **location ) native.bind( name = "opencensus_trace", @@ -668,15 +625,21 @@ def _io_opencensus_cpp(): def _com_github_curl(): # Used by OpenCensus Zipkin exporter. - location = _get_location("com_github_curl") - http_archive( + external_http_archive( name = "com_github_curl", build_file_content = BUILD_ALL_CONTENT + """ cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy//bazel/foreign_cc:curl"]) """, - patches = ["@envoy//bazel/foreign_cc:curl-revert-cmake-minreqver.patch"], + # Patch curl 7.72.0 due to CMake's problematic implementation of policy `CMP0091` + # introduced in CMake 3.15 and then deprecated in CMake 3.18. Curl forcing the CMake + # ruleset to 3.16 breaks the Envoy windows fastbuild target. + # Also cure a fatal assumption creating a static library using LLVM `lld-link.exe` + # adding dynamic link flags, which breaks the Envoy clang-cl library archive step. + # Upstream patch submitted: https://github.com/curl/curl/pull/6050 + # TODO(https://github.com/envoyproxy/envoy/issues/11816): This patch is obsoleted + # by elimination of the curl dependency. 
+ patches = ["@envoy//bazel/foreign_cc:curl.patch"], patch_args = ["-p1"], - **location ) native.bind( name = "curl", @@ -684,13 +647,11 @@ cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy/ ) def _com_googlesource_chromium_v8(): - location = _get_location("com_googlesource_chromium_v8") - genrule_repository( + external_genrule_repository( name = "com_googlesource_chromium_v8", genrule_cmd_file = "@envoy//bazel/external:wee8.genrule_cmd", build_file = "@envoy//bazel/external:wee8.BUILD", patches = ["@envoy//bazel/external:wee8.patch"], - **location ) native.bind( name = "wee8", @@ -698,11 +659,8 @@ def _com_googlesource_chromium_v8(): ) def _com_googlesource_quiche(): - location = REPOSITORY_LOCATIONS["com_googlesource_quiche"] - genrule_repository( + external_genrule_repository( name = "com_googlesource_quiche", - urls = location["urls"], - sha256 = location["sha256"], genrule_cmd_file = "@envoy//bazel/external:quiche.genrule_cmd", build_file = "@envoy//bazel/external:quiche.BUILD", ) @@ -728,10 +686,8 @@ def _com_googlesource_quiche(): ) def _com_googlesource_googleurl(): - _repository_impl( + external_http_archive( name = "com_googlesource_googleurl", - patches = ["@envoy//bazel/external:googleurl.patch"], - patch_args = ["-p1"], ) native.bind( name = "googleurl", @@ -739,14 +695,14 @@ def _com_googlesource_googleurl(): ) def _org_llvm_releases_compiler_rt(): - _repository_impl( + external_http_archive( name = "org_llvm_releases_compiler_rt", build_file = "@envoy//bazel/external:compiler_rt.BUILD", ) def _com_github_grpc_grpc(): - _repository_impl("com_github_grpc_grpc") - _repository_impl("build_bazel_rules_apple") + external_http_archive("com_github_grpc_grpc") + external_http_archive("build_bazel_rules_apple") # Rebind some stuff to match what the gRPC Bazel is expecting. 
native.bind( @@ -788,7 +744,7 @@ def _com_github_grpc_grpc(): ) def _upb(): - _repository_impl( + external_http_archive( name = "upb", patches = ["@envoy//bazel:upb.patch"], patch_args = ["-p1"], @@ -800,23 +756,28 @@ def _upb(): ) def _proxy_wasm_cpp_sdk(): - _repository_impl(name = "proxy_wasm_cpp_sdk") + external_http_archive(name = "proxy_wasm_cpp_sdk") def _proxy_wasm_cpp_host(): - _repository_impl( + external_http_archive( name = "proxy_wasm_cpp_host", build_file = "@envoy//bazel/external:proxy_wasm_cpp_host.BUILD", ) def _emscripten_toolchain(): - _repository_impl( + external_http_archive( name = "emscripten_toolchain", - build_file_content = BUILD_ALL_CONTENT, - patch_cmds = REPOSITORY_LOCATIONS["emscripten_toolchain"]["patch_cmds"], + build_file_content = _build_all_content(exclude = [ + "upstream/emscripten/cache/is_vanilla.txt", + ".emscripten_sanity", + ]), + patch_cmds = [ + "[[ \"$(uname -m)\" == \"x86_64\" ]] && ./emsdk install 1.39.6-upstream && ./emsdk activate --embedded 1.39.6-upstream || true", + ], ) def _com_github_google_jwt_verify(): - _repository_impl("com_github_google_jwt_verify") + external_http_archive("com_github_google_jwt_verify") native.bind( name = "jwt_verify_lib", @@ -824,14 +785,12 @@ def _com_github_google_jwt_verify(): ) def _com_github_luajit_luajit(): - location = _get_location("com_github_luajit_luajit") - http_archive( + external_http_archive( name = "com_github_luajit_luajit", build_file_content = BUILD_ALL_CONTENT, patches = ["@envoy//bazel/foreign_cc:luajit.patch"], patch_args = ["-p1"], patch_cmds = ["chmod u+x build.py"], - **location ) native.bind( @@ -840,14 +799,12 @@ def _com_github_luajit_luajit(): ) def _com_github_moonjit_moonjit(): - location = _get_location("com_github_moonjit_moonjit") - http_archive( + external_http_archive( name = "com_github_moonjit_moonjit", build_file_content = BUILD_ALL_CONTENT, patches = ["@envoy//bazel/foreign_cc:moonjit.patch"], patch_args = ["-p1"], patch_cmds = ["chmod u+x build.py"], - **location ) native.bind( @@ -855,19 +812,48 @@ def _com_github_moonjit_moonjit(): actual = "@envoy//bazel/foreign_cc:moonjit", ) +def _com_github_google_tcmalloc(): + external_http_archive( + name = "com_github_google_tcmalloc", + ) + + native.bind( + name = "tcmalloc", + actual = "@com_github_google_tcmalloc//tcmalloc", + ) + def _com_github_gperftools_gperftools(): - location = _get_location("com_github_gperftools_gperftools") - http_archive( + external_http_archive( name = "com_github_gperftools_gperftools", build_file_content = BUILD_ALL_CONTENT, - **location ) - native.bind( name = "gperftools", actual = "@envoy//bazel/foreign_cc:gperftools", ) +def _org_llvm_llvm(): + external_http_archive( + name = "org_llvm_llvm", + build_file_content = BUILD_ALL_CONTENT, + patch_args = ["-p1"], + patches = ["@envoy//bazel/foreign_cc:llvm.patch"], + ) + native.bind( + name = "llvm", + actual = "@envoy//bazel/foreign_cc:llvm", + ) + +def _com_github_wavm_wavm(): + external_http_archive( + name = "com_github_wavm_wavm", + build_file_content = BUILD_ALL_CONTENT, + ) + native.bind( + name = "wavm", + actual = "@envoy//bazel/foreign_cc:wavm", + ) + def _kafka_deps(): # This archive contains Kafka client source code. # We are using request/response message format files to generate parser code. 
@@ -883,37 +869,28 @@ filegroup( visibility = ["//visibility:public"], ) """ - http_archive( + external_http_archive( name = "kafka_source", build_file_content = KAFKASOURCE_BUILD_CONTENT, patches = ["@envoy//bazel/external:kafka_int32.patch"], - **_get_location("kafka_source") ) # This archive provides Kafka (and Zookeeper) binaries, that are used during Kafka integration # tests. - http_archive( + external_http_archive( name = "kafka_server_binary", build_file_content = BUILD_ALL_CONTENT, - **_get_location("kafka_server_binary") ) # This archive provides Kafka client in Python, so we can use it to interact with Kafka server # during interation tests. - http_archive( + external_http_archive( name = "kafka_python_client", build_file_content = BUILD_ALL_CONTENT, - **_get_location("kafka_python_client") - ) - -def _org_unicode_icuuc(): - _repository_impl( - name = "org_unicode_icuuc", - build_file = "@envoy//bazel/external:icuuc.BUILD", ) def _foreign_cc_dependencies(): - _repository_impl("rules_foreign_cc") + external_http_archive("rules_foreign_cc") def _is_linux(ctxt): return ctxt.os.name == "linux" diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 8e19344926b8..3aafc9528d80 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -7,59 +7,94 @@ def _python_deps(): pip_repositories() pip3_import( + name = "config_validation_pip3", + requirements = "@envoy//tools/config_validation:requirements.txt", + extra_pip_args = ["--require-hashes"], + # project_name = "PyYAML", # project_url = "https://github.com/yaml/pyyaml", # version = "5.3.1", - # use_category = ["other"], + # last_update = "2020-03-18" + # use_category = ["devtools"], # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", - name = "config_validation_pip3", - requirements = "@envoy//tools/config_validation:requirements.txt", - extra_pip_args = ["--require-hashes"], ) pip3_import( + name = "configs_pip3", + requirements = "@envoy//configs:requirements.txt", + extra_pip_args = ["--require-hashes"], + # project_name = "Jinja", # project_url = "http://palletsprojects.com/p/jinja", # version = "2.11.2", + # last_update = "2020-04-13" # use_category = ["test"], # cpe = "cpe:2.3:a:palletsprojects:jinja:*", - name = "configs_pip3", - requirements = "@envoy//configs:requirements.txt", - extra_pip_args = ["--require-hashes"], + + # project_name = "MarkupSafe", + # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/", + # version = "1.1.1", + # last_update = "2019-02-23" + # use_category = ["test"], ) pip3_import( + name = "kafka_pip3", + requirements = "@envoy//source/extensions/filters/network/kafka:requirements.txt", + extra_pip_args = ["--require-hashes"], + # project_name = "Jinja", # project_url = "http://palletsprojects.com/p/jinja", # version = "2.11.2", + # last_update = "2020-04-13" # use_category = ["test"], # cpe = "cpe:2.3:a:palletsprojects:jinja:*", - name = "kafka_pip3", - requirements = "@envoy//source/extensions/filters/network/kafka:requirements.txt", - extra_pip_args = ["--require-hashes"], + + # project_name = "MarkupSafe", + # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/", + # version = "1.1.1", + # last_update = "2019-02-23" + # use_category = ["test"], ) pip3_import( name = "headersplit_pip3", requirements = "@envoy//tools/envoy_headersplit:requirements.txt", extra_pip_args = ["--require-hashes"], + + # project_name = "Clang", + # project_url = "https://clang.llvm.org/", + # version = "10.0.1", + # last_update = "2020-07-21" + # use_category = 
["devtools"], + # cpe = "cpe:2.3:a:llvm:clang:*", ) pip3_import( + name = "protodoc_pip3", + requirements = "@envoy//tools/protodoc:requirements.txt", + extra_pip_args = ["--require-hashes"], + # project_name = "PyYAML", # project_url = "https://github.com/yaml/pyyaml", # version = "5.3.1", - # use_category = ["other"], + # last_update = "2020-03-18" + # use_category = ["docs"], # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", - name = "protodoc_pip3", - requirements = "@envoy//tools/protodoc:requirements.txt", - extra_pip_args = ["--require-hashes"], ) pip3_import( + name = "thrift_pip3", + requirements = "@envoy//test/extensions/filters/network/thrift_proxy:requirements.txt", + extra_pip_args = ["--require-hashes"], + # project_name = "Apache Thrift", # project_url = "http://thrift.apache.org/", # version = "0.11.0", - # use_category = ["dataplane"], + # last_update = "2017-12-07" + # use_category = ["test"], # cpe = "cpe:2.3:a:apache:thrift:*", - name = "thrift_pip3", - requirements = "@envoy//test/extensions/filters/network/thrift_proxy:requirements.txt", - extra_pip_args = ["--require-hashes"], + + # project_name = "Six: Python 2 and 3 Compatibility Library", + # project_url = "https://six.readthedocs.io/", + # version = "1.15.0", + # last_update = "2020-05-21" + # use_category = ["test"], ) # Envoy deps that rely on a first stage of dependency loading in envoy_dependencies(). diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index e1cddcc8513d..6a631c5a3e6b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,65 +1,29 @@ -# Validation of content in this file is done on the bazel/repositories.bzl file to make it free of bazel -# constructs. This is to allow this file to be loaded into Python based build and maintenance tools. - -# Envoy dependencies may be annotated with the following attributes: -DEPENDENCY_ANNOTATIONS = [ - # List of the categories describing how the dependency is being used. This attribute is used - # for automatic tracking of security posture of Envoy's dependencies. - # Possible values are documented in the USE_CATEGORIES list below. - # This attribute is mandatory for each dependecy. - "use_category", - - # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID - # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See - # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements - # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned. - # This attribute is optional for components with use categories listed in the - # USE_CATEGORIES_WITH_CPE_OPTIONAL - "cpe", -] - -# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed -# to be declared. -USE_CATEGORIES = [ - # This dependency is used in API protos. - "api", - # This dependency is used in build process. - "build", - # This dependency is used to process xDS requests. - "controlplane", - # This dependency is used in processing downstream or upstream requests. - "dataplane", - # This dependecy is used for logging, metrics or tracing. It may process unstrusted input. - "observability", - # This dependency does not handle untrusted data and is used for various utility purposes. - "other", - # This dependency is used for unit tests. - "test", -] - -# Components with these use categories are not required to specify the 'cpe' annotation. 
-USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test"] - -DEPENDENCY_REPOSITORIES_SPEC = dict( +# This should match the schema defined in external_deps.bzl. +REPOSITORY_LOCATIONS_SPEC = dict( bazel_compdb = dict( - project_name = "bazil-compilation-database", + project_name = "bazel-compilation-database", + project_desc = "Clang JSON compilation database support for Bazel", project_url = "https://github.com/grailbio/bazel-compilation-database", version = "0.4.5", sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4", strip_prefix = "bazel-compilation-database-{version}", urls = ["https://github.com/grailbio/bazel-compilation-database/archive/{version}.tar.gz"], + last_updated = "2020-08-01", use_category = ["build"], ), bazel_gazelle = dict( project_name = "Gazelle", + project_desc = "Bazel BUILD file generator for Go projects", project_url = "https://github.com/bazelbuild/bazel-gazelle", version = "0.21.1", sha256 = "cdb02a887a7187ea4d5a27452311a75ed8637379a1287d8eeb952138ea485f7d", urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz"], + last_updated = "2020-05-28", use_category = ["build"], ), bazel_toolchains = dict( project_name = "bazel-toolchains", + project_desc = "Bazel toolchain configs for RBE", project_url = "https://github.com/bazelbuild/bazel-toolchains", version = "3.4.1", sha256 = "7ebb200ed3ca3d1f7505659c7dfed01c4b5cb04c3a6f34140726fe22f5d35e86", @@ -68,238 +32,304 @@ DEPENDENCY_REPOSITORIES_SPEC = dict( "https://github.com/bazelbuild/bazel-toolchains/releases/download/{version}/bazel-toolchains-{version}.tar.gz", "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/{version}.tar.gz", ], + last_updated = "2020-08-10", use_category = ["build"], ), build_bazel_rules_apple = dict( project_name = "Apple Rules for Bazel", + project_desc = "Bazel rules for Apple platforms", project_url = "https://github.com/bazelbuild/rules_apple", version = "0.19.0", sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42", urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"], + last_updated = "2020-10-10", use_category = ["build"], ), envoy_build_tools = dict( project_name = "envoy-build-tools", + project_desc = "Common build tools shared by the Envoy/UDPA ecosystem", project_url = "https://github.com/envoyproxy/envoy-build-tools", - version = "2d13ad4157997715a4939bd218a89c81c26ff28e", - sha256 = "0dc8ce5eb645ae069ce710c1010975456f723ffd4fc788a03dacfcd0647b05b9", + version = "0ba5aa98a6e6c5efcc63f53602f69548d2417683", + sha256 = "dc3881d16e7b0c855a7279f5757d55e4aa55fe2befbd9e34215b971818622f9e", strip_prefix = "envoy-build-tools-{version}", - # 2020-08-21 urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"], + last_updated = "2020-10-01", use_category = ["build"], ), boringssl = dict( project_name = "BoringSSL", + project_desc = "Minimal OpenSSL fork", project_url = "https://github.com/google/boringssl", - version = "597b810379e126ae05d32c1d94b1a9464385acd0", - sha256 = "1ea42456c020daf0a9b0f9e8d8bc3a403c9314f4f54230c617257af996cd5fa6", + version = "2192bbc878822cf6ab5977d4257a1339453d9d39", + sha256 = "bb55b0ed2f0cb548b5dce6a6b8307ce37f7f748eb9f1be6bfe2d266ff2b4d52b", strip_prefix = "boringssl-{version}", # To update BoringSSL, which tracks Chromium releases: # 1. 
Open https://omahaproxy.appspot.com/ and note of linux/stable release. # 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags//DEPS and note . # 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges . # - # chromium-85.0.4183.83 - # 2020-06-23 + # chromium-86.0.4240.80 urls = ["https://github.com/google/boringssl/archive/{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + use_category = ["controlplane", "dataplane_core"], + last_updated = "2020-07-30", + cpe = "cpe:2.3:a:google:boringssl:*", ), boringssl_fips = dict( project_name = "BoringSSL (FIPS)", + project_desc = "FIPS compliant BoringSSL", project_url = "https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md", version = "fips-20190808", sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8", urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"], - use_category = ["dataplane"], - cpe = "N/A", + use_category = ["controlplane", "dataplane_core"], + last_updated = "2019-08-08", + cpe = "cpe:2.3:a:google:boringssl:*", ), com_google_absl = dict( project_name = "Abseil", + project_desc = "Open source collection of C++ libraries drawn from the most fundamental pieces of Google’s internal codebase", project_url = "https://abseil.io/", - version = "ce4bc927755fdf0ed03d679d9c7fa041175bb3cb", - sha256 = "573baccd67aa591b8c7209bfb0c77e0d15633d77ced39d1ccbb1232828f7f7d9", + version = "093cc27604df1c4a179b73bc3f00d4d1ce2ce113", + sha256 = "55d33c75aff05a8c4a55bdf0eddad66c71a963107bc2add96cf8eb88ddb47a80", strip_prefix = "abseil-cpp-{version}", - # 2020-08-08 urls = ["https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz"], - use_category = ["dataplane", "controlplane"], + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-10-01", cpe = "N/A", ), com_github_c_ares_c_ares = dict( project_name = "c-ares", + project_desc = "C library for asynchronous DNS requests", project_url = "https://c-ares.haxx.se/", version = "1.16.1", sha256 = "d08312d0ecc3bd48eee0a4cc0d2137c9f194e0a28de2028928c0f6cae85f86ce", strip_prefix = "c-ares-{version}", urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-05-11", cpe = "cpe:2.3:a:c-ares_project:c-ares:*", ), com_github_circonus_labs_libcircllhist = dict( project_name = "libcircllhist", + project_desc = "An implementation of Circonus log-linear histograms", project_url = "https://github.com/circonus-labs/libcircllhist", - # 2019-02-11 version = "63a16dd6f2fc7bc841bb17ff92be8318df60e2e1", sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c", strip_prefix = "libcircllhist-{version}", urls = ["https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz"], - use_category = ["observability"], + use_category = ["controlplane", "observability_core", "dataplane_core"], + last_updated = "2019-02-11", cpe = "N/A", ), com_github_cyan4973_xxhash = dict( project_name = "xxHash", + project_desc = "Extremely fast hash algorithm", project_url = "https://github.com/Cyan4973/xxHash", version = "0.7.3", sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", strip_prefix = "xxHash-{version}", urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"], 
- use_category = ["dataplane", "controlplane"], + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-03-04", cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( project_name = "C++ SQL Parser Library", + project_desc = "Forked from Hyrise SQL Parser", project_url = "https://github.com/envoyproxy/sql-parser", - # 2020-06-10 version = "3b40ba2d106587bdf053a292f7e3bb17e818a57f", sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71", strip_prefix = "sql-parser-{version}", urls = ["https://github.com/envoyproxy/sql-parser/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = [ + "envoy.filters.network.mysql_proxy", + "envoy.filters.network.postgres_proxy", + ], + last_updated = "2020-06-10", cpe = "N/A", ), com_github_mirror_tclap = dict( project_name = "tclap", + project_desc = "Small, flexible library that provides a simple interface for defining and accessing command line arguments", project_url = "http://tclap.sourceforge.net", version = "1-2-1", sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f", strip_prefix = "tclap-tclap-{version}-release-final", urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"], + last_updated = "2017-11-10", use_category = ["other"], ), com_github_fmtlib_fmt = dict( project_name = "fmt", + project_desc = "{fmt} is an open-source formatting library providing a fast and safe alternative to C stdio and C++ iostreams", project_url = "https://fmt.dev", version = "7.0.3", sha256 = "decfdf9ad274070fa85f26407b816f5a4d82205ae86bac1990be658d0795ea4d", strip_prefix = "fmt-{version}", urls = ["https://github.com/fmtlib/fmt/releases/download/{version}/fmt-{version}.zip"], - use_category = ["observability"], - cpe = "N/A", + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-08-07", + cpe = "cpe:2.3:a:fmt:fmt:*", ), com_github_gabime_spdlog = dict( project_name = "spdlog", + project_desc = "Very fast, header-only/compiled, C++ logging library", project_url = "https://github.com/gabime/spdlog", version = "1.7.0", sha256 = "f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62", strip_prefix = "spdlog-{version}", urls = ["https://github.com/gabime/spdlog/archive/v{version}.tar.gz"], - use_category = ["observability"], + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-07-09", cpe = "N/A", ), com_github_google_libprotobuf_mutator = dict( project_name = "libprotobuf-mutator", + project_desc = "Library to randomly mutate protobuffers", project_url = "https://github.com/google/libprotobuf-mutator", - # 2020-08-18 version = "8942a9ba43d8bb196230c321d46d6a137957a719", sha256 = "49a26dbe77c75f2eca1dd8a9fbdb31c4496d9af42df027ff57569c5a7a5d980d", strip_prefix = "libprotobuf-mutator-{version}", urls = ["https://github.com/google/libprotobuf-mutator/archive/{version}.tar.gz"], - use_category = ["test"], + last_updated = "2020-08-18", + use_category = ["test_only"], + ), + com_github_google_tcmalloc = dict( + project_name = "tcmalloc", + project_desc = "Fast, multi-threaded malloc implementation", + project_url = "https://github.com/google/tcmalloc", + version = "d1311bf409db47c3441d3de6ea07d768c6551dec", + sha256 = "e22444b6544edd81f11c987dd5e482a2e00bbff717badb388779ca57525dad50", + strip_prefix = "tcmalloc-{version}", + urls = ["https://github.com/google/tcmalloc/archive/{version}.tar.gz"], 
+ use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-09-16", + cpe = "N/A", ), com_github_gperftools_gperftools = dict( project_name = "gperftools", + project_desc = "tcmalloc and profiling libraries", project_url = "https://github.com/gperftools/gperftools", version = "2.8", sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e", strip_prefix = "gperftools-{version}", urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"], - use_category = ["test"], + last_updated = "2020-07-06", + use_category = ["dataplane_core", "controlplane"], + cpe = "cpe:2.3:a:gperftools_project:gperftools:*", ), com_github_grpc_grpc = dict( project_name = "gRPC", + project_desc = "gRPC C core library", project_url = "https://grpc.io", # TODO(JimmyCYJ): Bump to release 1.27 # This sha on grpc:v1.25.x branch is specifically chosen to fix gRPC STS call credential options. - # 2020-02-11 version = "d8f4928fa779f6005a7fe55a176bdb373b0f910f", sha256 = "bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123", strip_prefix = "grpc-{version}", urls = ["https://github.com/grpc/grpc/archive/{version}.tar.gz"], - use_category = ["dataplane", "controlplane"], + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-02-11", cpe = "cpe:2.3:a:grpc:grpc:*", ), com_github_luajit_luajit = dict( project_name = "LuaJIT", + project_desc = "Just-In-Time compiler for Lua", project_url = "https://luajit.org", - version = "2.1.0-beta3", - sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", + # The last release version, 2.1.0-beta3 has a number of CVEs filed + # against it. These may not impact correct non-malicious Lua code, but for prudence we bump. 
+ version = "1d8b747c161db457e032a023ebbff511f5de5ec2", + sha256 = "20a159c38a98ecdb6368e8d655343b6036622a29a1621da9dc303f7ed9bf37f3", strip_prefix = "LuaJIT-{version}", - urls = ["https://github.com/LuaJIT/LuaJIT/archive/v{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + urls = ["https://github.com/LuaJIT/LuaJIT/archive/{version}.tar.gz"], + last_updated = "2020-10-13", + use_category = ["dataplane_ext"], + extensions = ["envoy.filters.http.lua"], + cpe = "cpe:2.3:a:luajit:luajit:*", ), com_github_moonjit_moonjit = dict( project_name = "Moonjit", + project_desc = "LuaJIT fork with wider platform support", project_url = "https://github.com/moonjit/moonjit", version = "2.2.0", sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6", strip_prefix = "moonjit-{version}", urls = ["https://github.com/moonjit/moonjit/archive/{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + use_category = ["dataplane_ext"], + extensions = ["envoy.filters.http.lua"], + last_updated = "2020-01-14", + cpe = "cpe:2.3:a:moonjit_project:moonjit:*", ), com_github_nghttp2_nghttp2 = dict( project_name = "Nghttp2", + project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in Cimplementation of HTTP/2 and its header compression algorithm HPACK in C", project_url = "https://nghttp2.org", version = "1.41.0", sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8", strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["controlplane", "dataplane_core"], + last_updated = "2020-06-03", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( project_name = "OpenTracing", + project_desc = "Vendor-neutral APIs and instrumentation for distributed tracing", project_url = "https://opentracing.io", version = "1.5.1", sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301", strip_prefix = "opentracing-cpp-{version}", urls = ["https://github.com/opentracing/opentracing-cpp/archive/v{version}.tar.gz"], - use_category = ["observability"], + use_category = ["observability_ext"], + extensions = [ + "envoy.tracers.datadog", + "envoy.tracers.dynamic_ot", + "envoy.tracers.lightstep", + ], + last_updated = "2019-01-16", cpe = "N/A", ), com_lightstep_tracer_cpp = dict( project_name = "lightstep-tracer-cpp", + project_desc = "LightStep distributed tracing library for C++", project_url = "https://github.com/lightstep/lightstep-tracer-cpp", - # 2020-08-24 version = "1942b3f142e218ebc143a043f32e3278dafec9aa", sha256 = "3238921a8f578beb26c2215cd277e8f6752f3d29b020b881d60d96a240a38aed", strip_prefix = "lightstep-tracer-cpp-{version}", urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/{version}.tar.gz"], - use_category = ["observability"], + use_category = ["observability_ext"], + extensions = ["envoy.tracers.lightstep"], + last_updated = "2020-08-24", cpe = "N/A", ), com_github_datadog_dd_opentracing_cpp = dict( project_name = "Datadog OpenTracing C++ Client", + project_desc = "Datadog OpenTracing C++ Client", project_url = "https://github.com/DataDog/dd-opentracing-cpp", version = "1.1.5", sha256 = "b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924", strip_prefix = "dd-opentracing-cpp-{version}", urls = 
["https://github.com/DataDog/dd-opentracing-cpp/archive/v{version}.tar.gz"], - use_category = ["observability"], + use_category = ["observability_ext"], + extensions = ["envoy.tracers.datadog"], + last_updated = "2020-05-15", cpe = "N/A", ), com_github_google_benchmark = dict( project_name = "Benchmark", + project_desc = "Library to benchmark code snippets", project_url = "https://github.com/google/benchmark", version = "1.5.1", sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2", strip_prefix = "benchmark-{version}", urls = ["https://github.com/google/benchmark/archive/v{version}.tar.gz"], - use_category = ["test"], + use_category = ["test_only"], + last_updated = "2020-06-09", ), com_github_libevent_libevent = dict( project_name = "libevent", + project_desc = "Event notification library", project_url = "https://libevent.org", # This SHA includes the new "prepare" and "check" watchers, used for event loop performance # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition @@ -311,286 +341,408 @@ DEPENDENCY_REPOSITORIES_SPEC = dict( # This also includes the wepoll backend for Windows (see # https://github.com/libevent/libevent/pull/1006) # TODO(adip): Update to v2.2 when it is released. - # 2020-07-31 version = "62c152d9a7cd264b993dad730c4163c6ede2e0a3", sha256 = "4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213", strip_prefix = "libevent-{version}", urls = ["https://github.com/libevent/libevent/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-07-31", cpe = "cpe:2.3:a:libevent_project:libevent:*", ), + # This should be removed, see https://github.com/envoyproxy/envoy/issues/13261. net_zlib = dict( project_name = "zlib", + project_desc = "zlib compression library", project_url = "https://zlib.net", version = "79baebe50e4d6b73ae1f8b603f0ef41300110aa3", # Use the dev branch of zlib to resolve fuzz bugs and out of bound # errors resulting in crashes in zlib 1.2.11. # TODO(asraa): Remove when zlib > 1.2.11 is released. - # 2019-04-14 development branch sha256 = "155a8f8c1a753fb05b16a1b0cc0a0a9f61a78e245f9e0da483d13043b3bcbf2e", strip_prefix = "zlib-{version}", urls = ["https://github.com/madler/zlib/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["controlplane", "dataplane_core"], + last_updated = "2019-04-14", cpe = "cpe:2.3:a:gnu:zlib:*", ), com_github_zlib_ng_zlib_ng = dict( project_name = "zlib-ng", + project_desc = "zlib fork (higher performance)", project_url = "https://github.com/zlib-ng/zlib-ng", version = "193d8fd7dfb7927facab7a3034daa27ad5b9df1c", sha256 = "5fe543e8d007b9e7b729f3d6b3a5ee1f9b68d0eef5f6af1393745a4dcd472a98", - strip_prefix = "zlib-ng-193d8fd7dfb7927facab7a3034daa27ad5b9df1c", - # 2020-08-16 develop branch. 
- urls = ["https://github.com/zlib-ng/zlib-ng/archive/193d8fd7dfb7927facab7a3034daa27ad5b9df1c.tar.gz"], - use_category = ["dataplane"], + strip_prefix = "zlib-ng-{version}", + urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"], + use_category = ["controlplane", "dataplane_core"], + last_updated = "2020-08-16", cpe = "N/A", ), com_github_jbeder_yaml_cpp = dict( project_name = "yaml-cpp", + project_desc = "YAML parser and emitter in C++ matching the YAML 1.2 spec", project_url = "https://github.com/jbeder/yaml-cpp", - # 2020-07-28 version = "98acc5a8874faab28b82c28936f4b400b389f5d6", sha256 = "79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f", strip_prefix = "yaml-cpp-{version}", urls = ["https://github.com/jbeder/yaml-cpp/archive/{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + # YAML is also used for runtime as well as controlplane. It shouldn't appear on the + # dataplane but we can't verify this automatically due to code structure today. + use_category = ["controlplane", "dataplane_core"], + last_updated = "2020-07-28", + cpe = "cpe:2.3:a:yaml-cpp_project:yaml-cpp:*", ), com_github_msgpack_msgpack_c = dict( project_name = "msgpack for C/C++", + project_desc = "MessagePack is an efficient binary serialization format", project_url = "https://github.com/msgpack/msgpack-c", version = "3.3.0", sha256 = "6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b", strip_prefix = "msgpack-{version}", urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-{version}/msgpack-{version}.tar.gz"], - use_category = ["observability"], + use_category = ["observability_ext"], + extensions = ["envoy.tracers.datadog"], + last_updated = "2020-06-05", cpe = "N/A", ), com_github_google_jwt_verify = dict( project_name = "jwt_verify_lib", + project_desc = "JWT verification library for C++", project_url = "https://github.com/google/jwt_verify_lib", - # 2020-07-09 version = "7276a339af8426724b744216f619c99152f8c141", sha256 = "f1fde4f3ebb3b2d841332c7a02a4b50e0529a19709934c63bc6208d1bbe28fb1", strip_prefix = "jwt_verify_lib-{version}", urls = ["https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = ["envoy.filters.http.jwt_authn"], + last_updated = "2020-07-09", cpe = "N/A", ), com_github_nodejs_http_parser = dict( project_name = "HTTP Parser", + project_desc = "Parser for HTTP messages written in C", project_url = "https://github.com/nodejs/http-parser", - # 2020-07-10 # This SHA includes fix for https://github.com/nodejs/http-parser/issues/517 which allows (opt-in) to serve # requests with both Content-Legth and Transfer-Encoding: chunked headers set. 
version = "4f15b7d510dc7c6361a26a7c6d2f7c3a17f8d878", sha256 = "6a12896313ce1ca630cf516a0ee43a79b5f13f5a5d8143f56560ac0b21c98fac", strip_prefix = "http-parser-{version}", urls = ["https://github.com/nodejs/http-parser/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["controlplane", "dataplane_core"], + last_updated = "2020-07-10", cpe = "cpe:2.3:a:nodejs:node.js:*", ), com_github_tencent_rapidjson = dict( project_name = "RapidJSON", + project_desc = "Fast JSON parser/generator for C++", project_url = "https://rapidjson.org", - # Changes through 2019-12-02 version = "dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1", sha256 = "a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b", strip_prefix = "rapidjson-{version}", urls = ["https://github.com/Tencent/rapidjson/archive/{version}.tar.gz"], - use_category = ["dataplane"], + # We're mostly using com_google_protobuf for JSON, but there are some extensions and hard to + # disentangle uses on the dataplane, e.g. header_formatter, Squash filter. + use_category = ["controlplane", "dataplane_core"], + last_updated = "2019-12-02", cpe = "cpe:2.3:a:tencent:rapidjson:*", ), com_github_twitter_common_lang = dict( project_name = "twitter.common.lang (Thrift)", + project_desc = "twitter.common Python language and compatibility facilities", project_url = "https://pypi.org/project/twitter.common.lang", version = "0.3.9", sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1", strip_prefix = "twitter.common.lang-{version}/src", urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + last_updated = "2018-06-26", + use_category = ["test_only"], ), com_github_twitter_common_rpc = dict( project_name = "twitter.common.rpc (Thrift)", + project_desc = "twitter.common Thrift helpers including Finagle and SSL transports", project_url = "https://pypi.org/project/twitter.common.rpc", version = "0.3.9", sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514", strip_prefix = "twitter.common.rpc-{version}/src", urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + last_updated = "2018-06-26", + use_category = ["test_only"], ), com_github_twitter_common_finagle_thrift = dict( project_name = "twitter.common.finagle-thrift", + project_desc = "twitter.common Thrift stubs for Zipkin RPC tracing support in Finagle", project_url = "https://pypi.org/project/twitter.common.finagle-thrift", version = "0.3.9", sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a", strip_prefix = "twitter.common.finagle-thrift-{version}/src", urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + last_updated = "2018-06-26", + use_category = ["test_only"], ), com_google_googletest = dict( project_name = "Google Test", + project_desc = "Google's C++ test framework", project_url = "https://github.com/google/googletest", - version = "1.10.0", - sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb", - strip_prefix = "googletest-release-{version}", - urls = 
["https://github.com/google/googletest/archive/release-{version}.tar.gz"], - use_category = ["test"], + # Pick up fix for MOCK_METHOD compilation with clang-cl for Windows (resolved after 1.10.0) + # see https://github.com/google/googletest/issues/2490 + version = "a4ab0abb93620ce26efad9de9296b73b16e88588", + sha256 = "7897bfaa5ad39a479177cfb5c3ce010184dbaee22a7c3727b212282871918751", + strip_prefix = "googletest-{version}", + urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"], + last_updated = "2020-09-10", + use_category = ["test_only"], ), com_google_protobuf = dict( project_name = "Protocol Buffers", + project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data", project_url = "https://developers.google.com/protocol-buffers", version = "3.10.1", sha256 = "d7cfd31620a352b2ee8c1ed883222a0d77e44346643458e062e86b1d069ace3e", strip_prefix = "protobuf-{version}", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"], - use_category = ["dataplane", "controlplane"], - cpe = "N/A", + use_category = ["dataplane_core", "controlplane"], + last_updated = "2020-10-24", + cpe = "cpe:2.3:a:google:protobuf:*", ), grpc_httpjson_transcoding = dict( project_name = "grpc-httpjson-transcoding", + project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC", project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding", - # 2020-03-02 version = "faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6", sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5", strip_prefix = "grpc-httpjson-transcoding-{version}", urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = ["envoy.filters.http.grpc_json_transcoder"], + last_updated = "2020-03-02", cpe = "N/A", ), io_bazel_rules_go = dict( project_name = "Go rules for Bazel", + project_desc = "Bazel rules for the Go language", project_url = "https://github.com/bazelbuild/rules_go", version = "0.23.7", sha256 = "0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616", urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"], - use_category = ["build"], + use_category = ["build", "api"], + last_updated = "2020-08-06", + implied_untracked_deps = [ + "com_github_golang_protobuf", + "io_bazel_rules_nogo", + "org_golang_google_protobuf", + "org_golang_x_tools", + ], ), rules_cc = dict( project_name = "C++ rules for Bazel", + project_desc = "Bazel rules for the C++ language", project_url = "https://github.com/bazelbuild/rules_cc", - # 2020-05-13 # TODO(lizan): pin to a point releases when there's a released version. 
version = "818289e5613731ae410efb54218a4077fb9dbb03", sha256 = "9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0", strip_prefix = "rules_cc-{version}", urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"], + last_updated = "2020-05-13", use_category = ["build"], ), rules_foreign_cc = dict( project_name = "Rules for using foreign build systems in Bazel", + project_desc = "Rules for using foreign build systems in Bazel", project_url = "https://github.com/bazelbuild/rules_foreign_cc", - # 2020-08-21 version = "594bf4d7731e606a705f3ad787dd0a70c5a28b30", sha256 = "2b1cf88de0b6e0195f6571cfde3a5bd406d11b42117d6adef2395c9525a1902e", strip_prefix = "rules_foreign_cc-{version}", urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"], + last_updated = "2020-08-21", use_category = ["build"], ), rules_python = dict( project_name = "Python rules for Bazel", + project_desc = "Bazel rules for the Python language", project_url = "https://github.com/bazelbuild/rules_python", - # 2020-04-09 # TODO(htuch): revert back to a point releases when pip3_import appears. version = "a0fbf98d4e3a232144df4d0d80b577c7a693b570", sha256 = "76a8fd4e7eca2a3590f816958faa0d83c9b2ce9c32634c5c375bcccf161d3bb5", strip_prefix = "rules_python-{version}", urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"], + last_updated = "2020-04-09", use_category = ["build"], ), six = dict( project_name = "Six", + project_desc = "Python 2 and 3 compatibility library", project_url = "https://pypi.org/project/six", version = "1.12.0", sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73", urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-{version}.tar.gz"], + last_updated = "2019-11-17", use_category = ["other"], ), + org_llvm_llvm = dict( + project_name = "LLVM", + project_desc = "LLVM Compiler Infrastructure", + project_url = "https://llvm.org", + version = "10.0.0", + sha256 = "df83a44b3a9a71029049ec101fb0077ecbbdf5fe41e395215025779099a98fdf", + strip_prefix = "llvm-{version}.src", + urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/llvm-{version}.src.tar.xz"], + last_updated = "2020-10-09", + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + cpe = "cpe:2.3:a:llvm:*:*", + ), + com_github_wavm_wavm = dict( + project_name = "WAVM", + project_desc = "WebAssembly Virtual Machine", + project_url = "https://wavm.github.io", + version = "e8155f1f3af88b4d08802716a7054950ef18d827", + sha256 = "cc3fcaf05d57010c9cf8eb920234679dede6c780137b55001fd34e4d14806f7c", + strip_prefix = "WAVM-{version}", + urls = ["https://github.com/WAVM/WAVM/archive/{version}.tar.gz"], + last_updated = "2020-10-09", + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + cpe = "cpe:2.3:a:webassembly_virtual_machine_project:webassembly_virtual_machine:*", + ), io_opencensus_cpp = dict( project_name = "OpenCensus C++", + project_desc = "OpenCensus tracing library", project_url = "https://github.com/census-instrumentation/opencensus-cpp", - # 2020-06-01 - version = "7877337633466358ed680f9b26967da5b310d7aa", - 
sha256 = "12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212", + version = "ba631066779a534267fdb1321b19850eb2b0c000", + sha256 = "f239a40803f6e2e42b57c9e68771b0990c4ca8b2d76b440073cdf14f4211ad26", strip_prefix = "opencensus-cpp-{version}", urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz"], - use_category = ["observability"], + use_category = ["observability_ext"], + extensions = ["envoy.tracers.opencensus"], + last_updated = "2020-10-13", cpe = "N/A", ), + # This should be removed, see https://github.com/envoyproxy/envoy/issues/11816. com_github_curl = dict( project_name = "curl", + project_desc = "Library for transferring data with URLs", project_url = "https://curl.haxx.se", - version = "7.69.1", - sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", + version = "7.72.0", + sha256 = "d4d5899a3868fbb6ae1856c3e55a32ce35913de3956d1973caccd37bd0174fa2", strip_prefix = "curl-{version}", urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + use_category = ["dataplane_ext", "observability_ext"], + extensions = [ + "envoy.filters.http.aws_lambda", + "envoy.filters.http.aws_request_signing", + "envoy.grpc_credentials.aws_iam", + "envoy.tracers.opencensus", + ], + last_updated = "2020-08-19", + cpe = "cpe:2.3:a:haxx:curl:*", ), com_googlesource_chromium_v8 = dict( project_name = "V8", + project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++", project_url = "https://v8.dev", version = "8.5.210.20", # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. 
sha256 = "ef404643d7da6854b76b9fb9950a79a1acbd037b7a26f02c585ac379b0f7dee1", urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"], - use_category = ["dataplane"], - cpe = "N/A", + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + last_updated = "2020-08-31", + cpe = "cpe:2.3:a:google:v8:*", ), com_googlesource_quiche = dict( project_name = "QUICHE", + project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://quiche.googlesource.com/quiche", - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/96bd860bec207d4b722ab7f319fa47be129a85cd.tar.gz - version = "96bd860bec207d4b722ab7f319fa47be129a85cd", - sha256 = "d7129a2f41f2bd00a8a38b33f9b7b955d3e7de3dec20f69b70d7000d3a856360", + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/f555d99a084cdd086a349548c70fb558ac5847cf.tar.gz + version = "f555d99a084cdd086a349548c70fb558ac5847cf", + sha256 = "1833f08e7b0f18b49d7498b029b7f3e6559a82113ec82a98a9e945553756e351", urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = ["envoy.transport_sockets.quic"], + last_updated = "2020-09-18", cpe = "N/A", ), com_googlesource_googleurl = dict( project_name = "Chrome URL parsing library", + project_desc = "Chrome URL parsing library", project_url = "https://quiche.googlesource.com/googleurl", # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz. - # 2020-08-05 version = "ef0d23689e240e6c8de4c3a5296b209128c87373", sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176", urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = [], + last_updated = "2020-08-05", cpe = "N/A", ), com_google_cel_cpp = dict( - project_name = "Common Expression Language C++", + project_name = "Common Expression Language (CEL) C++ library", + project_desc = "Common Expression Language (CEL) C++ library", project_url = "https://opensource.google/projects/cel", - # 2020-07-14 version = "b9453a09b28a1531c4917e8792b3ea61f6b1a447", sha256 = "cad7d01139947d78e413d112cb8f7431fbb33cf66b0adf9c280824803fc2a72e", strip_prefix = "cel-cpp-{version}", urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.rbac", + "envoy.filters.http.wasm", + "envoy.filters.network.rbac", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + last_updated = "2020-07-14", cpe = "N/A", ), com_github_google_flatbuffers = dict( project_name = "FlatBuffers", + project_desc = "Cross platform serialization library architected for maximum memory efficiency", project_url = "https://github.com/google/flatbuffers", version = "a83caf5910644ba1c421c002ef68e42f21c15f9f", sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a", strip_prefix = "flatbuffers-{version}", urls = ["https://github.com/google/flatbuffers/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + 
extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + last_updated = "2020-07-29", cpe = "N/A", ), com_googlesource_code_re2 = dict( project_name = "RE2", + project_desc = "RE2, a regular expression library", project_url = "https://github.com/google/re2", - # 2020-07-06 version = "2020-07-06", sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f", strip_prefix = "re2-{version}", urls = ["https://github.com/google/re2/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["controlplane", "dataplane_core"], + last_updated = "2020-07-06", cpe = "N/A", ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but @@ -598,132 +750,163 @@ DEPENDENCY_REPOSITORIES_SPEC = dict( # Clang variant as we are not a Clang-LLVM only shop today. org_llvm_releases_compiler_rt = dict( project_name = "compiler-rt", + project_desc = "LLVM compiler runtime library", project_url = "https://compiler-rt.llvm.org", version = "10.0.0", sha256 = "6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75", # Only allow peeking at fuzzer related files for now. strip_prefix = "compiler-rt-{version}.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"], - use_category = ["test"], + last_updated = "2020-03-24", + use_category = ["test_only"], ), upb = dict( project_name = "upb", + project_desc = "A small protobuf implementation in C (gRPC dependency)", project_url = "https://github.com/protocolbuffers/upb", - # 2019-11-19 version = "8a3ae1ef3e3e3f26b45dec735c5776737fc7247f", sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47", strip_prefix = "upb-{version}", urls = ["https://github.com/protocolbuffers/upb/archive/{version}.tar.gz"], - use_category = ["dataplane", "controlplane"], + use_category = ["controlplane"], + last_updated = "2019-11-19", cpe = "N/A", ), kafka_source = dict( project_name = "Kafka (source)", + project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", version = "2.4.1", sha256 = "740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd", strip_prefix = "kafka-{version}/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/{version}.zip"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = ["envoy.filters.network.kafka_broker"], + last_updated = "2020-08-26", cpe = "cpe:2.3:a:apache:kafka:*", ), kafka_server_binary = dict( project_name = "Kafka (server binary)", + project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", version = "2.4.1", sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a", strip_prefix = "kafka_2.12-{version}", urls = ["https://mirrors.gigenet.com/apache/kafka/{version}/kafka_2.12-{version}.tgz"], - use_category = ["test"], + last_updated = "2020-08-26", + use_category = ["test_only"], ), kafka_python_client = dict( project_name = "Kafka (Python client)", + project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", version = "2.0.1", sha256 = "05f7c6eecb402f11fcb7e524c903f1ba1c38d3bdc9bf42bc8ec3cf7567b9f979", strip_prefix = "kafka-python-{version}", urls = 
["https://github.com/dpkp/kafka-python/archive/{version}.tar.gz"], - use_category = ["test"], - ), - org_unicode_icuuc = dict( - project_name = "International Components for Unicode", - project_url = "https://github.com/unicode-org/icu", - version = "67.1", - strip_prefix = "icu", - sha256 = "94a80cd6f251a53bd2a997f6f1b5ac6653fe791dfab66e1eb0227740fb86d5dc", - urls = ["https://github.com/unicode-org/icu/releases/download/release-{dash_version}/icu4c-{underscore_version}-src.tgz"], - use_category = ["dataplane"], - cpe = "cpe:2.3:a:icu-project:international_components_for_unicode", + last_updated = "2020-08-26", + use_category = ["test_only"], ), proxy_wasm_cpp_sdk = dict( project_name = "WebAssembly for Proxies (C++ SDK)", + project_desc = "WebAssembly for Proxies (C++ SDK)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk", - version = "5cec30b448975e1fd3f4117311f0957309df5cb0", - sha256 = "7d9e1f2e299215ed3e5fa8c8149740872b1100cfe3230fc639f967d9dcfd812e", + version = "7afb39d868a973caa6216a535c24e37fb666b6f3", + sha256 = "213d0b441bcc3df2c87933b24a593b5fd482fa8f4db158b707c60005b9e70040", strip_prefix = "proxy-wasm-cpp-sdk-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + last_updated = "2020-10-09", cpe = "N/A", ), proxy_wasm_cpp_host = dict( project_name = "WebAssembly for Proxies (C++ host implementation)", + project_desc = "WebAssembly for Proxies (C++ host implementation)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", - version = "928db4d79ec7b90aea3ad13ea5df36dc60c9c31d", - sha256 = "494d3f81156b92bac640c26000497fbf3a7b1bc35f9789594280450c6e5d8129", + version = "c5658d34979abece30882b1eeaa95b6ee965d825", + sha256 = "dc3a794424b7679c3dbcf23548e202aa01e9f9093791b95446b99e8524e03c4f", strip_prefix = "proxy-wasm-cpp-host-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], - use_category = ["dataplane"], + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + last_updated = "2020-10-16", cpe = "N/A", ), + # TODO: upgrade to the latest version (1.41 currently fails tests) emscripten_toolchain = dict( project_name = "Emscripten SDK", + project_desc = "Emscripten SDK (use by Wasm)", project_url = "https://github.com/emscripten-core/emsdk", - version = "dec8a63594753fe5f4ad3b47850bf64d66c14a4e", - sha256 = "2bdbee6947e32ad1e03cd075b48fda493ab16157b2b0225b445222cd528e1843", - patch_cmds = [ - "./emsdk install 1.39.19-upstream", - "./emsdk activate --embedded 1.39.19-upstream", - ], + version = "1.39.6", + sha256 = "4ac0f1f3de8b3f1373d435cd7e58bd94de4146e751f099732167749a229b443b", strip_prefix = "emsdk-{version}", urls = ["https://github.com/emscripten-core/emsdk/archive/{version}.tar.gz"], use_category = ["build"], + last_updated = "2020-10-09", + ), + io_bazel_rules_rust = dict( + project_name = "Bazel rust rules", + project_desc = "Bazel rust rules (used by Wasm)", + project_url = "https://github.com/bazelbuild/rules_rust", + version = "fb90a7484800157fbb8a5904fbeb608dc1effc0c", + sha256 = 
"cbb253b8c5ab1a3c1787790f900e7d6774e95ba038714fc0f710935e62f30f5f", + # Last commit where "out_binary = True" works. + # See: https://github.com/bazelbuild/rules_rust/issues/386 + strip_prefix = "rules_rust-{version}", + urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"], + use_category = ["test_only"], + last_updated = "2020-10-15", ), rules_antlr = dict( project_name = "ANTLR Rules for Bazel", + project_desc = "Bazel rules for ANTLR", project_url = "https://github.com/marcohu/rules_antlr", version = "3cc2f9502a54ceb7b79b37383316b23c4da66f9a", sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429", strip_prefix = "rules_antlr-{version}", urls = ["https://github.com/marcohu/rules_antlr/archive/{version}.tar.gz"], - use_category = ["build"], + # ANTLR has a runtime component, so is not purely build. + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + last_updated = "2020-07-29", + cpe = "N/A", ), antlr4_runtimes = dict( project_name = "ANTLR v4", + project_desc = "ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files", project_url = "https://github.com/antlr/antlr4", - version = "4.7.1", - sha256 = "4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574", + version = "4.7.2", + sha256 = "46f5e1af5f4bd28ade55cb632f9a069656b31fc8c2408f9aa045f9b5f5caad64", strip_prefix = "antlr4-{version}", urls = ["https://github.com/antlr/antlr4/archive/{version}.tar.gz"], - use_category = ["build"], + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", + "envoy.bootstrap.wasm", + "envoy.filters.http.wasm", + "envoy.filters.network.wasm", + "envoy.stat_sinks.wasm", + ], + last_updated = "2020-10-09", + cpe = "N/A", ), ) - -def _format_version(s, version): - return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_")) - -# Interpolate {version} in the above dependency specs. This code should be capable of running in both Python -# and Starlark. -def _dependency_repositories(): - locations = {} - for key, location in DEPENDENCY_REPOSITORIES_SPEC.items(): - mutable_location = dict(location) - locations[key] = mutable_location - - # Fixup with version information. - if "version" in location: - if "strip_prefix" in location: - mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"]) - mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]] - return locations - -DEPENDENCY_REPOSITORIES = _dependency_repositories() diff --git a/bazel/setup_clang.sh b/bazel/setup_clang.sh index 0ed987b9d4d0..d0e58478dc0a 100755 --- a/bazel/setup_clang.sh +++ b/bazel/setup_clang.sh @@ -9,9 +9,10 @@ if [[ ! -e "${LLVM_PREFIX}/bin/llvm-config" ]]; then exit 1 fi -export PATH="$(${LLVM_PREFIX}/bin/llvm-config --bindir):${PATH}" +PATH="$("${LLVM_PREFIX}"/bin/llvm-config --bindir):${PATH}" +export PATH -RT_LIBRARY_PATH="$(dirname $(find $(llvm-config --libdir) -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1))" +RT_LIBRARY_PATH="$(dirname "$(find "$(llvm-config --libdir)" -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1)")" echo "# Generated file, do not edit. 
If you want to disable clang, just delete this file. build:clang --action_env='PATH=${PATH}' @@ -28,5 +29,4 @@ build:clang-asan --linkopt=-fsanitize=vptr,function build:clang-asan --linkopt='-L${RT_LIBRARY_PATH}' build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a -" > ${BAZELRC_FILE} - +" > "${BAZELRC_FILE}" diff --git a/bazel/setup_local_tsan.sh b/bazel/setup_local_tsan.sh index c805704af9e8..a5bd56ceb812 100755 --- a/bazel/setup_local_tsan.sh +++ b/bazel/setup_local_tsan.sh @@ -15,5 +15,4 @@ build:local-tsan --config=libc++ build:local-tsan --config=clang-tsan build:local-tsan --linkopt=-L${LIBCXX_PREFIX}/lib build:local-tsan --linkopt=-Wl,-rpath,${LIBCXX_PREFIX}/lib -" > ${BAZELRC_FILE} - +" > "${BAZELRC_FILE}" diff --git a/bazel/sh_test_wrapper.sh b/bazel/sh_test_wrapper.sh index 9e2f1138dea7..8a290d6684d5 100755 --- a/bazel/sh_test_wrapper.sh +++ b/bazel/sh_test_wrapper.sh @@ -4,8 +4,8 @@ # TODO(lizan): remove when we have a solution for # https://github.com/bazelbuild/bazel/issues/3510 -cd $(dirname "$0") +cd "$(dirname "$0")" || exit 1 if [ $# -gt 0 ]; then - "./$@" + "./${1}" "${@:2}" fi diff --git a/bazel/test_for_benchmark_wrapper.sh b/bazel/test_for_benchmark_wrapper.sh index 37de6d0d0d81..0a53ca0ada3e 100755 --- a/bazel/test_for_benchmark_wrapper.sh +++ b/bazel/test_for_benchmark_wrapper.sh @@ -3,4 +3,4 @@ # Set the benchmark time to 0 to just verify that the benchmark runs to # completion. We're interacting with two different flag parsers, so the order # of flags and the -- matters. -"${TEST_SRCDIR}/envoy/$@" --skip_expensive_benchmarks -- --benchmark_min_time=0 +"${TEST_SRCDIR}/envoy/${1}" "${@:2}" --skip_expensive_benchmarks -- --benchmark_min_time=0 diff --git a/bazel/wasm/wasm.bzl b/bazel/wasm/wasm.bzl index 65fefcb49e90..a3d89067e496 100644 --- a/bazel/wasm/wasm.bzl +++ b/bazel/wasm/wasm.bzl @@ -1,6 +1,7 @@ +load("@io_bazel_rules_rust//rust:rust.bzl", "rust_binary") load("@rules_cc//cc:defs.bzl", "cc_binary") -def _wasm_transition_impl(settings, attr): +def _wasm_cc_transition_impl(settings, attr): return { "//command_line_option:cpu": "wasm32", "//command_line_option:crosstool_top": "@proxy_wasm_cpp_sdk//toolchain:emscripten", @@ -11,46 +12,89 @@ def _wasm_transition_impl(settings, attr): "//command_line_option:cxxopt": [], "//command_line_option:linkopt": [], "//command_line_option:collect_code_coverage": "false", + "//command_line_option:fission": "no", } -wasm_transition = transition( - implementation = _wasm_transition_impl, +def _wasm_rust_transition_impl(settings, attr): + return { + "//command_line_option:platforms": "@io_bazel_rules_rust//rust/platform:wasm", + } + +wasm_cc_transition = transition( + implementation = _wasm_cc_transition_impl, inputs = [], outputs = [ "//command_line_option:cpu", "//command_line_option:crosstool_top", "//command_line_option:copt", "//command_line_option:cxxopt", + "//command_line_option:fission", "//command_line_option:linkopt", "//command_line_option:collect_code_coverage", ], ) +wasm_rust_transition = transition( + implementation = _wasm_rust_transition_impl, + inputs = [], + outputs = [ + "//command_line_option:platforms", + ], +) + def _wasm_binary_impl(ctx): out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.run_shell( - command = 'cp "{}" "{}"'.format(ctx.files.binary[0].path, out.path), - outputs = [out], - inputs = ctx.files.binary, - ) + if ctx.attr.precompile: + ctx.actions.run( + executable = 
ctx.executable._compile_tool, + arguments = [ctx.files.binary[0].path, out.path], + outputs = [out], + inputs = ctx.files.binary, + ) + else: + ctx.actions.run( + executable = "cp", + arguments = [ctx.files.binary[0].path, out.path], + outputs = [out], + inputs = ctx.files.binary, + ) - return [DefaultInfo(runfiles = ctx.runfiles([out]))] + return [DefaultInfo(files = depset([out]), runfiles = ctx.runfiles([out]))] + +def _wasm_attrs(transition): + return { + "binary": attr.label(mandatory = True, cfg = transition), + "precompile": attr.bool(default = False), + # This is deliberately in target configuration to avoid compiling v8 twice. + "_compile_tool": attr.label(default = "@envoy//test/tools/wee8_compile:wee8_compile_tool", executable = True, cfg = "target"), + "_whitelist_function_transition": attr.label(default = "@bazel_tools//tools/whitelists/function_transition_whitelist"), + } # WASM binary rule implementation. # This copies the binary specified in binary attribute in WASM configuration to # target configuration, so a binary in non-WASM configuration can depend on them. -wasm_binary = rule( +wasm_cc_binary_rule = rule( implementation = _wasm_binary_impl, - attrs = { - "binary": attr.label(mandatory = True, cfg = wasm_transition), - "_whitelist_function_transition": attr.label(default = "@bazel_tools//tools/whitelists/function_transition_whitelist"), - }, + attrs = _wasm_attrs(wasm_cc_transition), +) + +wasm_rust_binary_rule = rule( + implementation = _wasm_binary_impl, + attrs = _wasm_attrs(wasm_rust_transition), ) -def wasm_cc_binary(name, **kwargs): +def wasm_cc_binary(name, tags = [], repository = "", **kwargs): wasm_name = "_wasm_" + name - kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib"]) - kwargs.setdefault("linkopts", ["--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js"]) + kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib", "@envoy//source/extensions/common/wasm/ext:jslib"]) + + if repository == "@envoy": + envoy_js = "--js-library external/envoy/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js" + else: + envoy_js = "--js-library source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js" + kwargs.setdefault("linkopts", [ + envoy_js, + "--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js", + ]) kwargs.setdefault("visibility", ["//visibility:public"]) cc_binary( name = wasm_name, @@ -61,7 +105,34 @@ def wasm_cc_binary(name, **kwargs): **kwargs ) - wasm_binary( + wasm_cc_binary_rule( + name = name, + binary = ":" + wasm_name, + tags = tags + ["manual"], + ) + +def envoy_wasm_cc_binary(name, tags = [], **kwargs): + wasm_cc_binary(name, tags, repository = "", **kwargs) + +def wasm_rust_binary(name, tags = [], **kwargs): + wasm_name = "_wasm_" + name.replace(".", "_") + kwargs.setdefault("visibility", ["//visibility:public"]) + + rust_binary( + name = wasm_name, + edition = "2018", + crate_type = "cdylib", + out_binary = True, + tags = ["manual"], + **kwargs + ) + + wasm_rust_binary_rule( name = name, + precompile = select({ + "@envoy//bazel:linux_x86_64": True, + "//conditions:default": False, + }), binary = ":" + wasm_name, + tags = tags + ["manual"], ) diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index eecb68be7d17..435250d08185 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -28,7 +28,7 @@ RUN mkdir -p /etc/envoy ARG ENVOY_BINARY_SUFFIX=_stripped ADD ${TARGETPLATFORM}/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ -ADD 
configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +ADD configs/google_com_proxy.yaml /etc/envoy/envoy.yaml EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index de13be43162d..b7bfba617f80 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -1,7 +1,7 @@ FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 RUN mkdir -p /etc/envoy -ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +ADD configs/google_com_proxy.yaml /etc/envoy/envoy.yaml RUN apk add --no-cache shadow su-exec \ && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows new file mode 100644 index 000000000000..4b0db0161531 --- /dev/null +++ b/ci/Dockerfile-envoy-windows @@ -0,0 +1,19 @@ +ARG BUILD_OS=mcr.microsoft.com/windows/servercore +ARG BUILD_TAG=ltsc2019 + +FROM $BUILD_OS:$BUILD_TAG + +RUN mkdir "C:\\Program\ Files\\envoy" +RUN setx path "%path%;c:\Program Files\envoy" +ADD ["windows/amd64/envoy.exe", "C:/Program Files/envoy/"] + +RUN mkdir "C:\\ProgramData\\envoy" +ADD ["configs/google_com_proxy.yaml", "C:/ProgramData/envoy/envoy.yaml"] +# Replace temp path with Windows temp path +RUN powershell -Command "(cat C:\ProgramData\envoy\envoy.yaml -raw) -replace '/tmp/','C:\Windows\Temp\' | Set-Content -Encoding Ascii C:\ProgramData\envoy\envoy.yaml" + +EXPOSE 10000 + +COPY ci/docker-entrypoint.bat C:/ +ENTRYPOINT ["C:/docker-entrypoint.bat"] +CMD ["envoy.exe", "-c", "C:\\ProgramData\\envoy\\envoy.yaml"] diff --git a/ci/README.md b/ci/README.md index 46b1e1c65dae..028e31263b30 100644 --- a/ci/README.md +++ b/ci/README.md @@ -5,7 +5,7 @@ and an image based on Windows2019. ## Ubuntu Envoy image -The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks, +The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8) repo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image. @@ -15,9 +15,9 @@ binary built from the latest tip of master that passed tests. ## Alpine Envoy image -Minimal images based on Alpine Linux allow for quicker deployment of Envoy. Two Alpine based images are built, -one with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols and one stripped of them (`envoyproxy/envoy-alpine`). -Both images are pushed with two different tags: `` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the +Minimal images based on Alpine Linux allow for quicker deployment of Envoy. The Alpine base image is only built with symbols stripped. +To get the binary with symbols, use the corresponding Ubuntu based debug image. The image is pushed with two different tags: +`` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the master commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of master that passed tests. ## Windows 2019 Envoy image @@ -50,20 +50,26 @@ run `./ci/do_ci.sh` as described below. 
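Stepping back to the `wasm_cc_binary`/`wasm_rust_binary` macros added in `bazel/wasm/wasm.bzl` above: a consuming BUILD file would use them roughly as below. Target and source names are invented for illustration, and the SDK dependency label is an assumption; the macros forward the remaining attributes to `cc_binary`/`rust_binary` and emit the `.wasm` output through the transition rules shown in the diff.

```python
# Hypothetical BUILD file usage of the new macros (names are illustrative only).
load("//bazel/wasm:wasm.bzl", "wasm_cc_binary", "wasm_rust_binary")

wasm_cc_binary(
    name = "example_cc_filter.wasm",
    srcs = ["example_cc_filter.cc"],  # hypothetical source file
    deps = ["@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics"],  # assumed SDK target label
)

wasm_rust_binary(
    name = "example_rust_filter.wasm",
    srcs = ["src/lib.rs"],  # hypothetical source file
)
```

On `linux_x86_64` the Rust variant is additionally pre-compiled via the `precompile` select shown in the macro definition above.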
# Building and running tests as a developer -## On Linux +The `./ci/run_envoy_docker.sh` script can be used to set up a Docker container on Linux and Windows +to build an Envoy static binary and run tests. -An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is: +The build image defaults to `envoyproxy/envoy-build-ubuntu` on Linux and +`envoyproxy/envoy-build-windows2019` on Windows, but you can choose build image by setting +`IMAGE_NAME` in the environment. + +In case your setup is behind a proxy, set `http_proxy` and `https_proxy` to the proxy servers before +invoking the build. ```bash -./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' +IMAGE_NAME=envoyproxy/envoy-build-ubuntu http_proxy=http://proxy.foo.com:8080 https_proxy=http://proxy.bar.com:8080 ./ci/run_envoy_docker.sh ' ``` -The build image defaults to `envoyproxy/envoy-build-ubuntu`, but you can choose build image by setting `IMAGE_NAME` in the environment. +## On Linux -In case your setup is behind a proxy, set `http_proxy` and `https_proxy` to the proxy servers before invoking the build. +An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is: ```bash -IMAGE_NAME=envoyproxy/envoy-build-ubuntu http_proxy=http://proxy.foo.com:8080 https_proxy=http://proxy.bar.com:8080 ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' +./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` The Envoy binary can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-fastbuild` on the Docker host. You @@ -139,15 +145,27 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: An example basic invocation to build the Envoy static binary and run tests is: ```bash -./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh' +./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh' +``` + +You can modify `./ci/windows_ci_steps.sh` to modify `bazel` arguments, tests to run, etc. as well +as set environment variables to adjust your container build environment as described above. + +The Envoy binary can be found in `C:\Windows\Temp\envoy-docker-build\envoy\source\exe` on the Docker host. You +can control this by setting `ENVOY_DOCKER_BUILD_DIR` in the environment, e.g. to +generate the binary in `C:\Users\foo\build\envoy\source\exe` you can run: + +```bash +ENVOY_DOCKER_BUILD_DIR="C:\Users\foo\build" ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` -You can modify `./ci/windows_ci_steps.sh` to modify `bazel` arguments, tests to run, etc. +Note the quotations around the `ENVOY_DOCKER_BUILD_DIR` value to preserve the backslashes in the +path. If you would like to run an interactive session to keep the build container running (to persist your local build environment), run: ```bash -./ci/run_envoy_docker_windows.sh 'bash' +./ci/run_envoy_docker.sh 'bash' ``` From an interactive session, you can invoke `bazel` manually or use the `./ci/windows_ci_steps.sh` script to build and run tests. @@ -171,10 +189,10 @@ This build the Ubuntu based `envoyproxy/envoy-build-ubuntu` image, and the final # macOS Build Flow -The macOS CI build is part of the [CircleCI](https://circleci.com/gh/envoyproxy/envoy) workflow. +The macOS CI build is part of the [Azure Pipelines](https://dev.azure.com/cncf/envoy/_build) workflow. Dependencies are installed by the `ci/mac_ci_setup.sh` script, via [Homebrew](https://brew.sh), -which is pre-installed on the CircleCI macOS image. The dependencies are cached are re-installed -on every build. 
The `ci/mac_ci_steps.sh` script executes the specific commands that +which is pre-installed on the [Azure Pipelines macOS image](https://github.com/actions/virtual-environments/blob/main/images/macos/macos-10.15-Readme.md). +The dependencies are cached and re-installed on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that build and test Envoy. Note that the full version of Xcode (not just Command Line Tools) is required. # Coverity Scan Build Flow diff --git a/ci/api_mirror.sh b/ci/api_mirror.sh index 077cdd1d3cfe..03e8ab85d80c 100755 --- a/ci/api_mirror.sh +++ b/ci/api_mirror.sh @@ -3,16 +3,15 @@ set -e CHECKOUT_DIR=../data-plane-api +MAIN_BRANCH="refs/heads/master" +API_MAIN_BRANCH="master" -if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] -then +if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then echo "Cloning..." - git clone git@github.com:envoyproxy/data-plane-api "$CHECKOUT_DIR" + git clone git@github.com:envoyproxy/data-plane-api "$CHECKOUT_DIR" -b "${API_MAIN_BRANCH}" - git -C "$CHECKOUT_DIR" config user.name "data-plane-api(CircleCI)" + git -C "$CHECKOUT_DIR" config user.name "data-plane-api(Azure Pipelines)" git -C "$CHECKOUT_DIR" config user.email data-plane-api@users.noreply.github.com - git -C "$CHECKOUT_DIR" fetch - git -C "$CHECKOUT_DIR" checkout -B master origin/master # Determine last envoyproxy/envoy SHA in envoyproxy/data-plane-api MIRROR_MSG="Mirrored from https://github.com/envoyproxy/envoy" @@ -40,6 +39,6 @@ then done echo "Pushing..." - git -C "$CHECKOUT_DIR" push origin master + git -C "$CHECKOUT_DIR" push origin "${API_MAIN_BRANCH}" echo "Done" fi diff --git a/ci/build_setup.ps1 b/ci/build_setup.ps1 deleted file mode 100755 index 9d64fff8f1ca..000000000000 --- a/ci/build_setup.ps1 +++ /dev/null @@ -1,21 +0,0 @@ -$ErrorActionPreference = "Stop"; -trap { $host.SetShouldExit(1) } - -if ("$env:NUM_CPUS" -eq "") { - $env:NUM_CPUS = (Get-WmiObject -class Win32_computersystem).NumberOfLogicalProcessors -} - -if ("$env:ENVOY_BAZEL_ROOT" -eq "") { - Write-Host "ENVOY_BAZEL_ROOT must be set!" 
- throw -} - -mkdir -force "$env:ENVOY_BAZEL_ROOT" > $nul - -$env:ENVOY_SRCDIR = [System.IO.Path]::GetFullPath("$PSScriptRoot\..") - -echo "ENVOY_BAZEL_ROOT: $env:ENVOY_BAZEL_ROOT" -echo "ENVOY_SRCDIR: $env:ENVOY_SRCDIR" - -$env:BAZEL_BASE_OPTIONS="--output_base=$env:ENVOY_BAZEL_ROOT" -$env:BAZEL_BUILD_OPTIONS="--config=msvc-cl --features=compiler_param_file --strategy=Genrule=standalone --spawn_strategy=standalone --verbose_failures --jobs=$env:NUM_CPUS --show_task_finish --test_output=all $env:BAZEL_BUILD_EXTRA_OPTIONS $env:BAZEL_EXTRA_TEST_OPTIONS" diff --git a/ci/build_setup.sh b/ci/build_setup.sh index ab8705edccce..f9275c2543c8 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -6,17 +6,25 @@ set -e export PPROF_PATH=/thirdparty_build/bin/pprof -[ -z "${NUM_CPUS}" ] && NUM_CPUS=`grep -c ^processor /proc/cpuinfo` +[ -z "${NUM_CPUS}" ] && NUM_CPUS=$(grep -c ^processor /proc/cpuinfo) [ -z "${ENVOY_SRCDIR}" ] && export ENVOY_SRCDIR=/source [ -z "${ENVOY_BUILD_TARGET}" ] && export ENVOY_BUILD_TARGET=//source/exe:envoy-static [ -z "${ENVOY_BUILD_DEBUG_INFORMATION}" ] && export ENVOY_BUILD_DEBUG_INFORMATION=//source/exe:envoy-static.dwp -[ -z "${ENVOY_BUILD_ARCH}" ] && export ENVOY_BUILD_ARCH=$(uname -m) +[ -z "${ENVOY_BUILD_ARCH}" ] && { + ENVOY_BUILD_ARCH=$(uname -m) + export ENVOY_BUILD_ARCH +} + +read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}" +read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}" +read -ra BAZEL_OPTIONS <<< "${BAZEL_OPTIONS:-}" + echo "ENVOY_SRCDIR=${ENVOY_SRCDIR}" echo "ENVOY_BUILD_TARGET=${ENVOY_BUILD_TARGET}" echo "ENVOY_BUILD_ARCH=${ENVOY_BUILD_ARCH}" function setup_gcc_toolchain() { - if [[ ! -z "${ENVOY_STDLIB}" && "${ENVOY_STDLIB}" != "libstdc++" ]]; then + if [[ -n "${ENVOY_STDLIB}" && "${ENVOY_STDLIB}" != "libstdc++" ]]; then echo "gcc toolchain doesn't support ${ENVOY_STDLIB}." 
exit 1 fi @@ -26,7 +34,7 @@ function setup_gcc_toolchain() { export BAZEL_COMPILER=gcc echo "$CC/$CXX toolchain configured" else - export BAZEL_BUILD_OPTIONS="--config=remote-gcc ${BAZEL_BUILD_OPTIONS}" + BAZEL_BUILD_OPTIONS=("--config=remote-gcc" "${BAZEL_BUILD_OPTIONS[@]}") fi } @@ -34,15 +42,15 @@ function setup_clang_toolchain() { ENVOY_STDLIB="${ENVOY_STDLIB:-libc++}" if [[ -z "${ENVOY_RBE}" ]]; then if [[ "${ENVOY_STDLIB}" == "libc++" ]]; then - export BAZEL_BUILD_OPTIONS="--config=libc++ ${BAZEL_BUILD_OPTIONS}" + BAZEL_BUILD_OPTIONS=("--config=libc++" "${BAZEL_BUILD_OPTIONS[@]}") else - export BAZEL_BUILD_OPTIONS="--config=clang ${BAZEL_BUILD_OPTIONS}" + BAZEL_BUILD_OPTIONS=("--config=clang" "${BAZEL_BUILD_OPTIONS[@]}") fi else if [[ "${ENVOY_STDLIB}" == "libc++" ]]; then - export BAZEL_BUILD_OPTIONS="--config=remote-clang-libc++ ${BAZEL_BUILD_OPTIONS}" + BAZEL_BUILD_OPTIONS=("--config=remote-clang-libc++" "${BAZEL_BUILD_OPTIONS[@]}") else - export BAZEL_BUILD_OPTIONS="--config=remote-clang ${BAZEL_BUILD_OPTIONS}" + BAZEL_BUILD_OPTIONS=("--config=remote-clang" "${BAZEL_BUILD_OPTIONS[@]}") fi fi echo "clang toolchain with ${ENVOY_STDLIB} configured" @@ -61,7 +69,7 @@ export PATH=/opt/llvm/bin:${PATH} export CLANG_FORMAT="${CLANG_FORMAT:-clang-format}" if [[ -f "/etc/redhat-release" ]]; then - export BAZEL_BUILD_EXTRA_OPTIONS+="--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1" + BAZEL_BUILD_EXTRA_OPTIONS+=("--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1") fi function cleanup() { @@ -76,16 +84,28 @@ trap cleanup EXIT export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}" "$(dirname "$0")"/../bazel/setup_clang.sh "${LLVM_ROOT}" -[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results" +[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=("--nocache_test_results") -export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS}" +# TODO(phlax): deprecate/remove this - i believe it was made redundant here: +# https://github.com/envoyproxy/envoy/commit/3ebedeb708a23062332a6fcdf33b462b7070adba#diff-2fa22a1337effee365a51e6844be0ab3 +export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS[*]}" # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks # to save disk space. -export BAZEL_BUILD_OPTIONS=" ${BAZEL_OPTIONS} --verbose_failures --show_task_finish --experimental_generate_json_trace_profile \ - --test_output=errors --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ - ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" - -[[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --flaky_test_attempts=2 --test_env=HEAPCHECK=" +BAZEL_BUILD_OPTIONS=( + "${BAZEL_OPTIONS[@]}" + "--verbose_failures" + "--show_task_finish" + "--experimental_generate_json_trace_profile" + "--test_output=errors" + "--repository_cache=${BUILD_DIR}/repository_cache" + "--experimental_repository_cache_hardlinks" + "${BAZEL_BUILD_EXTRA_OPTIONS[@]}" + "${BAZEL_EXTRA_TEST_OPTIONS[@]}") + +[[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]] && BAZEL_BUILD_OPTIONS+=( + "--define" "wasm=disabled" + "--flaky_test_attempts=2" + "--test_env=HEAPCHECK=") [[ "${BAZEL_EXPUNGE}" == "1" ]] && bazel clean --expunge @@ -119,6 +139,7 @@ export BUILDOZER_BIN="${BUILDOZER_BIN:-/usr/local/bin/buildozer}" # source tree is different than the current workspace, the setup step is # skipped. 
if [[ "$1" != "-nofetch" && "${ENVOY_SRCDIR}" == "$(bazel info workspace)" ]]; then + # shellcheck source=ci/filter_example_setup.sh . "$(dirname "$0")"/filter_example_setup.sh else echo "Skip setting up Envoy Filter Example." diff --git a/ci/do_ci.ps1 b/ci/do_ci.ps1 deleted file mode 100755 index 86f98f74e49e..000000000000 --- a/ci/do_ci.ps1 +++ /dev/null @@ -1,69 +0,0 @@ -$ErrorActionPreference = "Stop"; -trap { $host.SetShouldExit(1) } - -. "$PSScriptRoot\build_setup.ps1" -Write-Host "building using $env:NUM_CPUS CPUs" - -function bazel_binary_build($type) { - echo "Building..." - bazel $env:BAZEL_BASE_OPTIONS.Split(" ") build $env:BAZEL_BUILD_OPTIONS.Split(" ") -c $type "//source/exe:envoy-static" - $exit = $LASTEXITCODE - if ($exit -ne 0) { - exit $exit - } -} - -function bazel_test($type, $test) { - if ($test) { - echo "running windows tests $test" - bazel $env:BAZEL_BASE_OPTIONS.Split(" ") test $env:BAZEL_BUILD_OPTIONS.Split(" ") -c $type --build_tests_only $test - } else { - echo "running all windows tests" - bazel $env:BAZEL_BASE_OPTIONS.Split(" ") test $env:BAZEL_BUILD_OPTIONS.Split(" ") -c $type "//test/..." --test_tag_filters=-skip_on_windows --build_tests_only --test_summary=terse --test_output=errors - } - exit $LASTEXITCODE -} - -$action, $test = $args - -switch ($action) { - "bazel.release" { - echo "bazel release build with tests..." - bazel_binary_build "opt" - bazel_test "opt" $test - } - "bazel.release.server_only" { - echo "bazel release build..." - bazel_binary_build "opt" - } - "bazel.release.test_only" { - echo "bazel release build with tests..." - bazel_test "opt" $test - } - "bazel.debug" { - echo "bazel debug build with tests..." - bazel_binary_build "dbg" - bazel_test "dbg" $test - } - "bazel.debug.server_only" { - echo "bazel debug build..." - bazel_binary_build "dbg" - } - "bazel.debug.test_only" { - echo "bazel debug build with tests..." - bazel_test "dbg" $test - } - "bazel.dev" { - echo "bazel fastbuild build with tests..." - bazel_binary_build "fastbuild" - bazel_test "fastbuild" $test - } - "bazel.dev.test_only" { - echo "bazel fastbuild build with tests..." - bazel_test "fastbuild" $test - } - default { - echo "unknown action: $action" - exit 1 - } -} diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 2f5f183ea937..c9c268e70e04 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -14,7 +14,9 @@ fi SRCDIR="${PWD}" NO_BUILD_SETUP="${NO_BUILD_SETUP:-}" if [[ -z "$NO_BUILD_SETUP" ]]; then + # shellcheck source=ci/setup_cache.sh . "$(dirname "$0")"/setup_cache.sh + # shellcheck source=ci/build_setup.sh . 
"$(dirname "$0")"/build_setup.sh $build_setup_args fi cd "${SRCDIR}" @@ -38,21 +40,21 @@ function collect_build_profile() { } function bazel_with_collection() { + local failed_logs declare -r BAZEL_OUTPUT="${ENVOY_SRCDIR}"/bazel.output.txt - bazel $* | tee "${BAZEL_OUTPUT}" + bazel "$@" | tee "${BAZEL_OUTPUT}" declare BAZEL_STATUS="${PIPESTATUS[0]}" if [ "${BAZEL_STATUS}" != "0" ] then - declare -r FAILED_TEST_LOGS="$(grep " /build.*test.log" "${BAZEL_OUTPUT}" | sed -e 's/ \/build.*\/testlogs\/\(.*\)/\1/')" pushd bazel-testlogs - for f in ${FAILED_TEST_LOGS} - do - cp --parents -f $f "${ENVOY_FAILED_TEST_LOGS}" - done + failed_logs=$(grep " /build.*test.log" "${BAZEL_OUTPUT}" | sed -e 's/ \/build.*\/testlogs\/\(.*\)/\1/') + while read -r f; do + cp --parents -f "$f" "${ENVOY_FAILED_TEST_LOGS}" + done <<< "$failed_logs" popd exit "${BAZEL_STATUS}" fi - collect_build_profile $1 + collect_build_profile "$1" run_process_test_result } @@ -112,9 +114,9 @@ function bazel_binary_build() { ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') # This is a workaround for https://github.com/bazelbuild/bazel/issues/11834 - [[ ! -z "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"* + [[ -n "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"* - bazel build ${BAZEL_BUILD_OPTIONS} -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS} + bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS} collect_build_profile "${BINARY_TYPE}"_build # Copy the built envoy binary somewhere that we can access outside of the @@ -124,7 +126,7 @@ function bazel_binary_build() { if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then # Generate dwp file for debugging since we used split DWARF to reduce binary # size - bazel build ${BAZEL_BUILD_OPTIONS} -c "${COMPILE_TYPE}" "${ENVOY_BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS} + bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS} # Copy the debug information cp_debug_info_for_outside_access envoy fi @@ -142,12 +144,12 @@ CI_TARGET=$1 shift if [[ $# -ge 1 ]]; then - COVERAGE_TEST_TARGETS=$* - TEST_TARGETS="$COVERAGE_TEST_TARGETS" + COVERAGE_TEST_TARGETS=("$@") + TEST_TARGETS=("$@") else # Coverage test will add QUICHE tests by itself. - COVERAGE_TEST_TARGETS=//test/... - TEST_TARGETS="${COVERAGE_TEST_TARGETS} @com_googlesource_quiche//:ci_tests" + COVERAGE_TEST_TARGETS=("//test/...") + TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "@com_googlesource_quiche//:ci_tests") fi if [[ "$CI_TARGET" == "bazel.release" ]]; then @@ -157,11 +159,11 @@ if [[ "$CI_TARGET" == "bazel.release" ]]; then # toolchain is kept consistent. This ifdef is checked in # test/common/stats/stat_test_utility.cc when computing # Stats::TestUtil::MemoryTest::mode(). - [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" + [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]] && BAZEL_BUILD_OPTIONS+=("--test_env=ENVOY_MEMORY_TEST_EXACT=true") setup_clang_toolchain - echo "Testing ${TEST_TARGETS} with options: ${BAZEL_BUILD_OPTIONS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS} + echo "Testing ${TEST_TARGETS[*]} with options: ${BAZEL_BUILD_OPTIONS[*]}" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c opt "${TEST_TARGETS[@]}" echo "bazel release build with tests..." 
bazel_binary_build release @@ -178,26 +180,26 @@ elif [[ "$CI_TARGET" == "bazel.sizeopt.server_only" ]]; then exit 0 elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then setup_clang_toolchain - echo "Testing ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=sizeopt ${TEST_TARGETS} + echo "Testing ${TEST_TARGETS[*]}" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" --config=sizeopt "${TEST_TARGETS[@]}" echo "bazel size optimized build with tests..." bazel_binary_build sizeopt exit 0 elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" + BAZEL_BUILD_OPTIONS+=("--test_env=HEAPCHECK=") setup_gcc_toolchain - echo "Testing ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS} + echo "Testing ${TEST_TARGETS[*]}" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild "${TEST_TARGETS[@]}" echo "bazel release build with gcc..." - bazel_binary_build release + bazel_binary_build fastbuild exit 0 elif [[ "$CI_TARGET" == "bazel.debug" ]]; then setup_clang_toolchain - echo "Testing ${TEST_TARGETS}" - bazel test ${BAZEL_BUILD_OPTIONS} -c dbg ${TEST_TARGETS} + echo "Testing ${TEST_TARGETS[*]}" + bazel test "${BAZEL_BUILD_OPTIONS[@]}" -c dbg "${TEST_TARGETS[@]}" echo "bazel debug build with tests..." bazel_binary_build debug @@ -209,36 +211,38 @@ elif [[ "$CI_TARGET" == "bazel.debug.server_only" ]]; then exit 0 elif [[ "$CI_TARGET" == "bazel.asan" ]]; then setup_clang_toolchain - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-asan --build_tests_only" + BAZEL_BUILD_OPTIONS+=(-c opt --copt -g "--config=clang-asan" "--build_tests_only") echo "bazel ASAN/UBSAN debug build with tests" - echo "Building and testing envoy tests ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS} + echo "Building and testing envoy tests ${TEST_TARGETS[*]}" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}" if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then echo "Building and testing envoy-filter-example tests..." pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${ENVOY_FILTER_EXAMPLE_TESTS} + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${ENVOY_FILTER_EXAMPLE_TESTS[@]}" popd fi - if [ "${CI_SKIP_INTEGRATION_TEST_TRAFFIC_TAPPING}" != "1" ] ; then + # TODO(mattklein123): This part of the test is now flaky in CI and it's unclear why, possibly + # due to sandboxing issue. Debug and enable it again. + # if [ "${CI_SKIP_INTEGRATION_TEST_TRAFFIC_TAPPING}" != "1" ] ; then # Also validate that integration test traffic tapping (useful when debugging etc.) # works. This requires that we set TAP_PATH. We do this under bazel.asan to # ensure a debug build in CI. - echo "Validating integration test traffic tapping..." - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} \ - --run_under=@envoy//bazel/test:verify_tap_test.sh \ - //test/extensions/transport_sockets/tls/integration:ssl_integration_test - fi + # echo "Validating integration test traffic tapping..." 
+ # bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" \ + # --run_under=@envoy//bazel/test:verify_tap_test.sh \ + # //test/extensions/transport_sockets/tls/integration:ssl_integration_test + # fi exit 0 elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then setup_clang_toolchain echo "bazel TSAN debug build with tests" - echo "Building and testing envoy tests ${TEST_TARGETS}" - bazel_with_collection test --config=rbe-toolchain-tsan ${BAZEL_BUILD_OPTIONS} -c dbg --build_tests_only ${TEST_TARGETS} + echo "Building and testing envoy tests ${TEST_TARGETS[*]}" + bazel_with_collection test --config=rbe-toolchain-tsan "${BAZEL_BUILD_OPTIONS[@]}" -c dbg --build_tests_only "${TEST_TARGETS[@]}" if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then echo "Building and testing envoy-filter-example tests..." pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan ${ENVOY_FILTER_EXAMPLE_TESTS} + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c dbg --config=clang-tsan "${ENVOY_FILTER_EXAMPLE_TESTS[@]}" popd fi exit 0 @@ -246,10 +250,10 @@ elif [[ "$CI_TARGET" == "bazel.msan" ]]; then ENVOY_STDLIB=libc++ setup_clang_toolchain # rbe-toolchain-msan must comes as first to win library link order. - BAZEL_BUILD_OPTIONS="--config=rbe-toolchain-msan ${BAZEL_BUILD_OPTIONS} -c dbg --build_tests_only" + BAZEL_BUILD_OPTIONS=("--config=rbe-toolchain-msan" "${BAZEL_BUILD_OPTIONS[@]}" "-c dbg" "--build_tests_only") echo "bazel MSAN debug build with tests" - echo "Building and testing envoy tests ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS} + echo "Building and testing envoy tests ${TEST_TARGETS[*]}" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}" exit 0 elif [[ "$CI_TARGET" == "bazel.dev" ]]; then setup_clang_toolchain @@ -258,8 +262,8 @@ elif [[ "$CI_TARGET" == "bazel.dev" ]]; then echo "Building..." bazel_binary_build fastbuild - echo "Building and testing ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild ${TEST_TARGETS} + echo "Building and testing ${TEST_TARGETS[*]}" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild "${TEST_TARGETS[@]}" # TODO(foreseeable): consolidate this and the API tool tests in a dedicated target. bazel_with_collection //tools/envoy_headersplit:headersplit_test --spawn_strategy=local bazel_with_collection //tools/envoy_headersplit:replace_includes_test --spawn_strategy=local @@ -268,72 +272,77 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then # Right now, none of the available compile-time options conflict with each other. If this # changes, this build type may need to be broken up. # TODO(mpwarres): remove quiche=enabled once QUICHE is built by default. 
- COMPILE_TIME_OPTIONS="\ - --define signal_trace=disabled \ - --define hot_restart=disabled \ - --define google_grpc=disabled \ - --define boringssl=fips \ - --define log_debug_assert_in_release=enabled \ - --define quiche=enabled \ - --define path_normalization_by_default=true \ - --define deprecated_features=disabled \ - --define use_new_codecs_in_integration_tests=true \ - --define zlib=ng \ - " + COMPILE_TIME_OPTIONS=( + "--define" "signal_trace=disabled" + "--define" "hot_restart=disabled" + "--define" "google_grpc=disabled" + "--define" "boringssl=fips" + "--define" "log_debug_assert_in_release=enabled" + "--define" "quiche=enabled" + "--define" "wasm=disabled" + "--define" "path_normalization_by_default=true" + "--define" "deprecated_features=disabled" + "--define" "use_new_codecs_in_integration_tests=false" + "--define" "tcmalloc=gperftools" + "--define" "zlib=ng") + ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" setup_clang_toolchain # This doesn't go into CI but is available for developer convenience. echo "bazel with different compiletime options build with tests..." - if [[ "${TEST_TARGETS}" == "//test/..." ]]; then + if [[ "${TEST_TARGETS[*]}" == "//test/..." ]]; then cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" - TEST_TARGETS="@envoy//test/..." + TEST_TARGETS=("@envoy//test/...") fi # Building all the dependencies from scratch to link them against libc++. - echo "Building and testing ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only + echo "Building and testing ${TEST_TARGETS[*]}" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg "${TEST_TARGETS[@]}" --test_tag_filters=-nofips --build_tests_only # Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in # integration tests with asan. - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only # "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only # these tests under "-c opt" to save time in CI. - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test echo "Building binary..." - bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips + bazel build "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips collect_build_profile build exit 0 elif [[ "$CI_TARGET" == "bazel.api" ]]; then + # Use libstdc++ because the API booster links to prebuilt libclang*/libLLVM* installed in /opt/llvm/lib, + # which is built with libstdc++. Using libstdc++ for whole of the API CI job to avoid unnecessary rebuild. + ENVOY_STDLIB="libstdc++" setup_clang_toolchain + export LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config echo "Validating API structure..." ./tools/api/validate_structure.py + echo "Testing API and API Boosting..." 
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ + @envoy_api_canonical//tools:tap2pcap_test @envoy_dev//clang_tools/api_booster/... echo "Building API..." - bazel build ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_api_canonical//envoy/... - echo "Testing API..." - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ - @envoy_api_canonical//tools:tap2pcap_test - echo "Testing API boosting (unit tests)..." - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_dev//clang_tools/api_booster/... + bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//envoy/... echo "Testing API boosting (golden C++ tests)..." # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. - LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config BAZEL_BUILD_OPTIONS="--config=clang" python3.8 ./tools/api_boost/api_boost_test.py + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" python3.8 ./tools/api_boost/api_boost_test.py exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain - echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS}" + echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS[*]}" [[ "$CI_TARGET" == "bazel.fuzz_coverage" ]] && export FUZZ_COVERAGE=true - test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS} + # We use custom BAZEL_BUILD_OPTIONS here to cover profiler's code. + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]} --define tcmalloc=gperftools" test/run_envoy_bazel_coverage.sh "${COVERAGE_TEST_TARGETS[@]}" collect_build_profile coverage exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then # clang-tidy will warn on standard library issues with libc++ ENVOY_STDLIB="libstdc++" setup_clang_toolchain - NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh "$@" + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh "$@" exit 0 elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then # Coverity Scan version 2017.07 fails to analyze the entirely of the Envoy @@ -343,7 +352,7 @@ elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then setup_gcc_toolchain echo "bazel Coverity Scan build" echo "Building..." - /build/cov-analysis/bin/cov-build --dir "${ENVOY_BUILD_DIR}"/cov-int bazel build --action_env=LD_PRELOAD ${BAZEL_BUILD_OPTIONS} \ + /build/cov-analysis/bin/cov-build --dir "${ENVOY_BUILD_DIR}"/cov-int bazel build --action_env=LD_PRELOAD "${BAZEL_BUILD_OPTIONS[@]}" \ -c opt "${ENVOY_BUILD_TARGET}" # tar up the coverity results tar czvf "${ENVOY_BUILD_DIR}"/envoy-coverity-output.tgz -C "${ENVOY_BUILD_DIR}" cov-int @@ -354,29 +363,31 @@ elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then exit 0 elif [[ "$CI_TARGET" == "bazel.fuzz" ]]; then setup_clang_toolchain - FUZZ_TEST_TARGETS="$(bazel query "attr('tags','fuzzer',${TEST_TARGETS})")" - echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS}" + FUZZ_TEST_TARGETS=("$(bazel query "attr('tags','fuzzer',${TEST_TARGETS[*]})")") + echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS[*]}" echo "Building envoy fuzzers and executing 100 fuzz iterations..." 
- bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} --test_arg="-runs=10" + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" --config=asan-fuzzer "${FUZZ_TEST_TARGETS[@]}" --test_arg="-runs=10" exit 0 elif [[ "$CI_TARGET" == "fix_format" ]]; then # proto_format.sh needs to build protobuf. setup_clang_toolchain + echo "fix_format..." ./tools/code_format/check_format.py fix ./tools/code_format/format_python_tools.sh fix - ./tools/proto_format/proto_format.sh fix --test + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/proto_format/proto_format.sh fix --test exit 0 elif [[ "$CI_TARGET" == "check_format" ]]; then # proto_format.sh needs to build protobuf. setup_clang_toolchain + echo "check_format_test..." ./tools/code_format/check_format_test_helper.sh --log=WARN echo "check_format..." ./tools/code_format/check_shellcheck_format.sh ./tools/code_format/check_format.py check ./tools/code_format/format_python_tools.sh check - ./tools/proto_format/proto_format.sh check --test + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/proto_format/proto_format.sh check --test exit 0 elif [[ "$CI_TARGET" == "check_repositories" ]]; then echo "check_repositories..." @@ -400,13 +411,23 @@ elif [[ "$CI_TARGET" == "fix_spelling_pedantic" ]]; then exit 0 elif [[ "$CI_TARGET" == "docs" ]]; then echo "generating docs..." - docs/build.sh + # Validate dependency relationships between core/extensions and external deps. + tools/dependency/validate_test.py + tools/dependency/validate.py + # Validate the CVE scanner works. TODO(htuch): create a dedicated tools CI target. + python3.8 tools/dependency/cve_scan_test.py + # Build docs. + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" docs/build.sh exit 0 elif [[ "$CI_TARGET" == "verify_examples" ]]; then echo "verify examples..." docker load < "$ENVOY_DOCKER_BUILD_DIR/docker/envoy-docker-images.tar.xz" - images=($(docker image list --format "{{.Repository}}")) - tags=($(docker image list --format "{{.Tag}}")) + _images=$(docker image list --format "{{.Repository}}") + while read -r line; do images+=("$line"); done \ + <<< "$_images" + _tags=$(docker image list --format "{{.Tag}}") + while read -r line; do tags+=("$line"); done \ + <<< "$_tags" for i in "${!images[@]}"; do if [[ "${images[i]}" =~ "envoy" ]]; then docker tag "${images[$i]}:${tags[$i]}" "${images[$i]}:latest" @@ -417,6 +438,7 @@ elif [[ "$CI_TARGET" == "verify_examples" ]]; then sudo apt-get install -y -qq --no-install-recommends redis-tools export DOCKER_NO_PULL=1 umask 027 + chmod -R o-rwx examples/ ci/verify_examples.sh exit 0 else diff --git a/ci/do_circle_ci.sh b/ci/do_circle_ci.sh deleted file mode 100755 index 29469a24b814..000000000000 --- a/ci/do_circle_ci.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -set -e - -# Workaround for argument too long issue in protoc -ulimit -s 16384 - -# bazel uses jgit internally and the default circle-ci .gitconfig says to -# convert https://github.com to ssh://git@github.com, which jgit does not support. -if [[ -e "~/.gitconfig" ]]; then - mv ~/.gitconfig ~/.gitconfig_save -fi - -# Workaround for not using ci/run_envoy_docker.sh -# Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI -# Docker image gets confused as it has no passwd entry when running non-root -# unless we do this. 
-FAKE_HOME=/tmp/fake_home -mkdir -p "${FAKE_HOME}" -export HOME="${FAKE_HOME}" -export PYTHONUSERBASE="${FAKE_HOME}" -export USER=bazel - -export ENVOY_SRCDIR="$(pwd)" - -# xlarge resource_class. -# See note: https://circleci.com/docs/2.0/configuration-reference/#resource_class for why we -# hard code this (basically due to how docker works). -export NUM_CPUS=6 - -# CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only. -# IPv6 tests are run with Azure Pipelines. -export BAZEL_BUILD_EXTRA_OPTIONS+="--test_env=ENVOY_IP_TEST_VERSIONS=v4only --local_cpu_resources=${NUM_CPUS} \ - --action_env=HOME --action_env=PYTHONUSERBASE --test_env=HOME --test_env=PYTHONUSERBASE" - -function finish { - echo "disk space at end of build:" - df -h -} -trap finish EXIT - -echo "disk space at beginning of build:" -df -h - -ci/do_ci.sh $* diff --git a/ci/docker-entrypoint.bat b/ci/docker-entrypoint.bat new file mode 100644 index 000000000000..ed746d98da69 --- /dev/null +++ b/ci/docker-entrypoint.bat @@ -0,0 +1,21 @@ +@echo off +setlocal + +set CMD=%*% + +REM if the first argument look like a parameter (i.e. start with '-'), run Envoy +set first_arg=%1% +if "%first_arg:~0,1%" == "-" ( + set CMD=envoy.exe %CMD% +) + +if /i "%1" == "envoy" set is_envoy=1 +if /i "%1" == "envoy.exe" set is_envoy=1 +if defined is_envoy ( + REM set the log level if the $loglevel variable is set + if defined loglevel ( + set CMD=%CMD% --log-level %loglevel% + ) +) + +%CMD% diff --git a/ci/docker-entrypoint.sh b/ci/docker-entrypoint.sh index 677e617e9fce..4815acb1956a 100755 --- a/ci/docker-entrypoint.sh +++ b/ci/docker-entrypoint.sh @@ -1,6 +1,8 @@ #!/usr/bin/env sh set -e +loglevel="${loglevel:-}" + # if the first argument look like a parameter (i.e. start with '-'), run Envoy if [ "${1#-}" != "$1" ]; then set -- envoy "$@" diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index d4bb8e5e20e2..3bd584923bdf 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -4,6 +4,10 @@ # CI logs. 
set -e +function is_windows() { + [[ "$(uname -s)" == *NT* ]] +} + ENVOY_DOCKER_IMAGE_DIRECTORY="${ENVOY_DOCKER_IMAGE_DIRECTORY:-${BUILD_STAGINGDIRECTORY:-.}/build_images}" # Setting environments for buildx tools @@ -12,7 +16,7 @@ config_env() { docker run --rm --privileged multiarch/qemu-user-static --reset -p yes # Remove older build instance - docker buildx rm multi-builder | true + docker buildx rm multi-builder || : docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64 } @@ -20,10 +24,12 @@ build_platforms() { TYPE=$1 FILE_SUFFIX="${TYPE/-debug/}" - if [[ -z "${FILE_SUFFIX}" ]]; then - echo "linux/arm64,linux/amd64" + if is_windows; then + echo "windows/amd64" + elif [[ -z "${FILE_SUFFIX}" ]]; then + echo "linux/arm64,linux/amd64" else - echo "linux/amd64" + echo "linux/amd64" fi } @@ -31,56 +37,67 @@ build_args() { TYPE=$1 FILE_SUFFIX="${TYPE/-debug/}" - echo "-f ci/Dockerfile-envoy${FILE_SUFFIX}" - [[ "${TYPE}" == *-debug ]] && echo "--build-arg ENVOY_BINARY_SUFFIX=" - if [[ "${TYPE}" == "-google-vrp" ]]; then - echo "--build-arg ENVOY_VRP_BASE_IMAGE=${VRP_BASE_IMAGE}" + printf ' -f ci/Dockerfile-envoy%s' "${FILE_SUFFIX}" + if [[ "${TYPE}" == *-debug ]]; then + printf ' --build-arg ENVOY_BINARY_SUFFIX=' + elif [[ "${TYPE}" == "-google-vrp" ]]; then + printf ' --build-arg ENVOY_VRP_BASE_IMAGE=%s' "${VRP_BASE_IMAGE}" fi } use_builder() { - TYPE=$1 - if [[ "${TYPE}" == "-google-vrp" ]]; then - docker buildx use default - else - docker buildx use multi-builder + # BuildKit is not available for Windows images, skip this + if ! is_windows; then + TYPE=$1 + if [[ "${TYPE}" == "-google-vrp" ]]; then + docker buildx use default + else + docker buildx use multi-builder + fi fi } IMAGES_TO_SAVE=() build_images() { + local _args args=() TYPE=$1 BUILD_TAG=$2 use_builder "${TYPE}" - ARGS="$(build_args ${TYPE})" - PLATFORM="$(build_platforms ${TYPE})" + _args=$(build_args "${TYPE}") + read -ra args <<< "$_args" + PLATFORM="$(build_platforms "${TYPE}")" - docker buildx build --platform "${PLATFORM}" ${ARGS} -t "${BUILD_TAG}" . + docker "${BUILD_COMMAND[@]}" --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" . - PLATFORM="$(build_platforms ${TYPE} | tr ',' ' ')" - # docker buildx load cannot have multiple platform, load individually + PLATFORM="$(build_platforms "${TYPE}" | tr ',' ' ')" for ARCH in ${PLATFORM}; do - if [[ "${ARCH}" == "linux/amd64" ]]; then + if [[ "${ARCH}" == "linux/amd64" ]] || [[ "${ARCH}" == "windows/amd64" ]]; then IMAGE_TAG="${BUILD_TAG}" else IMAGE_TAG="${BUILD_TAG}-${ARCH/linux\//}" fi - docker buildx build --platform "${ARCH}" ${ARGS} -t "${IMAGE_TAG}" . --load IMAGES_TO_SAVE+=("${IMAGE_TAG}") + + # docker buildx load cannot have multiple platform, load individually + if ! is_windows; then + docker "${BUILD_COMMAND[@]}" --platform "${ARCH}" "${args[@]}" -t "${IMAGE_TAG}" . --load + fi done } push_images() { + local _args args=() TYPE=$1 BUILD_TAG=$2 use_builder "${TYPE}" - ARGS="$(build_args ${TYPE})" - PLATFORM="$(build_platforms ${TYPE})" + _args=$(build_args "${TYPE}") + read -ra args <<< "$_args" + PLATFORM="$(build_platforms "${TYPE}")" # docker buildx doesn't do push with default builder - docker buildx build --platform "${PLATFORM}" ${ARGS} -t ${BUILD_TAG} . --push || \ + docker "${BUILD_COMMAND[@]}" --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" . --push || \ docker push "${BUILD_TAG}" } @@ -90,7 +107,7 @@ RELEASE_TAG_REGEX="^refs/tags/v.*" # For master builds and release branch builds use the dev repo. 
Otherwise we assume it's a tag and # we push to the primary repo. -if [[ "${AZP_BRANCH}" =~ "${RELEASE_TAG_REGEX}" ]]; then +if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then IMAGE_POSTFIX="" IMAGE_NAME="${AZP_BRANCH/refs\/tags\//}" else @@ -101,14 +118,22 @@ fi # This prefix is altered for the private security images on setec builds. DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" -# "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency. -BUILD_TYPES=("" "-debug" "-alpine" "-alpine-debug" "-google-vrp") -# Configure docker-buildx tools -config_env +if is_windows; then + BUILD_TYPES=("-windows") + # BuildKit is not available for Windows images, use standard build command + BUILD_COMMAND=("build") +else + # "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency. + BUILD_TYPES=("" "-debug" "-alpine" "-google-vrp") -# VRP base image is only for amd64 -VRP_BASE_IMAGE="${DOCKER_IMAGE_PREFIX}${IMAGE_POSTFIX}:${IMAGE_NAME}" + # Configure docker-buildx tools + BUILD_COMMAND=("buildx" "build") + config_env + + # VRP base image is only for Linux amd64 + VRP_BASE_IMAGE="${DOCKER_IMAGE_PREFIX}${IMAGE_POSTFIX}:${IMAGE_NAME}" +fi # Test the docker build in all cases, but use a local tag that we will overwrite before push in the # cases where we do push. diff --git a/ci/docker_rebuild_google-vrp.sh b/ci/docker_rebuild_google-vrp.sh index 3a9bb5f711dc..4f3149e6732f 100755 --- a/ci/docker_rebuild_google-vrp.sh +++ b/ci/docker_rebuild_google-vrp.sh @@ -23,7 +23,8 @@ set -e # this local dep which is fairly stable. BASE_DOCKER_IMAGE="envoyproxy/envoy-dev:latest" -declare -r BUILD_DIR="$(mktemp -d)" +BUILD_DIR="$(mktemp -d)" +declare -r BUILD_DIR cp ci/Dockerfile-envoy-google-vrp "${BUILD_DIR}" declare -r DOCKER_BUILD_FILE="${BUILD_DIR}"/Dockerfile-envoy-google-vrp diff --git a/ci/envoy_build_sha.sh b/ci/envoy_build_sha.sh index 6ea4600faeef..e2923189e35e 100644 --- a/ci/envoy_build_sha.sh +++ b/ci/envoy_build_sha.sh @@ -1,2 +1,4 @@ -ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu $(dirname $0)/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq) +#!/bin/bash + +ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu "$(dirname "$0")"/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq) [[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".bazelrc envoyproxy/envoy-build-ubuntu hashes are inconsistent!" && exit 1) diff --git a/ci/filter_example_mirror.sh b/ci/filter_example_mirror.sh index 1d6d5ae05b23..8602b1677e4b 100755 --- a/ci/filter_example_mirror.sh +++ b/ci/filter_example_mirror.sh @@ -4,16 +4,15 @@ set -e ENVOY_SRCDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")/../" && pwd) CHECKOUT_DIR=../envoy-filter-example +MAIN_BRANCH="refs/heads/master" +FILTER_EXAMPLE_MAIN_BRANCH="master" -if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] -then +if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then echo "Cloning..." 
- git clone git@github.com:envoyproxy/envoy-filter-example "$CHECKOUT_DIR" + git clone git@github.com:envoyproxy/envoy-filter-example "$CHECKOUT_DIR" -b "${FILTER_EXAMPLE_MAIN_BRANCH}" - git -C "$CHECKOUT_DIR" config user.name "envoy-filter-example(CircleCI)" + git -C "$CHECKOUT_DIR" config user.name "envoy-filter-example(Azure Pipelines)" git -C "$CHECKOUT_DIR" config user.email envoy-filter-example@users.noreply.github.com - git -C "$CHECKOUT_DIR" fetch - git -C "$CHECKOUT_DIR" checkout -B master origin/master echo "Updating Submodule..." # Update submodule to latest Envoy SHA @@ -26,6 +25,6 @@ then echo "Committing, and Pushing..." git -C "$CHECKOUT_DIR" commit -a -m "Update Envoy submodule to $ENVOY_SHA" - git -C "$CHECKOUT_DIR" push origin master + git -C "$CHECKOUT_DIR" push origin "${FILTER_EXAMPLE_MAIN_BRANCH}" echo "Done" fi diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh index 4101c63445ee..774464f15a7c 100644 --- a/ci/filter_example_setup.sh +++ b/ci/filter_example_setup.sh @@ -5,10 +5,14 @@ set -e # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. -ENVOY_FILTER_EXAMPLE_GITSHA="493e2e5bee10bbed1c3c097e09d83d7f672a9f2e" +ENVOY_FILTER_EXAMPLE_GITSHA="bebd0b2422ea7739905f1793565681d7266491e6" ENVOY_FILTER_EXAMPLE_SRCDIR="${BUILD_DIR}/envoy-filter-example" -export ENVOY_FILTER_EXAMPLE_TESTS="//:echo2_integration_test //http-filter-example:http_filter_integration_test //:envoy_binary_test" +# shellcheck disable=SC2034 +ENVOY_FILTER_EXAMPLE_TESTS=( + "//:echo2_integration_test" + "//http-filter-example:http_filter_integration_test" + "//:envoy_binary_test") if [[ ! -d "${ENVOY_FILTER_EXAMPLE_SRCDIR}/.git" ]]; then rm -rf "${ENVOY_FILTER_EXAMPLE_SRCDIR}" @@ -23,4 +27,4 @@ ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_FILTER_EXAMPLE_SRCD cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ cp -f "$(bazel info workspace)"/*.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ -FILTER_WORKSPACE_SET=1 +export FILTER_WORKSPACE_SET=1 diff --git a/ci/flaky_test/requirements.txt b/ci/flaky_test/requirements.txt index 7d942590e612..3368c5b2bff0 100644 --- a/ci/flaky_test/requirements.txt +++ b/ci/flaky_test/requirements.txt @@ -1,4 +1,73 @@ -multidict -yarl -wheel==0.35.1 -slackclient==2.8.0 \ No newline at end of file +aiohttp==3.6.2 \ + --hash=sha256:1e984191d1ec186881ffaed4581092ba04f7c61582a177b187d3a2f07ed9719e \ + --hash=sha256:259ab809ff0727d0e834ac5e8a283dc5e3e0ecc30c4d80b3cd17a4139ce1f326 \ + --hash=sha256:2f4d1a4fdce595c947162333353d4a44952a724fba9ca3205a3df99a33d1307a \ + --hash=sha256:32e5f3b7e511aa850829fbe5aa32eb455e5534eaa4b1ce93231d00e2f76e5654 \ + --hash=sha256:344c780466b73095a72c616fac5ea9c4665add7fc129f285fbdbca3cccf4612a \ + --hash=sha256:460bd4237d2dbecc3b5ed57e122992f60188afe46e7319116da5eb8a9dfedba4 \ + --hash=sha256:4c6efd824d44ae697814a2a85604d8e992b875462c6655da161ff18fd4f29f17 \ + --hash=sha256:50aaad128e6ac62e7bf7bd1f0c0a24bc968a0c0590a726d5a955af193544bcec \ + --hash=sha256:6206a135d072f88da3e71cc501c59d5abffa9d0bb43269a6dcd28d66bfafdbdd \ + --hash=sha256:65f31b622af739a802ca6fd1a3076fd0ae523f8485c52924a89561ba10c49b48 \ + --hash=sha256:ae55bac364c405caa23a4f2d6cfecc6a0daada500274ffca4a9230e7129eac59 \ + --hash=sha256:b778ce0c909a2653741cb4b1ac7015b5c130ab9c897611df43ae6a58523cb965 +async-timeout==3.0.1 \ + --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ + --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 
+attrs==20.2.0 \ + --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \ + --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc +chardet==3.0.4 \ + --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ + --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 +idna==2.10 \ + --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ + --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 +idna_ssl==1.1.0 \ + --hash=sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c +multidict==4.7.6 \ + --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \ + --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \ + --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \ + --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \ + --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \ + --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \ + --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \ + --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \ + --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \ + --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \ + --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \ + --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \ + --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \ + --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \ + --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \ + --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \ + --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d +slackclient==2.9.1 \ + --hash=sha256:214edd4a494cc74353c8084ec184ff97a116d4b12cde287f805a9af948ef39ae \ + --hash=sha256:3a3e84fd4f13d9715740c13ce6c3c25b970147aeeeec22ef137d796124dfcf08 +typing-extensions==3.7.4.3 \ + --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \ + --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \ + --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f +wheel==0.35.1 \ + --hash=sha256:497add53525d16c173c2c1c733b8f655510e909ea78cc0e29d374243544b77a2 \ + --hash=sha256:99a22d87add3f634ff917310a3d87e499f19e663413a52eb9232c447aa646c9f +yarl==1.6.0 \ + --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \ + --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \ + --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \ + --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \ + --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \ + --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \ + --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \ + --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \ + --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \ + 
--hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \ + --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \ + --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \ + --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \ + --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \ + --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \ + --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \ + --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a diff --git a/ci/flaky_test/run_process_xml.sh b/ci/flaky_test/run_process_xml.sh index a5c5043c92d4..38496128bb91 100755 --- a/ci/flaky_test/run_process_xml.sh +++ b/ci/flaky_test/run_process_xml.sh @@ -1,10 +1,13 @@ #!/bin/bash +export ENVOY_SRCDIR=${ENVOY_SRCDIR:-.} + +# shellcheck source=tools/shell_utils.sh . "${ENVOY_SRCDIR}"/tools/shell_utils.sh if [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then export MULTIDICT_NO_EXTENSIONS=1 - export YARL_NO_EXTENSIONS=1 + export YARL_NO_EXTENSIONS=1 fi -python_venv process_xml $1 +python_venv process_xml "$1" diff --git a/ci/flaky_test/run_process_xml_mac.sh b/ci/flaky_test/run_process_xml_mac.sh deleted file mode 100755 index 9dad6b7ea7fd..000000000000 --- a/ci/flaky_test/run_process_xml_mac.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -pip3 install slackclient -./ci/flaky_test/process_xml.py diff --git a/ci/go_mirror.sh b/ci/go_mirror.sh index 80be4cc0b532..63f96d0d7969 100755 --- a/ci/go_mirror.sh +++ b/ci/go_mirror.sh @@ -2,7 +2,11 @@ set -e -if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] -then - tools/api/generate_go_protobuf.py +MAIN_BRANCH="refs/heads/master" + +# shellcheck source=ci/setup_cache.sh +. "$(dirname "$0")"/setup_cache.sh + +if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS}" tools/api/generate_go_protobuf.py fi diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index 9303907a1be9..d69562ced31f 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -6,7 +6,13 @@ # https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for # a list of pre-installed tools in the macOS image. +# https://github.com/actions/virtual-environments/issues/1811 +brew uninstall openssl@1.0.2t + export HOMEBREW_NO_AUTO_UPDATE=1 +HOMEBREW_RETRY_ATTEMPTS=10 +HOMEBREW_RETRY_INTERVAL=3 + function is_installed { brew ls --versions "$1" >/dev/null @@ -20,7 +26,21 @@ function install { fi } -if ! brew update; then +function retry () { + local returns=1 i=1 + while ((i<=HOMEBREW_RETRY_ATTEMPTS)); do + if "$@"; then + returns=0 + break + else + sleep "$HOMEBREW_RETRY_INTERVAL"; + ((i++)) + fi + done + return "$returns" +} + +if ! retry brew update; then echo "Failed to update homebrew" exit 1 fi @@ -31,12 +51,6 @@ do is_installed "${DEP}" || install "${DEP}" done -if [ -n "$CIRCLECI" ]; then - # bazel uses jgit internally and the default circle-ci .gitconfig says to - # convert https://github.com to ssh://git@github.com, which jgit does not support. 
- mv ~/.gitconfig ~/.gitconfig_save -fi - # Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have # to unlink/overwrite them to install bazelisk echo "Installing bazelisk" @@ -48,4 +62,4 @@ fi bazel version -pip3 install slackclient +pip3 install virtualenv diff --git a/ci/mac_ci_steps.sh b/ci/mac_ci_steps.sh index 41e01d0fd134..5ebaba83ce95 100755 --- a/ci/mac_ci_steps.sh +++ b/ci/mac_ci_steps.sh @@ -11,13 +11,23 @@ trap finish EXIT echo "disk space at beginning of build:" df -h +# shellcheck source=ci/setup_cache.sh . "$(dirname "$0")"/setup_cache.sh +read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}" +read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}" + # TODO(zuercher): remove --flaky_test_attempts when https://github.com/envoyproxy/envoy/issues/2428 # is resolved. -BAZEL_BUILD_OPTIONS="--curses=no --show_task_finish --verbose_failures \ - --action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin --test_output=all \ - --flaky_test_attempts=integration@2 ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" +BAZEL_BUILD_OPTIONS=( + "--curses=no" + --show_task_finish + --verbose_failures + "--action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin" + "--test_output=all" + "--flaky_test_attempts=integration@2" + "${BAZEL_BUILD_EXTRA_OPTIONS[@]}" + "${BAZEL_EXTRA_TEST_OPTIONS[@]}") # Build envoy and run tests as separate steps so that failure output # is somewhat more deterministic (rather than interleaving the build @@ -26,10 +36,13 @@ BAZEL_BUILD_OPTIONS="--curses=no --show_task_finish --verbose_failures \ if [[ $# -gt 0 ]]; then TEST_TARGETS=$* else - TEST_TARGETS=//test/integration/... + TEST_TARGETS='//test/integration/...' fi if [[ "$TEST_TARGETS" == "//test/..." || "$TEST_TARGETS" == "//test/integration/..." ]]; then - bazel build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static + bazel build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static fi -bazel test ${BAZEL_BUILD_OPTIONS} ${TEST_TARGETS} +bazel test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS}" + +# Additionally run macOS specific test suites +bazel test "${BAZEL_BUILD_OPTIONS[@]}" //test/common/network:apple_dns_impl_test diff --git a/ci/repokitteh/modules/azure_pipelines.star b/ci/repokitteh/modules/azure_pipelines.star index 7d80c149b5cd..655ba0e50863 100644 --- a/ci/repokitteh/modules/azure_pipelines.star +++ b/ci/repokitteh/modules/azure_pipelines.star @@ -25,7 +25,7 @@ def _get_azp_checks(): return checks def _retry(config, comment_id, command): - msgs = "Retrying Azure Pipelines, to retry CircleCI checks, use `/retest-circle`.\n" + msgs = "Retrying Azure Pipelines.\n" checks = _get_azp_checks() retried_checks = [] diff --git a/ci/repokitteh/modules/newcontributor.star b/ci/repokitteh/modules/newcontributor.star new file mode 100644 index 000000000000..4cf644bc200f --- /dev/null +++ b/ci/repokitteh/modules/newcontributor.star @@ -0,0 +1,31 @@ + +NEW_CONTRIBUTOR_MESSAGE = """ +Hi @%s, welcome and thank you for your contribution. + +We will try to review your Pull Request as quickly as possible. + +In the meantime, please take a look at the [contribution guidelines](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md) if you have not done so already. 
+ +""" + +def get_pr_author_association(issue_number): + return github.call( + method="GET", + path="repos/envoyproxy/envoy/pulls/%s" % issue_number)["json"]["author_association"] + +def is_newcontributor(issue_number): + return get_pr_author_association(issue_number) == "FIRST_TIME_CONTRIBUTOR" + +def should_message_newcontributor(action, issue_number): + return ( + action == 'opened' + and is_newcontributor(issue_number)) + +def send_newcontributor_message(sender): + github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender) + +def _pr(action, issue_number, sender, config): + if should_message_newcontributor(action, issue_number): + send_newcontributor_message(sender) + +handlers.pull_request(func=_pr) diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index d594553b6cb2..040b5a46b895 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -37,7 +37,7 @@ function exclude_win32_impl() { # Do not run clang-tidy against macOS impl # TODO: We should run clang-tidy against macOS impl for completeness function exclude_macos_impl() { - grep -v source/common/filesystem/kqueue/ + grep -v source/common/filesystem/kqueue/ | grep -v source/common/network/apple_dns_impl | grep -v test/common/network/apple_dns_impl_test } # Do not run incremental clang-tidy on check_format testdata files. @@ -50,33 +50,59 @@ function exclude_headersplit_testdata() { grep -v tools/envoy_headersplit/ } +# Do not run clang-tidy against Chromium URL import, this needs to largely +# reflect the upstream structure. +function exclude_chromium_url() { + grep -v source/common/chromium_url/ +} + # Exclude files in third_party which are temporary forks from other OSS projects. function exclude_third_party() { grep -v third_party/ } +# Exclude files which are part of the Wasm emscripten environment +function exclude_wasm_emscripten() { + grep -v source/extensions/common/wasm/ext +} + +# Exclude files which are part of the Wasm SDK +function exclude_wasm_sdk() { + grep -v proxy_wasm_cpp_sdk +} + +# Exclude files which are part of the Wasm Host environment +function exclude_wasm_host() { + grep -v proxy_wasm_cpp_host +} + +# Exclude proxy-wasm test_data. 
+function exclude_wasm_test_data() { + grep -v wasm/test_data +} + function filter_excludes() { - exclude_check_format_testdata | exclude_headersplit_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party + exclude_check_format_testdata | exclude_headersplit_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data } function run_clang_tidy() { python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \ - -clang-tidy-binary=${CLANG_TIDY} \ - -clang-apply-replacements-binary=${CLANG_APPLY_REPLACEMENTS} \ - -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p ${SRCDIR} -quiet \ - ${APPLY_CLANG_TIDY_FIXES:+-fix} $@ + -clang-tidy-binary="${CLANG_TIDY}" \ + -clang-apply-replacements-binary="${CLANG_APPLY_REPLACEMENTS}" \ + -export-fixes=${FIX_YAML} -j "${NUM_CPUS:-0}" -p "${SRCDIR}" -quiet \ + ${APPLY_CLANG_TIDY_FIXES:+-fix} "$@" } function run_clang_tidy_diff() { - git diff $1 | filter_excludes | \ + git diff "$1" | filter_excludes | \ python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ - -clang-tidy-binary=${CLANG_TIDY} \ - -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p 1 -quiet + -clang-tidy-binary="${CLANG_TIDY}" \ + -export-fixes="${FIX_YAML}" -j "${NUM_CPUS:-0}" -p 1 -quiet } if [[ $# -gt 0 ]]; then - echo "Running clang-tidy on: $@" - run_clang_tidy $@ + echo "Running clang-tidy on: $*" + run_clang_tidy "$@" elif [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running a full clang-tidy" run_clang_tidy @@ -87,15 +113,15 @@ else elif [[ "${BUILD_REASON}" == *CI ]]; then DIFF_REF="HEAD^" else - DIFF_REF=$(${ENVOY_SRCDIR}/tools/git/last_github_commit.sh) + DIFF_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh) fi fi - echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse ${DIFF_REF})), current HEAD ($(git rev-parse HEAD))" - run_clang_tidy_diff ${DIFF_REF} + echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse "${DIFF_REF}")), current HEAD ($(git rev-parse HEAD))" + run_clang_tidy_diff "${DIFF_REF}" fi if [[ -s "${FIX_YAML}" ]]; then echo "clang-tidy check failed, potentially fixed by clang-apply-replacements:" - cat ${FIX_YAML} + cat "${FIX_YAML}" exit 1 fi diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 5bafffb89522..842b51b6ce89 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -2,36 +2,95 @@ set -e -. $(dirname $0)/envoy_build_sha.sh +# shellcheck source=ci/envoy_build_sha.sh +. "$(dirname "$0")"/envoy_build_sha.sh -# We run as root and later drop permissions. This is required to setup the USER -# in useradd below, which is need for correct Python execution in the Docker -# environment. -USER=root -USER_GROUP=root +function is_windows() { + [[ "$(uname -s)" == *NT* ]] +} + +read -ra ENVOY_DOCKER_OPTIONS <<< "${ENVOY_DOCKER_OPTIONS:-}" + +# TODO(phlax): uppercase these env vars +export HTTP_PROXY="${http_proxy:-}" +export HTTPS_PROXY="${https_proxy:-}" +export NO_PROXY="${no_proxy:-}" + +if is_windows; then + [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019" + # TODO(sunjayBhatia): Currently ENVOY_DOCKER_OPTIONS is ignored on Windows because + # CI sets it to a Linux-specific value. Undo this once https://github.com/envoyproxy/envoy/issues/13272 + # is resolved. 
+    ENVOY_DOCKER_OPTIONS=()
+    DEFAULT_ENVOY_DOCKER_BUILD_DIR=C:/Windows/Temp/envoy-docker-build
+    BUILD_DIR_MOUNT_DEST=C:/build
+    # Replace MSYS style drive letter (/c/) with drive letter designation (C:/)
+    SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#")
+    SOURCE_DIR_MOUNT_DEST=C:/source
+    START_COMMAND=("bash" "-c" "cd source && $*")
+else
+    [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-ubuntu"
+    # We run as root and later drop permissions. This is required to set up the USER
+    # in useradd below, which is needed for correct Python execution in the Docker
+    # environment.
+    ENVOY_DOCKER_OPTIONS+=(-u root:root)
+    ENVOY_DOCKER_OPTIONS+=(-v /var/run/docker.sock:/var/run/docker.sock)
+    ENVOY_DOCKER_OPTIONS+=(--cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN)
+    DEFAULT_ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build
+    BUILD_DIR_MOUNT_DEST=/build
+    SOURCE_DIR="${PWD}"
+    SOURCE_DIR_MOUNT_DEST=/source
+    START_COMMAND=("/bin/bash" "-lc" "groupadd --gid $(id -g) -f envoygroup \
+        && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home --home-dir /build envoybuild \
+        && usermod -a -G pcap envoybuild \
+        && sudo -EHs -u envoybuild bash -c 'cd /source && $*'")
+fi

-[[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-ubuntu"
 # The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker
 # images').
 [[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}"
-[[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build
+[[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR="${DEFAULT_ENVOY_DOCKER_BUILD_DIR}"
+# Replace backslash with forward slash for Windows style paths
+ENVOY_DOCKER_BUILD_DIR="${ENVOY_DOCKER_BUILD_DIR//\\//}"
+mkdir -p "${ENVOY_DOCKER_BUILD_DIR}"

-[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=" -it"
-[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=" -v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)"
+[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=("-it")
+[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=(-v "$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)")
+[[ -n "${SSH_AUTH_SOCK}" ]] && ENVOY_DOCKER_OPTIONS+=(-v "${SSH_AUTH_SOCK}:${SSH_AUTH_SOCK}" -e SSH_AUTH_SOCK)

 export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}"
-mkdir -p "${ENVOY_DOCKER_BUILD_DIR}"

 # Since we specify an explicit hash, docker-run will pull from the remote repo if missing.
-docker run --rm ${ENVOY_DOCKER_OPTIONS} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} -e NO_PROXY=${no_proxy} \ - -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock \ - -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE -e ENVOY_STDLIB -e BUILD_REASON \ - -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY -e ENVOY_BUILD_IMAGE \ - -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ - -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME -e BAZELISK_BASE_URL -e ENVOY_BUILD_ARCH -e SLACK_TOKEN -e BUILD_URI\ - -e REPO_URI -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \ - /bin/bash -lc "\ - groupadd --gid $(id -g) -f envoygroup \ - && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home --home-dir /build envoybuild \ - && usermod -a -G pcap envoybuild \ - && sudo -EHs -u envoybuild bash -c \"cd /source && $*\"" +docker run --rm \ + "${ENVOY_DOCKER_OPTIONS[@]}" \ + -v "${ENVOY_DOCKER_BUILD_DIR}":"${BUILD_DIR_MOUNT_DEST}" \ + -v "${SOURCE_DIR}":"${SOURCE_DIR_MOUNT_DEST}" \ + -e AZP_BRANCH \ + -e HTTP_PROXY \ + -e HTTPS_PROXY \ + -e NO_PROXY \ + -e BAZEL_STARTUP_OPTIONS \ + -e BAZEL_BUILD_EXTRA_OPTIONS \ + -e BAZEL_EXTRA_TEST_OPTIONS \ + -e BAZEL_REMOTE_CACHE \ + -e ENVOY_STDLIB \ + -e BUILD_REASON \ + -e BAZEL_REMOTE_INSTANCE \ + -e GCP_SERVICE_ACCOUNT_KEY \ + -e NUM_CPUS \ + -e ENVOY_RBE \ + -e FUZZIT_API_KEY \ + -e ENVOY_BUILD_IMAGE \ + -e ENVOY_SRCDIR \ + -e ENVOY_BUILD_TARGET \ + -e SYSTEM_PULLREQUEST_TARGETBRANCH \ + -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ + -e GCS_ARTIFACT_BUCKET \ + -e BUILD_SOURCEBRANCHNAME \ + -e BAZELISK_BASE_URL \ + -e ENVOY_BUILD_ARCH \ + -e SLACK_TOKEN \ + -e BUILD_URI\ + -e REPO_URI \ + "${ENVOY_BUILD_IMAGE}" \ + "${START_COMMAND[@]}" diff --git a/ci/run_envoy_docker_windows.sh b/ci/run_envoy_docker_windows.sh deleted file mode 100644 index a1f4e7372b52..000000000000 --- a/ci/run_envoy_docker_windows.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e - -# The image tag for the Windows image is the same as the Linux one so we use the same mechanism to find it -. $(dirname $0)/envoy_build_sha.sh - -[[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019" -# The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker -# images'). -[[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}" - -ENVOY_SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#") - -[[ -f .git ]] && [[ ! -d .git ]] && GIT_VOLUME_OPTION="-v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" - -[[ -t 1 ]] && DOCKER_TTY_OPTION=-it - -export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" - -# Since we specify an explicit hash, docker-run will pull from the remote repo if missing. 
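The rewrite above moves `ENVOY_DOCKER_OPTIONS` from string concatenation to a bash array. A minimal standalone sketch of that quoting pattern (illustrative only, not part of this patch; `EXTRA_DOCKER_OPTIONS` and the `alpine:3` image are hypothetical stand-ins):

```bash
#!/bin/bash
# Split a space-separated env var once with read -ra, append further flags as
# array elements, and expand with "${ARR[@]}" so each element stays one word.
set -e

# e.g. EXTRA_DOCKER_OPTIONS="--network host -e FOO=bar" (hypothetical variable)
read -ra OPTS <<< "${EXTRA_DOCKER_OPTIONS:-}"

[[ -t 1 ]] && OPTS+=("-it")     # only attach a TTY when one exists
OPTS+=(-v "${PWD}:/source")     # paths containing spaces survive intact

# With an empty array nothing is passed, so there is no stray empty argument.
docker run --rm "${OPTS[@]}" alpine:3 ls /source
```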
-docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ - ${GIT_VOLUME_OPTION} -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE \ - -e ENVOY_STDLIB -e BUILD_REASON -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE \ - -e ENVOY_BUILD_IMAGE -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -v ${ENVOY_SOURCE_DIR}:C:/source \ - "${ENVOY_BUILD_IMAGE}" \ - bash -c "cd source && $*" diff --git a/ci/setup_cache.sh b/ci/setup_cache.sh index f615b8b41d5d..0733f679b784 100755 --- a/ci/setup_cache.sh +++ b/ci/setup_cache.sh @@ -2,7 +2,7 @@ set -e -if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then +if [[ -n "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then # mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all # users by default. GCP_SERVICE_ACCOUNT_KEY_FILE=$(mktemp -t gcp_service_account.XXXXXX.json) @@ -20,11 +20,11 @@ if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then fi -if [[ ! -z "${BAZEL_REMOTE_CACHE}" ]]; then +if [[ -n "${BAZEL_REMOTE_CACHE}" ]]; then export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_cache=${BAZEL_REMOTE_CACHE}" echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE}." - if [[ ! -z "${BAZEL_REMOTE_INSTANCE}" ]]; then + if [[ -n "${BAZEL_REMOTE_INSTANCE}" ]]; then export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_instance_name=${BAZEL_REMOTE_INSTANCE}" echo "instance_name: ${BAZEL_REMOTE_INSTANCE}." elif [[ -z "${ENVOY_RBE}" ]]; then diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh index 7bd5b0201359..755abf3a39d5 100755 --- a/ci/upload_gcs_artifact.sh +++ b/ci/upload_gcs_artifact.sh @@ -22,5 +22,5 @@ BRANCH=${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}} GCS_LOCATION="${GCS_ARTIFACT_BUCKET}/${BRANCH}/${TARGET_SUFFIX}" echo "Uploading to gs://${GCS_LOCATION} ..." -gsutil -mq rsync -dr ${SOURCE_DIRECTORY} gs://${GCS_LOCATION} +gsutil -mq rsync -dr "${SOURCE_DIRECTORY}" "gs://${GCS_LOCATION}" echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}/index.html" diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh index 4e459464aeda..d034a4a30cec 100755 --- a/ci/verify_examples.sh +++ b/ci/verify_examples.sh @@ -3,7 +3,7 @@ TESTFILTER="${1:-*}" FAILED=() SRCDIR="${SRCDIR:-$(pwd)}" -EXCLUDED_BUILD_CONFIGS=${EXCLUDED_BUILD_CONFIGS:-"^./jaeger-native-tracing|docker-compose"} +EXCLUDE_EXAMPLES=${EXCLUDED_EXAMPLES:-"wasm"} trap_errors () { @@ -30,7 +30,7 @@ trap exit 1 INT run_examples () { local examples example cd "${SRCDIR}/examples" || exit 1 - examples=$(find . -mindepth 1 -maxdepth 1 -type d -name "$TESTFILTER" | sort) + examples=$(find . -mindepth 1 -maxdepth 1 -type d -name "$TESTFILTER" | grep -vE "${EXCLUDE_EXAMPLES}" | sort) for example in $examples; do pushd "$example" > /dev/null || return 1 ./verify.sh @@ -38,26 +38,8 @@ run_examples () { done } -verify_build_configs () { - local config configs missing - missing=() - cd "${SRCDIR}/examples" || return 1 - configs="$(find . 
-name "*.yaml" -o -name "*.lua" | grep -vE "${EXCLUDED_BUILD_CONFIGS}" | cut -d/ -f2-)" - for config in $configs; do - grep "\"$config\"" BUILD || missing+=("$config") - done - if [[ -n "${missing[*]}" ]]; then - for config in "${missing[@]}"; do - echo "Missing config: $config" >&2 - done - return 1 - fi -} - -verify_build_configs run_examples - if [[ "${#FAILED[@]}" -ne "0" ]]; then echo "TESTS FAILED:" for failed in "${FAILED[@]}"; do diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh index 498445d9b949..ff77a9ea1465 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -11,38 +11,74 @@ trap finish EXIT echo "disk space at beginning of build:" df -h +# shellcheck source=ci/setup_cache.sh . "$(dirname "$0")"/setup_cache.sh +read -ra BAZEL_STARTUP_OPTIONS <<< "${BAZEL_STARTUP_OPTIONS:-}" +# Default to msvc-cl if not overridden +read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:---config=msvc-cl}" +read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}" + # Set up TMPDIR so bash and non-bash can access # e.g. TMPDIR=/d/tmp, make a link from /d/d to /d so both bash and Windows programs resolve the # same path # This is due to this issue: https://github.com/bazelbuild/rules_foreign_cc/issues/334 # rules_foreign_cc does not currently use bazel output/temp directories by default, it uses mktemp # which respects the value of the TMPDIR environment variable -drive="$(readlink -f $TMPDIR | cut -d '/' -f2)" +drive="$(readlink -f "$TMPDIR" | cut -d '/' -f2)" if [ ! -e "/$drive/$drive" ]; then /c/windows/system32/cmd.exe /c "mklink /d $drive:\\$drive $drive:\\" fi -BAZEL_STARTUP_OPTIONS="--output_base=c:/_eb" -# Default to msvc-cl if not overridden -BAZEL_BUILD_EXTRA_OPTIONS=${BAZEL_BUILD_EXTRA_OPTIONS:---config=msvc-cl} -BAZEL_BUILD_OPTIONS="-c opt --show_task_finish --verbose_failures \ - --test_output=errors ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" +BUILD_DIR=${BUILD_DIR:-/c/build} +if [[ ! -d "${BUILD_DIR}" ]] +then + echo "${BUILD_DIR} mount missing - did you forget -v :${BUILD_DIR}? Creating." + mkdir -p "${BUILD_DIR}" +fi + +# Environment setup. +export TEST_TMPDIR=${BUILD_DIR}/tmp + +[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=(--nocache_test_results) + +BAZEL_STARTUP_OPTIONS+=("--output_base=c:/_eb") +BAZEL_BUILD_OPTIONS=( + -c opt + --show_task_finish + --verbose_failures + --define "wasm=disabled" + "--test_output=errors" + "${BAZEL_BUILD_EXTRA_OPTIONS[@]}" + "${BAZEL_EXTRA_TEST_OPTIONS[@]}") + +# Also setup some space for building Envoy standalone. +ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy +mkdir -p "${ENVOY_BUILD_DIR}" + +# This is where we copy build deliverables to. +ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe +mkdir -p "${ENVOY_DELIVERY_DIR}" # Test to validate updates of all dependency libraries in bazel/external and bazel/foreign_cc -# bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //bazel/... --build_tag_filters=-skip_on_windows +# bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //bazel/... 
--build_tag_filters=-skip_on_windows # Complete envoy-static build (nothing needs to be skipped, build failure indicates broken dependencies) -bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static +bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static + +# Copy binary to delivery directory +cp -f bazel-bin/source/exe/envoy-static.exe "${ENVOY_DELIVERY_DIR}/envoy.exe" + +# Copy for azp, creating a tar archive +tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_DELIVERY_DIR}" envoy.exe # Test invocations of known-working tests on Windows -bazel ${BAZEL_STARTUP_OPTIONS} test ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows,-flaky_on_windows --build_tests_only +bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows,-flaky_on_windows --build_tests_only # Build tests that are known-flaky or known-failing to ensure no compilation regressions -bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,fails_on_windows,flaky_on_windows --build_tests_only +bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=-skip_on_windows,fails_on_windows,flaky_on_windows --build_tests_only # Summarize tests bypasssed to monitor the progress of porting to Windows -echo Tests bypassed as skip_on_windows: `bazel query 'kind(".*test rule", attr("tags", "skip_on_windows", //test/...))' 2>/dev/null | sort | wc -l` known unbuildable or inapplicable tests -echo Tests bypassed as fails_on_windows: `bazel query 'kind(".*test rule", attr("tags", "fails_on_windows", //test/...))' 2>/dev/null | sort | wc -l` known incompatible tests -echo Tests bypassed as flaky_on_windows: `bazel query 'kind(".*test rule", attr("tags", "flaky_on_windows", //test/...))' 2>/dev/null | sort | wc -l` known unstable tests +echo "Tests bypassed as skip_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "skip_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known unbuildable or inapplicable tests" +echo "Tests bypassed as fails_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "fails_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known incompatible tests" +echo "Tests bypassed as flaky_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "flaky_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known unstable tests" diff --git a/configs/BUILD b/configs/BUILD index 128ec6642118..ca9a10935694 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -39,11 +39,17 @@ genrule( srcs = [ ":configs", "//examples:configs", + "//docs:configs", "//test/config/integration/certs", ], outs = ["example_configs.tar"], - cmd = "$(location configgen.sh) $(location configgen) $(@D) $(locations :configs) " + - "$(locations //examples:configs) $(locations //test/config/integration/certs)", + cmd = ( + "$(location configgen.sh) $(location configgen) $(@D) " + + "$(locations :configs) " + + "$(locations //examples:configs) " + + "$(locations //docs:configs) " + + "$(locations //test/config/integration/certs)" + ), tools = [ "configgen.sh", ":configgen", diff --git a/configs/Dockerfile b/configs/Dockerfile index 2d7b7a6a5e3b..ac1bc7aeece8 100644 --- a/configs/Dockerfile +++ b/configs/Dockerfile @@ -3,5 +3,5 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update -COPY google_com_proxy.v2.yaml /etc/envoy.yaml +COPY google_com_proxy.yaml 
/etc/envoy.yaml CMD /usr/local/bin/envoy -c /etc/envoy.yaml diff --git a/configs/access_log_format_helper.template.yaml b/configs/access_log_format_helper.template.yaml new file mode 100644 index 000000000000..9861a51e9bfb --- /dev/null +++ b/configs/access_log_format_helper.template.yaml @@ -0,0 +1,15 @@ +{% macro ingress_sampled_log() -%} + log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"} +{% endmacro %} + +{% macro ingress_full() -%} + log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"} +{% endmacro %} + +{% macro egress_error_log() -%} + log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"} +{% endmacro %} + +{% macro egress_error_amazon_service() -%} + log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n"} +{% endmacro %} diff --git a/configs/access_log_format_helper_v2.template.yaml b/configs/access_log_format_helper_v2.template.yaml deleted file mode 100644 index 7a5d711c088b..000000000000 --- a/configs/access_log_format_helper_v2.template.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{% macro ingress_sampled_log() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" -{% endmacro %} - -{% macro ingress_full() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" -{% endmacro %} - -{% macro egress_error_log() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n" -{% endmacro %} - -{% macro egress_error_amazon_service() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" 
\"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n" -{% endmacro %} diff --git a/configs/configgen.py b/configs/configgen.py index d5409c481a91..8f5e20cd562d 100755 --- a/configs/configgen.py +++ b/configs/configgen.py @@ -111,16 +111,16 @@ def generate_config(template_path, template, output_file, **context): # Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners, # as well as a listener for the double proxy to connect to via SSL client authentication. generate_config(SCRIPT_DIR, - 'envoy_front_proxy_v2.template.yaml', - '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR), + 'envoy_front_proxy.template.yaml', + '{}/envoy_front_proxy.yaml'.format(OUT_DIR), clusters=front_envoy_clusters, tracing=tracing_enabled) # Generate a demo config for the double proxy. This sets up both an HTTP and HTTPS listeners, # and backhauls the traffic to the main front proxy. generate_config(SCRIPT_DIR, - 'envoy_double_proxy_v2.template.yaml', - '{}/envoy_double_proxy.v2.yaml'.format(OUT_DIR), + 'envoy_double_proxy.template.yaml', + '{}/envoy_double_proxy.yaml'.format(OUT_DIR), tracing=tracing_enabled) # Generate a demo config for the service to service (local) proxy. This sets up several different @@ -132,14 +132,12 @@ def generate_config(template_path, template, output_file, **context): # that Envoy proxies to listens on its own port. # optional mongo ports: built from mongos_servers above. generate_config(SCRIPT_DIR, - 'envoy_service_to_service_v2.template.yaml', + 'envoy_service_to_service.template.yaml', '{}/envoy_service_to_service.yaml'.format(OUT_DIR), internal_virtual_hosts=service_to_service_envoy_clusters, external_virtual_hosts=external_virtual_hosts, mongos_servers=mongos_servers) -for google_ext in ['v2.yaml']: - shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR) - -shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.v3.yaml'), OUT_DIR) -shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.v3.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.yaml'), OUT_DIR) diff --git a/configs/configgen.sh b/configs/configgen.sh index 2ef145c4af75..d68db9d46784 100755 --- a/configs/configgen.sh +++ b/configs/configgen.sh @@ -9,16 +9,20 @@ shift mkdir -p "$OUT_DIR/certs" mkdir -p "$OUT_DIR/lib" +mkdir -p "$OUT_DIR/protos" "$CONFIGGEN" "$OUT_DIR" for FILE in "$@"; do case "$FILE" in - *.pem) + *.pem|*.der) cp "$FILE" "$OUT_DIR/certs" ;; *.lua) cp "$FILE" "$OUT_DIR/lib" ;; + *.pb) + cp "$FILE" "$OUT_DIR/protos" + ;; *) FILENAME="$(echo "$FILE" | sed -e 's/.*examples\///g')" @@ -29,4 +33,4 @@ for FILE in "$@"; do done # tar is having issues with -C for some reason so just cd into OUT_DIR. 
-(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem lib/*.lua) +(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem certs/*.der protos/*.pb lib/*.lua) diff --git a/configs/encapsulate_in_connect.v3.yaml b/configs/encapsulate_in_connect.yaml similarity index 100% rename from configs/encapsulate_in_connect.v3.yaml rename to configs/encapsulate_in_connect.yaml diff --git a/configs/envoy_double_proxy_v2.template.yaml b/configs/envoy_double_proxy.template.yaml similarity index 82% rename from configs/envoy_double_proxy_v2.template.yaml rename to configs/envoy_double_proxy.template.yaml index feb9f3e1f95f..aea9127c74f6 100644 --- a/configs/envoy_double_proxy_v2.template.yaml +++ b/configs/envoy_double_proxy.template.yaml @@ -11,7 +11,7 @@ transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -29,7 +29,7 @@ filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: router route_config: @@ -47,24 +47,23 @@ http_filters: - name: envoy.filters.http.health_check typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: false headers: - exact_match: /healthcheck name: :path - name: envoy.filters.http.buffer typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.router typed_config: {} {% if tracing %} tracing: - operation_name: INGRESS provider: name: envoy.tracers.lightstep typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig + "@type": type.googleapis.com/envoy.config.trace.v3.LightstepConfig access_token_file: "/etc/envoy/lightstep_access_token" collector_cluster: lightstep_saas {% endif %} @@ -89,9 +88,10 @@ runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /var/log/envoy/access_error.log - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" + log_format: + text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% if proxy_proto %} use_remote_address: true {%endif -%} @@ -141,7 +141,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls 
typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -151,7 +151,7 @@ static_resources: validation_context: trusted_ca: filename: certs/cacert.pem - match_subject_alt_names: + match_subject_alt_names: exact: "front-proxy.yourcompany.net" http2_protocol_options: {} - name: lightstep_saas @@ -172,18 +172,18 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: filename: certs/cacert.pem - match_subject_alt_names: + match_subject_alt_names: exact: "collector-grpc.lightstep.com" flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.stat_sinks.statsd typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink tcp_cluster_name: statsd layered_runtime: layers: diff --git a/configs/envoy_front_proxy_v2.template.yaml b/configs/envoy_front_proxy.template.yaml similarity index 83% rename from configs/envoy_front_proxy_v2.template.yaml rename to configs/envoy_front_proxy.template.yaml index a9b9bc97f859..1dcb1e6f919f 100644 --- a/configs/envoy_front_proxy_v2.template.yaml +++ b/configs/envoy_front_proxy.template.yaml @@ -1,4 +1,4 @@ -{% import 'routing_helper_v2.template.yaml' as helper -%} +{% import 'routing_helper.template.yaml' as helper -%} {% macro router_file_content() -%}{% include kwargs['router_file'] -%}{% endmacro -%} {% macro listener(protocol, address, port_value, proxy_proto, tls, tracing) -%} name: not_required_for_static_listeners @@ -12,7 +12,7 @@ - transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: alpn_protocols: h2,http/1.1 tls_certificates: @@ -35,7 +35,7 @@ filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: router {% if proxy_proto -%} @@ -43,18 +43,18 @@ {%endif-%} stat_prefix: ingress_http route_config: - {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }} + {{ router_file_content(router_file='envoy_router.template.yaml')|indent(10) }} http_filters: - name: envoy.filters.http.health_check typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: false headers: - name: ":path" exact_match: "/healthcheck" - name: envoy.filters.http.buffer typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.ratelimit typed_config: @@ -70,11 +70,10 @@ add_user_agent: true {% if tracing %} tracing: - operation_name: INGRESS provider: name: envoy.tracers.lightstep typed_config: - 
"@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig + "@type": type.googleapis.com/envoy.config.trace.v3.LightstepConfig collector_cluster: lightstep_saas access_token_file: "/etc/envoy/lightstep_access_token" {% endif %} @@ -99,9 +98,10 @@ runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/access_error.log" - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" + log_format: + text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% endmacro -%} static_resources: listeners: diff --git a/configs/envoy_router_v2.template.yaml b/configs/envoy_router.template.yaml similarity index 93% rename from configs/envoy_router_v2.template.yaml rename to configs/envoy_router.template.yaml index 0d09269b6cab..338363af6c8c 100644 --- a/configs/envoy_router_v2.template.yaml +++ b/configs/envoy_router.template.yaml @@ -1,4 +1,4 @@ -{% import 'routing_helper_v2.template.yaml' as helper with context -%} +{% import 'routing_helper.template.yaml' as helper with context -%} name: local_route virtual_hosts: - name: www diff --git a/configs/envoy_service_to_service_v2.template.yaml b/configs/envoy_service_to_service.template.yaml similarity index 89% rename from configs/envoy_service_to_service_v2.template.yaml rename to configs/envoy_service_to_service.template.yaml index 31386c59bbf3..9237d117f035 100644 --- a/configs/envoy_service_to_service_v2.template.yaml +++ b/configs/envoy_service_to_service.template.yaml @@ -1,5 +1,5 @@ -{% import 'routing_helper_v2.template.yaml' as helper -%} -{% import 'access_log_format_helper_v2.template.yaml' as access_log_helper -%} +{% import 'routing_helper.template.yaml' as helper -%} +{% import 'access_log_format_helper.template.yaml' as access_log_helper -%} {% macro ingress_listener(protocol, address, port_value) -%} - address: socket_address: @@ -11,7 +11,7 @@ - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: ingress_http route_config: @@ -35,7 +35,7 @@ http_filters: - name: envoy.filters.http.health_check typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: true headers: - name: ":path" @@ -43,7 +43,7 @@ cache_time: 2.5s - name: envoy.filters.http.buffer typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - 
name: envoy.filters.http.router typed_config: {} @@ -52,7 +52,7 @@ filter: not_health_check_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/ingress_http.log" {{ access_log_helper.ingress_full()|indent(10)}} - name: envoy.access_loggers.file @@ -81,7 +81,7 @@ runtime_key: access_log.access_error.duration - not_health_check_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/ingress_http_error.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} - name: envoy.access_loggers.file @@ -92,7 +92,7 @@ - runtime_filter: runtime_key: access_log.ingress_http typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/ingress_http_sampled.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} common_http_protocol_options: @@ -111,7 +111,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http route_config: @@ -149,7 +149,7 @@ static_resources: runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true @@ -177,7 +177,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http rds: @@ -210,7 +210,7 @@ static_resources: runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true @@ -239,7 +239,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO common_http_protocol_options: idle_timeout: 840s @@ -259,7 +259,7 @@ static_resources: retry_policy: retry_on: connect-failure {% if host.get('host_rewrite', False) %} - host_rewrite: "{{host['host_rewrite']}}" + host_rewrite_literal: "{{host['host_rewrite']}}" {% endif %} {% endfor %} http_filters: @@ -295,7 +295,7 @@ static_resources: runtime_key: access_log.access_error.duration {% endif %} typed_config: - "@type": 
type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log" {% if mapping.get('is_amzn_service', False) -%} {{ access_log_helper.egress_error_amazon_service()|indent(10) }} @@ -315,12 +315,12 @@ static_resources: - filters: - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: mongo_{{ key }} cluster: mongo_{{ key }} - name: envoy.filters.network.mongo_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mongo_proxy.v2.MongoProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy stat_prefix: "{{ key }}" access_log: "/var/log/envoy/mongo_{{ key }}.log" {% if value.get('ratelimit', False) %} @@ -346,7 +346,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: @@ -413,7 +413,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.main_website.com - name: local_service connect_timeout: 0.25s @@ -456,8 +456,12 @@ static_resources: connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN + http2_protocol_options: + connection_keepalive: + interval: 30s + timeout: 5s load_assignment: - cluster_name: local_service_grpc + cluster_name: rds endpoints: - lb_endpoints: - endpoint: @@ -501,7 +505,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: @@ -548,9 +552,8 @@ flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.stat_sinks.statsd typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink tcp_cluster_name: statsd -watchdog: {} layered_runtime: layers: - name: root diff --git a/configs/freebind/freebind.yaml b/configs/freebind/freebind.yaml index 08214b8b044d..367e5ba3568a 100644 --- a/configs/freebind/freebind.yaml +++ b/configs/freebind/freebind.yaml @@ -17,7 +17,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route diff --git a/configs/google_com_proxy.v2.yaml b/configs/google_com_proxy.yaml similarity index 81% rename from configs/google_com_proxy.v2.yaml rename to configs/google_com_proxy.yaml index 53c26efc7c90..32e79bb306a9 100644 --- a/configs/google_com_proxy.v2.yaml +++ b/configs/google_com_proxy.yaml @@ -17,7 +17,7 @@ static_resources: - filters: - name: 
envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -28,13 +28,13 @@ static_resources: - match: prefix: "/" route: - host_rewrite: www.google.com + host_rewrite_literal: www.google.com cluster: service_google http_filters: - name: envoy.filters.http.router clusters: - name: service_google - connect_timeout: 0.25s + connect_timeout: 30s type: LOGICAL_DNS # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY @@ -51,5 +51,5 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.google.com diff --git a/configs/original-dst-cluster/proxy_config.yaml b/configs/original-dst-cluster/proxy_config.yaml index 7ac1ea020fdd..b2e925957cda 100644 --- a/configs/original-dst-cluster/proxy_config.yaml +++ b/configs/original-dst-cluster/proxy_config.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_service @@ -35,7 +35,6 @@ static_resources: lb_policy: CLUSTER_PROVIDED dns_lookup_family: V4_ONLY cluster_manager: {} -watchdog: {} admin: access_log_path: /tmp/admin_access.log address: diff --git a/configs/routing_helper_v2.template.yaml b/configs/routing_helper.template.yaml similarity index 100% rename from configs/routing_helper_v2.template.yaml rename to configs/routing_helper.template.yaml diff --git a/configs/terminate_connect.v3.yaml b/configs/terminate_connect.yaml similarity index 100% rename from configs/terminate_connect.v3.yaml rename to configs/terminate_connect.yaml diff --git a/configs/using_deprecated_config.v2.yaml b/configs/using_deprecated_config.yaml similarity index 89% rename from configs/using_deprecated_config.v2.yaml rename to configs/using_deprecated_config.yaml index 55ca2797acb9..a98e64f365b9 100644 --- a/configs/using_deprecated_config.v2.yaml +++ b/configs/using_deprecated_config.yaml @@ -17,7 +17,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -54,7 +54,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.google.com tracing: http: diff --git a/docs/BUILD b/docs/BUILD index ead7bddb9a7f..aad5c89f0b65 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -1,3 +1,32 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) + licenses(["notice"]) # Apache 2 exports_files(["protodoc_manifest.yaml"]) + 
+envoy_package()
+
+filegroup(
+    name = "configs",
+    srcs = glob(
+        [
+            "root/**/*.yaml",
+            "root/**/*.pb",
+        ],
+        exclude = [
+            # TODO(phlax/windows-dev): figure out how to get this working on windows
+            # "Error: unable to read file: /etc/ssl/certs/ca-certificates.crt"
+            "root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml",
+            "root/intro/arch_overview/security/_include/ssl.yaml",
+        ],
+    ) + select({
+        "//bazel:windows_x86_64": [],
+        "//conditions:default": [
+            "root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml",
+            "root/intro/arch_overview/security/_include/ssl.yaml",
+        ],
+    }),
+)
diff --git a/docs/README.md b/docs/README.md
index b672f51c8a4f..5cd5444d670b 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,21 +1,50 @@
-# Developer-local docs build
+# Building documentation locally
+
+There are two methods to build the documentation, described below.
+
+In both cases, the generated output can be found in `generated/docs`.
+
+## Building in an existing Envoy development environment
+
+If you have an [existing Envoy development environment](https://github.com/envoyproxy/envoy/tree/master/bazel#quick-start-bazel-build-for-developers), you should already have the necessary dependencies and should be able to build the documentation directly.

 ```bash
 ./docs/build.sh
 ```

-The output can be found in `generated/docs`. By default configuration examples are going to be validated during build.
-To disable validation, set `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`:
+By default, configuration examples are validated during the build. To disable validation,
+set the `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`:

 ```bash
 SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh
 ```

+## Using the Docker build container to build the documentation
+
+If you *do not* have an existing development environment, you may wish to use the Docker build
+image that is used in continuous integration.
+
+This can be done as follows:
+
+```
+./ci/run_envoy_docker.sh 'docs/build.sh'
+```
+
+To use this method, you will need a minimum of 4-5GB of disk space available to accommodate the build image.
+
+# Creating a Pull Request with documentation changes
+
+When you create a Pull Request, the documentation is rendered by Azure Pipelines.
+
+To view the rendered documentation:
+1. Open the docs job in Azure Pipelines.
+2. Navigate to the "Upload Docs to GCS" log.
+3. Click on the link shown there.

 # How the Envoy website and docs are updated

 1. The docs are published to [docs/envoy/latest](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy/latest)
-   on every commit to master. This process is handled by CircleCI with the
+   on every commit to master. This process is handled by Azure Pipelines with the
    [`publish.sh`](https://github.com/envoyproxy/envoy/blob/master/docs/publish.sh)
    script.
 2. The docs are published to [docs/envoy](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy)
diff --git a/docs/build.sh b/docs/build.sh
index c715f4d5b8f7..c3f182a739c3 100755
--- a/docs/build.sh
+++ b/docs/build.sh
@@ -7,15 +7,20 @@ set -e
+RELEASE_TAG_REGEX="^refs/tags/v.*"
+
+if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
+    DOCS_TAG="${AZP_BRANCH/refs\/tags\//}"
+fi
+
 # We need to set ENVOY_DOCS_VERSION_STRING and ENVOY_DOCS_RELEASE_LEVEL for Sphinx.
 # We also validate that the tag and version match at this point if needed.
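The `${AZP_BRANCH/refs\/tags\//}` expansion introduced above strips the `refs/tags/` prefix only when it is present, which is why the regex guard precedes it. A small illustration of that behavior (the ref values below are hypothetical examples, not part of this patch):

```bash
#!/bin/bash
# "${var/pattern/}" deletes the first match of pattern; escaping the slashes
# lets the pattern itself contain "/".

AZP_BRANCH="refs/tags/v1.16.0"
echo "${AZP_BRANCH/refs\/tags\//}"   # -> v1.16.0

AZP_BRANCH="refs/heads/master"
echo "${AZP_BRANCH/refs\/tags\//}"   # -> refs/heads/master (no match, unchanged)
```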
-if [ -n "$CIRCLE_TAG" ] -then +if [[ -n "${DOCS_TAG}" ]]; then # Check the git tag matches the version number in the VERSION file. VERSION_NUMBER=$(cat VERSION) - if [ "v${VERSION_NUMBER}" != "${CIRCLE_TAG}" ]; then + if [[ "v${VERSION_NUMBER}" != "${DOCS_TAG}" ]]; then echo "Given git tag does not match the VERSION file content:" - echo "${CIRCLE_TAG} vs $(cat VERSION)" + echo "${DOCS_TAG} vs $(cat VERSION)" exit 1 fi # Check the version_history.rst contains current release version. @@ -23,9 +28,9 @@ then || (echo "Git tag not found in version_history/current.rst" && exit 1) # Now that we know there is a match, we can use the tag. - export ENVOY_DOCS_VERSION_STRING="tag-$CIRCLE_TAG" + export ENVOY_DOCS_VERSION_STRING="tag-${DOCS_TAG}" export ENVOY_DOCS_RELEASE_LEVEL=tagged - export ENVOY_BLOB_SHA="$CIRCLE_TAG" + export ENVOY_BLOB_SHA="${DOCS_TAG}" else BUILD_SHA=$(git rev-parse HEAD) VERSION_NUM=$(cat VERSION) @@ -49,7 +54,7 @@ rm -rf "${GENERATED_RST_DIR}" mkdir -p "${GENERATED_RST_DIR}" source_venv "$BUILD_DIR" -pip3 install -r "${SCRIPT_DIR}"/requirements.txt +pip3 install --require-hashes -r "${SCRIPT_DIR}"/requirements.txt # Clean up any stale files in the API tree output. Bazel remembers valid cached # files still. @@ -76,7 +81,7 @@ mkdir -p "${GENERATED_RST_DIR}"/intro/arch_overview/security ./docs/generate_extension_rst.py "${EXTENSION_DB_PATH}" "${GENERATED_RST_DIR}"/intro/arch_overview/security # Generate RST for external dependency docs in intro/arch_overview/security. -./docs/generate_external_dep_rst.py "${GENERATED_RST_DIR}"/intro/arch_overview/security +PYTHONPATH=. ./docs/generate_external_dep_rst.py "${GENERATED_RST_DIR}"/intro/arch_overview/security function generate_api_rst() { local proto_target @@ -139,7 +144,12 @@ cp -f "${CONFIGS_DIR}"/google-vrp/envoy-edge.yaml "${GENERATED_RST_DIR}"/configu rsync -rav "${API_DIR}/diagrams" "${GENERATED_RST_DIR}/api-docs" -rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${SCRIPT_DIR}"/_ext "${GENERATED_RST_DIR}" +rsync -av \ + "${SCRIPT_DIR}"/root/ \ + "${SCRIPT_DIR}"/conf.py \ + "${SCRIPT_DIR}"/redirects.txt \ + "${SCRIPT_DIR}"/_ext \ + "${GENERATED_RST_DIR}" # To speed up validate_fragment invocations in validating_code_block bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/config_validation:validate_fragment diff --git a/docs/conf.py b/docs/conf.py index 1eb5725b689b..796519e06a4a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -71,8 +71,8 @@ def setup(app): sys.path.append(os.path.abspath("./_ext")) extensions = [ - 'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', - 'validating_code_block' + 'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', 'sphinx_tabs.tabs', + 'sphinx_copybutton', 'validating_code_block', 'sphinxext.rediraffe' ] extlinks = { 'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''), @@ -88,6 +88,9 @@ def setup(app): # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] +copybutton_prompt_text = r"\$ |PS>" +copybutton_prompt_is_regexp = True + # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] @@ -179,6 +182,7 @@ def setup(app): # documentation. html_theme_options = { 'logo_only': True, + 'includehidden': False, } # Add any paths that contain custom themes here, relative to this directory. @@ -271,3 +275,8 @@ def setup(app): # Output file base name for HTML help builder. 
htmlhelp_basename = 'envoydoc' + +# TODO(phlax): add redirect diff (`rediraffe_branch` setting) +# - not sure how diffing will work with master merging in PRs - might need +# to be injected dynamically, somehow +rediraffe_redirects = "redirects.txt" diff --git a/docs/generate_external_dep_rst.py b/docs/generate_external_dep_rst.py index 8c0de67572c8..07488d20831e 100755 --- a/docs/generate_external_dep_rst.py +++ b/docs/generate_external_dep_rst.py @@ -7,16 +7,7 @@ import sys import urllib.parse -from importlib.util import spec_from_loader, module_from_spec -from importlib.machinery import SourceFileLoader - -# bazel/repository_locations.bzl must have a .bzl suffix for Starlark import, so -# we are forced to do this workaround. -_repository_locations_spec = spec_from_loader( - 'repository_locations', - SourceFileLoader('repository_locations', 'bazel/repository_locations.bzl')) -repository_locations = module_from_spec(_repository_locations_spec) -_repository_locations_spec.loader.exec_module(repository_locations) +from tools.dependency import utils as dep_utils # Render a CSV table given a list of table headers, widths and list of rows @@ -40,7 +31,7 @@ def RstLink(text, url): # NIST CPE database search URL for a given CPE. def NistCpeUrl(cpe): encoded_cpe = urllib.parse.quote(cpe) - return 'https://nvd.nist.gov/products/cpe/search/results?keyword=%s&status=FINAL&orderBy=CPEURI&namingFormat=2.3' % encoded_cpe + return f'https://nvd.nist.gov/vuln/search/results?form_type=Advanced&results_type=overview&query={encoded_cpe}&search_type=all' # Render version strings human readable. @@ -52,13 +43,56 @@ def RenderVersion(version): return version +def RenderTitle(title): + underline = '~' * len(title) + return f'\n{title}\n{underline}\n\n' + + +# Determine the version link URL. If it's GitHub, use some heuristics to figure +# out a release tag link, otherwise point to the GitHub tree at the respective +# SHA. Otherwise, return the tarball download. +def GetVersionUrl(metadata): + # Figure out if it's a GitHub repo. + github_repo = None + github_version = None + for url in metadata['urls']: + if url.startswith('https://github.com/'): + components = url.split('/') + github_repo = f'https://github.com/{components[3]}/{components[4]}' + if components[5] == 'archive': + # Only support .tar.gz, .zip today. Figure out the release tag from this + # filename. + if components[6].endswith('.tar.gz'): + github_version = components[6][:-len('.tar.gz')] + else: + assert (components[6].endswith('.zip')) + github_version = components[6][:-len('.zip')] + else: + # Release tag is a path component. + assert (components[5] == 'releases') + github_version = components[7] + break + # If not, direct download link for tarball + download_url = metadata['urls'][0] + if not github_repo: + return download_url + # If it's not a GH hash, it's a tagged release. + tagged_release = len(metadata['version']) != 40 + if tagged_release: + # The GitHub version should look like the metadata version, but might have + # something like a "v" prefix. 
+ return f'{github_repo}/releases/tag/{github_version}' + assert (metadata['version'] == github_version) + return f'{github_repo}/tree/{github_version}' + + if __name__ == '__main__': security_rst_root = sys.argv[1] - Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe']) - use_categories = defaultdict(list) + Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe', 'last_updated']) + use_categories = defaultdict(lambda: defaultdict(list)) # Bin rendered dependencies into per-use category lists. - for k, v in repository_locations.DEPENDENCY_REPOSITORIES.items(): + for k, v in dep_utils.RepositoryLocations().items(): cpe = v.get('cpe', '') if cpe == 'N/A': cpe = '' @@ -67,17 +101,23 @@ def RenderVersion(version): project_name = v['project_name'] project_url = v['project_url'] name = RstLink(project_name, project_url) - version = RstLink(RenderVersion(v['version']), v['urls'][0]) - dep = Dep(name, project_name.lower(), version, cpe) + version = RstLink(RenderVersion(v['version']), GetVersionUrl(v)) + last_updated = v['last_updated'] + dep = Dep(name, project_name.lower(), version, cpe, last_updated) for category in v['use_category']: - use_categories[category].append(dep) + for ext in v.get('extensions', ['core']): + use_categories[category][ext].append(dep) def CsvRow(dep): - return [dep.name, dep.version, dep.cpe] + return [dep.name, dep.version, dep.last_updated, dep.cpe] # Generate per-use category RST with CSV tables. - for category, deps in use_categories.items(): - output_path = pathlib.Path(security_rst_root, f'external_dep_{category}.rst') - content = CsvTable(['Name', 'Version', 'CPE'], [2, 1, 2], - [CsvRow(dep) for dep in sorted(deps, key=lambda d: d.sort_name)]) + for category, exts in use_categories.items(): + content = '' + for ext_name, deps in sorted(exts.items()): + if ext_name != 'core': + content += RenderTitle(ext_name) + output_path = pathlib.Path(security_rst_root, f'external_dep_{category}.rst') + content += CsvTable(['Name', 'Version', 'Last updated', 'CPE'], [2, 1, 1, 2], + [CsvRow(dep) for dep in sorted(deps, key=lambda d: d.sort_name)]) output_path.write_text(content) diff --git a/docs/publish.sh b/docs/publish.sh index 498a68d0f45e..11b75f1b77c9 100755 --- a/docs/publish.sh +++ b/docs/publish.sh @@ -1,6 +1,6 @@ #!/bin/bash -# This is run on every commit that CircleCI picks up. It assumes that docs have already been built +# This is run on every commit that Azure Pipelines picks up. It assumes that docs have already been built # via docs/build.sh. The push behavior differs depending on the nature of the commit: # * Tag commit (e.g. v1.6.0): pushes docs to versioned location, e.g. # https://www.envoyproxy.io/docs/envoy/v1.6.0/. 
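The push behavior described in the comment above now keys off `AZP_BRANCH`. A minimal standalone sketch of the three possible outcomes (illustrative only, not part of this patch; the ref value is a hypothetical example):

```bash
#!/bin/bash
# Sketch of the routing publish.sh performs on AZP_BRANCH:
#   refs/tags/v1.16.0 -> docs/envoy/v1.16.0 (versioned docs for a release tag)
#   refs/heads/master -> docs/envoy/latest
#   anything else     -> the push is skipped
set -e

AZP_BRANCH="${AZP_BRANCH:-refs/tags/v1.16.0}"   # example value
CHECKOUT_DIR=envoy-docs
RELEASE_TAG_REGEX="^refs/tags/v.*"

if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
    PUBLISH_DIR="${CHECKOUT_DIR}/docs/envoy/${AZP_BRANCH/refs\/tags\//}"
elif [[ "${AZP_BRANCH}" == "refs/heads/master" ]]; then
    PUBLISH_DIR="${CHECKOUT_DIR}/docs/envoy/latest"
else
    echo "Ignoring docs push"
    exit 0
fi

echo "Docs would be copied into: ${PUBLISH_DIR}"
```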
@@ -10,35 +10,36 @@ set -e DOCS_DIR=generated/docs -CHECKOUT_DIR=../envoy-docs +CHECKOUT_DIR=envoy-docs BUILD_SHA=$(git rev-parse HEAD) -if [ -n "$CIRCLE_TAG" ] -then - PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/"$CIRCLE_TAG" -elif [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] -then - PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/latest +MAIN_BRANCH="refs/heads/master" +RELEASE_TAG_REGEX="^refs/tags/v.*" + +if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then + PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/"${AZP_BRANCH/refs\/tags\//}" +elif [[ "$AZP_BRANCH" == "${MAIN_BRANCH}" ]]; then + PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/latest else echo "Ignoring docs push" exit 0 fi +DOCS_MAIN_BRANCH="master" + echo 'cloning' -git clone git@github.com:envoyproxy/envoyproxy.github.io "$CHECKOUT_DIR" +git clone git@github.com:envoyproxy/envoyproxy.github.io "${CHECKOUT_DIR}" -b "${DOCS_MAIN_BRANCH}" --depth 1 -git -C "$CHECKOUT_DIR" fetch -git -C "$CHECKOUT_DIR" checkout -B master origin/master rm -fr "$PUBLISH_DIR" mkdir -p "$PUBLISH_DIR" cp -r "$DOCS_DIR"/* "$PUBLISH_DIR" -cd "$CHECKOUT_DIR" +cd "${CHECKOUT_DIR}" -git config user.name "envoy-docs(travis)" +git config user.name "envoy-docs(Azure Pipelines)" git config user.email envoy-docs@users.noreply.github.com -echo 'add' + +set -x + git add . -echo 'commit' git commit -m "docs envoy@$BUILD_SHA" -echo 'push' -git push origin master +git push origin "${DOCS_MAIN_BRANCH}" diff --git a/docs/redirects.txt b/docs/redirects.txt new file mode 100644 index 000000000000..87047ab3f3b5 --- /dev/null +++ b/docs/redirects.txt @@ -0,0 +1,11 @@ +intro/arch_overview/http/websocket.rst intro/arch_overview/http/upgrades.rst +configuration/observability/access_log.rst intro/arch_overview/observability/access_logging.rst + +install/building.rst start/building.rst +install/ref_configs.rst start/install/ref_configs.rst +install/sandboxes/local_docker_build.rst start/install/sandboxes/local_docker_build.rst +install/tools/config_load_check_tool.rst start/install/tools/config_load_check_tool.rst +install/tools/route_table_check_tool.rst start/install/tools/route_table_check_tool.rst +install/tools/schema_validator_check_tool.rst start/install/tools/schema_validator_check_tool.rst +install/tools/tools.rst start/install/tools/tools.rst +install/install.rst start/start.rst diff --git a/docs/requirements.txt b/docs/requirements.txt index 584d3ba990b2..c8e98061b50e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,17 +1,127 @@ -alabaster==0.7.12 -Babel==2.8.0 -docutils==0.16 -gitdb==4.0.5 -GitPython==3.1.7 -imagesize==1.2.0 -Jinja2==2.11.2 -MarkupSafe==1.1.1 -Pygments==2.6.1 -pytz==2020.1 -requests>=2.24.0 -six==1.15.0 -smmap==3.0.4 -snowballstemmer==2.0.0 -sphinx_rtd_theme==0.5.0 -Sphinx==3.2.1 -sphinxcontrib-httpdomain==1.7.0 +alabaster==0.7.12 \ + --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ + --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 +Babel==2.8.0 \ + --hash=sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38 \ + --hash=sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4 +certifi==2020.6.20 \ + --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \ + --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 +chardet==3.0.4 \ + --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ + 
--hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 +docutils==0.16 \ + --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ + --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc +gitdb==4.0.5 \ + --hash=sha256:91f36bfb1ab7949b3b40e23736db18231bf7593edada2ba5c3a174a7b23657ac \ + --hash=sha256:c9e1f2d0db7ddb9a704c2a0217be31214e91a4fe1dea1efad19ae42ba0c285c9 +GitPython==3.1.8 \ + --hash=sha256:080bf8e2cf1a2b907634761c2eaefbe83b69930c94c66ad11b65a8252959f912 \ + --hash=sha256:1858f4fd089abe92ae465f01d5aaaf55e937eca565fb2c1fce35a51b5f85c910 +idna==2.10 \ + --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ + --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 +imagesize==1.2.0 \ + --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \ + --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1 +Jinja2==2.11.2 \ + --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \ + --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035 +MarkupSafe==1.1.1 \ + --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \ + --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \ + --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \ + --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \ + --hash=sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42 \ + --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \ + --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \ + --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \ + --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \ + --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \ + --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \ + --hash=sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b \ + --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \ + --hash=sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15 \ + --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \ + --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \ + --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \ + --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \ + --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \ + --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \ + --hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \ + --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \ + --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \ + --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \ + --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \ + --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \ + --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \ + --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \ + 
--hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \ + --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \ + --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \ + --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \ + --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be +packaging==20.4 \ + --hash=sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8 \ + --hash=sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181 +Pygments==2.7.1 \ + --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \ + --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 +pyparsing==2.4.7 \ + --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ + --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b +pytz==2020.1 \ + --hash=sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed \ + --hash=sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048 +requests==2.24.0 \ + --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b \ + --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898 +six==1.15.0 \ + --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \ + --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced +smmap==3.0.4 \ + --hash=sha256:54c44c197c819d5ef1991799a7e30b662d1e520f2ac75c9efbeb54a742214cf4 \ + --hash=sha256:9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24 +snowballstemmer==2.0.0 \ + --hash=sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0 \ + --hash=sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52 +Sphinx==3.2.1 \ + --hash=sha256:321d6d9b16fa381a5306e5a0b76cd48ffbc588e6340059a729c6fdd66087e0e8 \ + --hash=sha256:ce6fd7ff5b215af39e2fcd44d4a321f6694b4530b6f2b2109b64d120773faea0 +sphinx-copybutton==0.3.0 \ + --hash=sha256:4becad3a1e7c50211f1477e34fd4b6d027680e1612f497cb5b88cf85bccddaaa \ + --hash=sha256:4cd06afd0588aa43eba968bfc6105e1ec6546c50a51f880af1d89afaebc6fb58 +sphinx-rtd-theme==0.5.0 \ + --hash=sha256:22c795ba2832a169ca301cd0a083f7a434e09c538c70beb42782c073651b707d \ + --hash=sha256:373413d0f82425aaa28fb288009bf0d0964711d347763af2f1b65cafcb028c82 +sphinx-tabs==1.3.0 \ + --hash=sha256:537857f91f1b371f7b45eb8ac83001618b3e3178c78df073d2cc4558a8e66ef5 \ + --hash=sha256:54132c8a57aa19bba6e17fe26eb94ea9df531708ff3f509b119313b32d0d5aff +sphinxcontrib-applehelp==1.0.2 \ + --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ + --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 +sphinxcontrib-devhelp==1.0.2 \ + --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ + --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 +sphinxcontrib-htmlhelp==1.0.3 \ + --hash=sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f \ + --hash=sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b +sphinxcontrib-httpdomain==1.7.0 \ + --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ + --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 +sphinxcontrib-jsmath==1.0.1 \ + --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ + 
--hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 +sphinxcontrib-qthelp==1.0.3 \ + --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ + --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 +sphinxext-rediraffe==0.2.4 \ + --hash=sha256:5428fb614d1fbc16964ba587aaa6b1c8ec92fd0b1d01bb6b369637446f43a27d \ + --hash=sha256:13e6474342df6643723976a3429edfc5e811e9f48b9f832c9fb6bdd9fe53fd83 +sphinxcontrib-serializinghtml==1.1.4 \ + --hash=sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc \ + --hash=sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a +urllib3==1.25.10 \ + --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \ + --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461 diff --git a/docs/root/_include/ssl_stats.rst b/docs/root/_include/ssl_stats.rst new file mode 100644 index 000000000000..93f9b247a67e --- /dev/null +++ b/docs/root/_include/ssl_stats.rst @@ -0,0 +1,20 @@ +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + connection_error, Counter, Total TLS connection errors not including failed certificate verifications + handshake, Counter, Total successful TLS connection handshakes + session_reused, Counter, Total successful TLS session resumptions + no_certificate, Counter, Total successful TLS connections with no client certificate + fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate + fail_verify_error, Counter, Total TLS connections that failed CA verification + fail_verify_san, Counter, Total TLS connections that failed SAN verification + fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification + ocsp_staple_failed, Counter, Total TLS connections that failed compliance with the OCSP policy + ocsp_staple_omitted, Counter, Total TLS connections that succeeded without stapling an OCSP response + ocsp_staple_responses, Counter, Total TLS connections where a valid OCSP response was available (irrespective of whether the client requested stapling) + ocsp_staple_requests, Counter, Total TLS connections where the client requested an OCSP staple + ciphers., Counter, Total successful TLS connections that used cipher + curves., Counter, Total successful TLS connections that used ECDHE curve + sigalgs., Counter, Total successful TLS connections that used signature algorithm + versions., Counter, Total successful TLS connections that used protocol version diff --git a/docs/root/_static/css/envoy.css b/docs/root/_static/css/envoy.css index c65a71f05262..8021e5df6f21 100644 --- a/docs/root/_static/css/envoy.css +++ b/docs/root/_static/css/envoy.css @@ -14,3 +14,12 @@ table.docutils div.line-block { overflow-wrap: break-word; max-width: 1000px; } + +/* To style the API version label of a search result item */ +.api-version-label { + border-radius: 20%; + background-color: #c0c0c0; + color: #ffffff; + margin-left: 4px; + padding: 4px; +} diff --git a/docs/root/_static/searchtools.js b/docs/root/_static/searchtools.js index bd46e53c3501..4c46c2de9a61 100644 --- a/docs/root/_static/searchtools.js +++ b/docs/root/_static/searchtools.js @@ -9,6 +9,9 @@ * */ +// Modified from https://raw.githubusercontent.com/sphinx-doc/sphinx/3.x/sphinx/themes/basic/static/searchtools.js +// to have renderApiVersionLabel to render the API version for each search result item. + if (!Scorer) { /** * Simple result scoring code. 
@@ -249,6 +252,16 @@ var Search = { //Search.lastresults = results.slice(); // a copy //console.info('search results:', Search.lastresults); + // renderApiVersionLabel renders API version for each search result item. + function renderApiVersionLabel(linkUrl) { + const filtered = linkUrl + .split("/") + .filter((part) => part.startsWith("api-v")); + return filtered.length === 1 + ? ' ' + filtered.pop() + "" + : ""; + } + // print the results var resultCount = results.length; function displayNextItem() { @@ -281,6 +294,10 @@ var Search = { .attr("href", linkUrl + highlightstring + item[2]) .html(item[1]) ); + var apiVersion = renderApiVersionLabel(linkUrl); + if (apiVersion !== "") { + listItem.append(apiVersion); + } if (item[3]) { listItem.append($(" (" + item[3] + ")")); Search.output.append(listItem); diff --git a/docs/root/about_docs.rst b/docs/root/about_docs.rst index 317639fc61f9..7bab46859642 100644 --- a/docs/root/about_docs.rst +++ b/docs/root/about_docs.rst @@ -16,3 +16,4 @@ The Envoy documentation is composed of a few major sections: * :ref:`Extending Envoy `: Information on how to write custom filters for Envoy. * :ref:`API reference `: Envoy API detailed reference. * :ref:`Envoy FAQ `: Have questions? We have answers. Hopefully. +* :ref:`Version history `: Per-version release notes. diff --git a/docs/root/api-v2/service/service.rst b/docs/root/api-v2/service/service.rst index 951c00d28972..e0357a1c2331 100644 --- a/docs/root/api-v2/service/service.rst +++ b/docs/root/api-v2/service/service.rst @@ -7,6 +7,7 @@ Services accesslog/v2/* load_stats/v2/* + auth/v2/* discovery/v2/* metrics/v2/* ratelimit/v2/* diff --git a/docs/root/api-v3/bootstrap/bootstrap.rst b/docs/root/api-v3/bootstrap/bootstrap.rst index d2397a9bf2ac..51d7b817c66d 100644 --- a/docs/root/api-v3/bootstrap/bootstrap.rst +++ b/docs/root/api-v3/bootstrap/bootstrap.rst @@ -10,3 +10,4 @@ Bootstrap ../config/metrics/v3/metrics_service.proto ../config/overload/v3/overload.proto ../config/ratelimit/v3/rls.proto + ../extensions/wasm/v3/wasm.proto diff --git a/docs/root/api-v3/config/wasm/wasm.rst b/docs/root/api-v3/config/wasm/wasm.rst index efdb96212478..a2f03f3304bb 100644 --- a/docs/root/api-v3/config/wasm/wasm.rst +++ b/docs/root/api-v3/config/wasm/wasm.rst @@ -6,3 +6,4 @@ WASM :maxdepth: 2 ../../extensions/wasm/v3/* + ../../extensions/stat_sinks/wasm/v3/* diff --git a/docs/root/api-v3/config/watchdog/watchdog.rst b/docs/root/api-v3/config/watchdog/watchdog.rst index 60f284384d59..f5906b3390d3 100644 --- a/docs/root/api-v3/config/watchdog/watchdog.rst +++ b/docs/root/api-v3/config/watchdog/watchdog.rst @@ -6,3 +6,4 @@ Watchdog :maxdepth: 2 ../../extensions/watchdog/profile_action/v3alpha/* + ../../watchdog/v3alpha/* diff --git a/docs/root/api/api_supported_versions.rst b/docs/root/api/api_supported_versions.rst index 89a6cb181ea6..93b9bdeb45c3 100644 --- a/docs/root/api/api_supported_versions.rst +++ b/docs/root/api/api_supported_versions.rst @@ -8,7 +8,7 @@ multiple major API versions at any point in time. The following versions are cur * :ref:`v2 xDS API ` (*deprecated*, end-of-life EOY 2020). This API will not accept new features after the end of Q1 2020. -* :ref:`v3 xDS API ` (*active*, end-of-life EOY 2021). Envoy developers and +* :ref:`v3 xDS API ` (*active*, end-of-life unknown). Envoy developers and operators are encouraged to be actively adopting and working with v3 xDS. 
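For readers skimming the ``searchtools.js`` change above: the API version label is derived purely from the result's URL path. The following is a Python sketch of that same parsing, for illustration only (the exact markup appended to the search result item is not reproduced here, and the example URLs are hypothetical).

.. code-block:: python

   def api_version_label(link_url: str) -> str:
       """Return the api-vN path segment used as the label, or '' when none applies."""
       parts = [part for part in link_url.split("/") if part.startswith("api-v")]
       # Label the result only when exactly one api-vN segment is present in the path.
       return parts[0] if len(parts) == 1 else ""

   # Hypothetical docs URLs, for illustration only.
   assert api_version_label("docs/envoy/latest/api-v3/config/bootstrap/v3/bootstrap.proto.html") == "api-v3"
   assert api_version_label("docs/envoy/latest/start/start.html") == ""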
The following API versions are no longer supported by Envoy: diff --git a/docs/root/configuration/best_practices/_include/edge.yaml b/docs/root/configuration/best_practices/_include/edge.yaml new file mode 100644 index 000000000000..958a231610f9 --- /dev/null +++ b/docs/root/configuration/best_practices/_include/edge.yaml @@ -0,0 +1,102 @@ +overload_manager: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + # TODO: Tune for your system. + max_heap_size_bytes: 2147483648 # 2 GiB + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.98 + +admin: + access_log_path: "/var/log/envoy_admin.log" + address: + socket_address: + address: 127.0.0.1 + port_value: 9090 + +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 443 + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + typed_config: {} + per_connection_buffer_limit_bytes: 32768 # 32 KiB + filter_chains: + - filter_chain_match: + server_names: ["example.com", "www.example.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } + # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. 
+ # use_proxy_proto: true + filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + common_http_protocol_options: + idle_timeout: 3600s # 1 hour + headers_with_underscores_action: REJECT_REQUEST + http2_protocol_options: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + route_config: + virtual_hosts: + - name: default + domains: "*" + routes: + - match: { prefix: "/" } + route: + cluster: service_foo + idle_timeout: 15s # must be disabled for long-lived and streaming requests + clusters: + name: service_foo + connect_timeout: 15s + per_connection_buffer_limit_bytes: 32768 # 32 KiB + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 + http2_protocol_options: + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + +layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 + overload: + global_downstream_max_connections: 50000 diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index fc717a5f9235..d61c4684c71a 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -30,107 +30,5 @@ HTTP proxies should additionally configure: The following is a YAML example of the above recommendation (taken from the :ref:`Google VRP ` edge server configuration): -.. code-block:: yaml - - overload_manager: - refresh_interval: 0.25s - resource_monitors: - - name: "envoy.resource_monitors.fixed_heap" - typed_config: - "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig - # TODO: Tune for your system. - max_heap_size_bytes: 2147483648 # 2 GiB - actions: - - name: "envoy.overload_actions.shrink_heap" - triggers: - - name: "envoy.resource_monitors.fixed_heap" - threshold: - value: 0.95 - - name: "envoy.overload_actions.stop_accepting_requests" - triggers: - - name: "envoy.resource_monitors.fixed_heap" - threshold: - value: 0.98 - - admin: - access_log_path: "/var/log/envoy_admin.log" - address: - socket_address: - address: 127.0.0.1 - port_value: 9090 - - static_resources: - listeners: - - address: - socket_address: - address: 0.0.0.0 - port_value: 443 - listener_filters: - - name: "envoy.filters.listener.tls_inspector" - typed_config: {} - per_connection_buffer_limit_bytes: 32768 # 32 KiB - filter_chains: - - filter_chain_match: - server_names: ["example.com", "www.example.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "example_com_cert.pem" } - private_key: { filename: "example_com_key.pem" } - # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. 
- # use_proxy_proto: true - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - use_remote_address: true - common_http_protocol_options: - idle_timeout: 3600s # 1 hour - headers_with_underscores_action: REJECT_REQUEST - http2_protocol_options: - max_concurrent_streams: 100 - initial_stream_window_size: 65536 # 64 KiB - initial_connection_window_size: 1048576 # 1 MiB - stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests - request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests - route_config: - virtual_hosts: - - name: default - domains: "*" - routes: - - match: { prefix: "/" } - route: - cluster: service_foo - idle_timeout: 15s # must be disabled for long-lived and streaming requests - clusters: - name: service_foo - connect_timeout: 15s - per_connection_buffer_limit_bytes: 32768 # 32 KiB - load_assignment: - cluster_name: some_service - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8080 - http2_protocol_options: - initial_stream_window_size: 65536 # 64 KiB - initial_connection_window_size: 1048576 # 1 MiB - - layered_runtime: - layers: - - name: static_layer_0 - static_layer: - envoy: - resource_limits: - listener: - example_listener_name: - connection_limit: 10000 - overload: - global_downstream_max_connections: 50000 +.. literalinclude:: _include/edge.yaml + :language: yaml diff --git a/docs/root/configuration/http/http_conn_man/http_conn_man.rst b/docs/root/configuration/http/http_conn_man/http_conn_man.rst index a726c3983a7a..d4faa90c267d 100644 --- a/docs/root/configuration/http/http_conn_man/http_conn_man.rst +++ b/docs/root/configuration/http/http_conn_man/http_conn_man.rst @@ -13,6 +13,7 @@ HTTP connection manager headers header_sanitizing local_reply + response_code_details stats runtime rds diff --git a/docs/root/configuration/http/http_conn_man/local_reply.rst b/docs/root/configuration/http/http_conn_man/local_reply.rst index 5b87d9e3ef5c..d7649f0f4eaa 100644 --- a/docs/root/configuration/http/http_conn_man/local_reply.rst +++ b/docs/root/configuration/http/http_conn_man/local_reply.rst @@ -49,6 +49,8 @@ The response body content type can be customized. If not specified, the content Local reply format can be specified as :ref:`SubstitutionFormatString `. It supports :ref:`text_format ` and :ref:`json_format `. +Optionally, content-type can be modified further via :ref:`content_type ` field. If not specified, default content-type is `text/plain` for :ref:`text_format ` and `application/json` for :ref:`json_format `. + Example of a LocalReplyConfig with `body_format` field. .. code-block:: @@ -63,7 +65,8 @@ Example of a LocalReplyConfig with `body_format` field. runtime_key: key_b status_code: 401 body_format_override: - text_format: "%LOCAL_REPLY_BODY% %REQ(:path)%" + text_format: "

<h1>%LOCAL_REPLY_BODY% %REQ(:path)%</h1>

" + content_type: "text/html; charset=UTF-8" - filter: status_code_filter: comparison: diff --git a/docs/root/configuration/http/http_conn_man/response_code_details.rst b/docs/root/configuration/http/http_conn_man/response_code_details.rst new file mode 100644 index 000000000000..350c0767f93f --- /dev/null +++ b/docs/root/configuration/http/http_conn_man/response_code_details.rst @@ -0,0 +1,101 @@ +.. _config_http_conn_man_details: + +Response Code Details +===================== + +If _%RESPONSE_CODE_DETAILS%_ is configured on via :ref:`access logging`, +or :ref:`custom headers` Envoy will communicate the detailed +reason a given stream ended. +This page lists the details sent by the HttpConnectionManager, Router filter, and codecs. It is not comprehensive as +any other filters may send their own local replies with custom details. + +Below are the list of reasons the HttpConnectionManager or Router filter may send responses or reset streams. + +.. warning:: + The following list is not guaranteed to be stable, since the details are subject to change. + +.. csv-table:: + :header: Name, Description + :widths: 1, 2 + + absolute_path_rejected, The request was rejected due to using an absolute path on a route not supporting them. + admin_filter_response, The response was generated by the admin filter. + cluster_not_found, The request was rejected by the router filter because there was no cluster found for the selected route. + downstream_local_disconnect, The client connection was locally closed for an unspecified reason. + downstream_remote_disconnect, The client disconnected unexpectedly. + duration_timeout, The max connection duration was exceeded. + direct_response, A direct response was generated by the router filter. + filter_chain_not_found, The request was rejected due to no matching filter chain. + internal_redirect, The original stream was replaced with an internal redirect. + low_version, The HTTP/1.0 or HTTP/0.9 request was rejected due to HTTP/1.0 support not being configured. + maintenance_mode, The request was rejected by the router filter because the cluster was in maintenance mode. + max_duration_timeout, The per-stream max duration timeout was exceeded. + missing_host_header, The request was rejected due to a missing Host: or :authority field. + missing_path_rejected, The request was rejected due to a missing Path or :path header field. + no_healthy_upstream, The request was rejected by the router filter because there was no healthy upstream found. + overload, The request was rejected due to the Overload Manager reaching configured resource limits. + path_normalization_failed, "The request was rejected because path normalization was configured on and failed, probably due to an invalid path." + request_headers_failed_strict_check, The request was rejected due to x-envoy-* headers failing strict header validation. + request_overall_timeout, The per-stream total request timeout was exceeded. + request_payload_exceeded_retry_buffer_limit, Envoy is doing streaming proxying but too much data arrived while waiting to attempt a retry. + request_payload_too_large, Envoy is doing non-streaming proxying and the request payload exceeded configured limits. + response_payload_too_large, Envoy is doing non-streaming proxying and the response payload exceeded configured limits. + response_payload_too_large, Envoy is doing non-streaming proxying and the response payload exceeded configured limits. 
+ route_configuration_not_found, The request was rejected because there was no route configuration found. + route_not_found, The request was rejected because there was no route found. + stream_idle_timeout, The per-stream keepalive timeout was exceeded. + upgrade_failed, The request was rejected because it attempted an unsupported upgrade. + upstream_max_stream_duration_reached, The request was destroyed because it exceeded the configured max stream duration. + upstream_per_try_timeout, The final upstream try timed out. + upstream_reset_after_response_started{details}, The upstream connection was reset after a response was started. This may include further details about the cause of the disconnect. + upstream_reset_before_response_started{details}, The upstream connection was reset before a response was started. This may include further details about the cause of the disconnect. + upstream_response_timeout, The upstream response timed out. + via_upstream, The response code was set by the upstream. + + +.. _config_http_conn_man_details_per_codec: + +Per codec details +----------------- + +Each codec may send codec-specific details when encountering errors. + +Http1 details +~~~~~~~~~~~~~ + +All http1 details are rooted at *http1.* + +.. csv-table:: + :header: Name, Description + :widths: 1, 2 + + http1.body_disallowed, A body was sent on a request where bodies are not allowed. + http1.codec_error, Some error was encountered in the http_parser internals. + http1.connection_header_rejected, The Connection header was malformed or overly long. + http1.content_length_and_chunked_not_allowed, A request was sent with both Transfer-Encoding: chunked and a Content-Length header when disallowed by configuration. + http1.content_length_not_allowed, A content length was sent on a response it was disallowed on. + http1.headers_too_large, The overall byte size of request headers was larger than the configured limits. + http1.invalid_characters, The headers contained illegal characters. + http1.invalid_transfer_encoding, The Transfer-Encoding header was not valid. + http1.invalid_url, The request URL was not valid. + http1.too_many_headers, Too many headers were sent with this request. + http1.transfer_encoding_not_allowed, A transfer encoding was sent on a response it was disallowed on. + http1.unexpected_underscore, An underscore was sent in a header key when disallowed by configuration. + + +Http2 details +~~~~~~~~~~~~~ + +All http2 details are rooted at *http2.* + +.. csv-table:: + :header: Name, Description + :widths: 1, 2 + + http2.inbound_empty_frames_flood, Envoy detected an inbound HTTP/2 frame flood. + http2.invalid.header.field, One of the HTTP/2 headers was invalid. + http2.outbound_frames_flood, Envoy detected an HTTP/2 frame flood from the server. + http2.too_many_headers, The number of headers (or trailers) exceeded the configured limits. + http2.unexpected_underscore, Envoy was configured to drop requests with header keys beginning with underscores. + http2.unknown.nghttp2.error, An unknown error was encountered by nghttp2. + http2.violation.of.messaging.rule, The stream was in violation of an HTTP/2 messaging rule. diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index b8d4bf23591f..c6aa07f284d4 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -105,7 +105,9 @@ Each codec has the option of adding per-codec statistics. 
Both http1 and http2 h Http1 codec statistics ~~~~~~~~~~~~~~~~~~~~~~ -All http1 statistics are rooted at *http1.* +On the downstream side all http1 statistics are rooted at *http1.* + +On the upstream side all http1 statistics are rooted at *cluster..http1.* .. csv-table:: :header: Name, Type, Description @@ -119,7 +121,9 @@ All http1 statistics are rooted at *http1.* Http2 codec statistics ~~~~~~~~~~~~~~~~~~~~~~ -All http2 statistics are rooted at *http2.* +On the downstream side all http2 statistics are rooted at *http2.* + +On the upstream side all http2 statistics are rooted at *cluster..http2.* .. csv-table:: :header: Name, Type, Description @@ -139,6 +143,7 @@ All http2 statistics are rooted at *http2.* trailers, Counter, Total number of trailers seen on requests coming from downstream tx_flush_timeout, Counter, Total number of :ref:`stream idle timeouts ` waiting for open stream window to flush the remainder of a stream tx_reset, Counter, Total number of reset stream frames transmitted by Envoy + keepalive_timeout, Counter, Total number of connections closed due to :ref:`keepalive timeout ` streams_active, Gauge, Active streams as observed by the codec pending_send_bytes, Gauge, Currently buffered body data in bytes waiting to be written when stream/connection window is opened. diff --git a/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml b/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml new file mode 100644 index 000000000000..d9f51b804d48 --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml @@ -0,0 +1,67 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9901 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/force-host-rewrite" + route: + cluster: dynamic_forward_proxy_cluster + typed_per_filter_config: + envoy.filters.http.dynamic_forward_proxy: + "@type": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.PerRouteConfig + host_rewrite_literal: www.example.org + - match: + prefix: "/" + route: + cluster: dynamic_forward_proxy_cluster + http_filters: + - name: envoy.filters.http.dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig + dns_cache_config: + name: dynamic_forward_proxy_cache_config + dns_lookup_family: V4_ONLY + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + clusters: + - name: dynamic_forward_proxy_cluster + connect_timeout: 1s + lb_policy: CLUSTER_PROVIDED + cluster_type: + name: envoy.clusters.dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig + dns_cache_config: + name: dynamic_forward_proxy_cache_config + dns_lookup_family: V4_ONLY + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: {filename: /etc/ssl/certs/ca-certificates.crt} diff --git a/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml b/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml new file mode 100644 index 000000000000..dcbd0d06ff63 --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml @@ -0,0 +1,84 @@ +admin: + access_log_path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 9901 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + address: 0.0.0.0 + port_value: 80 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/route-with-filter-disabled" + route: + host_rewrite_literal: localhost + cluster: grpc + timeout: 5.00s + # per_filter_config disables the filter for this route + typed_per_filter_config: + envoy.filters.http.grpc_http1_reverse_bridge: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfigPerRoute + disabled: true + - match: + prefix: "/route-with-filter-enabled" + route: + host_rewrite_literal: localhost + cluster: other + timeout: 5.00s + http_filters: + - name: envoy.filters.http.grpc_http1_reverse_bridge + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig + content_type: application/grpc+proto + withhold_grpc_frames: true + - name: envoy.filters.http.router + typed_config: {} + clusters: + - name: other + connect_timeout: 5.00s + type: LOGICAL_DNS + dns_lookup_family: V4_ONLY + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 4630 + - name: grpc + connect_timeout: 5.00s + type: strict_dns + lb_policy: round_robin + http2_protocol_options: {} + load_assignment: + cluster_name: grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 10005 diff --git a/docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml b/docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml new file mode 100644 index 000000000000..f9c20ddcf2e9 --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml @@ -0,0 +1,59 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: { address: 0.0.0.0, port_value: 9901 } + +static_resources: + listeners: + - name: listener1 + address: + socket_address: { address: 0.0.0.0, port_value: 51051 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: grpc_json + codec_type: AUTO + route_config: + name: local_route + 
virtual_hosts: + - name: local_service + domains: ["*"] + routes: + # NOTE: by default, matching happens based on the gRPC route, and not on the incoming request path. + # Reference: https://www.envoyproxy.io/docs/envoy/latest/configuration/http_filters/grpc_json_transcoder_filter#route-configs-for-transcoded-requests + - match: { prefix: "/helloworld.Greeter" } + route: { cluster: grpc, timeout: { seconds: 60 } } + http_filters: + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + proto_descriptor: "protos/helloworld.pb" + services: ["helloworld.Greeter"] + print_options: + add_whitespace: true + always_print_primitive_fields: true + always_print_enums_as_ints: false + preserve_proto_field_names: false + - name: envoy.filters.http.router + + clusters: + - name: grpc + connect_timeout: 1.25s + type: logical_dns + lb_policy: round_robin + dns_lookup_family: V4_ONLY + http2_protocol_options: {} + load_assignment: + cluster_name: grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + # WARNING: "docker.for.mac.localhost" has been deprecated from Docker v18.03.0. + # If you're running an older version of Docker, please use "docker.for.mac.localhost" instead. + # Reference: https://docs.docker.com/docker-for-mac/release-notes/#docker-community-edition-18030-ce-mac59-2018-03-26 + address: host.docker.internal + port_value: 50051 diff --git a/docs/root/configuration/http/http_filters/_include/helloworld.pb b/docs/root/configuration/http/http_filters/_include/helloworld.pb new file mode 100644 index 000000000000..88eda67b2cd1 Binary files /dev/null and b/docs/root/configuration/http/http_filters/_include/helloworld.pb differ diff --git a/docs/root/configuration/http/http_filters/_include/helloworld.proto b/docs/root/configuration/http/http_filters/_include/helloworld.proto new file mode 100644 index 000000000000..9b5615252428 --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/helloworld.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package helloworld; + +import "google/api/annotations.proto"; + +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello(HelloRequest) returns (HelloReply) { + option (google.api.http) = { + get: "/say" + }; + } +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/docs/root/configuration/http/http_filters/admission_control_filter.rst b/docs/root/configuration/http/http_filters/admission_control_filter.rst new file mode 100644 index 000000000000..146b50dc31c3 --- /dev/null +++ b/docs/root/configuration/http/http_filters/admission_control_filter.rst @@ -0,0 +1,127 @@ +.. _config_http_filters_admission_control: + +Admission Control +================= + +.. attention:: + + The admission control filter is experimental and is currently under active development. + +See the :ref:`v3 API reference ` for details on each configuration parameter. + +Overview +-------- + +The admission control filter probabilistically rejects requests based on the success rate of +previous requests in a configurable sliding time window. It is based on `client-side +throttling `_ from the `Google SRE handbook `_. 
The only notable difference between the admission control +filter's load shedding and load shedding defined in client-side throttling is that users may +configure how aggressively load shedding starts at a target request success rate. Users may also +configure the definition of a successful request for the purposes of the rejection probability +calculation. + +The probability that the filter will reject a request is as follows: + +.. math:: + + P_{reject} = {(\frac{n_{total} - s}{n_{total} + 1})}^\frac{1}{aggression} + +where, + +.. math:: + + s = \frac{n_{success}}{threshold} + + +- *n* refers to a request count gathered in the sliding window. +- *threshold* is a configurable value that dictates the lowest request success rate at which the + filter will **not reject** requests. The value is normalized to [0,1] for the calculation. +- *aggression* controls the rejection probability curve such that 1.0 is a linear increase in + rejection probability as the success rate decreases. As the **aggression** increases, the + rejection probability will be higher for higher success rates. See `Aggression`_ for a more + detailed explanation. + +.. note:: + The success rate calculations are performed on a per-thread basis for increased performance. In + addition, the per-thread isolation decreases the blast radius of a single bad connection + with an anomalous success rate. Therefore, the rejection probability may vary between worker + threads. + +.. note:: + Health check traffic does not count towards any of the filter's measurements. + +See the :ref:`v3 API reference +` for more +details on this parameter. + +The definition of a successful request is a :ref:`configurable parameter +` +for both HTTP and gRPC requests. + +Aggression +~~~~~~~~~~ + +The aggression value affects the rejection probabilities as shown in the following figures: + +.. image:: images/aggression_graph.png + +Since the success rate threshold in the first figure is set to 95%, the rejection probability +remains 0 until the success rate drops below 95%. In the second figure, the rejection probability remains 0 until the success +rate reaches 50%. In both cases, as success rate drops to 0%, the rejection probability approaches a +value just under 100%. The aggression value dictates how high the rejection probability will be at a +given request success rate; higher values shed load more *aggressively*. + +Example Configuration +--------------------- +An example filter configuration can be found below. Not all fields are required and many of the +fields can be overridden via runtime settings. + +.. code-block:: yaml + + name: envoy.filters.http.admission_control + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl + enabled: + default_value: true + runtime_key: "admission_control.enabled" + sampling_window: 120s + sr_threshold: + default_value: 95.0 + runtime_key: "admission_control.sr_threshold" + aggression: + default_value: 1.5 + runtime_key: "admission_control.aggression" + success_criteria: + http_criteria: + http_success_status: + - start: 100 + end: 400 + - start: 404 + end: 404 + grpc_criteria: + grpc_success_status: + - 0 + - 1 + +The above configuration can be understood as follows: + +* Calculate the request success-rate over a 120s sliding window. +* Do not begin shedding any load until the request success-rate drops below 95% in the sliding + window. +* HTTP requests are considered successful if they are 1xx, 2xx, 3xx, or a 404. 
+* gRPC requests are considered successful if they are OK or CANCELLED. + +Statistics +---------- +The admission control filter outputs statistics in the +*http..admission_control.* namespace. The :ref:`stat prefix +` +comes from the owning HTTP connection manager. + +.. csv-table:: + :header: Name, Type, Description + :widths: auto + + rq_rejected, Counter, Total requests that were not admitted by the filter. + rq_success, Counter, Total requests that were considered a success. + rq_failure, Counter, Total requests that were considered a failure. diff --git a/docs/root/configuration/http/http_filters/cdn_loop_filter.rst b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst new file mode 100644 index 000000000000..5b81d1be2522 --- /dev/null +++ b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst @@ -0,0 +1,56 @@ +.. _config_http_filters_cdn_loop: + +CDN-Loop header +=============== + +The CDN-Loop header filter participates in the cross-CDN loop detection protocol specified by `RFC +8586 `_. The CDN-Loop header filter performs two actions. +First, the filter checks to see how many times a particular CDN identifier has appeared in the +CDN-Loop header. Next, if the check passes, the filter then appends the CDN identifier to the +CDN-Loop header and passes the request to the next upstream filter. If the check fails, the filter +stops processing on the request and returns an error response. + +RFC 8586 is particular in how the CDN-Loop header should be modified. As such: + +* other filters in the filter chain should not modify the CDN-Loop header and +* the HTTP route configuration's :ref:`request_headers_to_add + ` or + :ref:`request_headers_to_remove + ` fields should + not contain the CDN-Loop header. + +The filter will coalesce multiple CDN-Loop headers into a single, comma-separated header. + +Configuration +------------- + +The filter is configured with the name *envoy.filters.http.cdn_loop*. + +The `filter config `_ has two fields. + +* The *cdn_id* field sets the identifier that the filter will look for within and append to the + CDN-Loop header. RFC 8586 calls this field the "cdn-id"; "cdn-id" can either be a pseudonym or a + hostname the CDN provider has control of. The *cdn_id* field must not be empty. +* The *max_allowed_occurrences* field controls how many times *cdn_id* can appear in the CDN-Loop + header on downstream requests (before the filter appends *cdn_id* to the header). If the *cdn_id* + appears more than *max_allowed_occurrences* times in the header, the filter will reject the + downstream's request. Most users should configure *max_allowed_occurrences* to be 0 (the + default). + +Response Code Details +--------------------- + +.. list-table:: + :header-rows: 1 + + * - Name + - HTTP Status + - Description + * - invalid_cdn_loop_header + - 400 (Bad Request) + - The CDN-Loop header in the downstream is invalid or unparseable. + * - cdn_loop_detected + - 502 (Bad Gateway) + - The *cdn_id* value appears more than *max_allowed_occurrences* in the CDN-Loop header, + indicating a loop between CDNs. + diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst index 862af5304065..2fa10f00d6bf 100644 --- a/docs/root/configuration/http/http_filters/compressor_filter.rst +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -77,8 +77,8 @@ the extension. When compression is *applied*: - The *content-length* is removed from response headers. 
-- Response headers contain "*transfer-encoding: chunked*" and do not contain - "*content-encoding*" header. +- Response headers contain "*transfer-encoding: chunked*", and + "*content-encoding*" with the compression scheme used (e.g., ``gzip``). - The "*vary: accept-encoding*" header is inserted on every response. Also the "*vary: accept-encoding*" header may be inserted even if compression is *not* diff --git a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst index 290ae4f24773..4e1ca3526341 100644 --- a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst +++ b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst @@ -35,75 +35,8 @@ host when forwarding. See the example below within the configured routes. If this runtime feature is disabled, cluster circuit breakers will be used even when setting the configuration of :ref:`DNS cache circuit breakers `. -.. code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 9901 - static_resources: - listeners: - - name: listener_0 - address: - socket_address: - protocol: TCP - address: 0.0.0.0 - port_value: 10000 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - match: - prefix: "/force-host-rewrite" - route: - cluster: dynamic_forward_proxy_cluster - typed_per_filter_config: - envoy.filters.http.dynamic_forward_proxy: - "@type": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.PerRouteConfig - host_rewrite_literal: www.example.org - - match: - prefix: "/" - route: - cluster: dynamic_forward_proxy_cluster - http_filters: - - name: envoy.filters.http.dynamic_forward_proxy - typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig - dns_cache_config: - name: dynamic_forward_proxy_cache_config - dns_lookup_family: V4_ONLY - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - clusters: - - name: dynamic_forward_proxy_cluster - connect_timeout: 1s - lb_policy: CLUSTER_PROVIDED - cluster_type: - name: envoy.clusters.dynamic_forward_proxy - typed_config: - "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig - dns_cache_config: - name: dynamic_forward_proxy_cache_config - dns_lookup_family: V4_ONLY - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext - common_tls_context: - validation_context: - trusted_ca: {filename: /etc/ssl/certs/ca-certificates.crt} +.. 
literalinclude:: _include/dns-cache-circuit-breaker.yaml + :language: yaml Statistics ---------- diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index b66ef07cb464..269789a4be66 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -22,7 +22,7 @@ configuration options at :ref:`HTTP filter `. Configuration Examples ------------------------------ +---------------------- A sample filter configuration for a gRPC authorization server: @@ -60,6 +60,38 @@ A sample filter configuration for a gRPC authorization server: # entire request. connect_timeout: 0.25s +.. note:: + + One of the features of this filter is to send the HTTP request body to the configured gRPC + authorization server as part of the :ref:`check request + `. + + A sample configuration is as follows: + + .. code:: yaml + + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + grpc_service: + envoy_grpc: + cluster_name: ext-authz + with_request_body: + max_request_bytes: 1024 + allow_partial_message: true + pack_as_bytes: true + + Please note that by default the :ref:`check request` + carries the HTTP request body as a UTF-8 string and fills the :ref:`body + ` field. To pack the request + body as raw bytes, you need to set the :ref:`pack_as_bytes + ` field to + true. As a result, the :ref:`raw_body + ` + field will be set and the :ref:`body + ` field will be empty. + A sample filter configuration for a raw HTTP authorization server: .. code-block:: yaml @@ -133,8 +165,10 @@ The HTTP filter outputs statistics in the *cluster..ext_au :widths: 1, 1, 2 ok, Counter, Total responses from the filter. - error, Counter, Total errors contacting the external service. + error, Counter, Total errors (including timeouts) contacting the external service. + timeout, Counter, Total timeouts contacting the external service (only counted when the timeout is measured from when the check request is created). denied, Counter, Total responses from the authorizations service that were to deny the traffic. + disabled, Counter, Total requests that are allowed without calling external services because the filter is disabled. failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of failure_mode_allow set to true." diff --git a/docs/root/configuration/http/http_filters/fault_filter.rst b/docs/root/configuration/http/http_filters/fault_filter.rst index 62b9cd9e28c8..2dbd48c58846 100644 --- a/docs/root/configuration/http/http_filters/fault_filter.rst +++ b/docs/root/configuration/http/http_filters/fault_filter.rst @@ -38,7 +38,7 @@ fault configuration. The currently supported header controls are: x-envoy-fault-abort-request HTTP status code to abort a request with. The header value should be an integer that specifies - the HTTP status code to return in response to a request and must be in the range [200, 600). + the HTTP status code to return in response to a request and must be in the range [200, 600). In order for the header to work, :ref:`header_abort ` needs to be set. @@ -47,8 +47,8 @@ x-envoy-fault-abort-grpc-request the gRPC status code to return in response to a request. Its value range is [0, UInt32.Max] instead of [0, 16] to allow testing even not well-defined gRPC status codes. 
When this header is set, the HTTP response status code will be set to 200. In order for the header to work, :ref:`header_abort - ` needs to be set. If both - *x-envoy-fault-abort-request* and *x-envoy-fault-abort-grpc-request* headers are set then + ` needs to be set. If both + *x-envoy-fault-abort-request* and *x-envoy-fault-abort-grpc-request* headers are set then *x-envoy-fault-abort-grpc-request* header will be **ignored** and fault response http status code will be set to *x-envoy-fault-abort-request* header value. @@ -105,6 +105,10 @@ x-envoy-fault-throughput-response-percentage ` setting to limit the maximum concurrent faults that can be active at any given time. +.. note:: + + If the headers appear multiple times only the first value is used. + The following is an example configuration that enables header control for both of the above options: diff --git a/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst b/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst index ed668b936a3f..d8d918c76c3b 100644 --- a/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst @@ -42,89 +42,5 @@ with the gRPC frame header and respond with gRPC formatted responses. How to disable HTTP/1.1 reverse bridge filter per route ------------------------------------------------------- -.. code-block:: yaml - - admin: - access_log_path: /dev/stdout - address: - socket_address: - address: 0.0.0.0 - port_value: 9901 - static_resources: - listeners: - - name: listener_0 - address: - socket_address: - address: 0.0.0.0 - port_value: 80 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - access_log: - - name: envoy.access_loggers.file - typed_config: - "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog - path: /dev/stdout - stat_prefix: ingress_http - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - match: - prefix: "/route-with-filter-disabled" - route: - host_rewrite: localhost - cluster: grpc - timeout: 5.00s - # per_filter_config disables the filter for this route - typed_per_filter_config: - envoy.filters.http.grpc_http1_reverse_bridge: - "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfigPerRoute - disabled: true - - match: - prefix: "/route-with-filter-enabled" - route: - host_rewrite: localhost - cluster: other - timeout: 5.00s - http_filters: - - name: envoy.filters.http.grpc_http1_reverse_bridge - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig - content_type: application/grpc+proto - withhold_grpc_frames: true - - name: envoy.filters.http.router - typed_config: {} - clusters: - - name: other - connect_timeout: 5.00s - type: LOGICAL_DNS - dns_lookup_family: V4_ONLY - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: some_service - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: localhost - port_value: 4630 - - name: grpc - connect_timeout: 5.00s - type: strict_dns - lb_policy: round_robin - http2_protocol_options: {} - load_assignment: - cluster_name: grpc - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 
localhost - port_value: 10005 +.. literalinclude:: _include/grpc-reverse-bridge-filter.yaml + :language: yaml diff --git a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst index a8c796b5bcb4..c89093b84658 100644 --- a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst @@ -29,17 +29,18 @@ To generate a protobuf descriptor set for the gRPC service, you'll also need to googleapis repository from GitHub before running protoc, as you'll need annotations.proto in your include path, to define the HTTP mapping. -.. code-block:: bash +.. code-block:: console - git clone https://github.com/googleapis/googleapis - GOOGLEAPIS_DIR= + $ git clone https://github.com/googleapis/googleapis + $ GOOGLEAPIS_DIR= -Then run protoc to generate the descriptor set from bookstore.proto: +Then run protoc to generate the descriptor set. For example using the test +:repo:`bookstore.proto ` provided in the Envoy repository: -.. code-block:: bash +.. code-block:: console - protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ - --descriptor_set_out=proto.pb test/proto/bookstore.proto + $ protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ + --descriptor_set_out=proto.pb test/proto/bookstore.proto If you have more than one proto source files, you can pass all of them in one command. @@ -56,19 +57,17 @@ For example, with the following proto example, the router will process `/hellowo as the path, so the route config prefix `/say` won't match requests to `SayHello`. If you want to match the incoming request path, set `match_incoming_request_route` to true. -.. code-block:: proto +.. literalinclude:: _include/helloworld.proto + :language: proto - package helloworld; +Assuming you have checked out the google APIs as described above, and have saved the proto file as +``protos/helloworld.proto`` you can build it with: + +.. code-block:: console + + $ protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ + --descriptor_set_out=protos/helloworld.pb protos/helloworld.proto - // The greeting service definition. - service Greeter { - // Sends a greeting - rpc SayHello (HelloRequest) returns (HelloReply) { - option (google.api.http) = { - get: "/say" - }; - } - } Sending arbitrary content ------------------------- @@ -103,65 +102,5 @@ Here's a sample Envoy configuration that proxies to a gRPC server running on loc gRPC requests and uses the gRPC-JSON transcoder filter to provide the RESTful JSON mapping. I.e., you can make either gRPC or RESTful JSON requests to localhost:51051. -.. code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: { address: 0.0.0.0, port_value: 9901 } - - static_resources: - listeners: - - name: listener1 - address: - socket_address: { address: 0.0.0.0, port_value: 51051 } - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: grpc_json - codec_type: AUTO - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - # NOTE: by default, matching happens based on the gRPC route, and not on the incoming request path. 
- # Reference: https://www.envoyproxy.io/docs/envoy/latest/configuration/http_filters/grpc_json_transcoder_filter#route-configs-for-transcoded-requests - - match: { prefix: "/helloworld.Greeter" } - route: { cluster: grpc, timeout: { seconds: 60 } } - http_filters: - - name: envoy.filters.http.grpc_json_transcoder - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder - proto_descriptor: "/tmp/envoy/proto.pb" - services: ["helloworld.Greeter"] - print_options: - add_whitespace: true - always_print_primitive_fields: true - always_print_enums_as_ints: false - preserve_proto_field_names: false - - name: envoy.filters.http.router - - clusters: - - name: grpc - connect_timeout: 1.25s - type: logical_dns - lb_policy: round_robin - dns_lookup_family: V4_ONLY - http2_protocol_options: {} - load_assignment: - cluster_name: grpc - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - # WARNING: "docker.for.mac.localhost" has been deprecated from Docker v18.03.0. - # If you're running an older version of Docker, please use "docker.for.mac.localhost" instead. - # Reference: https://docs.docker.com/docker-for-mac/release-notes/#docker-community-edition-18030-ce-mac59-2018-03-26 - address: host.docker.internal - port_value: 50051 - +.. literalinclude:: _include/grpc-transcoder-filter.yaml + :language: yaml diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index 33d6fe58cfea..801d9f131943 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -7,9 +7,11 @@ HTTP filters :maxdepth: 2 adaptive_concurrency_filter + admission_control_filter aws_lambda_filter aws_request_signing_filter buffer_filter + cdn_loop_filter compressor_filter cors_filter csrf_filter @@ -28,6 +30,7 @@ HTTP filters header_to_metadata_filter ip_tagging_filter jwt_authn_filter + local_rate_limit_filter lua_filter oauth2_filter on_demand_updates_filter @@ -37,6 +40,7 @@ HTTP filters router_filter squash_filter tap_filter + wasm_filter .. TODO(toddmgreer): Remove this hack and add user-visible CacheFilter docs when CacheFilter is production-ready. .. toctree:: @@ -45,3 +49,4 @@ HTTP filters ../../../api-v3/extensions/filters/http/admission_control/v3alpha/admission_control.proto ../../../api-v3/extensions/filters/http/oauth2/v3alpha/oauth.proto ../../../api-v3/extensions/filters/http/cache/v3alpha/cache.proto + ../../../api-v3/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto diff --git a/docs/root/configuration/http/http_filters/images/aggression_graph.png b/docs/root/configuration/http/http_filters/images/aggression_graph.png new file mode 100644 index 000000000000..a8a6bde4e803 Binary files /dev/null and b/docs/root/configuration/http/http_filters/images/aggression_graph.png differ diff --git a/docs/root/configuration/http/http_filters/local_rate_limit_filter.rst b/docs/root/configuration/http/http_filters/local_rate_limit_filter.rst new file mode 100644 index 000000000000..78bbc806a78e --- /dev/null +++ b/docs/root/configuration/http/http_filters/local_rate_limit_filter.rst @@ -0,0 +1,137 @@ +.. _config_http_filters_local_rate_limit: + +Local rate limit +================ + +* Local rate limiting :ref:`architecture overview ` +* :ref:`v3 API reference ` +* This filter should be configured with the name *envoy.filters.http.local_ratelimit*. 
+ +The HTTP local rate limit filter applies a :ref:`token bucket +` rate +limit when the request's route or virtual host has a per-filter +:ref:`local rate limit configuration `. + +If the local rate limit token bucket is checked, and there are no tokens available, a 429 response is returned +(the response is configurable). The local rate limit filter also sets the +:ref:`x-envoy-ratelimited` header. Additional response +headers may be configured. + +.. note:: + The token bucket is shared across all workers, thus the rate limits are applied per Envoy process. + +Example configuration +--------------------- + +Example filter configuration for a globally set rate limiter (e.g.: all vhosts/routes share the same token bucket): + +.. code-block:: yaml
+ + name: envoy.filters.http.local_ratelimit + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: 10000 + tokens_per_fill: 1000 + fill_interval: 1s + filter_enabled: + runtime_key: local_rate_limit_enabled + default_value: + numerator: 100 + denominator: HUNDRED + filter_enforced: + runtime_key: local_rate_limit_enforced + default_value: + numerator: 100 + denominator: HUNDRED + response_headers_to_add: + - append: false + header: + key: x-local-rate-limit + value: 'true' + +
+Example filter configuration for a globally disabled rate limiter but enabled for a specific route: + +.. code-block:: yaml + + name: envoy.filters.http.local_ratelimit + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + +
+The route-specific configuration: + +.. code-block:: yaml + + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: { prefix: "/path/with/rate/limit" } + route: { cluster: service_protected_by_rate_limit } + typed_per_filter_config: + envoy.filters.http.local_ratelimit: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + token_bucket: + max_tokens: 10000 + tokens_per_fill: 1000 + fill_interval: 1s + filter_enabled: + runtime_key: local_rate_limit_enabled + default_value: + numerator: 100 + denominator: HUNDRED + filter_enforced: + runtime_key: local_rate_limit_enforced + default_value: + numerator: 100 + denominator: HUNDRED + response_headers_to_add: + - append: false + header: + key: x-local-rate-limit + value: 'true' + - match: { prefix: "/" } + route: { cluster: default_service } + +
+Note that if this filter is configured as globally disabled and there are no virtual host or route level +token buckets, no rate limiting will be applied. + +Statistics +---------- + +The local rate limit filter outputs statistics in the *.http_local_rate_limit.* namespace. +429 responses -- or the configured status code -- are emitted to the normal cluster :ref:`dynamic HTTP statistics +`. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + enabled, Counter, Total number of requests for which the rate limiter was consulted + ok, Counter, Total under limit responses from the token bucket + rate_limited, Counter, Total responses without an available token (but not necessarily enforced) + enforced, Counter, Total number of requests for which rate limiting was applied (e.g.: 429 returned) + +..
_config_http_filters_local_rate_limit_runtime: + +Runtime +------- + +The HTTP local rate limit filter supports the following runtime fractional settings: + +http_filter_enabled + % of requests that will check the local rate limit decision, but not enforce it, for a given *route_key* specified + in the :ref:`local rate limit configuration `. + Defaults to 0. + +http_filter_enforcing + % of requests that will enforce the local rate limit decision for a given *route_key* specified in the + :ref:`local rate limit configuration `. + Defaults to 0. This can be used to test what would happen before fully enforcing the outcome.
diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 88981aa84148..ebd60a7c0ded 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -89,10 +89,10 @@ on the virtual host, route, or weighted cluster. LuaPerRoute provides two ways of overriding the `GLOBAL` Lua script: -* By providing a name reference to the defined :ref:`named Lua source codes map +* By providing a name reference to the defined :ref:`named Lua source codes map `. -* By providing inline :ref:`source code - ` (This allows the +* By providing inline :ref:`source code + ` (This allows the code to be sent through RDS). As a concrete example, given the following Lua filter configuration: @@ -143,7 +143,7 @@ The ``GLOBAL`` Lua script will be overridden by the referenced script: `. Therefore, do not use ``GLOBAL`` as a name for other Lua scripts. -Or we can define a new Lua script in the LuaPerRoute configuration directly to override the `GLOBAL` +Or we can define a new Lua script in the LuaPerRoute configuration directly to override the `GLOBAL` Lua script as follows: .. code-block:: yaml @@ -237,6 +237,40 @@ more details on the supported API. response_handle:logInfo("Status: "..response_handle:headers():get(":status")) end
+A common use case is rewriting the upstream response body, for example when an upstream sends a non-2xx +response with JSON data but the application requires an HTML page to be sent to browsers. + +There are two ways of doing this. The first is via the `body()` API. + +.. code-block:: lua + + function envoy_on_response(response_handle) + local content_length = response_handle:body():setBytes("Not Found") + response_handle:headers():replace("content-length", content_length) + response_handle:headers():replace("content-type", "text/html") + end + +
+Alternatively, the `bodyChunks()` API lets Envoy skip buffering the upstream response data. + +.. code-block:: lua + + function envoy_on_response(response_handle) + + -- Sets the content-length to match the new body ("Not Found" is 9 bytes). + response_handle:headers():replace("content-length", 9) + response_handle:headers():replace("content-type", "text/html") + + local last + for chunk in response_handle:bodyChunks() do + -- Clears each received chunk. + chunk:setBytes("") + last = chunk + end + + last:setBytes("Not Found") + end +
.. _config_http_filters_lua_stream_handle_api: Complete example @@ -556,6 +590,17 @@ cause a buffer segment to be copied. *index* is an integer and supplies the buff copy. *length* is an integer and supplies the buffer length to copy. *index* + *length* must be less than the buffer length. +.. _config_http_filters_lua_buffer_wrapper_api_set_bytes: + +setBytes() +^^^^^^^^^^ + +.. code-block:: lua + + buffer:setBytes(string) + +Sets the content of the wrapped buffer to the input string and returns the new buffer length (as used in the response body rewrite example above). + ..
_config_http_filters_lua_metadata_wrapper: Metadata object API diff --git a/docs/root/configuration/http/http_filters/oauth2_filter.rst b/docs/root/configuration/http/http_filters/oauth2_filter.rst index bf10c1839e93..acbdda6780c7 100644 --- a/docs/root/configuration/http/http_filters/oauth2_filter.rst +++ b/docs/root/configuration/http/http_filters/oauth2_filter.rst @@ -14,36 +14,82 @@ OAuth2 Example configuration --------------------- -.. code-block:: - - http_filters: - - name: oauth2 - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 - token_endpoint: - cluster: oauth - uri: oauth.com/token - timeout: 3s - authorization_endpoint: https://oauth.com/oauth/authorize/ - redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" - redirect_path_matcher: - path: - exact: /callback - signout_path: - path: - exact: /signout - credentials: - client_id: foo - token_secret: - name: token - hmac_secret: - name: hmac +The following is an example configuring the filter. + +.. validated-code-block:: yaml + :type-name: envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + + config: + token_endpoint: + cluster: oauth + uri: oauth.com/token timeout: 3s - - name: envoy.router + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + credentials: + client_id: foo + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac.yaml" + +And the below code block is an example of how we employ it as one of +:ref:`HttpConnectionManager HTTP filters +` + +.. code-block:: yaml + + static_resources: + listeners: + - name: + address: + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + http_filters: + - name: envoy.filters.http.oauth2 + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + config: + token_endpoint: + cluster: oauth + uri: oauth.com/token + timeout: 3s + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + credentials: + client_id: foo + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac.yaml" + - name: envoy.router clusters: - name: service - ... + # ... - name: auth connect_timeout: 5s type: LOGICAL_DNS @@ -53,21 +99,25 @@ Example configuration endpoints: - lb_endpoints: - endpoint: - address: { socket_address: { address: auth.example.com, port_value: 443 }} - tls_context: { sni: auth.example.com } + address: + socket_address: + address: auth.example.com + port_value: 443 + tls_context: + sni: auth.example.com Notes ----- -This module does not currently provide much Cross-Site-Request-Forgery protection for the redirect loop -to the OAuth server and back. +This module does not currently provide much Cross-Site-Request-Forgery protection for the redirect +loop to the OAuth server and back. The service must be served over HTTPS for this filter to work, as the cookies use `;secure`. 
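+The ``sds_config`` paths in the example above point at file-based SDS secret resources. As an
+illustrative sketch only (not part of this change; the inline value is a placeholder), a file such
+as ``/etc/envoy/token-secret.yaml`` could contain a ``generic_secret`` resource along these lines:
+
+.. code-block:: yaml
+
+  # Hypothetical contents of /etc/envoy/token-secret.yaml; replace the inline
+  # value with the real OAuth client secret. The resource name matches the
+  # "token" name referenced by token_secret above.
+  resources:
+  - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret
+    name: token
+    generic_secret:
+      secret:
+        inline_string: "<oauth-client-secret>"
+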
Statistics ---------- -The OAuth filter outputs statistics in the *.* namespace. +The OAuth2 filter outputs statistics in the *.* namespace. .. csv-table:: :header: Name, Type, Description
diff --git a/docs/root/configuration/http/http_filters/rate_limit_filter.rst b/docs/root/configuration/http/http_filters/rate_limit_filter.rst index 91ce997c72cd..0896f9a5b86d 100644 --- a/docs/root/configuration/http/http_filters/rate_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/rate_limit_filter.rst @@ -14,7 +14,9 @@ can optionally include the virtual host rate limit configurations. More than one apply to a request. Each configuration results in a descriptor being sent to the rate limit service. If the rate limit service is called, and the response for any of the descriptors is over limit, a -429 response is returned. The rate limit filter also sets the :ref:`x-envoy-ratelimited` header. +429 response is returned. The rate limit filter also sets the :ref:`x-envoy-ratelimited` header, +unless :ref:`disable_x_envoy_ratelimited_header ` is +set to true. If there is an error in calling the rate limit service or the rate limit service returns an error and :ref:`failure_mode_deny ` is set to true, a 500 response is returned.
diff --git a/docs/root/configuration/http/http_filters/rbac_filter.rst b/docs/root/configuration/http/http_filters/rbac_filter.rst index 5db112d924ef..e8cd4b1b69ab 100644 --- a/docs/root/configuration/http/http_filters/rbac_filter.rst +++ b/docs/root/configuration/http/http_filters/rbac_filter.rst @@ -11,6 +11,11 @@ as well as the incoming request's HTTP headers. This filter also supports policy and shadow mode, shadow mode won't affect real users, it is used to test that a new set of policies work before rolling out to production. +When a request is denied, the :ref:`RESPONSE_CODE_DETAILS` +will include the name of the matched policy that caused the deny in the format of `rbac_access_denied_matched_policy[policy_name]` +(policy_name will be `none` if no policy matched). This helps to distinguish a deny returned by the Envoy RBAC +filter from one returned by the upstream backend. + * :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.rbac*.
diff --git a/docs/root/configuration/http/http_filters/wasm_filter.rst b/docs/root/configuration/http/http_filters/wasm_filter.rst new file mode 100644 index 000000000000..89c6528a5392 --- /dev/null +++ b/docs/root/configuration/http/http_filters/wasm_filter.rst @@ -0,0 +1,36 @@ +.. _config_http_filters_wasm: + +Wasm +==== + +* :ref:`v3 API reference ` + +.. attention:: + + The Wasm filter is experimental and is currently under active development. Capabilities will + be expanded over time and the configuration structures are likely to change. + +The HTTP Wasm filter is used to implement an HTTP filter with a Wasm plugin. + +Example configuration +--------------------- + +Example filter configuration: + +.. code-block:: yaml + + name: envoy.filters.http.wasm + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + config: + config: + name: "my_plugin" + vm_config: + runtime: "envoy.wasm.runtime.v8" + code: + local: + filename: "/etc/envoy_filter_http_wasm_example.wasm" + allow_precompiled: true + + +The preceding snippet configures a filter from a Wasm binary on local disk.
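+The example above loads the module from the local filesystem. As a rough sketch (not part of this
+change; ``wasm_modules`` is an assumed cluster name and the URI and checksum are placeholders), the
+``code`` field can instead reference a remote source for the module:
+
+.. code-block:: yaml
+
+  vm_config:
+    runtime: "envoy.wasm.runtime.v8"
+    code:
+      remote:
+        http_uri:
+          uri: "https://example.com/my_plugin.wasm"
+          # Cluster through which the module is fetched; assumed to be defined
+          # elsewhere in the configuration.
+          cluster: wasm_modules
+          timeout: 5s
+        # sha256 checksum used to verify the downloaded binary.
+        sha256: "<sha256-of-wasm-module>"
+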
diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst index a54c9ab89b0f..f5b8c778e95e 100644 --- a/docs/root/configuration/listeners/lds.rst +++ b/docs/root/configuration/listeners/lds.rst @@ -18,10 +18,15 @@ The semantics of listener updates are as follows: * Listeners are effectively constant once created. Thus, when a listener is updated, an entirely new listener is created (with the same listen socket). This listener goes through the same warming process described above for a newly added listener. -* When a listener is updated or removed, the old listener will be placed into a "draining" state +* When a listener is removed, the old listener will be placed into a "draining" state much like when the entire server is drained for restart. Connections owned by the listener will be gracefully closed (if possible) for some period of time before the listener is removed and any remaining connections are closed. The drain time is set via the :option:`--drain-time-s` option. +* When a tcp listener is updated, if the new listener contains a subset of filter chains in the old listener, + the connections owned by these overlapping filter chains remain open. Only the connections owned by the + removed filter chains will be drained following the above pattern. Note that if any global listener attributes are + changed, the entire listener (and all filter chains) are drained similar to removal above. See + :ref:`filter chain only update ` for detailed rules to reason about the impacted filter chains. .. note:: diff --git a/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml b/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml new file mode 100644 index 000000000000..28b8fce20434 --- /dev/null +++ b/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml @@ -0,0 +1,42 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9901 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + listener_filters: + - name: envoy.filters.listener.tls_inspector + filter_chains: + - filters: + - name: envoy.filters.network.sni_dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig + port_value: 443 + dns_cache_config: + name: dynamic_forward_proxy_cache_config + dns_lookup_family: V4_ONLY + - name: envoy.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + stat_prefix: tcp + cluster: dynamic_forward_proxy_cluster + clusters: + - name: dynamic_forward_proxy_cluster + connect_timeout: 1s + lb_policy: CLUSTER_PROVIDED + cluster_type: + name: envoy.clusters.dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig + dns_cache_config: + name: dynamic_forward_proxy_cache_config + dns_lookup_family: V4_ONLY diff --git a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst index eaa6ca5bae4c..441da8ec5c37 100644 --- a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst +++ b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst @@ -66,6 +66,7 @@ 
The network filter outputs statistics in the *config.ext_authz.* namespace. total, Counter, Total responses from the filter. error, Counter, Total errors contacting the external service. denied, Counter, Total responses from the authorization service that were to deny the traffic. + disabled, Counter, Total requests that are allowed without calling the external service because the filter is disabled. failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of failure_mode_allow set to true." ok, Counter, Total responses from the authorization service that were to allow the traffic.
diff --git a/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst b/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst index 4ab02eb7145a..c1fa26f7c0d1 100644 --- a/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst +++ b/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst @@ -8,6 +8,9 @@ Local rate limit ` * This filter should be configured with the name *envoy.filters.network.local_ratelimit*. +.. note:: + The token bucket is shared across all workers, thus the rate limits are applied per Envoy process. + .. note:: Global rate limiting is also supported via the :ref:`global rate limit filter `.
diff --git a/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst index 8c5a451ba4ce..ee0896985338 100644 --- a/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst @@ -95,6 +95,10 @@ namespace. reply_size, Histogram, Size of the reply in bytes reply_time_ms, Histogram, Command time in milliseconds +The list of commands that these metrics are emitted for can be configured via the +:ref:`configuration `; +by default, metrics are emitted for *delete*, *insert*, and *update*. + .. _config_network_filters_mongo_proxy_collection_stats: Per collection query statistics
diff --git a/docs/root/configuration/listeners/network_filters/network_filters.rst b/docs/root/configuration/listeners/network_filters/network_filters.rst index 4c29a385acad..f75a0f9c0e61 100644 --- a/docs/root/configuration/listeners/network_filters/network_filters.rst +++ b/docs/root/configuration/listeners/network_filters/network_filters.rst @@ -28,4 +28,5 @@ filters. thrift_proxy_filter sni_cluster_filter sni_dynamic_forward_proxy_filter + wasm_filter zookeeper_proxy_filter
diff --git a/docs/root/configuration/listeners/network_filters/rbac_filter.rst b/docs/root/configuration/listeners/network_filters/rbac_filter.rst index 68ae9f2172d4..0324e5153c89 100644 --- a/docs/root/configuration/listeners/network_filters/rbac_filter.rst +++ b/docs/root/configuration/listeners/network_filters/rbac_filter.rst @@ -10,6 +10,11 @@ block-list (DENY) set of policies based on properties of the connection (IPs, po This filter also supports policy in both enforcement and shadow modes. Shadow mode won't affect real users, it is used to test that a new set of policies work before rolling out to production. +When a request is denied, the :ref:`CONNECTION_TERMINATION_DETAILS` +will include the name of the matched policy that caused the deny in the format of `rbac_access_denied_matched_policy[policy_name]` +(policy_name will be `none` if no policy matched). This helps to distinguish a deny returned by the Envoy +RBAC filter from one returned by the upstream backend.
+ * :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.rbac*. diff --git a/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst index 1e01ec592240..ddd9fa7b9489 100644 --- a/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst @@ -25,48 +25,5 @@ SNI dynamic forward proxy. The following config doesn't terminate TLS in listener, so there is no need to configure TLS context in cluster. The TLS handshake is passed through by Envoy. -.. code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 9901 - static_resources: - listeners: - - name: listener_0 - address: - socket_address: - protocol: TCP - address: 0.0.0.0 - port_value: 10000 - listener_filters: - - name: envoy.filters.listener.tls_inspector - filter_chains: - - filters: - - name: envoy.filters.network.sni_dynamic_forward_proxy - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig - port_value: 443 - dns_cache_config: - name: dynamic_forward_proxy_cache_config - dns_lookup_family: V4_ONLY - - name: envoy.tcp_proxy - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy - stat_prefix: tcp - cluster: dynamic_forward_proxy_cluster - clusters: - - name: dynamic_forward_proxy_cluster - connect_timeout: 1s - lb_policy: CLUSTER_PROVIDED - cluster_type: - name: envoy.clusters.dynamic_forward_proxy - typed_config: - "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig - dns_cache_config: - name: dynamic_forward_proxy_cache_config - dns_lookup_family: V4_ONLY - +.. literalinclude:: _include/sni-dynamic-forward-proxy-filter.yaml + :language: yaml diff --git a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst index 7102a47cc6a7..0004b3757c51 100644 --- a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst @@ -59,5 +59,6 @@ statistics are rooted at *tcp..* with the following statistics: downstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from downstream downstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from downstream idle_timeout, Counter, Total number of connections closed due to idle timeout + max_downstream_connection_duration, Counter, Total number of connections closed due to max_downstream_connection_duration timeout upstream_flush_total, Counter, Total number of connections that continued to flush upstream data after the downstream connection was closed upstream_flush_active, Gauge, Total connections currently continuing to flush upstream data after the downstream connection was closed diff --git a/docs/root/configuration/listeners/network_filters/wasm_filter.rst b/docs/root/configuration/listeners/network_filters/wasm_filter.rst new file mode 100644 index 000000000000..c35627f00c4a --- /dev/null +++ b/docs/root/configuration/listeners/network_filters/wasm_filter.rst @@ -0,0 +1,37 @@ +.. 
_config_network_filters_wasm: + +Wasm Network Filter +=============================================== + +* :ref:`v3 API reference ` + +.. attention:: + + The Wasm filter is experimental and is currently under active development. Capabilities will + be expanded over time and the configuration structures are likely to change. + +The Wasm network filter is used to implement a network filter with a Wasm plugin. + + +Example configuration +--------------------- + +Example filter configuration: + +.. code-block:: yaml + + name: envoy.filters.network.wasm + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + config: + config: + name: "my_plugin" + vm_config: + runtime: "envoy.wasm.runtime.v8" + code: + local: + filename: "/etc/envoy_filter_http_wasm_example.wasm" + allow_precompiled: true + + +The preceding snippet configures a filter from a Wasm binary on local disk. diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index ff70567aac8e..4c1d032c13d9 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -17,22 +17,20 @@ Every listener has a statistics tree rooted at *listener.
.* with the fo downstream_cx_active, Gauge, Total active connections downstream_cx_length_ms, Histogram, Connection length milliseconds downstream_cx_overflow, Counter, Total connections rejected due to enforcement of listener connection limit + downstream_cx_overload_reject, Counter, Total connections rejected due to configured overload actions downstream_pre_cx_timeout, Counter, Sockets that timed out during listener filter processing downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing global_cx_overflow, Counter, Total connections rejected due to enforecement of the global connection limit no_filter_chain_match, Counter, Total connections that didn't match any filter chain - ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications - ssl.handshake, Counter, Total successful TLS connection handshakes - ssl.session_reused, Counter, Total successful TLS session resumptions - ssl.no_certificate, Counter, Total successful TLS connections with no client certificate - ssl.fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate - ssl.fail_verify_error, Counter, Total TLS connections that failed CA verification - ssl.fail_verify_san, Counter, Total TLS connections that failed SAN verification - ssl.fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification - ssl.ciphers., Counter, Total successful TLS connections that used cipher - ssl.curves., Counter, Total successful TLS connections that used ECDHE curve - ssl.sigalgs., Counter, Total successful TLS connections that used signature algorithm - ssl.versions., Counter, Total successful TLS connections that used protocol version + +.. _config_listener_stats_tls: + +TLS statistics +-------------- + +The following TLS statistics are rooted at *listener.
.ssl.*: + +.. include:: ../../_include/ssl_stats.rst .. _config_listener_stats_per_handler: diff --git a/docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml b/docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml new file mode 100644 index 000000000000..5fde76139391 --- /dev/null +++ b/docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml @@ -0,0 +1,35 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9901 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: UDP + address: 127.0.0.1 + port_value: 1234 + listener_filters: + name: envoy.filters.udp_listener.udp_proxy + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig + stat_prefix: service + cluster: service_udp + clusters: + - name: service_udp + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: service_udp + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1235 diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 0b71f4e87172..ec520866df6a 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -56,12 +56,12 @@ Example Configuration - suffix: "domain4.com" - suffix: "domain5.com" virtual_domains: - - name: "www.domain1.com" - endpoint: - address_list: - address: - - 10.0.0.1 - - 10.0.0.2 + - name: "www.domain1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 - name: "www.domain2.com" endpoint: address_list: @@ -83,15 +83,15 @@ Example Configuration protocol: { number: 6 } ttl: 86400s targets: - - name: { host_name: "primary.voip.domain5.com" } + - host_name: "primary.voip.domain5.com" priority: 10 weight: 30 port: 5060 - - name: { host_name: "secondary.voip.domain5.com" } + - host_name: "secondary.voip.domain5.com" priority: 10 weight: 20 port: 5060 - - name: { host_name: "backup.voip.domain5.com" } + - host_name: "backup.voip.domain5.com" priority: 10 weight: 10 port: 5060 diff --git a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst index 5d2516117b89..aff514021a12 100644 --- a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst +++ b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst @@ -49,43 +49,9 @@ Example configuration The following example configuration will cause Envoy to listen on UDP port 1234 and proxy to a UDP server listening on port 1235. - .. code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 9901 - static_resources: - listeners: - - name: listener_0 - address: - socket_address: - protocol: UDP - address: 127.0.0.1 - port_value: 1234 - listener_filters: - name: envoy.filters.udp_listener.udp_proxy - typed_config: - '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig - stat_prefix: service - cluster: service_udp - clusters: - - name: service_udp - connect_timeout: 0.25s - type: STATIC - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: service_udp - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 1235 +.. 
literalinclude:: _include/udp-proxy.yaml + :language: yaml + Statistics ---------- diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 476ee0c8a534..75c0630285fa 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -202,6 +202,13 @@ The following command operators are supported: TCP Not implemented ("-") +.. _config_access_log_format_connection_termination_details: + +%CONNECTION_TERMINATION_DETAILS% + HTTP and TCP + Connection termination details may provide additional information about why the connection was + terminated by Envoy for L4 reasons. + %BYTES_SENT% HTTP Body bytes sent. For WebSocket connection it will also include response header bytes. @@ -356,6 +363,15 @@ The following command operators are supported: %DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT% Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address. +.. _config_access_log_format_connection_id: + +%CONNECTION_ID% + An identifier for the downstream connection. It can be used to + cross-reference TCP access logs across multiple log sinks, or to + cross-reference timer-based reports for the same connection. The identifier + is unique with high likelihood within an execution, but can duplicate across + multiple instances or between restarts. + %GRPC_STATUS% gRPC status code which is easy to interpret with text message corresponding with number. diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index b531c0583b61..ed25a3e2de8d 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -25,6 +25,7 @@ Server related statistics are rooted at *server.* with following statistics: total_connections, Gauge, Total connections of both new and old Envoy processes version, Gauge, Integer represented version number based on SCM revision or :ref:`stats_server_version_override ` if set. days_until_first_cert_expiring, Gauge, Number of days until the next certificate being managed will expire + seconds_until_first_ocsp_response_expiring, Gauge, Number of seconds until the next OCSP response being managed will expire hot_restart_epoch, Gauge, Current hot restart epoch -- an integer passed via command line flag `--restart-epoch` usually indicating generation. hot_restart_generation, Gauge, Current hot restart generation -- like hot_restart_epoch but computed automatically by incrementing from parent. initialization_time_ms, Histogram, Total time taken for Envoy initialization in milliseconds. This is the time from server start-up until the worker threads are ready to accept new connections diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index ade5201f5c4c..22e7e70b33b5 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -68,14 +68,27 @@ Overload actions The following overload actions are supported: -.. csv-table:: - :header: Name, Description +.. 
list-table:: + :header-rows: 1 :widths: 1, 2 - envoy.overload_actions.stop_accepting_requests, Envoy will immediately respond with a 503 response code to new requests - envoy.overload_actions.disable_http_keepalive, Envoy will stop accepting streams on incoming HTTP connections - envoy.overload_actions.stop_accepting_connections, Envoy will stop accepting new network connections on its configured listeners - envoy.overload_actions.shrink_heap, Envoy will periodically try to shrink the heap by releasing free memory to the system + * - Name + - Description + + * - envoy.overload_actions.stop_accepting_requests + - Envoy will immediately respond with a 503 response code to new requests + + * - envoy.overload_actions.disable_http_keepalive + - Envoy will stop accepting streams on incoming HTTP connections + + * - envoy.overload_actions.stop_accepting_connections + - Envoy will stop accepting new network connections on its configured listeners + + * - envoy.overload_actions.reject_incoming_connections + - Envoy will reject incoming connections on its configured listeners without processing any data + + * - envoy.overload_actions.shrink_heap + - Envoy will periodically try to shrink the heap by releasing free memory to the system Limiting Active Connections --------------------------- diff --git a/docs/root/configuration/operations/runtime.rst b/docs/root/configuration/operations/runtime.rst index 2e72e52bb953..3ae9b3783f0e 100644 --- a/docs/root/configuration/operations/runtime.rst +++ b/docs/root/configuration/operations/runtime.rst @@ -248,7 +248,7 @@ envoy.deprecated_features:full_fieldname or envoy.deprecated_features:full_enum_ to true. For example, for a deprecated field ``Foo.Bar.Eep`` set ``envoy.deprecated_features:Foo.bar.Eep`` to ``true``. There is a production example using static runtime to allow both fail-by-default fields here: -:repo:`configs/using_deprecated_config.v2.yaml` +:repo:`configs/using_deprecated_config.yaml` Use of these override is **strongly discouraged** so please use with caution and switch to the new fields as soon as possible. Fatal-by-default configuration indicates that the removal of the old code paths is imminent. It is far better for both Envoy users and for Envoy contributors if any bugs or feature gaps diff --git a/docs/root/configuration/other_features/other_features.rst b/docs/root/configuration/other_features/other_features.rst index 84d8f49483ce..ff59885d9381 100644 --- a/docs/root/configuration/other_features/other_features.rst +++ b/docs/root/configuration/other_features/other_features.rst @@ -5,3 +5,5 @@ Other features :maxdepth: 2 rate_limit + wasm + wasm_stat_sink diff --git a/docs/root/configuration/other_features/rate_limit.rst b/docs/root/configuration/other_features/rate_limit.rst index d3503a899878..4fa374cb4a69 100644 --- a/docs/root/configuration/other_features/rate_limit.rst +++ b/docs/root/configuration/other_features/rate_limit.rst @@ -14,5 +14,5 @@ gRPC service IDL Envoy expects the rate limit service to support the gRPC IDL specified in :ref:`rls.proto `. See the IDL documentation -for more information on how the API works. See Lyft's reference implementation -`here `_. +for more information on how the API works. See Envoy's reference implementation +`here `_. diff --git a/docs/root/configuration/other_features/wasm.rst b/docs/root/configuration/other_features/wasm.rst new file mode 100644 index 000000000000..0026d6588693 --- /dev/null +++ b/docs/root/configuration/other_features/wasm.rst @@ -0,0 +1,24 @@ +.. 
_config_wasm_service: + +Wasm service +============ + +The :ref:`WasmService ` configuration specifies a +singleton or per-worker Wasm service for background or on-demand activities. + +Example plugin configuration: + +.. code-block:: yaml + + wasm: + config: + config: + name: "my_plugin" + vm_config: + runtime: "envoy.wasm.runtime.v8" + code: + local: + filename: "/etc/envoy_filter_http_wasm_example.wasm" + singleton: true + +The preceding snippet configures a plugin singleton service from a Wasm binary on local disk. diff --git a/docs/root/configuration/other_features/wasm_stat_sink.rst b/docs/root/configuration/other_features/wasm_stat_sink.rst new file mode 100644 index 000000000000..c3231f26ea2b --- /dev/null +++ b/docs/root/configuration/other_features/wasm_stat_sink.rst @@ -0,0 +1,7 @@ +.. _config_stat_sinks_wasm: + +Wasm Stat Sink +============== + +The :ref:`WasmService ` configuration specifies a +singleton or per-worker Wasm stat sink service. diff --git a/docs/root/configuration/overview/examples.rst b/docs/root/configuration/overview/examples.rst index bc8124c48882..50d6b6f11b84 100644 --- a/docs/root/configuration/overview/examples.rst +++ b/docs/root/configuration/overview/examples.rst @@ -108,7 +108,10 @@ on 127.0.0.1:5678 is provided below: connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - http2_protocol_options: {} + http2_protocol_options: + connection_keepalive: + interval: 30s + timeout: 5s upstream_connection_options: # configure a TCP keep-alive to detect and reconnect to the admin # server in the event of a TCP socket half open connection @@ -189,11 +192,12 @@ below: connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - http2_protocol_options: {} - upstream_connection_options: - # configure a TCP keep-alive to detect and reconnect to the admin - # server in the event of a TCP socket half open connection - tcp_keepalive: {} + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s load_assignment: cluster_name: xds_cluster endpoints: diff --git a/docs/root/configuration/overview/extension.rst b/docs/root/configuration/overview/extension.rst index dab59eaf6b97..91001530d50b 100644 --- a/docs/root/configuration/overview/extension.rst +++ b/docs/root/configuration/overview/extension.rst @@ -61,10 +61,12 @@ follows: "@type": type.googleapis.com/udpa.type.v1.TypedStruct type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3Router +.. _config_overview_extension_discovery: + Discovery service ^^^^^^^^^^^^^^^^^ -Extension configuration can be supplied dynamically from a :ref:`an xDS +Extension configuration can be supplied dynamically from an :ref:`xDS management server` using :ref:`ExtensionConfiguration discovery service`. The name field in the extension configuration acts as the resource identifier. @@ -74,7 +76,7 @@ for HTTP filters. Extension config discovery service has a :ref:`statistics ` tree rooted at -*.extension_config_discovery..*. In addition +*.extension_config_discovery.*. In addition to the common subscription statistics, it also provides the following: .. 
csv-table:: diff --git a/docs/root/configuration/overview/mgmt_server.rst b/docs/root/configuration/overview/mgmt_server.rst index 68d1f3b1d958..4d1d8406789e 100644 --- a/docs/root/configuration/overview/mgmt_server.rst +++ b/docs/root/configuration/overview/mgmt_server.rst @@ -10,6 +10,12 @@ When an Envoy instance loses connectivity with the management server, Envoy will the previous configuration while actively retrying in the background to reestablish the connection with the management server. +It is important that Envoy detects when a connection to a management server is unhealthy so that +it can try to establish a new connection. Configuring either +:ref:`TCP keep-alives ` +or :ref:`HTTP/2 keepalives ` +in the cluster that connects to the management server is recommended. + Envoy debug logs the fact that it is not able to establish a connection with the management server every time it attempts a connection. diff --git a/docs/root/configuration/security/secret.rst b/docs/root/configuration/security/secret.rst index 060fcb79b53f..087cf388b759 100644 --- a/docs/root/configuration/security/secret.rst +++ b/docs/root/configuration/security/secret.rst @@ -99,7 +99,10 @@ This example shows how to configure secrets fetched from remote SDS servers: clusters: - name: sds_server_mtls - http2_protocol_options: {} + http2_protocol_options: + connection_keepalive: + interval: 30s + timeout: 5s load_assignment: cluster_name: sds_server_mtls endpoints: diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index 77b9380d2982..874b9d9f28fa 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -70,7 +70,7 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_rq_maintenance_mode, Counter, Total requests that resulted in an immediate 503 due to :ref:`maintenance mode` upstream_rq_timeout, Counter, Total requests that timed out waiting for a response upstream_rq_max_duration_reached, Counter, Total requests closed due to max duration reached - upstream_rq_per_try_timeout, Counter, Total requests that hit the per try timeout + upstream_rq_per_try_timeout, Counter, Total requests that hit the per try timeout (except when request hedging is enabled) upstream_rq_rx_reset, Counter, Total requests that were reset remotely upstream_rq_tx_reset, Counter, Total requests that were reset locally upstream_rq_retry, Counter, Total request retries @@ -219,6 +219,15 @@ are rooted at *cluster..* and contain the following statistics: external.upstream_rq_<\*>, Counter, External origin specific HTTP response codes external.upstream_rq_time, Histogram, External origin request time milliseconds +.. _config_cluster_manager_cluster_stats_tls: + +TLS statistics +-------------- + +If TLS is used by the cluster the following statistics are rooted at *cluster..ssl.*: + +.. include:: ../../../_include/ssl_stats.rst + .. _config_cluster_manager_cluster_stats_alt_tree: Alternate tree dynamic HTTP statistics diff --git a/docs/root/faq/api/control_plane_version_support.rst b/docs/root/faq/api/control_plane_version_support.rst index 599ec8d7d8d8..7c6e58ffbeb3 100644 --- a/docs/root/faq/api/control_plane_version_support.rst +++ b/docs/root/faq/api/control_plane_version_support.rst @@ -30,6 +30,16 @@ typical rollout sequence might look like: 4. Support for v2 is removed in the management server. 
The management server moves to v3 exclusively internally and can support newer fields. +Another approach to type URL version migration is to enable support for mixed type URLs, +guarded by the runtime flag *envoy.reloadable_features.enable_type_url_downgrade_and_upgrade*. +With this enabled, a client can send a discovery request with a v2 resource type URL and process a +discovery response with a v3 resource type URL, and vice versa. The upgrade and downgrade of the type URL +is performed automatically. If your management server does not support both v2 and v3 at the same time, +clients with this feature enabled can talk to a mix of management servers that support either v2 or v3 +exclusively. Just like the first approach, no deprecated v2 fields or new v3 fields can be used at this point. + If you are operating a managed control plane as-a-service, you will likely need to support a wide range of client versions. In this scenario, you will require long term support for multiple major API transport and resource versions. Strategies for managing this support are described :ref:`here
diff --git a/docs/root/faq/api/why_versioning.rst b/docs/root/faq/api/why_versioning.rst index 917a16ae2afe..a53f5c7cd159 100644 --- a/docs/root/faq/api/why_versioning.rst +++ b/docs/root/faq/api/why_versioning.rst @@ -26,7 +26,7 @@ For the v3 xDS APIs, a brief list of the key improvements that were made with a - Extensions now reflect the Envoy source tree layout under `envoy.extensions`. * `std::regex` regular expressions were dropped from the API, in favor of RE2. The former have dangerous security implications. -* `google.protobug.Struct` configuration of extensions was dropped from the API, in favor of +* `google.protobuf.Struct` configuration of extensions was dropped from the API, in favor of typed configuration. This provides for better support for multiple instances of extensions, e.g. in filter chains, and more flexible naming of extension instances. * Over 60 deprecated fields were removed from the API.
diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 3c87cca44d9d..ee5d501acb54 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -88,6 +88,10 @@ stream timeouts already introduced above. is sent to the downstream, which normally happens after the upstream has sent response headers. This timeout can be used with streaming endpoints to retry if the upstream fails to begin a response within the timeout. +* The route :ref:`MaxStreamDuration proto ` + can be used to override the HttpConnectionManager's + :ref:`max_stream_duration ` + for individual routes as well as setting both limits and a fixed time offset on grpc-timeout headers. TCP ---
diff --git a/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst b/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst index eaaeca31c290..42073b1cf17c 100644 --- a/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst +++ b/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst @@ -5,5 +5,5 @@ Why is Envoy sending internal responses? One of the easiest ways to get an understanding of why Envoy sends a given local response, is to turn on trace logging.
If you can run your instance with “-l trace” you will slow Envoy down significantly, but get detailed information on various events in the lifetime of each stream and connection. Any time Envoy sends an internally generated response it will log to the _debug_ level “Sending local reply with details [unique reason]” which gives you information about why the local response was sent. Each individual response detail is used at one point in the code base, be it a codec validation check or a failed route match. -If turning on debug logging is not plausible, the response details can be added to the access logs using _%RESPONSE_CODE_DETAILS%_, and again it will let you pinpoint the exact reason a given response was generated. +If turning on debug logging is not plausible, the response details can be added to the access logs using _%RESPONSE_CODE_DETAILS%_, and again it will let you pinpoint the exact reason a given response was generated. Documentation on response code details can be found :ref:`here` diff --git a/docs/root/faq/extensions/contract.rst b/docs/root/faq/extensions/contract.rst index 35e9a05f06ba..314701aca805 100644 --- a/docs/root/faq/extensions/contract.rst +++ b/docs/root/faq/extensions/contract.rst @@ -16,7 +16,7 @@ Is there a contract my HTTP filter must adhere to? ``continueEncoding()``/``continueDecoding()``. * A filter's ``decodeHeaders()`` implementation must not return - ``FilterHeadersStatus::ContinueAndEndStream`` when called with ``end_stream`` set to *true*. In this case + ``FilterHeadersStatus::ContinueAndDontEndStream`` when called with ``end_stream`` set to *false*. In this case ``FilterHeadersStatus::Continue`` should be returned. * A filter's ``encode100ContinueHeaders()`` must return ``FilterHeadersStatus::Continue`` or diff --git a/docs/root/index.rst b/docs/root/index.rst index e06641b35580..9711a6306a36 100644 --- a/docs/root/index.rst +++ b/docs/root/index.rst @@ -14,10 +14,9 @@ Envoy documentation about_docs intro/intro start/start - install/install - version_history/version_history configuration/configuration operations/operations extending/extending api/api faq/overview + version_history/version_history diff --git a/docs/root/install/install.rst b/docs/root/install/install.rst deleted file mode 100644 index c53acab15dc6..000000000000 --- a/docs/root/install/install.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _install: - -Building and installation -========================= - -.. toctree:: - :maxdepth: 2 - - building - ref_configs - tools/tools diff --git a/docs/root/intro/_include/life-of-a-request.yaml b/docs/root/intro/_include/life-of-a-request.yaml new file mode 100644 index 000000000000..7006dbc24221 --- /dev/null +++ b/docs/root/intro/_include/life-of-a-request.yaml @@ -0,0 +1,92 @@ + +static_resources: + listeners: + # There is a single listener bound to port 443. + - name: listener_https + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 443 + # A single listener filter exists for TLS inspector. + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + typed_config: {} + # On the listener, there is a single filter chain that matches SNI for acme.com. + filter_chains: + - filter_chain_match: + # This will match the SNI extracted by the TLS Inspector filter. + server_names: ["acme.com"] + # Downstream TLS configuration. 
+ transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } + filters: + # The HTTP connection manager is the only network filter. + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + http2_protocol_options: + max_concurrent_streams: 100 + # File system based access logging. + access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: "/var/log/envoy/access.log" + # The route table, mapping /foo to some_service. + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["acme.com"] + routes: + - match: + path: "/foo" + route: + cluster: some_service + # CustomFilter and the HTTP router filter are the HTTP filter chain. + http_filters: + # - name: some.customer.filter + - name: envoy.filters.http.router + clusters: + - name: some_service + connect_timeout: 5s + # Upstream TLS configuration. + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + load_assignment: + cluster_name: some_service + # Static endpoint assignment. + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.1.2.10 + port_value: 10002 + - endpoint: + address: + socket_address: + address: 10.1.2.11 + port_value: 10002 + http2_protocol_options: + max_concurrent_streams: 100 + - name: some_statsd_sink + connect_timeout: 5s + # The rest of the configuration for statsd sink cluster. +# statsd sink. +stats_sinks: + - name: envoy.stat_sinks.statsd + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink + tcp_cluster_name: some_statsd_sink diff --git a/docs/root/intro/arch_overview/arch_overview.rst b/docs/root/intro/arch_overview/arch_overview.rst index 17e87eaa2471..59433b5950eb 100644 --- a/docs/root/intro/arch_overview/arch_overview.rst +++ b/docs/root/intro/arch_overview/arch_overview.rst @@ -11,7 +11,6 @@ Architecture overview observability/observability security/security operations/operations - compression/libraries other_features/other_features other_protocols/other_protocols advanced/advanced diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index a00b43d15d0d..4f9b62f7ded5 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -48,7 +48,7 @@ a deployment of the form: In this case, if a client is for example using WebSocket, we want the Websocket to arrive at the upstream server functionally intact, which means it needs to traverse the HTTP/2 hop. -This is accomplished via `extended CONNECT `_ support, +This is accomplished via `Extended CONNECT (RFC8441) `_ support, turned on by setting :ref:`allow_connect ` true at the second layer Envoy. The WebSocket request will be transformed into an HTTP/2 CONNECT stream, with :protocol header @@ -85,7 +85,7 @@ and forward the HTTP payload upstream. 
On receipt of initial TCP data from upstream, Envoy will synthesize 200 response headers, and then forward the TCP data as the HTTP response body. .. warning:: - This mode of CONNECT support can create major security holes if configured correctly, as the upstream + This mode of CONNECT support can create major security holes if not configured correctly, as the upstream will be forwarded *unsanitized* headers if they are in the body payload. Please use with caution Tunneling TCP over HTTP/2 @@ -97,8 +97,8 @@ An example set up proxying SMTP would look something like this [SMTP Upstream] --- raw SMTP --- [L2 Envoy] --- SMTP tunneled over HTTP/2 --- [L1 Envoy] --- raw SMTP --- [Client] Examples of such a set up can be found in the Envoy example config :repo:`directory ` -If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.v3.yaml --base-id 1` -and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.v3.yaml` +If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.yaml --base-id 1` +and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.yaml` you will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2 CONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the original TCP upstream, in this case to google.com. diff --git a/docs/root/intro/arch_overview/listeners/listeners_toc.rst b/docs/root/intro/arch_overview/listeners/listeners_toc.rst index 77c377c8cbe8..121304996fa0 100644 --- a/docs/root/intro/arch_overview/listeners/listeners_toc.rst +++ b/docs/root/intro/arch_overview/listeners/listeners_toc.rst @@ -6,6 +6,7 @@ Listeners listeners listener_filters + network_filter_chain network_filters tcp_proxy udp_proxy diff --git a/docs/root/intro/arch_overview/listeners/network_filter_chain.rst b/docs/root/intro/arch_overview/listeners/network_filter_chain.rst new file mode 100644 index 000000000000..d920deaf303d --- /dev/null +++ b/docs/root/intro/arch_overview/listeners/network_filter_chain.rst @@ -0,0 +1,30 @@ +.. _arch_overview_network_filter_chain: + +Network Filter Chain +==================== + +As discussed in the :ref:`listener ` section, network level (L3/L4) filters +form the core of Envoy connection handling. + +The network filters are chained in an ordered list known as a :ref:`filter chain `. +Each listener has multiple filter chains and an optional :ref:`default filter chain ` +associated with it. If the best match filter chain cannot be found, the default filter chain will be +chosen to serve the request. If the default filter chain is not supplied, the connection will be closed. + +.. _filter_chain_only_update: + +Filter chain only update +------------------------ + +:ref:`Filter chains ` can be updated independently. Upon listener config +update, if the listener manager determines that the listener update is a filter chain only update, the listener update +will be executed by adding, updating and removing filter chains. The connections owned by the filter chains being destroyed will +be drained as described in listener drain. + +If the new :ref:`filter chain ` and the old :ref:`filter chain ` +are protobuf message equivalent, the corresponding filter chain runtime info survives. The connections owned by the +surviving filter chains remain open. + +Not all listener config updates can be executed as filter chain only updates.
For example, if the listener metadata is +updated within the new listener config, the new metadata must be picked up by the new filter chains. In this case, the +entire listener is drained and updated. diff --git a/docs/root/intro/arch_overview/listeners/udp_proxy.rst b/docs/root/intro/arch_overview/listeners/udp_proxy.rst index ea886a59fb18..bf8ad0b7e054 100644 --- a/docs/root/intro/arch_overview/listeners/udp_proxy.rst +++ b/docs/root/intro/arch_overview/listeners/udp_proxy.rst @@ -1,3 +1,5 @@ +.. _arch_overview_udp_proxy: + UDP proxy ========= diff --git a/docs/root/intro/arch_overview/operations/dynamic_configuration.rst b/docs/root/intro/arch_overview/operations/dynamic_configuration.rst index 458a4589d008..c8cbcf7c7ee0 100644 --- a/docs/root/intro/arch_overview/operations/dynamic_configuration.rst +++ b/docs/root/intro/arch_overview/operations/dynamic_configuration.rst @@ -111,6 +111,14 @@ The :ref:`RunTime Discovery Service (RTDS) API ` allows :ref:`runtime ` layers to be fetched via an xDS API. This may be favorable to, or augmented by, file system layers. +ECDS +---- + +The :ref:`Extension Config Discovery Service (ECDS) API ` +allows extension configurations (e.g. HTTP filter configuration) to be served independently from +the listener. This is useful when building systems that are more appropriately split from the +primary control plane such as WAF, fault testing, etc. + Aggregated xDS ("ADS") ---------------------- diff --git a/docs/root/intro/arch_overview/compression/libraries.rst b/docs/root/intro/arch_overview/other_features/compression/libraries.rst similarity index 100% rename from docs/root/intro/arch_overview/compression/libraries.rst rename to docs/root/intro/arch_overview/other_features/compression/libraries.rst diff --git a/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst b/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst index e8dbfbc2a2bb..538c5a7a1fe7 100644 --- a/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst +++ b/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst @@ -14,7 +14,7 @@ normally during typical request patterns but still prevent cascading failure whe to fail. Global rate limiting is a good solution for this case. Envoy integrates directly with a global gRPC rate limiting service. Although any service that -implements the defined RPC/IDL protocol can be used, Lyft provides a `reference implementation `_ +implements the defined RPC/IDL protocol can be used, Envoy provides a `reference implementation `_ written in Go which uses a Redis backend. Envoy’s rate limit integration has the following features: * **Network level rate limit filter**: Envoy will call the rate limit service for every new diff --git a/docs/root/intro/arch_overview/other_features/ip_transparency.rst b/docs/root/intro/arch_overview/other_features/ip_transparency.rst index 095ec54bc124..06e07b5b82f4 100644 --- a/docs/root/intro/arch_overview/other_features/ip_transparency.rst +++ b/docs/root/intro/arch_overview/other_features/ip_transparency.rst @@ -45,13 +45,33 @@ metadata includes the source IP. Envoy supports consuming this information using the downstream remote address for propagation into an :ref:`x-forwarded-for ` header. It can also be used in conjunction with the -:ref:`Original Src Listener Filter `. +:ref:`Original Src Listener Filter `. Finally, +Envoy supports generating this header using the :ref:`Proxy Protocol Transport Socket `. +Here is an example config for setting up the socket: + +.. 
code-block:: yaml + + clusters: + - name: service1 + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + transport_socket: + name: envoy.transport_sockets.upstream_proxy_protocol + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.proxy_protocol.v3.ProxyProtocolUpstreamTransport + config: + version: V1 + transport_socket: + name: envoy.transport_sockets.raw_buffer + ... + +Note: If you are wrapping a TLS socket, the header will be sent before the TLS handshake occurs. Some drawbacks to Proxy Protocol: * It only supports TCP protocols. * It requires upstream host support. -* Envoy cannot yet send it to the upstream. .. _arch_overview_ip_transparency_original_src_listener: diff --git a/docs/root/intro/arch_overview/other_features/local_rate_limiting.rst b/docs/root/intro/arch_overview/other_features/local_rate_limiting.rst index 0fc3369db24f..0e2927254c67 100644 --- a/docs/root/intro/arch_overview/other_features/local_rate_limiting.rst +++ b/docs/root/intro/arch_overview/other_features/local_rate_limiting.rst @@ -6,6 +6,11 @@ Local rate limiting Envoy supports local (non-distributed) rate limiting of L4 connections via the :ref:`local rate limit filter `. -Note that Envoy also supports :ref:`global rate limiting `. Local +Envoy additionally supports local rate limiting of HTTP requests via the +:ref:`HTTP local rate limit filter `. This can +be activated globally at the listener level or at a more specific level (e.g.: the virtual +host or route level). + +Finally, Envoy also supports :ref:`global rate limiting `. Local rate limiting can be used in conjunction with global rate limiting to reduce load on the global rate limit service. diff --git a/docs/root/intro/arch_overview/other_features/other_features.rst b/docs/root/intro/arch_overview/other_features/other_features.rst index 168e40f92a70..2e2a2c054b74 100644 --- a/docs/root/intro/arch_overview/other_features/other_features.rst +++ b/docs/root/intro/arch_overview/other_features/other_features.rst @@ -8,3 +8,4 @@ Other features global_rate_limiting scripting ip_transparency + compression/libraries diff --git a/docs/root/intro/arch_overview/other_protocols/dynamo.rst b/docs/root/intro/arch_overview/other_protocols/dynamo.rst index d757fe5aa42d..8fa20b25cc27 100644 --- a/docs/root/intro/arch_overview/other_protocols/dynamo.rst +++ b/docs/root/intro/arch_overview/other_protocols/dynamo.rst @@ -12,7 +12,7 @@ Envoy supports an HTTP level DynamoDB sniffing filter with the following feature * Batch operation partial failure statistics. The DynamoDB filter is a good example of Envoy’s extensibility and core abstractions at the HTTP -layer. At Lyft we use this filter for all application communication with DynamoDB. It provides an +layer, and can be used to filter all application communication with DynamoDB. It provides an invaluable source of data agnostic to the application platform and specific AWS SDK in use. DynamoDB filter :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/other_protocols/mongo.rst b/docs/root/intro/arch_overview/other_protocols/mongo.rst index 6ae713ea2087..e5ccf0a49d9f 100644 --- a/docs/root/intro/arch_overview/other_protocols/mongo.rst +++ b/docs/root/intro/arch_overview/other_protocols/mongo.rst @@ -12,8 +12,8 @@ Envoy supports a network level MongoDB sniffing filter with the following featur * Per callsite statistics via the $comment query parameter. * Fault injection. -The MongoDB filter is a good example of Envoy’s extensibility and core abstractions. 
At Lyft we use -this filter between all applications and our databases. It provides an invaluable source of data +The MongoDB filter is a good example of Envoy’s extensibility and core abstractions, and can be used +to filter between all applications and MongoDB databases. It provides an invaluable source of data that is agnostic to the application platform and specific MongoDB driver in use. MongoDB proxy filter :ref:`configuration reference `. diff --git a/docs/root/intro/arch_overview/other_protocols/postgres.rst b/docs/root/intro/arch_overview/other_protocols/postgres.rst index 7fa14f5f4b68..e9a11f2dd639 100644 --- a/docs/root/intro/arch_overview/other_protocols/postgres.rst +++ b/docs/root/intro/arch_overview/other_protocols/postgres.rst @@ -1,7 +1,7 @@ .. _arch_overview_postgres: Postgres -========== +======== Envoy supports a network level Postgres sniffing filter to add network observability. By using the Postgres proxy, Envoy is able to decode `Postgres frontend/backend protocol`_ and gather @@ -14,7 +14,7 @@ offers the following features: * Decode non SSL traffic, ignore SSL traffic. * Decode session information. * Capture transaction information, including commits and rollbacks. -* Expose counters for different types of statements (INSERTs, DELETEs, UPDATEs, etc). +* Expose counters for different types of statements (INSERTs, DELETEs, UPDATEs, etc). The counters are updated based on decoding backend CommandComplete messages not by decoding SQL statements sent by a client. * Count frontend, backend and unknown messages. * Identify errors and notices backend responses. diff --git a/docs/root/intro/arch_overview/security/_include/ssl.yaml b/docs/root/intro/arch_overview/security/_include/ssl.yaml new file mode 100644 index 000000000000..8c74e56c8d93 --- /dev/null +++ b/docs/root/intro/arch_overview/security/_include/ssl.yaml @@ -0,0 +1,57 @@ +static_resources: + listeners: + - name: listener_0 + address: { socket_address: { address: 127.0.0.1, port_value: 10000 } } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + virtual_hosts: + - name: default + domains: "*" + routes: + - match: { prefix: "/" } + route: + cluster: some_service + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } + validation_context: + trusted_ca: + filename: certs/cacert.pem + clusters: + - name: some_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1234 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + tls_certificates: + certificate_chain: { "filename": "certs/servercert.pem" } + private_key: { "filename": "certs/serverkey.pem" } + ocsp_staple: { "filename": "certs/server_ocsp_resp.der" } + validation_context: + match_subject_alt_names: + exact: "foo" + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt diff --git 
a/docs/root/intro/arch_overview/security/ext_authz_filter.rst b/docs/root/intro/arch_overview/security/ext_authz_filter.rst index b6935e96412d..225ba5091fcb 100644 --- a/docs/root/intro/arch_overview/security/ext_authz_filter.rst +++ b/docs/root/intro/arch_overview/security/ext_authz_filter.rst @@ -38,4 +38,4 @@ The content of the request that are passed to an authorization service is specif :glob: :maxdepth: 2 - ../../../api-v2/service/auth/v2/* + ../../../api-v3/service/auth/v3/* diff --git a/docs/root/intro/arch_overview/security/external_deps.rst b/docs/root/intro/arch_overview/security/external_deps.rst index 05ac71b0d96c..42a54fe3911f 100644 --- a/docs/root/intro/arch_overview/security/external_deps.rst +++ b/docs/root/intro/arch_overview/security/external_deps.rst @@ -6,25 +6,35 @@ External dependencies Below we enumerate the external dependencies that may be linked into the Envoy binary. We exclude dependencies that only are used in CI or developer tooling above. -Data plane ----------- +Data plane (core) +----------------- -.. include:: external_dep_dataplane.rst +.. include:: external_dep_dataplane_core.rst + +Data plane (extensions) +----------------------- + +.. include:: external_dep_dataplane_ext.rst Control plane ------------- .. include:: external_dep_controlplane.rst -Observability -------------- +API +--- + +.. include:: external_dep_api.rst -.. include:: external_dep_observability.rst +Observability (core) +-------------------- -Test ----- +.. include:: external_dep_observability_core.rst -.. include:: external_dep_test.rst +Observability (extensions) +-------------------------- + +.. include:: external_dep_observability_ext.rst Build ----- @@ -35,3 +45,11 @@ Miscellaneous ------------- .. include:: external_dep_other.rst + +Test only +--------- + +Below we provide the status of the C/C++ dependencies that are only used in tests. Tests also +include additional Java, Rust and Python dependencies that are not tracked below. + +.. include:: external_dep_test_only.rst diff --git a/docs/root/intro/arch_overview/security/rbac_filter.rst b/docs/root/intro/arch_overview/security/rbac_filter.rst index fc98580e4f84..b12d568a25ad 100644 --- a/docs/root/intro/arch_overview/security/rbac_filter.rst +++ b/docs/root/intro/arch_overview/security/rbac_filter.rst @@ -79,6 +79,7 @@ The following attributes are exposed to the language runtime: request.total_size, int, Total size of the request including the headers request.protocol, string, Request protocol e.g. 
"HTTP/2" response.code, int, Response HTTP status code + response.code_details, string, Internal response code details (subject to change) response.grpc_status, int, Response gRPC status code response.headers, string map, All response headers response.trailers, string map, All response trailers @@ -90,6 +91,7 @@ The following attributes are exposed to the language runtime: destination.address, string, Downstream connection local address destination.port, int, Downstream connection local port metadata, :ref:`Metadata`, Dynamic metadata + filter_state, map string to bytes, Filter state mapping data names to their serialized string value connection.mtls, bool, Indicates whether TLS is applied to the downstream connection and the peer ceritificate is presented connection.requested_server_name, string, Requested server name in the downstream TLS connection connection.tls_version, string, TLS version of the downstream TLS connection @@ -99,6 +101,7 @@ The following attributes are exposed to the language runtime: connection.dns_san_peer_certificate, string, The first DNS entry in the SAN field of the peer certificate in the downstream TLS connection connection.uri_san_local_certificate, string, The first URI entry in the SAN field of the local certificate in the downstream TLS connection connection.uri_san_peer_certificate, string, The first URI entry in the SAN field of the peer certificate in the downstream TLS connection + connection.id, uint, Downstream connection ID upstream.address, string, Upstream connection remote address upstream.port, int, Upstream connection remote port upstream.tls_version, string, TLS version of the upstream TLS connection diff --git a/docs/root/intro/arch_overview/security/security.rst b/docs/root/intro/arch_overview/security/security.rst index 0f85bb5d9a06..7de6c7ef82d2 100644 --- a/docs/root/intro/arch_overview/security/security.rst +++ b/docs/root/intro/arch_overview/security/security.rst @@ -4,10 +4,10 @@ Security .. toctree:: :maxdepth: 2 - threat_model - external_deps - google_vrp ssl jwt_authn_filter ext_authz_filter rbac_filter + threat_model + external_deps + google_vrp diff --git a/docs/root/intro/arch_overview/security/ssl.rst b/docs/root/intro/arch_overview/security/ssl.rst index 4a5d4f0ea246..c6bb2b8fd202 100644 --- a/docs/root/intro/arch_overview/security/ssl.rst +++ b/docs/root/intro/arch_overview/security/ssl.rst @@ -24,9 +24,10 @@ requirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features: across hot restarts and between parallel Envoy instances (typically useful in a front proxy configuration). * **BoringSSL private key methods**: TLS private key operations (signing and decrypting) can be - performed asynchronously from an extension. This allows extending Envoy to support various key + performed asynchronously from :ref:`an extension `. This allows extending Envoy to support various key management schemes (such as TPM) and TLS acceleration. This mechanism uses `BoringSSL private key method interface `_. +* **OCSP Stapling**: Online Certificate Stapling Protocol responses may be stapled to certificates. Underlying implementation ------------------------- @@ -70,51 +71,8 @@ validation context specifies one or more trusted authority certificates. Example configuration ^^^^^^^^^^^^^^^^^^^^^ -.. code-block:: yaml - - static_resources: - listeners: - - name: listener_0 - address: { socket_address: { address: 127.0.0.1, port_value: 10000 } } - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - # ... 
- transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - validation_context: - trusted_ca: - filename: /usr/local/my-client-ca.crt - clusters: - - name: some_service - connect_timeout: 0.25s - type: STATIC - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: some_service - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.2 - port_value: 1234 - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext - common_tls_context: - tls_certificates: - certificate_chain: { "filename": "/cert.crt" } - private_key: { "filename": "/cert.key" } - validation_context: - match_subject_alt_names: - exact: "foo" - trusted_ca: - filename: /etc/ssl/certs/ca-certificates.crt +.. literalinclude:: _include/ssl.yaml + :language: yaml */etc/ssl/certs/ca-certificates.crt* is the default path for the system CA bundle on Debian systems. :ref:`trusted_ca ` along with @@ -149,14 +107,17 @@ certificates. These may be a mix of RSA and P-256 ECDSA certificates. The follow * Only one certificate of a particular type (RSA or ECDSA) may be specified. * Non-P-256 server ECDSA certificates are rejected. -* If the client supports P-256 ECDSA, a P-256 ECDSA certificate will be selected if present in the - :ref:`DownstreamTlsContext `. +* If the client supports P-256 ECDSA, a P-256 ECDSA certificate will be selected if one is present in the + :ref:`DownstreamTlsContext ` + and it is in compliance with the OCSP policy. * If the client only supports RSA certificates, a RSA certificate will be selected if present in the :ref:`DownstreamTlsContext `. * Otherwise, the first certificate listed is used. This will result in a failed handshake if the client only supports RSA certificates and the server only has ECDSA certificates. * Static and SDS certificates may not be mixed in a given :ref:`DownstreamTlsContext `. +* The selected certificate must adhere to the OCSP policy. If no + such certificate is found, the connection is refused. Only a single TLS certificate is supported today for :ref:`UpstreamTlsContexts `. @@ -168,6 +129,41 @@ TLS certificates can be specified in the static resource or can be fetched remot Certificate rotation is supported for static resources by sourcing :ref:`SDS configuration from the filesystem ` or by pushing updates from the SDS server. Please see :ref:`SDS ` for details. +.. _arch_overview_ssl_ocsp_stapling: + +OCSP Stapling +------------- + +:ref:`DownstreamTlsContexts ` support +stapling an Online Certificate Status Protocol (OCSP) response to a TLS certificate during the handshake. The +``ocsp_staple`` field allows the operator to supply a pre-computed OCSP response per-certificate in the context. +A single response may not pertain to multiple certificates. If provided, OCSP responses must be valid and +affirm the certificate has not been revoked. Expired OCSP responses are accepted, but may cause downstream +connection errors depending on the OCSP staple policy. + +:ref:`DownstreamTlsContexts ` +support an ``ocsp_staple_policy`` field to control whether Envoy should stop using a certificate or +continue without stapling when its associated OCSP response is missing or expired. +Certificates marked as `must-staple `_ require a +valid OCSP response regardless of the OCSP staple policy. 
In practice, a must-staple certificate causes +cEnvoy to behave as if the OCSP staple policy is :ref:`MUST_STAPLE`. +Envoy will not use a must-staple certificate for new connections after its OCSP response expires. + +OCSP responses are never stapled to TLS requests that do not indicate support for OCSP stapling +via the ``status_request`` extension. + +The following runtime flags are provided to adjust the requirements of OCSP responses and override +the OCSP policy. These flags default to ``true``. + +* ``envoy.reloadable_features.require_ocsp_response_for_must_staple_certs``: Disabling this allows + the operator to omit an OCSP response for must-staple certs in the config. +* ``envoy.reloadable_features.check_ocsp_policy``: Disabling this will disable OCSP policy + checking. OCSP responses are stapled when available if the client supports it, even if the + response is expired. Stapling is skipped if no response is present. + +OCSP responses are ignored for :ref:`UpstreamTlsContexts +`. + .. _arch_overview_ssl_auth_filter: Authentication filter @@ -182,6 +178,30 @@ infrastructure. Client TLS authentication filter :ref:`configuration reference `. +.. _arch_overview_ssl_custom_handshaker: + +Custom handshaker extension +--------------------------- + +The :ref:`CommonTlsContext ` +has a ``custom_handshaker`` extension which can be used to override SSL handshake +behavior entirely. This is useful for implementing any TLS behavior which is +difficult to express with callbacks. It is not necessary to write a custom +handshaker to use private key methods, see the +:ref:`private key method interface ` described above. + +To avoid reimplementing all of the `Ssl::ConnectionInfo `_ interface, a custom +implementation might choose to extend +`Envoy::Extensions::TransportSockets::Tls::SslHandshakerImpl `_. + +Custom handshakers need to explicitly declare via `HandshakerCapabilities `_ +which TLS features they are responsible for. The default Envoy handshaker will +manage the remainder. + +A useful example handshaker, named ``SslHandshakerImplForTest``, lives in +`this test `_ +and demonstrates special-case ``SSL_ERROR`` handling and callbacks. + .. _arch_overview_ssl_trouble_shooting: Trouble shooting diff --git a/docs/root/intro/deployment_types/double_proxy.rst b/docs/root/intro/deployment_types/double_proxy.rst index fd2757747bf4..ccf622eb63ac 100644 --- a/docs/root/intro/deployment_types/double_proxy.rst +++ b/docs/root/intro/deployment_types/double_proxy.rst @@ -21,6 +21,5 @@ ordinarily would not be trustable (such as the x-forwarded-for HTTP header). Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes an example double proxy configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. +The source distribution includes an example double proxy configuration. See +:ref:`here ` for more information. diff --git a/docs/root/intro/deployment_types/front_proxy.rst b/docs/root/intro/deployment_types/front_proxy.rst index f89e8cb17da5..d7e6494b9e79 100644 --- a/docs/root/intro/deployment_types/front_proxy.rst +++ b/docs/root/intro/deployment_types/front_proxy.rst @@ -21,6 +21,5 @@ reverse proxy provides the following features: Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes an example front proxy configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. +The source distribution includes an example front proxy configuration. 
See +:ref:`here ` for more information. diff --git a/docs/root/intro/deployment_types/service_to_service.rst b/docs/root/intro/deployment_types/service_to_service.rst index a4200a607ab3..7fb891f900ac 100644 --- a/docs/root/intro/deployment_types/service_to_service.rst +++ b/docs/root/intro/deployment_types/service_to_service.rst @@ -64,5 +64,4 @@ load balancing, statistics gathering, etc. Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes :ref:`an example service-to-service configuration` -that is very similar to the version that Lyft runs in production. +The source distribution includes :ref:`an example service-to-service configuration`. diff --git a/docs/root/intro/life_of_a_request.rst b/docs/root/intro/life_of_a_request.rst index ac6e5334689b..01df2a6fcbcb 100644 --- a/docs/root/intro/life_of_a_request.rst +++ b/docs/root/intro/life_of_a_request.rst @@ -118,99 +118,8 @@ It's helpful to focus on one at a time, so this example covers the following: We assume a static bootstrap configuration file for simplicity: -.. code-block:: yaml - - static_resources: - listeners: - # There is a single listener bound to port 443. - - name: listener_https - address: - socket_address: - protocol: TCP - address: 0.0.0.0 - port_value: 443 - # A single listener filter exists for TLS inspector. - listener_filters: - - name: "envoy.filters.listener.tls_inspector" - typed_config: {} - # On the listener, there is a single filter chain that matches SNI for acme.com. - filter_chains: - - filter_chain_match: - # This will match the SNI extracted by the TLS Inspector filter. - server_names: ["acme.com"] - # Downstream TLS configuration. - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "certs/servercert.pem" } - private_key: { filename: "certs/serverkey.pem" } - filters: - # The HTTP connection manager is the only network filter. - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - use_remote_address: true - http2_protocol_options: - max_concurrent_streams: 100 - # File system based access logging. - access_log: - - name: envoy.access_loggers.file - typed_config: - "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog - path: "/var/log/envoy/access.log" - # The route table, mapping /foo to some_service. - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["acme.com"] - routes: - - match: - path: "/foo" - route: - cluster: some_service - # CustomFilter and the HTTP router filter are the HTTP filter chain. - http_filters: - - name: some.customer.filter - - name: envoy.filters.http.router - clusters: - - name: some_service - connect_timeout: 5s - # Upstream TLS configuration. - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext - load_assignment: - cluster_name: some_service - # Static endpoint assignment. 
- endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 10.1.2.10 - port_value: 10002 - - endpoint: - address: - socket_address: - address: 10.1.2.11 - port_value: 10002 - http2_protocol_options: - max_concurrent_streams: 100 - - name: some_statsd_sink - connect_timeout: 5s - # The rest of the configuration for statsd sink cluster. - # statsd sink. - stats_sinks: - - name: envoy.stat_sinks.statsd - typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink - tcp_cluster_name: some_statsd_cluster +.. literalinclude:: _include/life-of-a-request.yaml + :language: yaml High level architecture ----------------------- diff --git a/docs/root/intro/what_is_envoy.rst b/docs/root/intro/what_is_envoy.rst index d9a6e62a3b64..9f89575877d6 100644 --- a/docs/root/intro/what_is_envoy.rst +++ b/docs/root/intro/what_is_envoy.rst @@ -24,21 +24,13 @@ approach to service to service communication: upgrades can be incredibly painful. Envoy can be deployed and upgraded quickly across an entire infrastructure transparently. -**Modern C++11 code base:** Envoy is written in C++11. Native code was chosen because we -believe that an architectural component such as Envoy should get out of the way as much as possible. -Modern application developers already deal with tail latencies that are difficult to reason about -due to deployments in shared cloud environments and the use of very productive but not particularly -well performing languages such as PHP, Python, Ruby, Scala, etc. Native code provides generally -excellent latency properties that don't add additional confusion to an already confusing situation. -Unlike other native code proxy solutions written in C, C++11 provides both excellent developer -productivity and performance. - **L3/L4 filter architecture:** At its core, Envoy is an L3/L4 network proxy. A pluggable :ref:`filter ` chain mechanism allows filters to be written to -perform different TCP proxy tasks and inserted into the main server. Filters have already been -written to support various tasks such as raw :ref:`TCP proxy `, -:ref:`HTTP proxy `, :ref:`TLS client certificate -authentication `, etc. +perform different TCP/UDP proxy tasks and inserted into the main server. Filters have already been +written to support various tasks such as raw :ref:`TCP proxy `, :ref:`UDP +proxy `, :ref:`HTTP proxy `, :ref:`TLS client +certificate authentication `, :ref:`Redis `, +:ref:`MongoDB `, :ref:`Postgres `, etc. **HTTP L7 filter architecture:** HTTP is such a critical component of modern application architectures that Envoy :ref:`supports ` an additional HTTP L7 filter @@ -52,7 +44,7 @@ Amazon's :ref:`DynamoDB `, etc. HTTP/1.1 to HTTP/2 proxy in both directions. This means that any combination of HTTP/1.1 and HTTP/2 clients and target servers can be bridged. The recommended service to service configuration uses HTTP/2 between all Envoys to create a mesh of persistent connections that requests and responses can -be multiplexed over. Envoy does not support SPDY as the protocol is being phased out. +be multiplexed over. **HTTP L7 routing:** When operating in HTTP mode, Envoy supports a :ref:`routing ` subsystem that is capable of routing and redirecting @@ -65,14 +57,6 @@ as the underlying multiplexed transport. Envoy :ref:`supports `_ is a popular database used in modern -web applications. Envoy :ref:`supports ` L7 sniffing, statistics production, -and logging for MongoDB connections. 
- -**DynamoDB L7 support**: `DynamoDB `_ is Amazon’s hosted key/value -NOSQL datastore. Envoy :ref:`supports ` L7 sniffing and statistics production -for DynamoDB connections. - **Service discovery and dynamic configuration:** Envoy optionally consumes a layered set of :ref:`dynamic configuration APIs ` for centralized management. The layers provide an Envoy with dynamic updates about: hosts within a backend cluster, the @@ -101,11 +85,10 @@ retries `, :ref:`circuit breaking `. Future support is planned for request racing. -**Front/edge proxy support:** Although Envoy is primarily designed as a service to service -communication system, there is benefit in using the same software at the edge (observability, -management, identical service discovery and load balancing algorithms, etc.). Envoy includes enough -features to make it usable as an edge proxy for most modern web application use cases. This includes -:ref:`TLS ` termination, HTTP/1.1 and HTTP/2 :ref:`support +**Front/edge proxy support:** There is substantial benefit in using the same software at the edge +(observability, management, identical service discovery and load balancing algorithms, etc.). Envoy +has a feature set that makes it well suited as an edge proxy for most modern web application use +cases. This includes :ref:`TLS ` termination, HTTP/1.1 and HTTP/2 :ref:`support `, as well as HTTP L7 :ref:`routing `. **Best in class observability:** As stated above, the primary goal of Envoy is to make the network @@ -115,12 +98,3 @@ includes robust :ref:`statistics ` support for all sub sink, though plugging in a different one would not be difficult. Statistics are also viewable via the :ref:`administration ` port. Envoy also supports distributed :ref:`tracing ` via thirdparty providers. - -Design goals -^^^^^^^^^^^^ - -A short note on the design goals of the code itself: Although Envoy is by no means slow (we have -spent considerable time optimizing certain fast paths), the code has been written to be modular and -easy to test versus aiming for the greatest possible absolute performance. It's our view that this -is a more efficient use of time given that typical deployments will be alongside languages and -runtimes many times slower and with many times greater memory usage. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index 15d518f4418f..1816322afcfe 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -35,6 +35,33 @@ modify different aspects of the server: All mutations must be sent as HTTP POST operations. When a mutation is requested via GET, the request has no effect, and an HTTP 400 (Invalid Request) response is returned. +.. note:: + + For an endpoint with *?format=json*, it dumps data as a JSON-serialized proto. Fields with default + values are not rendered. For example for */clusters?format=json*, the circuit breakers thresholds + priority field is omitted when its value is :ref:`DEFAULT priority + ` as shown below: + + .. code-block:: json + + { + "thresholds": [ + { + "max_connections": 1, + "max_pending_requests": 1024, + "max_requests": 1024, + "max_retries": 1 + }, + { + "priority": "HIGH", + "max_connections": 1, + "max_pending_requests": 1024, + "max_requests": 1024, + "max_retries": 1 + } + ] + } + .. http:get:: / Render an HTML home page with a table of links to all available options. @@ -257,7 +284,7 @@ modify different aspects of the server: Generally only used during development. 
With `--enable-fine-grain-logging` being set, the logger is represented by the path of the file it belongs to (to be specific, the path determined by `__FILE__`), so the logger list - will show a list of file paths, and the specific path should be used as to change the log level. + will show a list of file paths, and the specific path should be used to change the log level. .. http:get:: /memory @@ -276,19 +303,19 @@ modify different aspects of the server: .. _operations_admin_interface_drain: .. http:post:: /drain_listeners - + :ref:`Drains ` all listeners. .. http:post:: /drain_listeners?inboundonly - :ref:`Drains ` all inbound listeners. `traffic_direction` field in - :ref:`Listener ` is used to determine whether a listener + :ref:`Drains ` all inbound listeners. `traffic_direction` field in + :ref:`Listener ` is used to determine whether a listener is inbound or outbound. .. http:post:: /drain_listeners?graceful - When draining listeners, enter a graceful drain period prior to closing listeners. - This behaviour and duration is configurable via server options or CLI + When draining listeners, enter a graceful drain period prior to closing listeners. + This behaviour and duration is configurable via server options or CLI (:option:`--drain-time-s` and :option:`--drain-strategy`). .. attention:: @@ -334,7 +361,23 @@ modify different aspects of the server: "cpuset_threads": false }, "uptime_current_epoch": "6s", - "uptime_all_epochs": "6s" + "uptime_all_epochs": "6s", + "node": { + "id": "node1", + "cluster": "cluster1", + "user_agent_name": "envoy", + "user_agent_build_version": { + "version": { + "major_number": 1, + "minor_number": 15, + "patch": 0 + } + }, + "metadata": {}, + "extensions": [], + "client_features": [], + "listening_addresses": [] + } } See the :ref:`ServerInfo proto ` for an diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 96e4e2fcb053..cb2253321ae1 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -192,6 +192,21 @@ following are the command line options that Envoy supports. more. The administration interface usage is similar. Please see `Administration interface `_ for more detail. +.. option:: --socket-path + + *(optional)* The output file path to the socket address for :ref:`hot restart `. + Defaults to "@envoy_domain_socket", which will be created in the abstract namespace. The suffix _{role}_{id} + is appended to provide the name. All envoy processes wanting to participate in hot-restart together must + use the same value for this option. + + **NOTE**: A path starting with "@" will be created in the abstract namespace. + +.. option:: --socket-mode + + *(optional)* The socket file permission for :ref:`hot restart `. + This must be a valid octal file permission, such as 644. The default value is 600. + This flag may not be used when :option:`--socket-path` starts with "@" or is not set. + .. option:: --hot-restart-version *(optional)* Outputs an opaque hot restart compatibility version for the binary. This can be diff --git a/docs/root/operations/performance.rst b/docs/root/operations/performance.rst index 8846275290da..219c9e9b8555 100644 --- a/docs/root/operations/performance.rst +++ b/docs/root/operations/performance.rst @@ -56,15 +56,18 @@ Watchdog -------- In addition to event loop statistics, Envoy also include a configurable -:ref:`watchdog ` +:ref:`watchdog ` system that can increment statistics when Envoy is not responsive and
The system also has an extension point allowing for -custom actions to be taken based on watchdog events. The statistics are -useful for understanding at a high level whether Envoy's event loop is not -responsive either because it is doing too much work, blocking, or not being -scheduled by the OS. - -The watchdog emits statistics in both the *server.* and *server..* trees. +optionally kill the server. The system has two separate watchdog configs, one +for the main thread and one for worker threads; this is helpful as the different +threads have different workloads. The system also has an extension point +allowing for custom actions to be taken based on watchdog events. The +statistics are useful for understanding at a high level whether Envoy's event +loop is not responsive either because it is doing too much work, blocking, or +not being scheduled by the OS. + +The watchdog emits aggregated statistics in both *main_thread* and *workers*. +In addition, it emits individual statistics under *server..* trees. ** is equal to *main_thread*, *worker_0*, *worker_1*, etc. .. csv-table:: diff --git a/docs/root/install/building.rst b/docs/root/start/building.rst similarity index 86% rename from docs/root/install/building.rst rename to docs/root/start/building.rst index 77dc3ba74c10..102ff52903e5 100644 --- a/docs/root/install/building.rst +++ b/docs/root/start/building.rst @@ -20,11 +20,13 @@ recent Linux including Ubuntu 18.04 LTS. Building Envoy has the following requirements: -* GCC 7+ or Clang/LLVM 7+ (for C++14 support). +* GCC 7+ or Clang/LLVM 7+ (for C++14 support). Clang/LLVM 9+ preferred where Clang is used (see below). * These :repo:`Bazel native ` dependencies. Please see the linked :repo:`CI ` and :repo:`Bazel ` documentation for more information on performing manual builds. +Please note that for Clang/LLVM 8 and lower, Envoy may need to be built with `--define tcmalloc=gperftools` +as the new tcmalloc code is not guaranteed to compile with lower versions of Clang. .. _install_binaries: @@ -40,9 +42,6 @@ be found in the following repositories: binary with debug symbols on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine `_: Release binary with symbols stripped on top of a **glibc** alpine base. -* `envoyproxy/envoy-alpine-debug `_: - *Deprecated in favor of envoyproxy/envoy-debug.* Release binary with debug symbols on top of a - Release binary with debug symbols on top of a **glibc** alpine base. .. note:: @@ -57,9 +56,6 @@ be found in the following repositories: binary with debug symbols on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine-dev `_: Release binary with symbols stripped on top of a **glibc** alpine base. -* `envoyproxy/envoy-alpine-debug-dev `_: - *Deprecated in favor of envoyproxy/envoy-debug-dev.* Release binary with debug symbols on top of a - **glibc** alpine base. In the above *dev* repositories, the *latest* tag points to the last Envoy SHA in master that passed tests. @@ -99,4 +95,4 @@ Envoy binary, and putting the binary in an Ubuntu container. .. toctree:: :maxdepth: 2 - sandboxes/local_docker_build + install/sandboxes/local_docker_build diff --git a/docs/root/start/distro/ambassador.rst b/docs/root/start/distro/ambassador.rst deleted file mode 100644 index ccaaebd1fb54..000000000000 --- a/docs/root/start/distro/ambassador.rst +++ /dev/null @@ -1,125 +0,0 @@ -.. 
_install_ambassador: - -Envoy as an API Gateway in Kubernetes with Ambassador -===================================================== - -A common scenario for using Envoy is deploying it as an edge service (API -Gateway) in Kubernetes. `Ambassador `_ is an open -source distribution of Envoy designed for Kubernetes. Ambassador uses Envoy for -all L4/L7 management and Kubernetes for reliability, availability, and -scalability. Ambassador operates as a specialized control plane to expose -Envoy's functionality as Kubernetes annotations. - -This example will walk through how you can deploy Envoy on Kubernetes via -Ambassador. - -Deploying Ambassador --------------------- - -Ambassador is configured via Kubernetes deployments. To install Ambassador/Envoy -on Kubernetes, run the following if you're using a cluster with RBAC enabled: - -.. code-block:: console - - kubectl apply -f https://www.getambassador.io/yaml/ambassador/ambassador-rbac.yaml - -or this if you are not using RBAC: - -.. code-block:: console - - kubectl apply -f https://www.getambassador.io/yaml/ambassador/ambassador-no-rbac.yaml - -The above YAML will create a Kubernetes deployment for Ambassador that includes -readiness and liveness checks. By default, it will also create 3 instances of -Ambassador. Each Ambassador instance consists of an Envoy proxy along with the -Ambassador control plane. - -We'll now need to create a Kubernetes service to point to the Ambassador -deployment. In this example, we'll use a ``LoadBalancer`` service. If your -cluster doesn't support ``LoadBalancer`` services, you'll need to change to a -``NodePort`` or ``ClusterIP``. - -.. code-block:: yaml - - --- - apiVersion: v1 - kind: Service - metadata: - labels: - service: ambassador - name: ambassador - spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 80 - selector: - service: ambassador - -Save this YAML to a file ``ambassador-svc.yaml``. Then, deploy this service to -Kubernetes: - -.. code-block:: console - - kubectl apply -f ambassador-svc.yaml - -At this point, Envoy is now running on your cluster, along with the Ambassador -control plane. - -Configuring Ambassador ----------------------- - -Ambassador uses Kubernetes annotations to add or remove configuration. This -sample YAML will add a route to Google, similar to the basic configuration -example in the :ref:`Getting Started guide `. - -.. code-block:: yaml - - --- - apiVersion: v1 - kind: Service - metadata: - name: google - annotations: - getambassador.io/config: | - --- - apiVersion: ambassador/v0 - kind: Mapping - name: google_mapping - prefix: /google/ - service: https://google.com:443 - host_rewrite: www.google.com - spec: - type: ClusterIP - clusterIP: None - -Save the above into a file called ``google.yaml``. Then run: - -.. code-block:: console - - kubectl apply -f google.yaml - -Ambassador will detect the change to your Kubernetes annotation and add the -route to Envoy. Note that we used a dummy service in this example; typically, -you would associate the annotation with your real Kubernetes service. - -Testing the mapping -------------------- - -You can test this mapping by getting the external IP address for the Ambassador -service, and then sending a request via ``curl``. - -.. 
code-block:: console - - $ kubectl get svc ambassador - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - ambassador 10.19.241.98 35.225.154.81 80:32491/TCP 15m - $ curl -v 35.225.154.81/google/ - -More ----- - -Ambassador exposes multiple Envoy features on mappings, such as CORS, weighted -round robin, gRPC, TLS, and timeouts. For more information, read the -`configuration documentation -`_. diff --git a/docs/root/start/distro/gloo.rst b/docs/root/start/distro/gloo.rst deleted file mode 100644 index 2dd23e1fdcf0..000000000000 --- a/docs/root/start/distro/gloo.rst +++ /dev/null @@ -1,113 +0,0 @@ -.. _install_gloo: - -Envoy as Ingress, API and Function Gateway for Kubernetes with Gloo -=================================================================== - -Kubernetes users often need to allow traffic to flow from and to the cluster, -and Envoy is great for that purpose. -The open source project `Gloo `_, which is built on top -of Envoy, is designed for microservices, monoliths and also applications that -might want to leverage function as a service. Gloo can decouple client APIs -from upstream APIs at the routing level. In a simplistic way, Gloo is a -great and easy to use tool to get traffic inside your Kubernetes cluster. - -Continue reading for more information on how to get started with Gloo. -This should only take a few minutes - -Installing Gloo ---------------- - -For this installation, there are three main prerequisites: - -* **Kubernetes version**: Gloo requires version 1.8 or higher. `Minikube `_ - is an easy way to get access to your own local Kubernetes installation. -* **kubectl**: you need access to the `kubectl` command line tool. -* **glooctl**: this is the Gloo command line tool which you will use to interact - with the open source version of Gloo. Check the `releases `_ - page under Gloo's project repository to download the latest release. - There you will find versions compatible with macOS and Linux. - -Once all you have the above, all you need to do is run the following command: - -.. code-block:: console - - glooctl install kube - -If you are familiar with Kubernetes, the command above will tell kubernetes what and -how it should run the Gloo images. The Gloo pods should be running in a namespace called -``gloo-system``. - -Your output should look similar to this: - -.. code-block:: console - - namespace/gloo-system created - customresourcedefinition.apiextensions.k8s.io/upstreams.gloo.solo.io created - customresourcedefinition.apiextensions.k8s.io/virtualservices.gloo.solo.io created - customresourcedefinition.apiextensions.k8s.io/roles.gloo.solo.io created - customresourcedefinition.apiextensions.k8s.io/attributes.gloo.solo.io created - configmap/ingress-config created - clusterrole.rbac.authorization.k8s.io/gloo-role created - clusterrole.rbac.authorization.k8s.io/gloo-discovery-role created - clusterrolebinding.rbac.authorization.k8s.io/gloo-cluster-admin-binding created - clusterrolebinding.rbac.authorization.k8s.io/gloo-discovery-cluster-admin-binding created - clusterrole.rbac.authorization.k8s.io/gloo-knative-upstream-discovery-role created - clusterrolebinding.rbac.authorization.k8s.io/gloo-knative-upstream-discovery-binding created - deployment.apps/control-plane created - service/control-plane created - deployment.apps/function-discovery created - deployment.apps/ingress created - service/ingress created - deployment.extensions/kube-ingress-controller created - deployment.extensions/upstream-discovery created - Gloo successfully installed. 
- -Checking your Installation --------------------------- - -For more details on what is running in the ``gloo-system`` namespace, run the following -command: - -.. code-block:: console - - kubectl get all -n gloo-system - -Your output should look similar to this: - -.. code-block:: console - - NAME READY STATUS RESTARTS AGE - pod/control-plane-6fc6dc7545-xrllk 1/1 Running 0 11m - pod/function-discovery-544c596dcd-gk8x7 1/1 Running 0 11m - pod/ingress-64f75ccb7-4z299 1/1 Running 0 11m - pod/kube-ingress-controller-665d59bc7d-t6lwk 1/1 Running 0 11m - pod/upstream-discovery-74db4d7475-gqrst 1/1 Running 0 11m - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/control-plane ClusterIP 10.101.206.34 8081/TCP 11m - service/ingress LoadBalancer 10.108.115.187 8080:32608/TCP,8443:30634/TCP 11m - - NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE - deployment.apps/control-plane 1 1 1 1 11m - deployment.apps/function-discovery 1 1 1 1 11m - deployment.apps/ingress 1 1 1 1 11m - deployment.apps/kube-ingress-controller 1 1 1 1 11m - deployment.apps/upstream-discovery 1 1 1 1 11m - - NAME DESIRED CURRENT READY AGE - replicaset.apps/control-plane-6fc6dc7545 1 1 1 11m - replicaset.apps/function-discovery-544c596dcd 1 1 1 11m - replicaset.apps/ingress-64f75ccb7 1 1 1 11m - replicaset.apps/kube-ingress-controller-665d59bc7d 1 1 1 11m - replicaset.apps/upstream-discovery-74db4d7475 1 1 1 11m - - -In case your pods are not in ``Running`` state, feel free to jump on the Gloo `slack channel -`_. -The community will be able to assist you there. - -What's next? ------------- - -For examples and more documentation on how to use the open source project Gloo, -check the `project page `_. \ No newline at end of file diff --git a/docs/root/install/ref_configs.rst b/docs/root/start/install/ref_configs.rst similarity index 85% rename from docs/root/install/ref_configs.rst rename to docs/root/start/install/ref_configs.rst index 5981498beff3..b7bb405175b2 100644 --- a/docs/root/install/ref_configs.rst +++ b/docs/root/start/install/ref_configs.rst @@ -17,16 +17,15 @@ see the :ref:`configuration reference `. Configuration generator ----------------------- -Envoy configurations can become relatively complicated. At Lyft we use `jinja -`_ templating to make the configurations easier to create and manage. The -source distribution includes a version of the configuration generator that loosely approximates what -we use at Lyft. We have also included three example configuration templates for each of the above -three scenarios. +Envoy configurations can become relatively complicated. The +source distribution includes a version of the configuration generator that uses `jinja +`_ templating to make the configurations easier to create and manage. We +have also included three example configuration templates for each of the above three scenarios. 
* Generator script: :repo:`configs/configgen.py` -* Service to service template: :repo:`configs/envoy_service_to_service_v2.template.yaml` -* Front proxy template: :repo:`configs/envoy_front_proxy_v2.template.yaml` -* Double proxy template: :repo:`configs/envoy_double_proxy_v2.template.yaml` +* Service to service template: :repo:`configs/envoy_service_to_service.template.yaml` +* Front proxy template: :repo:`configs/envoy_front_proxy.template.yaml` +* Double proxy template: :repo:`configs/envoy_double_proxy.template.yaml` To generate the example configurations run the following from the root of the repo: diff --git a/docs/root/install/sandboxes/local_docker_build.rst b/docs/root/start/install/sandboxes/local_docker_build.rst similarity index 100% rename from docs/root/install/sandboxes/local_docker_build.rst rename to docs/root/start/install/sandboxes/local_docker_build.rst diff --git a/docs/root/install/tools/config_load_check_tool.rst b/docs/root/start/install/tools/config_load_check_tool.rst similarity index 100% rename from docs/root/install/tools/config_load_check_tool.rst rename to docs/root/start/install/tools/config_load_check_tool.rst diff --git a/docs/root/install/tools/route_table_check_tool.rst b/docs/root/start/install/tools/route_table_check_tool.rst similarity index 100% rename from docs/root/install/tools/route_table_check_tool.rst rename to docs/root/start/install/tools/route_table_check_tool.rst diff --git a/docs/root/install/tools/schema_validator_check_tool.rst b/docs/root/start/install/tools/schema_validator_check_tool.rst similarity index 100% rename from docs/root/install/tools/schema_validator_check_tool.rst rename to docs/root/start/install/tools/schema_validator_check_tool.rst diff --git a/docs/root/install/tools/tools.rst b/docs/root/start/install/tools/tools.rst similarity index 100% rename from docs/root/install/tools/tools.rst rename to docs/root/start/install/tools/tools.rst diff --git a/docs/root/start/sandboxes/_include/docker-env-setup.rst b/docs/root/start/sandboxes/_include/docker-env-setup.rst new file mode 100644 index 000000000000..a1ee8dfede6f --- /dev/null +++ b/docs/root/start/sandboxes/_include/docker-env-setup.rst @@ -0,0 +1,23 @@ +The following documentation runs through the setup of Envoy described above. + +Step 1: Install Docker +********************** + +Ensure that you have recent versions of ``docker`` and ``docker-compose`` installed. + +A simple way to achieve this is via the `Docker Desktop `_. + +Step 2: Clone the Envoy repo +**************************** + +If you have not cloned the Envoy repo, clone it with: + +.. tabs:: + + .. code-tab:: console SSH + + git clone git@github.com:envoyproxy/envoy + + .. code-tab:: console HTTPS + + git clone https://github.com/envoyproxy/envoy.git diff --git a/docs/root/start/sandboxes/cache.rst b/docs/root/start/sandboxes/cache.rst new file mode 100644 index 000000000000..51751978b193 --- /dev/null +++ b/docs/root/start/sandboxes/cache.rst @@ -0,0 +1,229 @@ +.. _install_sandboxes_cache_filter: + +Cache Filter +============ +.. TODO(yosrym93): When documentation is written for a production-ready Cache Filter, link to it through this doc. + +In this example, we demonstrate how HTTP caching can be utilized in Envoy by using the Cache Filter. +The setup of this sandbox is based on the setup of the :ref:`Front Proxy sandbox `. + +All incoming requests are routed via the front Envoy, which acts as a reverse proxy sitting on +the edge of the ``envoymesh`` network.
Ports ``8000`` and ``8001`` are exposed by docker +compose (see :repo:`/examples/cache/docker-compose.yaml`) to handle ``HTTP`` calls +to the services, and requests to ``/admin`` respectively. Two backend services are deployed behind the front Envoy, each with a sidecar Envoy. + +The front Envoy is configured to run the Cache Filter, which stores cacheable responses in an in-memory cache, +and serves them to subsequent requests. In this demo, the responses that are served by the deployed services are stored in :repo:`/examples/cache/responses.yaml`. +This file is mounted to both services' containers, so any changes made to the stored responses while the services are running should be instantly effective (no need to rebuild or rerun). + +For the purposes of the demo, a response's date of creation is appended to its body before being served. +An ETag is computed for every response for validation purposes; it depends only on the response body in the yaml file (i.e. the appended date is not taken into account). +Cached responses can be identified by having an ``age`` header. Validated responses can be identified by having a generation date older than the ``date`` header, +since validation updates the ``date`` header while the body stays the same. Validated responses do not have an ``age`` header. +Responses served from the backend service have no ``age`` header, and their ``date`` header is the same as their generation date. + +Running the Sandbox +~~~~~~~~~~~~~~~~~~~ + +.. include:: _include/docker-env-setup.rst + +Step 3: Start all of our containers +*********************************** + +.. code-block:: console + + $ pwd + envoy/examples/cache + $ docker-compose build --pull + $ docker-compose up -d + $ docker-compose ps + + Name Command State Ports + ------------------------------------------------------------------------------------------------------------------------ + cache_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + cache_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + cache_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + +Step 4: Test Envoy's HTTP caching capabilities +********************************************** + +You can now send a request to both services via the ``front-envoy``. Note that since the two services have different routes, +identical requests to different services have different cache entries (i.e. a request sent to service 2 will not be served by a cached +response produced by service 1). + +To send a request: + +``curl -i localhost:8000/service/<service_no>/<response>`` + +``service_no``: The service to send the request to, 1 or 2. + +``response``: The response that is being requested. The responses are found in :repo:`/examples/cache/responses.yaml`. + + +The provided example responses are: + +- ``valid-for-minute`` + This response remains fresh in the cache for a minute, after which it is validated by the backend service before being served from the cache. + If found to be updated, the new response is served (and cached). Otherwise, the cached response is served and refreshed. + +- ``private`` + This response is private; it cannot be stored by shared caches (such as proxies). It will always be served from the backend service. + +- ``no-cache`` + This response has to be validated every time before being served. + +You can change the responses' headers and bodies (or add new ones) while the sandbox is running to experiment.
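Before walking through the individual example responses below, a quick way to observe the caching behaviour described above is to request the same resource twice and compare the ``date`` and ``age`` headers. This is only a sketch and assumes the sandbox from Step 3 is running with the front Envoy on ``localhost:8000``:

.. code-block:: console

   $ # First request: served by the backend service, so there is no "age" header.
   $ curl -s -i localhost:8000/service/1/valid-for-minute | grep -E "^(date|age):"
   $ # Wait a few seconds, then repeat. The second response should be served from
   $ # Envoy's in-memory cache: the "date" header is unchanged and an "age" header appears.
   $ sleep 5
   $ curl -s -i localhost:8000/service/1/valid-for-minute | grep -E "^(date|age):"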
+ +Example responses +----------------- + +1. valid-for-minute +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + $ curl -i localhost:8000/service/1/valid-for-minute + HTTP/1.1 200 OK + content-type: text/html; charset=utf-8 + content-length: 103 + cache-control: max-age=60 + custom-header: any value + etag: "172ae25df822c3299cf2248694b4ce23" + date: Fri, 11 Sep 2020 03:20:40 GMT + server: envoy + x-envoy-upstream-service-time: 11 + + This response will stay fresh for one minute + Response body generated at: Fri, 11 Sep 2020 03:20:40 GMT + +Naturally, response ``date`` header is the same time as the generated time. +Sending the same request after 30 seconds gives the same exact response with the same generation date, +but with an ``age`` header as it was served from cache: + +.. code-block:: console + + $ curl -i localhost:8000/service/1/valid-for-minute + HTTP/1.1 200 OK + content-type: text/html; charset=utf-8 + content-length: 103 + cache-control: max-age=60 + custom-header: any value + etag: "172ae25df822c3299cf2248694b4ce23" + date: Fri, 11 Sep 2020 03:20:40 GMT + server: envoy + x-envoy-upstream-service-time: 11 + age: 30 + + This response will stay fresh for one minute + Response body generated at: Fri, 11 Sep 2020 03:20:40 GMT + +After 1 minute and 1 second: + +.. code-block:: console + + $ curl -i localhost:8000/service/1/valid-for-minute + HTTP/1.1 200 OK + cache-control: max-age=60 + custom-header: any value + etag: "172ae25df822c3299cf2248694b4ce23" + date: Fri, 11 Sep 2020 03:21:41 GMT + server: envoy + x-envoy-upstream-service-time: 8 + content-length: 103 + content-type: text/html; charset=utf-8 + + This response will stay fresh for one minute + Response body generated at: Fri, 11 Sep 2020 03:20:40 GMT + +The same response was served after being validated with the backend service. +You can verify this as the response generation time is the same, +but the response ``date`` header was updated with the validation response date. +Also, no ``age`` header. + +Every time the response is validated, it stays fresh for another minute. +If the response body changes while the cached response is still fresh, +the cached response will still be served. The cached response will only be updated when it is no longer fresh. + +2. private +^^^^^^^^^^ + +.. code-block:: console + + $ curl -i localhost:8000/service/1/private + HTTP/1.1 200 OK + content-type: text/html; charset=utf-8 + content-length: 117 + cache-control: private + etag: "6bd80b59b2722606abf2b8d83ed2126d" + date: Fri, 11 Sep 2020 03:22:28 GMT + server: envoy + x-envoy-upstream-service-time: 7 + + This is a private response, it will not be cached by Envoy + Response body generated at: Fri, 11 Sep 2020 03:22:28 GMT + +No matter how many times you make this request, you will always receive a new response; +new date of generation, new ``date`` header, and no ``age`` header. + +3. no-cache +^^^^^^^^^^^ + +.. code-block:: console + + $ curl -i localhost:8000/service/1/no-cache + HTTP/1.1 200 OK + content-type: text/html; charset=utf-8 + content-length: 130 + cache-control: max-age=0, no-cache + etag: "ce39a53bd6bb8abdb2488a5a375397e4" + date: Fri, 11 Sep 2020 03:23:07 GMT + server: envoy + x-envoy-upstream-service-time: 7 + + This response can be cached, but it has to be validated on each request + Response body generated at: Fri, 11 Sep 2020 03:23:07 GMT + +After a few seconds: + +.. 
code-block:: console + + $ curl -i localhost:8000/service/1/no-cache + HTTP/1.1 200 OK + cache-control: max-age=0, no-cache + etag: "ce39a53bd6bb8abdb2488a5a375397e4" + date: Fri, 11 Sep 2020 03:23:12 GMT + server: envoy + x-envoy-upstream-service-time: 7 + content-length: 130 + content-type: text/html; charset=utf-8 + + This response can be cached, but it has to be validated on each request + Response body generated at: Fri, 11 Sep 2020 03:23:07 GMT + +You will receive a cached response that has the same generation time. +However, the ``date`` header will always be updated as this response will always be validated first. +Also, no ``age`` header. + +If you change the response body in the yaml file: + +.. code-block:: console + + $ curl -i localhost:8000/service/1/no-cache + HTTP/1.1 200 OK + content-type: text/html; charset=utf-8 + content-length: 133 + cache-control: max-age=0, no-cache + etag: "f4768af0ac9f6f54f88169a1f3ecc9f3" + date: Fri, 11 Sep 2020 03:24:10 GMT + server: envoy + x-envoy-upstream-service-time: 7 + + This response can be cached, but it has to be validated on each request!!! + Response body generated at: Fri, 11 Sep 2020 03:24:10 GMT + +You will receive a new response that's served from the backend service. +The new response will be cached for subsequent requests. + +You can also add new responses to the yaml file with different ``cache-control`` headers and start experimenting! +To learn more about caching and ``cache-control`` headers visit +the `MDN Web Docs `_. diff --git a/docs/root/start/sandboxes/cors.rst b/docs/root/start/sandboxes/cors.rst index 9b122b92706a..9a1aa5981a5c 100644 --- a/docs/root/start/sandboxes/cors.rst +++ b/docs/root/start/sandboxes/cors.rst @@ -29,25 +29,10 @@ The CORS enforcement choices are: Running the Sandboxes ~~~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of both services. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Start all of our containers** +Step 3: Start all of our containers +*********************************** Switch to the ``frontend`` directory in the ``cors`` example, and start the containers: @@ -79,7 +64,8 @@ Now, switch to the ``backend`` directory in the ``cors`` example, and start the backend_backend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp backend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp -**Step 4: Test Envoy's CORS capabilities** +Step 4: Test Envoy's CORS capabilities +************************************** You can now open a browser to view your frontend service at http://localhost:8000. @@ -94,7 +80,8 @@ For example: Access to XMLHttpRequest at 'http://192.168.99.100:8002/cors/disabled' from origin 'http://192.168.99.101:8000' has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource. -**Step 5: Check stats of backend via admin** +Step 5: Check stats of backend via admin +**************************************** When Envoy runs, it can listen to ``admin`` requests if a port is configured. 
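As a concrete illustration of that step, the backend Envoy's admin interface (mapped to host port ``8003`` in the ``docker-compose ps`` output above) can be queried for CORS-related counters. The exact stat names are not shown in this hunk, so the filter below is a loose, assumed grep rather than the documented command:

.. code-block:: console

   $ # Query the backend admin endpoint and show any CORS filter counters
   $ # (e.g. valid/invalid origin counts) after exercising the demo in the browser.
   $ curl -s "http://localhost:8003/stats" | grep cors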
diff --git a/docs/root/start/sandboxes/csrf.rst b/docs/root/start/sandboxes/csrf.rst index 0893598b21d2..e1874e4677c5 100644 --- a/docs/root/start/sandboxes/csrf.rst +++ b/docs/root/start/sandboxes/csrf.rst @@ -30,25 +30,10 @@ enforcement. The CSRF enforcement choices are: Running the Sandboxes ~~~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of both services. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Start all of our containers** +Step 3: Start all of our containers +*********************************** Switch to the ``samesite`` directory in the ``csrf`` example, and start the containers: @@ -79,7 +64,8 @@ Now, switch to the ``crosssite`` directory in the ``csrf`` example, and start th crosssite_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp crosssite_service_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 8000/tcp -**Step 4: Test Envoy's CSRF capabilities** +Step 4: Test Envoy's CSRF capabilities +************************************** You can now open a browser at http://localhost:8002 to view your ``crosssite`` frontend service. @@ -103,7 +89,8 @@ For example: If you change the destination to be the same as one displaying the website and set the ``CSRF`` enforcement to enabled the request will go through successfully. -**Step 5: Check stats of backend via admin** +Step 5: Check stats of backend via admin +**************************************** When Envoy runs, it can listen to ``admin`` requests if a port is configured. In the example configs, the backend admin is bound to port ``8001``. diff --git a/docs/root/start/sandboxes/ext_authz.rst b/docs/root/start/sandboxes/ext_authz.rst index 522d37392a04..cde983415030 100644 --- a/docs/root/start/sandboxes/ext_authz.rst +++ b/docs/root/start/sandboxes/ext_authz.rst @@ -17,23 +17,10 @@ header entry to the original request headers to be forwarded to the upstream ser Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -**Step 1: Install Docker** +.. include:: _include/docker-env-setup.rst -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Start all of our containers** +Step 3: Start all of our containers +*********************************** To build this sandbox example and start the example services, run the following commands: @@ -76,7 +63,8 @@ For example, to run Envoy with ext_authz HTTP filter with HTTP service will be: $ FRONT_ENVOY_YAML=config/http-service.yaml docker-compose up --build -d $ # Or you can update the .env file with the above FRONT_ENVOY_YAML value, so you don't have to specify it when running the "up" command. 
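Once the containers are up (with either the gRPC or HTTP authorization service selected via ``FRONT_ENVOY_YAML``), a simple way to observe the ext_authz filter is to compare an unauthenticated request with an authenticated one. The sketch below is illustrative only: the listener port, the ``Bearer`` token value, and the expected status codes are assumptions rather than values taken from the sandbox configuration:

.. code-block:: console

   $ # Without credentials, the ext_authz filter is expected to deny the request.
   $ curl -s -o /dev/null -w "%{http_code}\n" localhost:8000/service
   $ # With a token known to the (hypothetical) authorization service, the request
   $ # should be allowed and forwarded upstream with the extra header appended.
   $ curl -s -o /dev/null -w "%{http_code}\n" -H "Authorization: Bearer token1" localhost:8000/service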
-**Step 4: Access the upstream-service behind the Front Envoy** +Step 4: Access the upstream-service behind the Front Envoy +********************************************************** You can now try to send a request to upstream-service via the front-envoy as follows: diff --git a/docs/root/start/sandboxes/fault_injection.rst b/docs/root/start/sandboxes/fault_injection.rst index c7034f37f9ec..9bd823985a33 100644 --- a/docs/root/start/sandboxes/fault_injection.rst +++ b/docs/root/start/sandboxes/fault_injection.rst @@ -8,25 +8,10 @@ This simple example demonstrates Envoy's :ref:`fault injection `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Start all of our containers** +Step 3: Start all of our containers +*********************************** Terminal 1 @@ -43,7 +28,8 @@ Terminal 1 fault-injection_backend_1 gunicorn -b 0.0.0.0:80 htt Up 0.0.0.0:8080->80/tcp fault-injection_envoy_1 /docker-entrypoint.sh /usr Up 10000/tcp, 0.0.0.0:9211->9211/tcp, 0.0.0.0:9901->9901/tcp -**Step 4: Start sending continuous stream of HTTP requests** +Step 4: Start sending continuous stream of HTTP requests +******************************************************** Terminal 2 @@ -56,7 +42,8 @@ Terminal 2 The script above (``send_request.sh``) sends a continuous stream of HTTP requests to Envoy, which in turn forwards the requests to the backend container. Fauilt injection is configured in Envoy but turned off (i.e. affects 0% of requests). Consequently, you should see a continuous sequence of HTTP 200 response codes. -**Step 5: Test Envoy's abort fault injection** +Step 5: Test Envoy's abort fault injection +****************************************** Turn on *abort* fault injection via the runtime using the commands below. @@ -78,7 +65,8 @@ Terminal 3 $ bash disable_abort_fault_injection.sh -**Step 6: Test Envoy's delay fault injection** +Step 6: Test Envoy's delay fault injection +****************************************** Turn on *delay* fault injection via the runtime using the commands below. @@ -99,7 +87,8 @@ Terminal 3 $ bash disable_delay_fault_injection.sh -**Step 7: Check the current runtime filesystem** +Step 7: Check the current runtime filesystem +******************************************** To see the current runtime filesystem overview: diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst index 52c6c284a9ad..8433372adcbc 100644 --- a/docs/root/start/sandboxes/front_proxy.rst +++ b/docs/root/start/sandboxes/front_proxy.rst @@ -29,26 +29,10 @@ requests are handled by the service Envoy, and efficiently routed to your servic Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an Envoy cluster organized -as is described in the image above. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Start all of our containers** +Step 3: Start all of our containers +*********************************** .. 
code-block:: console @@ -64,7 +48,8 @@ or front-proxy_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp front-proxy_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp -**Step 4: Test Envoy's routing capabilities** +Step 4: Test Envoy's routing capabilities +***************************************** You can now send a request to both services via the ``front-envoy``. @@ -160,7 +145,8 @@ We can also use ``HTTPS`` to call services behind the front Envoy. For example, < Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 -**Step 5: Test Envoy's load balancing capabilities** +Step 5: Test Envoy's load balancing capabilities +************************************************ Now let's scale up our ``service1`` nodes to demonstrate the load balancing abilities of Envoy: @@ -228,7 +214,8 @@ requests by doing a round robin of the three ``service1`` machines: < Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 -**Step 6: enter containers and curl services** +Step 6: enter containers and curl services +****************************************** In addition of using ``curl`` from your host machine, you can also enter the containers themselves and ``curl`` from inside them. To enter a container you @@ -247,7 +234,8 @@ enter the ``front-envoy`` container, and ``curl`` for services locally: root@81288499f9d7:/# curl localhost:8080/service/2 Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 -**Step 7: enter container and curl admin** +Step 7: enter container and curl admin +************************************** When Envoy runs it also attaches an ``admin`` to your desired port. diff --git a/docs/root/start/sandboxes/grpc_bridge.rst b/docs/root/start/sandboxes/grpc_bridge.rst index 318eff7d9111..e4c74e02a2d9 100644 --- a/docs/root/start/sandboxes/grpc_bridge.rst +++ b/docs/root/start/sandboxes/grpc_bridge.rst @@ -23,25 +23,10 @@ base routing via its route configuration. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of the services. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Generate the protocol stubs** +Step 3: Generate the protocol stubs +*********************************** A docker-compose file is provided that generates the stubs for both ``client`` and ``server`` from the specification in the ``protos`` directory. @@ -81,7 +66,8 @@ respective directories: These generated ``python`` and ``go`` stubs can be included as external modules. -**Step 4: Start all of our containers** +Step 4: Start all of our containers +*********************************** To build this sandbox example and start the example services, run the following commands: diff --git a/docs/root/start/sandboxes/jaeger_native_tracing.rst b/docs/root/start/sandboxes/jaeger_native_tracing.rst index 505ee4d4df42..f2311c86c238 100644 --- a/docs/root/start/sandboxes/jaeger_native_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_native_tracing.rst @@ -47,26 +47,10 @@ the trace headers while making an outbound call to service2. 
Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an Envoy cluster organized -as is described above. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Build the sandbox** +Step 3: Build the sandbox +************************* To build this sandbox example, and start the example apps run the following commands: @@ -85,7 +69,8 @@ To build this sandbox example, and start the example apps run the following comm jaeger-native-tracing_service1_1 /start-service.sh Up 10000/tcp, 8000/tcp jaeger-native-tracing_service2_1 /start-service.sh Up 10000/tcp, 8000/tcp -**Step 4: Generate some load** +Step 4: Generate some load +************************** You can now send a request to service1 via the front-envoy as follows: @@ -109,7 +94,8 @@ You can now send a request to service1 via the front-envoy as follows: Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact -**Step 5: View the traces in Jaeger UI** +Step 5: View the traces in Jaeger UI +************************************ Point your browser to http://localhost:16686 . You should see the Jaeger dashboard. Set the service to "front-proxy" and hit 'Find Traces'. You should see traces from the front-proxy. diff --git a/docs/root/start/sandboxes/jaeger_tracing.rst b/docs/root/start/sandboxes/jaeger_tracing.rst index 0de59e9213f5..7f6affdb262b 100644 --- a/docs/root/start/sandboxes/jaeger_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_tracing.rst @@ -33,26 +33,10 @@ the trace headers while making an outbound call to service2. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an Envoy cluster organized -as is described above. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Build the sandbox** +Step 3: Build the sandbox +************************* To build this sandbox example, and start the example apps run the following commands: @@ -71,7 +55,8 @@ To build this sandbox example, and start the example apps run the following comm jaeger-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp jaeger-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp -**Step 4: Generate some load** +Step 4: Generate some load +************************** You can now send a request to service1 via the front-envoy as follows: @@ -95,7 +80,8 @@ You can now send a request to service1 via the front-envoy as follows: Hello from behind Envoy (service 1)! 
hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact -**Step 5: View the traces in Jaeger UI** +Step 5: View the traces in Jaeger UI +************************************ Point your browser to http://localhost:16686 . You should see the Jaeger dashboard. Set the service to "front-proxy" and hit 'Find Traces'. You should see traces from the front-proxy. diff --git a/docs/root/start/sandboxes/load_reporting_service.rst b/docs/root/start/sandboxes/load_reporting_service.rst index f51bdb319260..224eba07e81c 100644 --- a/docs/root/start/sandboxes/load_reporting_service.rst +++ b/docs/root/start/sandboxes/load_reporting_service.rst @@ -18,26 +18,10 @@ LRS Server enables the stats by sending LoadStatsResponse. Sending requests to h Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an Envoy cluster organized -as is described above. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Build the sandbox** +Step 3: Build the sandbox +************************* Terminal 1 :: @@ -59,7 +43,8 @@ Terminal 2 :: load-reporting-service_http_service_2 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 0.0.0.0:81->80/tcp, 0.0.0.0:8082->8081/tcp load-reporting-service_lrs_server_1 go run main.go Up 0.0.0.0:18000->18000/tcp -**Step 4: Start sending stream of HTTP requests** +Step 4: Start sending stream of HTTP requests +********************************************* Terminal 2 :: @@ -69,7 +54,8 @@ Terminal 2 :: The script above (``send_requests.sh``) sends requests randomly to each Envoy, which in turn forwards the requests to the backend service. -**Step 5: See Envoy Stats** +Step 5: See Envoy Stats +*********************** You should see diff --git a/docs/root/start/sandboxes/lua.rst b/docs/root/start/sandboxes/lua.rst index 42492506e646..c26dca7a3ee7 100644 --- a/docs/root/start/sandboxes/lua.rst +++ b/docs/root/start/sandboxes/lua.rst @@ -12,25 +12,10 @@ filter that contains two functions namely Running the Sandboxes ~~~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of both services. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Build the sandbox** +Step 3: Build the sandbox +************************* .. code-block:: console @@ -45,7 +30,8 @@ or lua_proxy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp lua_web_service_1 node ./index.js Up 0.0.0.0:8080->80/tcp -**Step 4: Send a request to the service** +Step 4: Send a request to the service +************************************* The output from the ``curl`` command below should include the headers ``foo``. 
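The hunk ends before the ``curl`` command itself; a minimal sketch of what such a request might look like, assuming the proxy listens on ``localhost:8000`` as shown in the ``docker-compose ps`` output above and that the Lua script adds a ``foo`` header to the response:

.. code-block:: console

   $ # Send a request through the Lua-enabled proxy and look for the "foo"
   $ # response header added by the script (its value is set by the Lua filter).
   $ curl -v localhost:8000 2>&1 | grep foo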
diff --git a/docs/root/start/sandboxes/mysql.rst b/docs/root/start/sandboxes/mysql.rst index 4c194d6d37c3..a251bed393ab 100644 --- a/docs/root/start/sandboxes/mysql.rst +++ b/docs/root/start/sandboxes/mysql.rst @@ -10,25 +10,10 @@ metrics. Running the Sandboxes ~~~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of the services. +.. include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Build the sandbox** +Step 3: Build the sandbox +************************* Terminal 1 @@ -46,7 +31,8 @@ Terminal 1 mysql_proxy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:1999->1999/tcp, 0.0.0.0:8001->8001/tcp -**Step 4: Issue commands using mysql** +Step 4: Issue commands using mysql +********************************** Use ``mysql`` to issue some commands and verify they are routed via Envoy. Note that the current implementation of the protocol filter was tested with MySQL @@ -90,7 +76,8 @@ Terminal 1 mysql> exit Bye -**Step 5: Check egress stats** +Step 5: Check egress stats +************************** Check egress stats were updated. @@ -109,7 +96,8 @@ Terminal 1 mysql.egress_mysql.sessions: 1 mysql.egress_mysql.upgraded_to_ssl: 0 -**Step 6: Check TCP stats** +Step 6: Check TCP stats +*********************** Check TCP stats were updated. diff --git a/docs/root/start/sandboxes/redis.rst b/docs/root/start/sandboxes/redis.rst index fcd3211af572..4fe2d63c6c39 100644 --- a/docs/root/start/sandboxes/redis.rst +++ b/docs/root/start/sandboxes/redis.rst @@ -9,25 +9,10 @@ In this example, we show how a :ref:`Redis filter `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Build the sandbox** +Step 3: Build the sandbox +************************* Terminal 1 @@ -44,7 +29,8 @@ Terminal 1 redis_proxy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:1999->1999/tcp, 0.0.0.0:8001->8001/tcp redis_redis_1 docker-entrypoint.sh redis Up 0.0.0.0:6379->6379/tcp -**Step 4: Issue Redis commands** +Step 4: Issue Redis commands +**************************** Issue Redis commands using your favourite Redis client, such as ``redis-cli``, and verify they are routed via Envoy. @@ -61,7 +47,8 @@ Terminal 1 $ redis-cli -h localhost -p 1999 get bar "bar" -**Step 5: Verify egress stats** +Step 5: Verify egress stats +*************************** Go to ``http://localhost:8001/stats?usedonly&filter=redis.egress_redis.command`` and verify the following stats: diff --git a/docs/root/start/sandboxes/zipkin_tracing.rst b/docs/root/start/sandboxes/zipkin_tracing.rst index 150089fb45fe..747c6cafd188 100644 --- a/docs/root/start/sandboxes/zipkin_tracing.rst +++ b/docs/root/start/sandboxes/zipkin_tracing.rst @@ -33,26 +33,10 @@ the trace headers while making an outbound call to service2. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ -The following documentation runs through the setup of an Envoy cluster organized -as is described above. +.. 
include:: _include/docker-env-setup.rst -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker`` and ``docker-compose``. - -A simple way to achieve this is via the `Docker Desktop `_. - -**Step 2: Clone the Envoy repo** - -If you have not cloned the Envoy repo, clone it with: - -``git clone git@github.com:envoyproxy/envoy`` - -or - -``git clone https://github.com/envoyproxy/envoy.git`` - -**Step 3: Build the sandbox** +Step 3: Build the sandbox +************************* To build this sandbox example, and start the example apps run the following commands: @@ -71,7 +55,8 @@ To build this sandbox example, and start the example apps run the following comm zipkin-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp zipkin-tracing_zipkin_1 /busybox/sh run.sh Up 9410/tcp, 0.0.0.0:9411->9411/tcp -**Step 4: Generate some load** +Step 4: Generate some load +************************** You can now send a request to service1 via the front-envoy as follows: @@ -95,7 +80,8 @@ You can now send a request to service1 via the front-envoy as follows: Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 * Connection #0 to host 192.168.99.100 left intact -**Step 5: View the traces in Zipkin UI** +Step 5: View the traces in Zipkin UI +************************************ Point your browser to http://localhost:9411 . You should see the Zipkin dashboard. Set the service to "front-proxy" and set the start time to a few minutes before diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 5e3bae8760ba..79ddfc5acd0c 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -12,6 +12,18 @@ These examples use the :ref:`v3 Envoy API `, but use only t feature of the API, which is most useful for simple requirements. For more complex requirements :ref:`Dynamic Configuration ` is supported. +.. _install: + +Building and installation +------------------------- + +.. toctree:: + :maxdepth: 2 + + building + install/ref_configs + install/tools/tools + Quick Start to Run Simple Example --------------------------------- @@ -20,7 +32,7 @@ more detailed explanation of the configuration file and execution steps for the same configuration. A very minimal Envoy configuration that can be used to validate basic plain HTTP -proxying is available in :repo:`configs/google_com_proxy.v2.yaml`. This is not +proxying is available in :repo:`configs/google_com_proxy.yaml`. This is not intended to represent a realistic Envoy deployment: .. substitution-code-block:: none @@ -205,6 +217,7 @@ features. The following sandboxes are available: .. toctree:: :maxdepth: 2 + sandboxes/cache sandboxes/cors sandboxes/csrf sandboxes/ext_authz @@ -218,15 +231,3 @@ features. The following sandboxes are available: sandboxes/mysql sandboxes/redis sandboxes/zipkin_tracing - -Other use cases ---------------- - -In addition to the proxy itself, Envoy is also bundled as part of several open -source distributions that target specific use cases. - -.. 
toctree:: - :maxdepth: 2 - - distro/ambassador - distro/gloo diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 627d8ad9d0d1..93ef61b84438 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -1,134 +1,43 @@ -1.16.0 (Pending) +1.17.0 (pending) ================ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* -* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in //BUILD. - Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* build: an :ref:`Ubuntu based debug image ` is built and published in DockerHub. -* build: the debug information will be generated separately to reduce target size and reduce compilation time when build in compilation mode `dbg` and `opt`. Users will need to build dwp file to debug with gdb. -* compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. -* decompressor: headers-only requests were incorrectly not advertising accept-encoding when configured to do so. This is now fixed. -* http: added :ref:`contains ` a new string matcher type which matches if the value of the string has the substring mentioned in contains matcher. -* http: added :ref:`contains ` a new header matcher type which matches if the value of the header has the substring mentioned in contains matcher. -* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. -* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message ` to true to restore prior HTTP/1.1 behavior (i.e. connection isn't terminated) and to retain prior HTTP/2 behavior (i.e. connection is terminated). -* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. -* http: changed Envoy to send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. This behavior may be temporarily reverted by setting `envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2` to false. -* http: changed Envoy to send error headers and body when possible. 
This behavior may be temporarily reverted by setting `envoy.reloadable_features.allow_response_for_timeout` to false. -* http: changed empty trailers encoding behavior by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. This behavior can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.http2_skip_encoding_empty_trailers`` to false. -* http: clarified and enforced 1xx handling. Multiple 100-continue headers are coalesced when proxying. 1xx headers other than {100, 101} are dropped. -* http: fixed a bug in access logs where early stream termination could be incorrectly tagged as a downstream disconnect, and disconnects after partial response were not flagged. -* http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false. -* http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might - see a change in behavior. -* logging: add fine-grain logging for file level log control with logger management at administration interface. It can be enabled by option `--enable-fine-grain-logging`. -* logging: change default log format to `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` and default value of :option:`--log-format-prefix-with-location` to `0`. -* logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set - in the environment. -* router: added transport failure reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:{}, transport failure reason:{}`.This behavior may be reverted by setting runtime feature `envoy.reloadable_features.http_transport_failure_reason_in_body` to false. -* router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false. -* thrift_proxy: special characters {'\0', '\r', '\n'} will be stripped from thrift headers. +* build: the Alpine based debug images are no longer built in CI, use Ubuntu based images instead. +* ext_authz filter: the deprecated field :ref:`use_alpha ` is no longer supported and cannot be set anymore. +* watchdog: the watchdog action :ref:`abort_action ` is now the default action to terminate the process if watchdog kill / multikill is enabled. Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* csrf: fixed issues with regards to origin and host header parsing. -* dynamic_forward_proxy: only perform DNS lookups for routes to Dynamic Forward Proxy clusters since other cluster types handle DNS lookup themselves. -* fault: fixed an issue with `active_faults` gauge not being decremented for when abort faults were injected. -* fault: made the HeaderNameValues::prefix() method const. -* grpc-web: fixed an issue with failing HTTP/2 requests on some browsers. Notably, WebKit-based browsers (https://bugs.webkit.org/show_bug.cgi?id=210108), Internet Explorer 11, and Edge (pre-Chromium). -* http: made the HeaderValues::prefix() method const. -* jwt_authn: supports jwt payload without "iss" field. 
-* rocketmq_proxy network-level filter: fixed an issue involving incorrect header lengths. In debug mode it causes crash and in release mode it causes underflow. -* thrift_proxy: fixed crashing bug on request overflow. -* udp_proxy: fixed a crash due to UDP packets being processed after listener removal. +* http: sending CONNECT_ERROR for HTTP/2 where appropriate during CONNECT requests. Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` -* http: removed legacy header sanitization and the runtime guard `envoy.reloadable_features.strict_header_validation`. -* http: removed legacy transfer-encoding enforcement and runtime guard `envoy.reloadable_features.reject_unsupported_transfer_encodings`. -* http: removed configurable strict host validation and runtime guard `envoy.reloadable_features.strict_authority_validation`. -* http: removed the connection header sanitization runtime guard `envoy.reloadable_features.connection_header_sanitization`. +* ext_authz: removed auto ignore case in HTTP-based `ext_authz` header matching and the runtime guard `envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher`. To ignore case, set the :ref:`ignore_case ` field to true. +* http: flip default HTTP/1 and HTTP/2 server codec implementations to new codecs that remove the use of exceptions for control flow. To revert to old codec behavior, set the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. +* http: removed `envoy.reloadable_features.http1_flood_protection` and legacy code path for turning flood protection off. New Features ------------ -* access log: added a :ref:`dynamic metadata filter` for access logs, which filters whether to log based on matching dynamic metadata. -* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. -* access log: added support for nested objects in :ref:`JSON logging mode `. -* access log: added :ref:`omit_empty_values` option to omit unset value from formatted log. -* admin: added the ability to dump init manager unready targets information :ref:`/init_dump ` and :ref:`/init_dump?mask={} `. -* build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. -* cluster: added new :ref:`connection_pool_per_downstream_connection ` flag, which enable creation of a new connection pool for each downstream connection. -* decompressor filter: reports compressed and uncompressed bytes in trailers. -* dns_filter: added support for answering :ref:`service record` queries. -* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. -* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. - The emitted dynamic metadata is set by :ref:`dynamic metadata ` field in a returned :ref:`CheckResponse `. -* grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. -* hds: added :ref:`cluster_endpoints_health ` to HDS responses, keeping endpoints in the same groupings as they were configured in the HDS specifier by cluster and locality instead of as a flat list. -* hds: added :ref:`transport_socket_matches ` to HDS cluster health check specifier, so the existing match filter :ref:`transport_socket_match_criteria ` in the repeated field :ref:`health_checks ` has context to match against. 
This unblocks support for health checks over HTTPS and HTTP/2. -* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. -* http: added :ref:`allow_chunked_length ` configuration option for HTTP/1 codec to allow processing requests/responses with both Content-Length and Transfer-Encoding: chunked headers. If such message is served and option is enabled - per RFC Content-Length is ignored and removed. -* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is used by default, but the new codecs can be enabled for testing by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated. -* load balancer: added :ref:`RingHashLbConfig` to configure the table size of Maglev consistent hash. -* load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. -* load balancer: added an :ref:`option ` to optimize subset load balancing when there is only one host per subset. -* load balancer: added support for bounded load per host for consistent hash load balancers via :ref:`hash_balance_factor `. -* lua: added Lua APIs to access :ref:`SSL connection info ` object. -* lua: added Lua API for :ref:`base64 escaping a string `. -* lua: added new :ref:`source_code ` field to support the dispatching of inline Lua code in per route configuration of Lua filter. -* overload management: add :ref:`scaling ` trigger for OverloadManager actions. -* postgres network filter: :ref:`metadata ` is produced based on SQL query. -* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. -* ratelimit: added support for optional :ref:`descriptor_key ` to Generic Key action. -* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. -* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. -* router: added a new :ref:`rate limited retry back off ` strategy that uses headers like `Retry-After` or `X-RateLimit-Reset` to decide the back off interval. -* router: added new - :ref:`envoy-ratelimited` - retry policy, which allows retrying envoy's own rate limited responses. -* router: added new :ref:`host_rewrite_path_regex ` - option, which allows rewriting Host header based on path. -* router: added support for DYNAMIC_METADATA :ref:`header formatter `. -* router_check_tool: added support for `request_header_matches`, `response_header_matches` to :ref:`router check tool `. -* signal: added support for calling fatal error handlers without envoy's signal handler, via FatalErrorHandler::callFatalErrorHandlers(). -* stats: added optional histograms to :ref:`cluster stats ` - that track headers and body sizes of requests and responses. -* stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. -* tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. -* tcp: switched the TCP connection pool to the new "shared" connection pool, sharing a common code base with HTTP and HTTP/2. 
Any unexpected behavioral changes can be temporarily reverted by setting `envoy.reloadable_features.new_tcp_connection_pool` to false. -* tcp_proxy: allow earlier network filters to set metadataMatchCriteria on the connection StreamInfo to influence load balancing. -* tls: switched from using socket BIOs to using custom BIOs that know how to interact with IoHandles. The feature can be disabled by setting runtime feature `envoy.reloadable_features.tls_use_io_handle_bio` to false. -* tracing: added ability to set some :ref:`optional segment fields` in the AWS X-Ray tracer. -* udp_proxy: added :ref:`hash_policies ` to support hash based routing. -* udp_proxy: added :ref:`use_original_src_ip ` option to replicate the downstream remote address of the packets on the upstream side of Envoy. It is similar to :ref:`original source filter `. -* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximium jitter parameter :ref:`max_kill_timeout_jitter`. -* watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See ref:`watchdog actions`. -* watchdog: watchdog action extension that does cpu profiling. See ref:`Profile Action `. -* xds: added :ref:`extension config discovery` support for HTTP filters. -* zlib: added option to use `zlib-ng `_ as zlib library. +* grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. +* hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. +* health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. +* listener: added an optional :ref:`default filter chain `. If this field is supplied, and none of the :ref:`filter_chains ` matches, this default filter chain is used to serve the connection. +* mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. +* ratelimit: added support for use of various :ref:`metadata ` as a ratelimit action. +* ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. +* tcp: added a new :ref:`envoy.overload_actions.reject_incoming_connections ` action to reject incoming TCP connections. Deprecated ---------- - -* build: Alpine based debug image is deprecated in favor of :ref:`Ubuntu based debug image `. -* The :ref:`track_timeout_budgets ` - field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. -* hds: the :ref:`endpoints_health ` - field has been deprecated in favor of :ref:`cluster_endpoints_health ` to maintain - grouping by cluster and locality. -* tap: the :ref:`match_config ` field has been deprecated in favor of - :ref:`match ` field. -* ext_authz: the :ref:`dynamic metadata ` field in :ref:`OkHttpResponse ` - has been deprecated in favor of :ref:`dynamic metadata ` field in :ref:`CheckResponse `. -* router_check_tool: `request_header_fields`, `response_header_fields` config deprecated in favor of `request_header_matches`, `response_header_matches`. +* ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. 
diff --git a/docs/root/version_history/v1.12.6.rst b/docs/root/version_history/v1.12.6.rst new file mode 100644 index 000000000000..f40a65e1076c --- /dev/null +++ b/docs/root/version_history/v1.12.6.rst @@ -0,0 +1,3 @@ +1.12.6 (July 7, 2020) +===================== +* tls: fixed a bug where wilcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. \ No newline at end of file diff --git a/docs/root/version_history/v1.12.7.rst b/docs/root/version_history/v1.12.7.rst new file mode 100644 index 000000000000..875e2d683d1c --- /dev/null +++ b/docs/root/version_history/v1.12.7.rst @@ -0,0 +1,20 @@ +1.12.7 (September 29, 2020) +=========================== +Changes +------- +* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline headers. This patch + changes the default behavior to always logically match on all headers. Multiple individual + headers will be logically concatenated with ',' similar to what is done with inline headers. This + makes the behavior effectively consistent. This behavior can be temporary reverted by setting + the runtime value "envoy.reloadable_features.http_match_on_all_headers" to "false". + + Targeted fixes have been additionally performed on the following extensions which make them + consider all duplicate headers by default as a comma concatenated list: + + 1. Any extension using CEL matching on headers. + 2. The header to metadata filter. + 3. The JWT filter. + 4. The Lua filter. + + Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting + the runtime value "envoy.reloadable_features.http_match_on_all_headers" to false. \ No newline at end of file diff --git a/docs/root/version_history/v1.13.4.rst b/docs/root/version_history/v1.13.4.rst new file mode 100644 index 000000000000..bd29b7cc0bec --- /dev/null +++ b/docs/root/version_history/v1.13.4.rst @@ -0,0 +1,3 @@ +1.13.4 (July 7, 2020) +===================== +* tls: fixed a bug where wilcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. \ No newline at end of file diff --git a/docs/root/version_history/v1.13.5.rst b/docs/root/version_history/v1.13.5.rst new file mode 100644 index 000000000000..370d9b4f376c --- /dev/null +++ b/docs/root/version_history/v1.13.5.rst @@ -0,0 +1,25 @@ +1.13.5 (September 29, 2020) +=========================== +Changes +------- +* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline headers. This patch + changes the default behavior to always logically match on all headers. Multiple individual + headers will be logically concatenated with ',' similar to what is done with inline headers. This + makes the behavior effectively consistent. This behavior can be temporary reverted by setting + the runtime value "envoy.reloadable_features.http_match_on_all_headers" to "false". + + Targeted fixes have been additionally performed on the following extensions which make them + consider all duplicate headers by default as a comma concatenated list: + + 1. Any extension using CEL matching on headers. + 2. The header to metadata filter. + 3. The JWT filter. + 4. The Lua filter. + + Like primary header matching used in routing, RBAC, etc. 
this behavior can be disabled by setting + the runtime value "envoy.reloadable_features.http_match_on_all_headers" to false. +* http: fixed CVE-2020-25017. The setCopy() header map API previously only set the first header in the case of duplicate + non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found + headers with a single value. This may have had security implications in the extauth filter which + uses this API. This behavior can be disabled by setting the runtime value + "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. \ No newline at end of file diff --git a/docs/root/version_history/v1.13.6.rst b/docs/root/version_history/v1.13.6.rst new file mode 100644 index 000000000000..d7d7fea91c37 --- /dev/null +++ b/docs/root/version_history/v1.13.6.rst @@ -0,0 +1,5 @@ +1.13.6 (September 29, 2020) +=========================== +Changes +------- +* test: fix flaky test. \ No newline at end of file diff --git a/docs/root/version_history/v1.14.4.rst b/docs/root/version_history/v1.14.4.rst new file mode 100644 index 000000000000..fa366227c607 --- /dev/null +++ b/docs/root/version_history/v1.14.4.rst @@ -0,0 +1,3 @@ +1.14.4 (July 7, 2020) +===================== +* tls: fixed a bug where wildcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. \ No newline at end of file diff --git a/docs/root/version_history/v1.14.5.rst b/docs/root/version_history/v1.14.5.rst new file mode 100644 index 000000000000..b252c2ac235f --- /dev/null +++ b/docs/root/version_history/v1.14.5.rst @@ -0,0 +1,25 @@ +1.14.5 (September 29, 2020) +=========================== +Changes +------- +* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline headers. + This patch changes the default behavior to always logically match on all headers. Multiple individual + headers will be logically concatenated with ',' similar to what is done with inline headers. This + makes the behavior effectively consistent. This behavior can be temporarily reverted by setting + the runtime value "envoy.reloadable_features.http_match_on_all_headers" to "false". + + Targeted fixes have been additionally performed on the following extensions which make them + consider all duplicate headers by default as a comma concatenated list: + + 1. Any extension using CEL matching on headers. + 2. The header to metadata filter. + 3. The JWT filter. + 4. The Lua filter. + + Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting + the runtime value "envoy.reloadable_features.http_match_on_all_headers" to false. +* http: fixed CVE-2020-25017. The setCopy() header map API previously only set the first header in the case of duplicate + non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found + headers with a single value. This may have had security implications in the extauth filter which + uses this API. This behavior can be disabled by setting the runtime value + "envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
\ No newline at end of file diff --git a/docs/root/version_history/v1.15.1.rst b/docs/root/version_history/v1.15.1.rst new file mode 100644 index 000000000000..4bdb55a943cf --- /dev/null +++ b/docs/root/version_history/v1.15.1.rst @@ -0,0 +1,27 @@ +1.15.1 (September 29, 2020) +=========================== + +Changes +------- +* http: fixed CVE-2020-25017. Previously header matching did not match on all headers for non-inline + headers. This patch changes the default behavior to always logically match on all headers. + Multiple individual headers will be logically concatenated with ',' similar to what is done with + inline headers. This makes the behavior effectively consistent. This behavior can be temporarily + reverted by setting the runtime value "envoy.reloadable_features.http_match_on_all_headers" to + "false". + + Targeted fixes have been additionally performed on the following extensions which make them + consider all duplicate headers by default as a comma concatenated list: + + 1. Any extension using CEL matching on headers. + 2. The header to metadata filter. + 3. The JWT filter. + 4. The Lua filter. + + Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting + the runtime value "envoy.reloadable_features.http_match_on_all_headers" to false. +* http: The setCopy() header map API previously only set the first header in the case of duplicate + non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found + headers with a single value. This may have had security implications in the extauth filter which + uses this API. This behavior can be disabled by setting the runtime value + "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. diff --git a/docs/root/version_history/v1.15.2.rst b/docs/root/version_history/v1.15.2.rst new file mode 100644 index 000000000000..2f093be2f5f0 --- /dev/null +++ b/docs/root/version_history/v1.15.2.rst @@ -0,0 +1,6 @@ +1.15.2 (September 29, 2020) +=========================== + +Changes +------- +* docs: fix docs for v1.15.1. \ No newline at end of file diff --git a/docs/root/version_history/v1.16.0.rst b/docs/root/version_history/v1.16.0.rst new file mode 100644 index 000000000000..259ed30d223c --- /dev/null +++ b/docs/root/version_history/v1.16.0.rst @@ -0,0 +1,183 @@ +1.16.0 (October 8, 2020) +======================== + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in :repo:`BUILD `. +* build: tcmalloc changes require Clang 9. This requirement change can be avoided by building with `--define tcmalloc=gperftools` to use the older tcmalloc code. +* config: additional warnings have been added for the use of v2 APIs. These appear as log messages + and are also captured in the :ref:`deprecated_feature_use ` counter after server + initialization. +* dns: `envoy.restart_features.use_apple_api_for_dns_lookups` is on by default. This flag only affects Apple platforms (macOS, iOS). It is incompatible to have the runtime flag set to true at the same time as specifying the ``use_tcp_for_dns_lookups`` option or custom dns resolvers. Doing so will cause failure. +* watchdog: added two guarddogs, breaking the aggregated stats for the single guarddog system.
The aggregated stats for the guarddogs will have the following prefixes: `main_thread` and `workers`. Concretely, anything monitoring `server.watchdog_miss` and `server.watchdog_mega_miss` will need to be updated. + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* adaptive concurrency: added a response body / grpc-message header for rejected requests. +* async_client: minor change to handling header-only responses so they are treated more like header-with-empty-body responses. +* build: an :ref:`Ubuntu based debug image ` is built and published on DockerHub. +* build: the debug information will be generated separately to reduce target size and reduce compilation time when built in compilation modes `dbg` and `opt`. Users will need to build the dwp file to debug with gdb. +* compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to an incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource can still be served with compression applied. +* decompressor: headers-only requests were incorrectly not advertising accept-encoding when configured to do so. This is now fixed. +* ext_authz filter: request timeout will now count from the time the check request is created, instead of when it becomes active. This makes sure that the timeout is enforced even if the ext_authz cluster's circuit breaker is engaged. + This behavior can be reverted by setting runtime feature `envoy.reloadable_features.ext_authz_measure_timeout_on_check_created` to false. When enabled, a new `ext_authz.timeout` stat is counted when a timeout occurs. See :ref:`stats `. +* grpc reverse bridge: upstream headers will no longer be propagated when the response is missing or contains an unexpected content-type. +* http: added :ref:`contains `, a new string matcher type which matches if the value of the string has the substring mentioned in the contains matcher. +* http: added :ref:`contains `, a new header matcher type which matches if the value of the header has the substring mentioned in the contains matcher. +* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. +* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message ` to true to restore prior HTTP/1.1 behavior (i.e. connection isn't terminated) and to retain prior HTTP/2 behavior (i.e. connection is terminated). +* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse.
This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 behavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. +* http: route level header modifications are now applied to local replies sent on that route. This behavior may be temporarily reverted by setting `envoy.reloadable_features.always_apply_route_header_rules` to false. +* http: changed Envoy to send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. This behavior may be temporarily reverted by setting `envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2` to false. +* http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting `envoy.reloadable_features.allow_response_for_timeout` to false. +* http: changed empty trailers encoding behavior by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. This behavior can be reverted temporarily by setting runtime feature `envoy.reloadable_features.http2_skip_encoding_empty_trailers` to false. +* http: changed how local replies are processed for requests which transform from grpc to not-grpc, or not-grpc to grpc. Previously the initial generated reply depended on which filter sent the reply, but now the reply is consistently generated the way the downstream expects. This behavior can be temporarily reverted by setting `envoy.reloadable_features.unify_grpc_handling` to false. +* http: clarified and enforced 1xx handling. Multiple 100-continue headers are coalesced when proxying. 1xx headers other than {100, 101} are dropped. +* http: fixed a bug in access logs where early stream termination could be incorrectly tagged as a downstream disconnect, and disconnects after partial response were not flagged. +* http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false. +* http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might + see a change in behavior. +* logging: added fine-grain logging for file-level log control, with logger management at the administration interface. It can be enabled by option :option:`--enable-fine-grain-logging`. +* logging: changed default log format to `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` and default value of :option:`--log-format-prefix-with-location` to `0`. +* logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set + in the environment. +* lua: changed the response body returned by `httpCall()` API to raw data. Previously, the returned data was a string. +* memory: switched to the `new tcmalloc `_ for linux_x86_64 builds. The `old tcmalloc `_ can still be enabled with the `--define tcmalloc=gperftools` option. +* postgres: changed log format to tokenize fields of Postgres messages. +* router: added transport failure reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers.
reset reason:{}, transport failure reason:{}`. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.http_transport_failure_reason_in_body` to false. +* router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false. +* stats: the fake symbol table implementation has been removed from the binary, and the option `--use-fake-symbol-table` is now a no-op with a warning. +* thrift_proxy: special characters {'\0', '\r', '\n'} will be stripped from thrift headers. +* watchdog: replaced single watchdog with separate watchdog configuration for worker threads and for the main thread configured via :ref:`Watchdogs`. It works with :ref:`watchdog` by having the worker thread and main thread watchdogs have the same config. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* csrf: fixed issues with regard to origin and host header parsing. +* dynamic_forward_proxy: only perform DNS lookups for routes to Dynamic Forward Proxy clusters since other cluster types handle DNS lookup themselves. +* fault: fixed an issue with the `active_faults` gauge not being decremented when abort faults were injected. +* fault: made the HeaderNameValues::prefix() method const. +* grpc-web: fixed an issue with failing HTTP/2 requests on some browsers. Notably, WebKit-based browsers (https://bugs.webkit.org/show_bug.cgi?id=210108), Internet Explorer 11, and Edge (pre-Chromium). +* http: fixed CVE-2020-25018 by rolling back the ``GURL`` dependency to its previous state (reverted: ``2d69e30``, ``d828958``, and ``c9c4709`` commits) due to the potential of crashing when Unicode URIs are present in requests. +* http: fixed bugs in the datadog and squash filters' handling of responses with no bodies. +* http: made the HeaderValues::prefix() method const. +* jwt_authn: supports jwt payload without "iss" field. +* listener: fixed a crash at listener in-place update when a connection load balancer is set. +* rocketmq_proxy: fixed an issue involving incorrect header lengths. In debug mode it causes a crash and in release mode it causes an underflow. +* thrift_proxy: fixed a crashing bug on request overflow. +* udp_proxy: fixed a crash due to UDP packets being processed after listener removal. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* http: removed legacy header sanitization and the runtime guard `envoy.reloadable_features.strict_header_validation`. +* http: removed legacy transfer-encoding enforcement and runtime guard `envoy.reloadable_features.reject_unsupported_transfer_encodings`. +* http: removed configurable strict host validation and runtime guard `envoy.reloadable_features.strict_authority_validation`. +* http: removed the connection header sanitization runtime guard `envoy.reloadable_features.connection_header_sanitization`. + +New Features +------------ +* access log: added a :ref:`dynamic metadata filter` for access logs, which filters whether to log based on matching dynamic metadata. +* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. +* access log: added support for :ref:`%CONNECTION_TERMINATION_DETAILS% ` as a log command operator about why the connection is terminated by Envoy. +* access log: added support for nested objects in :ref:`JSON logging mode `.
+* access log: added :ref:`omit_empty_values` option to omit unset values from the formatted log. +* access log: added support for :ref:`%CONNECTION_ID% ` for the downstream connection identifier. +* admin: added :ref:`circuit breakers settings ` information to GET /clusters?format=json :ref:`cluster status `. +* admin: added :ref:`node ` information to GET /server_info :ref:`response object `. +* admin: added the ability to dump init manager unready targets information via :ref:`/init_dump ` and :ref:`/init_dump?mask={} `. +* admission control: added the :ref:`admission control ` filter for client-side request throttling. +* build: enable building envoy :ref:`arm64 images ` using the buildx tool on the x86 CI platform. +* cluster: added new :ref:`connection_pool_per_downstream_connection ` flag, which enables creation of a new connection pool for each downstream connection. +* decompressor filter: reports compressed and uncompressed bytes in trailers. +* dns: added support for doing DNS resolution using Apple's DnsService APIs on Apple platforms (macOS, iOS). This feature is ON by default, and is only configurable via the `envoy.restart_features.use_apple_api_for_dns_lookups` runtime key. Note that this value is latched during server startup and changing the runtime key is a no-op during the lifetime of the process. +* dns_filter: added support for answering :ref:`service record` queries. +* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. +* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. + The emitted dynamic metadata is set by the :ref:`dynamic metadata ` field in a returned :ref:`CheckResponse `. +* ext_authz filter: added :ref:`stat_prefix ` as an optional additional prefix for the statistics emitted from the `ext_authz` HTTP filter. +* ext_authz filter: added support for enabling the filter based on :ref:`dynamic metadata `. +* ext_authz filter: added support for letting the authorization server instruct Envoy to remove headers from the original request by setting the new field :ref:`headers_to_remove ` before forwarding it to the upstream. +* ext_authz filter: added support for sending :ref:`raw bytes as request body ` of a gRPC check request by setting :ref:`pack_as_bytes ` to true. +* ext_authz_filter: added :ref:`disable_request_body_buffering ` to disable request data buffering per-route. +* grpc-json: support specifying the `response_body` field in `google.api.HttpBody` messages. +* hds: added :ref:`cluster_endpoints_health ` to HDS responses, keeping endpoints in the same groupings as they were configured in the HDS specifier by cluster and locality instead of as a flat list. +* hds: added :ref:`transport_socket_matches ` to the HDS cluster health check specifier, so the existing match filter :ref:`transport_socket_match_criteria ` in the repeated field :ref:`health_checks ` has context to match against. This unblocks support for health checks over HTTPS and HTTP/2. +* hot restart: added :option:`--socket-path` and :option:`--socket-mode` to configure the UDS path in the filesystem and set its permissions. +* http: added HTTP/2 support for :ref:`connection keepalive ` via PING. +* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a custom header. +* http: added :ref:`allow_chunked_length ` configuration option for the HTTP/1 codec to allow processing requests/responses with both Content-Length and Transfer-Encoding: chunked headers.
If such a message is served and the option is enabled, Content-Length is ignored and removed per the RFC. +* http: added :ref:`CDN Loop filter ` and :ref:`documentation `. +* http: added :ref:`MaxStreamDuration proto ` for configuring per-route downstream duration timeouts. +* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is used by default for HTTP/1.1 and HTTP/2 server connections. The new codecs can be enabled for testing by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated. +* http: modified the HTTP header-map data-structure to use an underlying dictionary and a list (no change to the header-map API). To conform with previous versions, the use of a dictionary is currently disabled. It can be enabled by setting the `envoy.http.headermap.lazy_map_min_size` runtime feature to a non-negative number which defines the minimal number of headers in a request/response/trailers required for using a dictionary in addition to the list. Our current benchmarks suggest that the value 3 is a good threshold for most workloads. +* load balancer: added :ref:`RingHashLbConfig` to configure the table size of Maglev consistent hash. +* load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. +* load balancer: added an :ref:`option ` to optimize subset load balancing when there is only one host per subset. +* load balancer: added support for bounded load per host for consistent hash load balancers via :ref:`hash_balance_factor `. +* local_reply config: added :ref:`content_type` field to set content-type. +* lua: added Lua APIs to access the :ref:`SSL connection info ` object. +* lua: added Lua API for :ref:`base64 escaping a string `. +* lua: added Lua API for :ref:`setting the current buffer content `. +* lua: added new :ref:`source_code ` field to support the dispatching of inline Lua code in the per route configuration of the Lua filter. +* overload management: added a :ref:`scaling ` trigger for OverloadManager actions. +* postgres network filter: :ref:`metadata ` is produced based on the SQL query. +* proxy protocol: added support for generating the header upstream using :ref:`Proxy Protocol Transport Socket `. +* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in the `draft RFC `_. +* ratelimit: added :ref:`per route config ` for the rate limit filter. +* ratelimit: added support for an optional :ref:`descriptor_key ` to the Generic Key action. +* rbac filter: added the name of the matched policy to the response code detail when a request is rejected by the RBAC filter. +* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. +* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. +* router: added a new :ref:`rate limited retry back off ` strategy that uses headers like `Retry-After` or `X-RateLimit-Reset` to decide the back off interval. +* router: added new + :ref:`envoy-ratelimited` + retry policy, which allows retrying envoy's own rate limited responses. +* router: added new :ref:`host_rewrite_path_regex ` + option, which allows rewriting the Host header based on the path.
+* router: added support for DYNAMIC_METADATA :ref:`header formatter `. +* router_check_tool: added support for `request_header_matches`, `response_header_matches` to :ref:`router check tool `. +* signal: added support for calling fatal error handlers without envoy's signal handler, via FatalErrorHandler::callFatalErrorHandlers(). +* stats: added optional histograms to :ref:`cluster stats ` + that track headers and body sizes of requests and responses. +* stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. +* tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. +* tcp_proxy: added :ref:`max_downstream_connection_duration` for downstream connections. When the max duration is reached, the connection will be closed. +* tcp_proxy: allow earlier network filters to set metadataMatchCriteria on the connection StreamInfo to influence load balancing. +* tls: added OCSP stapling support through the :ref:`ocsp_staple ` and :ref:`ocsp_staple_policy ` configuration options. See :ref:`OCSP Stapling ` for usage and runtime flags. +* tls: introduced a new :ref:`extension point` for overriding :ref:`TLS handshaker ` behavior. +* tls: switched from using socket BIOs to using custom BIOs that know how to interact with IoHandles. The feature can be disabled by setting runtime feature `envoy.reloadable_features.tls_use_io_handle_bio` to false. +* tracing: added ability to set some :ref:`optional segment fields` in the AWS X-Ray tracer. +* udp_proxy: added :ref:`hash_policies ` to support hash based routing. +* udp_proxy: added :ref:`use_original_src_ip ` option to replicate the downstream remote address of the packets on the upstream side of Envoy. It is similar to :ref:`original source filter `. +* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximum jitter parameter :ref:`max_kill_timeout_jitter`. +* watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See :ref:`watchdog actions`. +* watchdog: watchdog action extension that does cpu profiling. See :ref:`Profile Action `. +* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action `. +* xds: added :ref:`extension config discovery` support for HTTP filters. +* xds: added support for mixed v2/v3 discovery responses, which enables type URL downgrade and upgrade. This feature is disabled by default and is controlled by runtime guard `envoy.reloadable_features.enable_type_url_downgrade_and_upgrade`. +* zlib: added option to use `zlib-ng `_ as the zlib library. + +Deprecated +---------- + +* build: Alpine based debug image is deprecated in favor of :ref:`Ubuntu based debug image `. +* cluster: the :ref:`track_timeout_budgets ` + field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. +* ext_authz: the :ref:`dynamic metadata ` field in :ref:`OkHttpResponse ` has been deprecated in favor of :ref:`dynamic metadata ` field in :ref:`CheckResponse `. +* hds: the :ref:`endpoints_health ` + field has been deprecated in favor of :ref:`cluster_endpoints_health ` to maintain + grouping by cluster and locality. +* router: the :ref:`include_vh_rate_limits ` field has been deprecated in favor of :ref:`vh_rate_limits `. +* router: the :ref:`max_grpc_timeout ` field has been deprecated in favor of :ref:`grpc_timeout_header_max `.
+* router: the :ref:`grpc_timeout_offset ` field has been deprecated in favor of :ref:`grpc_timeout_header_offset `. +* tap: the :ref:`match_config ` field has been deprecated in favor of + :ref:`match ` field. +* router_check_tool: `request_header_fields`, `response_header_fields` config deprecated in favor of `request_header_matches`, `response_header_matches`. +* watchdog: :ref:`watchdog ` deprecated in favor of :ref:`watchdogs `. diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index 07db664892d4..453bda753f1f 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -7,15 +7,25 @@ Version history :titlesonly: current + v1.16.0 + v1.15.2 + v1.15.1 v1.15.0 + v1.14.5 + v1.14.4 v1.14.3 v1.14.2 v1.14.1 v1.14.0 + v1.13.6 + v1.13.5 + v1.13.4 v1.13.3 v1.13.2 v1.13.1 v1.13.0 + v1.12.7 + v1.12.6 v1.12.5 v1.12.4 v1.12.3 diff --git a/examples/BUILD b/examples/BUILD index 72c67907b879..d4c4d891ecfb 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -9,33 +9,16 @@ envoy_package() filegroup( name = "configs", - srcs = [ - "cors/backend/front-envoy.yaml", - "cors/backend/service-envoy.yaml", - "cors/frontend/front-envoy.yaml", - "cors/frontend/service-envoy.yaml", - "csrf/crosssite/front-envoy.yaml", - "csrf/samesite/front-envoy.yaml", - "csrf/service-envoy.yaml", - "ext_authz/config/grpc-service/v2.yaml", - "ext_authz/config/grpc-service/v3.yaml", - "ext_authz/config/http-service.yaml", - "ext_authz/config/opa-service/v2.yaml", - "fault-injection/envoy.yaml", - "front-proxy/front-envoy.yaml", - "front-proxy/service-envoy.yaml", - "grpc-bridge/client/envoy-proxy.yaml", - "grpc-bridge/server/envoy-proxy.yaml", - "jaeger-tracing/front-envoy-jaeger.yaml", - "jaeger-tracing/service1-envoy-jaeger.yaml", - "jaeger-tracing/service2-envoy-jaeger.yaml", - "load-reporting-service/service-envoy-w-lrs.yaml", - "lua/envoy.yaml", - "lua/lib/mylibrary.lua", - "mysql/envoy.yaml", - "redis/envoy.yaml", - "zipkin-tracing/front-envoy-zipkin.yaml", - "zipkin-tracing/service1-envoy-zipkin.yaml", - "zipkin-tracing/service2-envoy-zipkin.yaml", - ], + srcs = glob( + [ + "**/*.yaml", + "**/*.lua", + ], + exclude = [ + "cache/responses.yaml", + "jaeger-native-tracing/*", + "wasm/envoy.yaml", + "**/*docker-compose*.yaml", + ], + ), ) diff --git a/examples/DEVELOPER.md b/examples/DEVELOPER.md index d9a2c8e739dd..1538d1f605f9 100644 --- a/examples/DEVELOPER.md +++ b/examples/DEVELOPER.md @@ -182,7 +182,6 @@ bring_up_example If your sandbox has multiple compositions, and uses the `$PATHS` env var described above, `bring_up_example` will bring all of your compositions up. - ### Additional arguments to `docker-compose up -d` If you need to pass additional arguments to compose you can set the `UPARGS` @@ -208,3 +207,81 @@ export UPARGS="--scale http_service=2" If your example asks the user to run commands inside containers, you can mimick this using `docker-compose exec -T`. The `-T` flag is necessary as the tests do not have access to a `tty` in the CI pipeline. + +### Note on permissions and configuration + +The sandbox tests are run with a `umask` setting of `027` to ensure they will run in environments +where this is the case. + +As the Envoy containers run as non-root, it is essential that any configurations required +by the daemon are included in the relevant example `Dockerfile` rather than mounted in +any `docker-compose.yaml` files. 
+ +The Docker recipe should also ensure that added configurations are world-readable. + +For example, with an added configuration file named `front-envoy.yaml`, you should add +the following in the Docker recipe: + +``` +RUN chmod go+r /etc/front-envoy.yaml +``` + +## Sandbox configuration tests + +Example configuration files are tested to ensure they are valid and well-formed, and do +not contain deprecated features. + +### Exclude configs from example configuration tests + +The CI script searches for all files in the examples folders with a `yaml` or `lua` extension. + +These files are bundled into a test and the `yaml` files are used to try to start an Envoy server. + +If your example includes `yaml` files that are either not Envoy configuration, or for some reason +cannot be tested in this way, you should add the files to the `exclude` list in the `filegroup.srcs` +section of the `examples/BUILD` file. + +The `exclude` patterns are evaluated as `globs` in the context of the `examples` folder. + + +## Verifying your sandbox + +Once you have built your sandbox, and added the `verify.sh` script you can run it directly in the +sandbox folder. + +For example: + +``` +cd examples/example-sandbox +./verify.sh + +``` + +You should see the docker composition brought up, your tests run, and the composition brought down again. + +The script should exit with `0` for the tests to pass. + + +## Verifying multiple/all sandboxes + +In continuous integration, all of the sandboxes are checked using the `ci/verify-examples.sh`. + +This can also be called with a filter argument, which is a `glob` evaluated in the context of the `examples` folder. + +For example, to run all sandboxes with names beginning `jaeger`: + +``` +./ci/verify-examples.sh jaeger* +``` + +--- + +**NOTE** + +You can use this script locally to test the sandboxes on your platform, but you should be aware that it requires +a lot of resources as it downloads and builds many Docker images, and then runs them in turn. + +--- + +One way to run the tests in an isolated environment is to mount the `envoy` source into a `docker-in-docker` container +or similar, and then run the script from inside that container. 
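To make the exclusion step described above concrete, here is a sketch of the kind of entry you would add to the `filegroup` in `examples/BUILD` (the `my-sandbox/external-service.yaml` path is hypothetical):

```
filegroup(
    name = "configs",
    srcs = glob(
        [
            "**/*.yaml",
            "**/*.lua",
        ],
        exclude = [
            # Hypothetical non-Envoy config that should not be started as an
            # Envoy server by the configuration tests.
            "my-sandbox/external-service.yaml",
        ],
    ),
)
```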
diff --git a/examples/cache/Dockerfile-frontenvoy b/examples/cache/Dockerfile-frontenvoy new file mode 100644 index 000000000000..0b2e25a0de1b --- /dev/null +++ b/examples/cache/Dockerfile-frontenvoy @@ -0,0 +1,7 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/cache/Dockerfile-service b/examples/cache/Dockerfile-service new file mode 100644 index 000000000000..9cb60da727ae --- /dev/null +++ b/examples/cache/Dockerfile-service @@ -0,0 +1,10 @@ +FROM envoyproxy/envoy-alpine-dev:latest + +RUN apk update && apk add py3-pip bash curl +RUN pip3 install -q Flask==0.11.1 requests==2.18.4 pyyaml +RUN mkdir /code +COPY ./start_service.sh /usr/local/bin/start_service.sh +COPY ./service-envoy.yaml /etc/service-envoy.yaml +COPY ./service.py /code +RUN chmod u+x /usr/local/bin/start_service.sh +ENTRYPOINT /usr/local/bin/start_service.sh diff --git a/examples/cache/README.md b/examples/cache/README.md new file mode 100644 index 000000000000..2f725f52092b --- /dev/null +++ b/examples/cache/README.md @@ -0,0 +1,2 @@ +To learn about this sandbox and for instructions on how to run it please head over +to the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/http_cache.html) diff --git a/examples/cache/docker-compose.yaml b/examples/cache/docker-compose.yaml new file mode 100644 index 000000000000..3f067efb0c06 --- /dev/null +++ b/examples/cache/docker-compose.yaml @@ -0,0 +1,48 @@ +version: "3.7" +services: + + front-envoy: + build: + context: . + dockerfile: Dockerfile-frontenvoy + networks: + - envoymesh + expose: + - "8000" + - "8001" + ports: + - "8000:8000" + - "8001:8001" + + service1: + build: + context: . + dockerfile: Dockerfile-service + volumes: + - ./responses.yaml:/etc/responses.yaml + networks: + envoymesh: + aliases: + - service1 + environment: + - SERVICE_NAME=1 + expose: + - "8000" + + service2: + build: + context: . 
+ dockerfile: Dockerfile-service + volumes: + - ./responses.yaml:/etc/responses.yaml + networks: + envoymesh: + aliases: + - service2 + environment: + - SERVICE_NAME=2 + expose: + - "8000" + +networks: + envoymesh: {} diff --git a/examples/cache/front-envoy.yaml b/examples/cache/front-envoy.yaml new file mode 100644 index 000000000000..8e9574a0f29d --- /dev/null +++ b/examples/cache/front-envoy.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/service/1" + route: + cluster: service1 + - match: + prefix: "/service/2" + route: + cluster: service2 + http_filters: + - name: "envoy.filters.http.cache" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3alpha.CacheConfig" + typed_config: + "@type": "type.googleapis.com/envoy.source.extensions.filters.http.cache.SimpleHttpCacheConfig" + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: service1 + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + http2_protocol_options: {} + load_assignment: + cluster_name: service1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service1 + port_value: 8000 + - name: service2 + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + http2_protocol_options: {} + load_assignment: + cluster_name: service2 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service2 + port_value: 8000 +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/cache/responses.yaml b/examples/cache/responses.yaml new file mode 100644 index 000000000000..1b20ac58f6a1 --- /dev/null +++ b/examples/cache/responses.yaml @@ -0,0 +1,13 @@ +valid-for-minute: + body: This response will stay fresh for one minute + headers: + cache-control: max-age=60 + custom-header: any value +private: + body: This is a private response, it will not be cached by Envoy + headers: + cache-control: private +no-cache: + body: This response can be cached, but it has to be validated on each request + headers: + cache-control: max-age=0, no-cache diff --git a/examples/cache/service-envoy.yaml b/examples/cache/service-envoy.yaml new file mode 100644 index 000000000000..046b99c9f1d5 --- /dev/null +++ b/examples/cache/service-envoy.yaml @@ -0,0 +1,47 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/service" + route: + cluster: local_service + http_filters: + - name: envoy.filters.http.router + typed_config: {} + clusters: + - name: local_service + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + load_assignment: + cluster_name: local_service + 
endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8081 diff --git a/examples/cache/service.py b/examples/cache/service.py new file mode 100644 index 000000000000..100f82c1545d --- /dev/null +++ b/examples/cache/service.py @@ -0,0 +1,44 @@ +from flask import Flask +from flask import request +from flask import make_response, abort +import yaml +import os +import requests +import socket +import sys +import datetime + +app = Flask(__name__) + + +@app.route('/service/<service_number>/<response_id>') +def get(service_number, response_id): + stored_response = yaml.load(open('/etc/responses.yaml', 'r')).get(response_id) + + if stored_response is None: + abort(404, 'No response found with the given id') + + response = make_response(stored_response.get('body') + '\n') + if stored_response.get('headers'): + response.headers.update(stored_response.get('headers')) + + # Generate etag header + response.add_etag() + + # Append the date of response generation + body_with_date = "{}\nResponse generated at: {}\n".format( + response.get_data(as_text=True), + datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")) + + response.set_data(body_with_date) + + # response.make_conditional() will change the response to a 304 response + # if a 'if-none-match' header exists in the request and matches the etag + return response.make_conditional(request) + + +if __name__ == "__main__": + if not os.path.isfile('/etc/responses.yaml'): + print('Responses file not found at /etc/responses.yaml') + exit(1) + app.run(host='127.0.0.1', port=8080, debug=True) diff --git a/examples/cache/start_service.sh b/examples/cache/start_service.sh new file mode 100644 index 000000000000..43a8c112e636 --- /dev/null +++ b/examples/cache/start_service.sh @@ -0,0 +1,3 @@ +#!/bin/sh +python3 /code/service.py & +envoy -c /etc/service-envoy.yaml --service-cluster "service${SERVICE_NAME}" diff --git a/examples/cache/verify.sh b/examples/cache/verify.sh new file mode 100755 index 000000000000..15cb42bbc0e4 --- /dev/null +++ b/examples/cache/verify.sh @@ -0,0 +1,88 @@ +#!/bin/bash -e + +export NAME=cache + +# shellcheck source=examples/verify-common.sh +. "$(dirname "${BASH_SOURCE[0]}")/../verify-common.sh" + +check_validated() { + # Get the date header and the response generation timestamp + local _dates dates + _dates=$(grep -oP '\d\d:\d\d:\d\d' <<< "$1") + while read -r line; do dates+=("$line"); done \ + <<< "$_dates" + # Make sure they are different + if [[ ${dates[0]} == "${dates[1]}" ]]; then + echo "ERROR: validated responses should have a date AFTER the generation timestamp" >&2 + return 1 + fi + # Make sure there is no age header + if grep -q "age:" <<< "$1"; then + echo "ERROR: validated responses should not have an age header" >&2 + return 1 + fi +} + +check_cached() { + # Make sure there is an age header + if !
grep -q "age:" <<< "$1"; then + echo "ERROR: cached responses should have an age header" >&2 + return 1 + fi +} + +check_from_origin() { + # Get the date header and the response generation timestamp + local _dates dates + _dates=$(grep -oP '\d\d:\d\d:\d\d' <<< "$1") + while read -r line; do dates+=("$line"); done \ + <<< "$_dates" + # Make sure they are equal + if [[ ${dates[0]} != "${dates[1]}" ]]; then + echo "ERROR: responses from origin should have a date equal to the generation timestamp" >&2 + return 1 + fi + # Make sure there is no age header + if grep -q "age:" <<< "$1" ; then + echo "ERROR: responses from origin should not have an age header" >&2 + return 1 + fi +} + + +run_log "Valid-for-minute: First request should be served by the origin" +response=$(curl -si localhost:8000/service/1/valid-for-minute) +check_from_origin "$response" + +run_log "Snooze for 30 seconds" +sleep 30 + +run_log "Valid-for-minute: Second request should be served from cache" +response=$(curl -si localhost:8000/service/1/valid-for-minute) +check_cached "$response" + +run_log "Snooze for 31 more seconds" +sleep 31 + +run_log "Valid-for-minute: More than a minute has passed, this request should get a validated response" +response=$(curl -si localhost:8000/service/1/valid-for-minute) +check_validated "$response" + +run_log "Private: Make 4 requests make sure they are all served by the origin" +for _ in {0..3} +do + response=$(curl -si localhost:8000/service/1/private) + check_from_origin "$response" +done + +run_log "No-cache: First request should be served by the origin" +response=$(curl -si localhost:8000/service/1/no-cache) +check_from_origin "$response" + +run_log "No-cache: Make 4 more requests and make sure they are all validated before being served from cache" +for _ in {0..3} +do + sleep 1 + response=$(curl -si localhost:8000/service/1/no-cache) + check_validated "$response" +done diff --git a/examples/cors/backend/Dockerfile-frontenvoy b/examples/cors/backend/Dockerfile-frontenvoy index 0b2e25a0de1b..31ee1c2e7432 100644 --- a/examples/cors/backend/Dockerfile-frontenvoy +++ b/examples/cors/backend/Dockerfile-frontenvoy @@ -4,4 +4,4 @@ RUN apt-get update && apt-get -q install -y \ curl COPY ./front-envoy.yaml /etc/front-envoy.yaml RUN chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/cors/backend/Dockerfile-service b/examples/cors/backend/Dockerfile-service index 37c253fa81f1..0583e9c5846a 100644 --- a/examples/cors/backend/Dockerfile-service +++ b/examples/cors/backend/Dockerfile-service @@ -6,4 +6,4 @@ RUN mkdir /code ADD ./service.py /code/ ADD ./start_service.sh /usr/local/bin/start_service.sh RUN chmod u+x /usr/local/bin/start_service.sh -ENTRYPOINT /usr/local/bin/start_service.sh +ENTRYPOINT ["/bin/sh", "/usr/local/bin/start_service.sh"] diff --git a/examples/cors/backend/front-envoy.yaml b/examples/cors/backend/front-envoy.yaml index 0dd81339f578..57cd3cff70b9 100644 --- a/examples/cors/backend/front-envoy.yaml +++ b/examples/cors/backend/front-envoy.yaml @@ -8,13 +8,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: 
ingress_http access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /dev/stdout route_config: name: local_route diff --git a/examples/cors/backend/service-envoy.yaml b/examples/cors/backend/service-envoy.yaml index c49e69ccd002..b79367a2294d 100644 --- a/examples/cors/backend/service-envoy.yaml +++ b/examples/cors/backend/service-envoy.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/cors/frontend/Dockerfile-frontenvoy b/examples/cors/frontend/Dockerfile-frontenvoy index 0b2e25a0de1b..31ee1c2e7432 100644 --- a/examples/cors/frontend/Dockerfile-frontenvoy +++ b/examples/cors/frontend/Dockerfile-frontenvoy @@ -4,4 +4,4 @@ RUN apt-get update && apt-get -q install -y \ curl COPY ./front-envoy.yaml /etc/front-envoy.yaml RUN chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/cors/frontend/Dockerfile-service b/examples/cors/frontend/Dockerfile-service index 735aaf42a095..31da7f6febf3 100644 --- a/examples/cors/frontend/Dockerfile-service +++ b/examples/cors/frontend/Dockerfile-service @@ -6,4 +6,4 @@ RUN mkdir /code ADD ./service.py ./index.html /code/ ADD ./start_service.sh /usr/local/bin/start_service.sh RUN chmod u+x /usr/local/bin/start_service.sh -ENTRYPOINT /usr/local/bin/start_service.sh +ENTRYPOINT ["/bin/sh", "/usr/local/bin/start_service.sh"] diff --git a/examples/cors/frontend/front-envoy.yaml b/examples/cors/frontend/front-envoy.yaml index e871ebea1e91..1204e6406f69 100644 --- a/examples/cors/frontend/front-envoy.yaml +++ b/examples/cors/frontend/front-envoy.yaml @@ -8,13 +8,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /dev/stdout route_config: name: local_route diff --git a/examples/cors/frontend/service-envoy.yaml b/examples/cors/frontend/service-envoy.yaml index c49e69ccd002..b79367a2294d 100644 --- a/examples/cors/frontend/service-envoy.yaml +++ b/examples/cors/frontend/service-envoy.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/csrf/crosssite/Dockerfile-frontenvoy 
b/examples/csrf/crosssite/Dockerfile-frontenvoy index 0b2e25a0de1b..31ee1c2e7432 100644 --- a/examples/csrf/crosssite/Dockerfile-frontenvoy +++ b/examples/csrf/crosssite/Dockerfile-frontenvoy @@ -4,4 +4,4 @@ RUN apt-get update && apt-get -q install -y \ curl COPY ./front-envoy.yaml /etc/front-envoy.yaml RUN chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/csrf/crosssite/Dockerfile-service b/examples/csrf/crosssite/Dockerfile-service index 37c5296aeefc..ed8cdfdfb580 100644 --- a/examples/csrf/crosssite/Dockerfile-service +++ b/examples/csrf/crosssite/Dockerfile-service @@ -6,4 +6,4 @@ RUN mkdir /code ADD ./crosssite/service.py ./index.html /code/ ADD ./start_service.sh /usr/local/bin/start_service.sh RUN chmod u+x /usr/local/bin/start_service.sh -ENTRYPOINT /usr/local/bin/start_service.sh +ENTRYPOINT ["/bin/sh", "/usr/local/bin/start_service.sh"] diff --git a/examples/csrf/crosssite/docker-compose.yml b/examples/csrf/crosssite/docker-compose.yml index 4a2f3fdbf43e..026dfa0a4fab 100644 --- a/examples/csrf/crosssite/docker-compose.yml +++ b/examples/csrf/crosssite/docker-compose.yml @@ -1,4 +1,4 @@ -version: '2' +version: '3.7' services: front-envoy: diff --git a/examples/csrf/crosssite/front-envoy.yaml b/examples/csrf/crosssite/front-envoy.yaml index 879a0fa66576..c2cd6582d6f7 100644 --- a/examples/csrf/crosssite/front-envoy.yaml +++ b/examples/csrf/crosssite/front-envoy.yaml @@ -8,13 +8,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/dev/stdout" route_config: name: local_route diff --git a/examples/csrf/samesite/Dockerfile-frontenvoy b/examples/csrf/samesite/Dockerfile-frontenvoy index 0b2e25a0de1b..799a5721130a 100644 --- a/examples/csrf/samesite/Dockerfile-frontenvoy +++ b/examples/csrf/samesite/Dockerfile-frontenvoy @@ -4,4 +4,4 @@ RUN apt-get update && apt-get -q install -y \ curl COPY ./front-envoy.yaml /etc/front-envoy.yaml RUN chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/csrf/samesite/Dockerfile-service b/examples/csrf/samesite/Dockerfile-service index f2413d895618..3a78e72af04e 100644 --- a/examples/csrf/samesite/Dockerfile-service +++ b/examples/csrf/samesite/Dockerfile-service @@ -6,4 +6,4 @@ RUN mkdir /code ADD ./samesite/service.py ./index.html /code/ ADD ./start_service.sh /usr/local/bin/start_service.sh RUN chmod u+x /usr/local/bin/start_service.sh -ENTRYPOINT /usr/local/bin/start_service.sh +ENTRYPOINT ["/bin/sh", "/usr/local/bin/start_service.sh"] diff --git a/examples/csrf/samesite/docker-compose.yml b/examples/csrf/samesite/docker-compose.yml index 2fcac143f6f6..3c80498aaced 100644 --- a/examples/csrf/samesite/docker-compose.yml +++ b/examples/csrf/samesite/docker-compose.yml 
@@ -1,4 +1,4 @@ -version: '2' +version: '3.7' services: front-envoy: diff --git a/examples/csrf/samesite/front-envoy.yaml b/examples/csrf/samesite/front-envoy.yaml index cc18e2080a24..ac812d01f3c6 100644 --- a/examples/csrf/samesite/front-envoy.yaml +++ b/examples/csrf/samesite/front-envoy.yaml @@ -8,13 +8,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/dev/stdout" route_config: name: local_route diff --git a/examples/csrf/service-envoy.yaml b/examples/csrf/service-envoy.yaml index c49e69ccd002..b79367a2294d 100644 --- a/examples/csrf/service-envoy.yaml +++ b/examples/csrf/service-envoy.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/ext_authz/Dockerfile-frontenvoy b/examples/ext_authz/Dockerfile-frontenvoy index f329c86ce655..815937798a83 100644 --- a/examples/ext_authz/Dockerfile-frontenvoy +++ b/examples/ext_authz/Dockerfile-frontenvoy @@ -6,4 +6,4 @@ COPY ./config /etc/envoy-config COPY ./run_envoy.sh /run_envoy.sh RUN chmod go+r -R /etc/envoy-config \ && chmod go+rx /run_envoy.sh /etc/envoy-config /etc/envoy-config/* -CMD /run_envoy.sh +CMD ["/bin/sh", "/run_envoy.sh"] diff --git a/examples/ext_authz/config/grpc-service/v2.yaml b/examples/ext_authz/config/grpc-service/v2.yaml index bd1a6eee7f6d..455bd6a7957d 100644 --- a/examples/ext_authz/config/grpc-service/v2.yaml +++ b/examples/ext_authz/config/grpc-service/v2.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/ext_authz/config/grpc-service/v3.yaml b/examples/ext_authz/config/grpc-service/v3.yaml index 2b4829e2c90c..b00396298d66 100644 --- a/examples/ext_authz/config/grpc-service/v3.yaml +++ b/examples/ext_authz/config/grpc-service/v3.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/ext_authz/config/http-service.yaml b/examples/ext_authz/config/http-service.yaml index 85065d99806c..b6b93cd031f4 100644 --- a/examples/ext_authz/config/http-service.yaml +++ b/examples/ext_authz/config/http-service.yaml @@ -8,7 +8,7 @@ 
static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/ext_authz/config/opa-service/v2.yaml b/examples/ext_authz/config/opa-service/v2.yaml index 401c6df46832..dd2f22d00e8d 100644 --- a/examples/ext_authz/config/opa-service/v2.yaml +++ b/examples/ext_authz/config/opa-service/v2.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/fault-injection/envoy.yaml b/examples/fault-injection/envoy.yaml index ac0bd82b3568..db63f9469b8b 100644 --- a/examples/fault-injection/envoy.yaml +++ b/examples/fault-injection/envoy.yaml @@ -8,13 +8,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /dev/stdout route_config: name: local_route @@ -30,7 +30,7 @@ static_resources: http_filters: - name: envoy.filters.http.fault typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault + "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault abort: http_status: 503 percentage: diff --git a/examples/front-proxy/Dockerfile-frontenvoy b/examples/front-proxy/Dockerfile-frontenvoy index 0b2e25a0de1b..31ee1c2e7432 100644 --- a/examples/front-proxy/Dockerfile-frontenvoy +++ b/examples/front-proxy/Dockerfile-frontenvoy @@ -4,4 +4,4 @@ RUN apt-get update && apt-get -q install -y \ curl COPY ./front-envoy.yaml /etc/front-envoy.yaml RUN chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/front-proxy/Dockerfile-jaeger-service b/examples/front-proxy/Dockerfile-jaeger-service index 4fa93db5f249..8c3fe1bd4276 100644 --- a/examples/front-proxy/Dockerfile-jaeger-service +++ b/examples/front-proxy/Dockerfile-jaeger-service @@ -16,4 +16,4 @@ RUN echo "4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca /usr && mv /usr/local/lib/libjaegertracing.so.0.4.2 /usr/local/lib/libjaegertracing_plugin.so \ && sha256sum -c /tmp/checksum \ && rm /tmp/checksum -ENTRYPOINT /usr/local/bin/start_service.sh +ENTRYPOINT ["/bin/sh", "/usr/local/bin/start_service.sh"] diff --git a/examples/front-proxy/Dockerfile-service b/examples/front-proxy/Dockerfile-service index 03a6a9422ea2..0c2ae43024fa 100644 --- a/examples/front-proxy/Dockerfile-service +++ b/examples/front-proxy/Dockerfile-service @@ 
-6,4 +6,4 @@ RUN mkdir /code ADD ./service.py /code ADD ./start_service.sh /usr/local/bin/start_service.sh RUN chmod u+x /usr/local/bin/start_service.sh -ENTRYPOINT /usr/local/bin/start_service.sh +ENTRYPOINT ["/bin/sh", "/usr/local/bin/start_service.sh"] diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index c266022e6806..14204c9813f3 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -38,7 +38,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/front-proxy/service-envoy.yaml b/examples/front-proxy/service-envoy.yaml index 67ac03d7287f..046b99c9f1d5 100644 --- a/examples/front-proxy/service-envoy.yaml +++ b/examples/front-proxy/service-envoy.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: diff --git a/examples/front-proxy/start_service.sh b/examples/front-proxy/start_service.sh index cc529bcf2b15..43a8c112e636 100644 --- a/examples/front-proxy/start_service.sh +++ b/examples/front-proxy/start_service.sh @@ -1,3 +1,3 @@ #!/bin/sh python3 /code/service.py & -envoy -c /etc/service-envoy.yaml --service-cluster service${SERVICE_NAME} \ No newline at end of file +envoy -c /etc/service-envoy.yaml --service-cluster "service${SERVICE_NAME}" diff --git a/examples/grpc-bridge/Dockerfile-client b/examples/grpc-bridge/Dockerfile-client index da27eecaf689..aabae3de04bf 100644 --- a/examples/grpc-bridge/Dockerfile-client +++ b/examples/grpc-bridge/Dockerfile-client @@ -2,4 +2,4 @@ FROM envoyproxy/envoy-dev:latest COPY ./client/envoy-proxy.yaml /etc/client-envoy-proxy.yaml RUN chmod go+r /etc/client-envoy-proxy.yaml -CMD /usr/local/bin/envoy -c /etc/client-envoy-proxy.yaml +CMD ["/usr/local/bin/envoy", "-c", "/etc/client-envoy-proxy.yaml"] diff --git a/examples/grpc-bridge/Dockerfile-server b/examples/grpc-bridge/Dockerfile-server index a59690934ede..cdf4c91b8e0c 100644 --- a/examples/grpc-bridge/Dockerfile-server +++ b/examples/grpc-bridge/Dockerfile-server @@ -2,4 +2,4 @@ FROM envoyproxy/envoy-dev:latest COPY ./server/envoy-proxy.yaml /etc/server-envoy-proxy.yaml RUN chmod go+r /etc/server-envoy-proxy.yaml -CMD /usr/local/bin/envoy -c /etc/server-envoy-proxy.yaml --service-cluster backend-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/server-envoy-proxy.yaml", "--service-cluster", "backend-proxy"] diff --git a/examples/grpc-bridge/client/envoy-proxy.yaml b/examples/grpc-bridge/client/envoy-proxy.yaml index 7470440a9b5b..4d558e64b261 100644 --- 
a/examples/grpc-bridge/client/envoy-proxy.yaml +++ b/examples/grpc-bridge/client/envoy-proxy.yaml @@ -8,13 +8,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto add_user_agent: true access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/dev/stdout" stat_prefix: egress_http common_http_protocol_options: diff --git a/examples/grpc-bridge/docker-compose-protos.yaml b/examples/grpc-bridge/docker-compose-protos.yaml index 6b791161231d..42da7d7407c7 100644 --- a/examples/grpc-bridge/docker-compose-protos.yaml +++ b/examples/grpc-bridge/docker-compose-protos.yaml @@ -1,4 +1,4 @@ -version: "3" +version: "3.7" # This is the conversion from a script to a dockerized version of the script # https://github.com/envoyproxy/envoy/blob/master/examples/grpc-bridge/service/script/gen diff --git a/examples/grpc-bridge/server/envoy-proxy.yaml b/examples/grpc-bridge/server/envoy-proxy.yaml index 2d6075e0bc07..885feeedca59 100644 --- a/examples/grpc-bridge/server/envoy-proxy.yaml +++ b/examples/grpc-bridge/server/envoy-proxy.yaml @@ -8,13 +8,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/dev/stdout" route_config: name: local_route diff --git a/examples/grpc-bridge/verify.sh b/examples/grpc-bridge/verify.sh index 6b2dfe4c502f..5ff219bb3c76 100755 --- a/examples/grpc-bridge/verify.sh +++ b/examples/grpc-bridge/verify.sh @@ -10,7 +10,7 @@ export MANUAL=true run_log "Generate protocol stubs" docker-compose -f docker-compose-protos.yaml up -docker container prune -f +docker rm grpc-bridge_stubs_go_1 grpc-bridge_stubs_python_1 ls client/kv/kv_pb2.py ls server/kv/kv.pb.go diff --git a/examples/jaeger-native-tracing/Dockerfile-frontenvoy b/examples/jaeger-native-tracing/Dockerfile-frontenvoy index 5379dfe5e242..f7f1c893e065 100644 --- a/examples/jaeger-native-tracing/Dockerfile-frontenvoy +++ b/examples/jaeger-native-tracing/Dockerfile-frontenvoy @@ -14,4 +14,4 @@ RUN echo "4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca /usr/ && sha256sum -c /tmp/checksum \ && rm /tmp/checksum \ && chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml index b2f3430a3aaa..f2530538eace 100644 --- a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml @@ -9,13 +9,13 @@ static_resources: - 
filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: true tracing: provider: name: envoy.tracers.dynamic_ot typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig + "@type": type.googleapis.com/envoy.config.trace.v3.DynamicOtConfig library: /usr/local/lib/libjaegertracing_plugin.so config: service_name: front-proxy diff --git a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml index 9be0a80c1fe1..19678e297dfb 100644 --- a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml @@ -9,7 +9,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -37,12 +37,12 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.dynamic_ot typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig + "@type": type.googleapis.com/envoy.config.trace.v3.DynamicOtConfig library: /usr/local/lib/libjaegertracing_plugin.so config: service_name: service1 diff --git a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml index 88d4f54ec594..92fe6bd69c73 100644 --- a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml @@ -9,12 +9,12 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.dynamic_ot typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.DynamicOtConfig + "@type": type.googleapis.com/envoy.config.trace.v3.DynamicOtConfig library: /usr/local/lib/libjaegertracing_plugin.so config: service_name: service2 diff --git a/examples/jaeger-tracing/Dockerfile-frontenvoy b/examples/jaeger-tracing/Dockerfile-frontenvoy index e955e76bb9b8..0d939a0c696a 100644 --- a/examples/jaeger-tracing/Dockerfile-frontenvoy +++ b/examples/jaeger-tracing/Dockerfile-frontenvoy @@ -4,4 +4,4 @@ RUN apt-get update && apt-get -q install -y \ curl COPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml RUN chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/jaeger-tracing/front-envoy-jaeger.yaml b/examples/jaeger-tracing/front-envoy-jaeger.yaml index 
07c157599499..fc65ddefc792 100644 --- a/examples/jaeger-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/front-envoy-jaeger.yaml @@ -9,13 +9,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: true tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: jaeger collector_endpoint: "/api/v2/spans" shared_span_context: false diff --git a/examples/jaeger-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-tracing/service1-envoy-jaeger.yaml index b40ec8b8f1c9..447a4cad2b34 100644 --- a/examples/jaeger-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service1-envoy-jaeger.yaml @@ -9,12 +9,12 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: jaeger collector_endpoint: "/api/v2/spans" shared_span_context: false @@ -46,12 +46,12 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: jaeger collector_endpoint: "/api/v2/spans" shared_span_context: false diff --git a/examples/jaeger-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-tracing/service2-envoy-jaeger.yaml index 5b6a7d93b65a..517570f2416b 100644 --- a/examples/jaeger-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service2-envoy-jaeger.yaml @@ -9,12 +9,12 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: jaeger collector_endpoint: "/api/v2/spans" shared_span_context: false diff --git a/examples/load-reporting-service/Dockerfile-http-server b/examples/load-reporting-service/Dockerfile-http-server index 3ae32015fb8d..e71aa6b75468 100644 --- a/examples/load-reporting-service/Dockerfile-http-server +++ b/examples/load-reporting-service/Dockerfile-http-server @@ -8,4 +8,4 @@ COPY . 
./code RUN pip3 install -q Flask==0.11.1 RUN chmod u+x /usr/local/bin/start_service.sh -ENTRYPOINT /usr/local/bin/start_service.sh +ENTRYPOINT ["/bin/sh", "/usr/local/bin/start_service.sh"] diff --git a/examples/load-reporting-service/send_requests.sh b/examples/load-reporting-service/send_requests.sh index b65b28489067..d8cf8f9e8721 100644 --- a/examples/load-reporting-service/send_requests.sh +++ b/examples/load-reporting-service/send_requests.sh @@ -2,11 +2,11 @@ counter=1 while [ $counter -le 50 ] -do +do # generate random Port number to send requests ports=("80" "81") port=${ports[$RANDOM % ${#ports[@]} ]} - curl -v localhost:$port/service + curl -v "localhost:${port}/service" ((counter++)) -done \ No newline at end of file +done diff --git a/examples/load-reporting-service/service-envoy-w-lrs.yaml b/examples/load-reporting-service/service-envoy-w-lrs.yaml index 958bf86b249a..50ab094dee05 100644 --- a/examples/load-reporting-service/service-envoy-w-lrs.yaml +++ b/examples/load-reporting-service/service-envoy-w-lrs.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: @@ -57,7 +57,7 @@ cluster_manager: load_stats_config: api_type: GRPC grpc_services: - envoy_grpc: + envoy_grpc: cluster_name: load_reporting_cluster admin: access_log_path: "/dev/null" diff --git a/examples/load-reporting-service/start_service.sh b/examples/load-reporting-service/start_service.sh index da0b0c47096b..3fa5d85f7a37 100644 --- a/examples/load-reporting-service/start_service.sh +++ b/examples/load-reporting-service/start_service.sh @@ -1,3 +1,3 @@ -#!/bin/sh +#!/bin/bash python3 /code/http_server.py & -/usr/local/bin/envoy -c /etc/service-envoy-w-lrs.yaml --service-node ${HOSTNAME} --service-cluster http_service +/usr/local/bin/envoy -c /etc/service-envoy-w-lrs.yaml --service-node "${HOSTNAME}" --service-cluster http_service diff --git a/examples/lua/Dockerfile-proxy b/examples/lua/Dockerfile-proxy index 03cb54ac245b..a2ea90d4344c 100644 --- a/examples/lua/Dockerfile-proxy +++ b/examples/lua/Dockerfile-proxy @@ -2,4 +2,4 @@ FROM envoyproxy/envoy-dev:latest ADD ./lib/mylibrary.lua /lib/mylibrary.lua COPY ./envoy.yaml /etc/envoy.yaml RUN chmod go+r /etc/envoy.yaml /lib/mylibrary.lua -CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/envoy.yaml", "-l", "debug", "--service-cluster", "proxy"] diff --git a/examples/lua/envoy.yaml b/examples/lua/envoy.yaml index a106bc2feee0..2838879afcb2 100644 --- a/examples/lua/envoy.yaml +++ b/examples/lua/envoy.yaml @@ -9,7 +9,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: auto route_config: @@ -26,7 +26,7 @@ static_resources: http_filters: - name: envoy.filters.http.lua typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua inline_code: | local mylibrary = 
require("lib.mylibrary") diff --git a/examples/mysql/Dockerfile-proxy b/examples/mysql/Dockerfile-proxy index 09595e6e6279..f70f44311461 100644 --- a/examples/mysql/Dockerfile-proxy +++ b/examples/mysql/Dockerfile-proxy @@ -2,4 +2,4 @@ FROM envoyproxy/envoy-dev:latest COPY ./envoy.yaml /etc/envoy.yaml RUN chmod go+r /etc/envoy.yaml -CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug +CMD ["/usr/local/bin/envoy", "-c /etc/envoy.yaml", "-l", "debug"] diff --git a/examples/mysql/envoy.yaml b/examples/mysql/envoy.yaml index ee485d33b6c4..e7dc86a53702 100644 --- a/examples/mysql/envoy.yaml +++ b/examples/mysql/envoy.yaml @@ -9,11 +9,11 @@ static_resources: - filters: - name: envoy.filters.network.mysql_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy stat_prefix: egress_mysql - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: mysql_tcp cluster: mysql_cluster diff --git a/examples/redis/Dockerfile-proxy b/examples/redis/Dockerfile-proxy index 9266482b53fe..24ef1107fdba 100644 --- a/examples/redis/Dockerfile-proxy +++ b/examples/redis/Dockerfile-proxy @@ -2,4 +2,4 @@ FROM envoyproxy/envoy-dev:latest COPY ./envoy.yaml /etc/envoy.yaml RUN chmod go+r /etc/envoy.yaml -CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/envoy.yaml", "-l", "debug", "--service-cluster", "proxy"] diff --git a/examples/redis/envoy.yaml b/examples/redis/envoy.yaml index e886be7e0750..b228864c7727 100644 --- a/examples/redis/envoy.yaml +++ b/examples/redis/envoy.yaml @@ -9,7 +9,7 @@ static_resources: - filters: - name: envoy.filters.network.redis_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy stat_prefix: egress_redis settings: op_timeout: 5s diff --git a/examples/verify-common.sh b/examples/verify-common.sh index d27155f39f0e..917ec952fd08 100644 --- a/examples/verify-common.sh +++ b/examples/verify-common.sh @@ -55,7 +55,6 @@ cleanup_stack () { path="$1" run_log "Cleanup ($path)" docker-compose down - docker system prune -f } cleanup () { diff --git a/examples/wasm/BUILD b/examples/wasm/BUILD new file mode 100644 index 000000000000..09cdc34d9ac4 --- /dev/null +++ b/examples/wasm/BUILD @@ -0,0 +1,17 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load("//bazel/wasm:wasm.bzl", "envoy_wasm_cc_binary") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_wasm_cc_binary( + name = "envoy_filter_http_wasm_example.wasm", + srcs = ["envoy_filter_http_wasm_example.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) diff --git a/examples/wasm/Dockerfile-proxy b/examples/wasm/Dockerfile-proxy new file mode 100644 index 000000000000..c7e135dfce3e --- /dev/null +++ b/examples/wasm/Dockerfile-proxy @@ -0,0 +1,5 @@ +FROM envoyproxy/envoy-dev:latest +COPY ./envoy.yaml /etc/envoy.yaml +COPY ./envoy_filter_http_wasm_example.wasm /etc/envoy_filter_http_wasm_example.wasm +RUN chmod go+r /etc/envoy.yaml +CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy diff --git a/examples/wasm/Dockerfile-web-service 
b/examples/wasm/Dockerfile-web-service new file mode 100644 index 000000000000..edf3810fa79b --- /dev/null +++ b/examples/wasm/Dockerfile-web-service @@ -0,0 +1 @@ +FROM solsson/http-echo diff --git a/examples/wasm/README.md b/examples/wasm/README.md new file mode 100644 index 000000000000..2922b607f9b7 --- /dev/null +++ b/examples/wasm/README.md @@ -0,0 +1,59 @@ +# Envoy WebAssembly Filter + +In this example, we show how a WebAssembly(WASM) filter can be used with the Envoy +proxy. The Envoy proxy [configuration](./envoy.yaml) includes a Webassembly filter +as documented [here](https://www.envoyproxy.io/docs/envoy/latest/). + + + + +## Quick Start + +1. `docker-compose build` +2. `docker-compose up` +3. `curl -v localhost:18000` + +Curl output should include our headers: + +``` +# curl -v localhost:8000 +* Rebuilt URL to: localhost:18000/ +* Trying 127.0.0.1... +* TCP_NODELAY set +* Connected to localhost (127.0.0.1) port 18000 (#0) +> GET / HTTP/1.1 +> Host: localhost:18000 +> User-Agent: curl/7.58.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< content-length: 13 +< content-type: text/plain +< location: envoy-wasm +< date: Tue, 09 Jul 2019 00:47:14 GMT +< server: envoy +< x-envoy-upstream-service-time: 0 +< newheader: newheadervalue +< +example body +* Connection #0 to host localhost left intact +``` + +## Build WASM Module + +Now you want to make changes to the C++ filter ([envoy_filter_http_wasm_example.cc](envoy_filter_http_wasm_example.cc)) +and build the WASM module ([envoy_filter_http_wasm_example.wasm](envoy_filter_http_wasm_example.wasm)). + +1. Build WASM module + ```shell + bazel build //examples/wasm:envoy_filter_http_wasm_example.wasm + ``` + +## Build the Envoy WASM Image + + + +For Envoy WASM runtime developers, if you want to make changes, please + +1. Follow [instructions](https://github.com/envoyproxy/envoy-wasm/blob/master/WASM.md). +2. Modify `docker-compose.yaml` to mount your own Envoy. diff --git a/examples/wasm/docker-compose.yaml b/examples/wasm/docker-compose.yaml new file mode 100644 index 000000000000..d2e16eb79848 --- /dev/null +++ b/examples/wasm/docker-compose.yaml @@ -0,0 +1,37 @@ +version: '3.7' +services: + + proxy: + build: + context: . + dockerfile: Dockerfile-proxy + volumes: + - ./envoy.yaml:/etc/envoy.yaml + - ./envoy_wasm_example.wasm:/etc/envoy_wasm_example.wasm + - ./envoy_filter_http_wasm_example.wasm:/etc/envoy_filter_http_wasm_example.wasm + # Uncomment this line if you want to use your own Envoy with WASM enabled. + #- /tmp/envoy-docker-build/envoy/source/exe/envoy:/usr/local/bin/envoy + networks: + - envoymesh + expose: + - "80" + - "8001" + ports: + - "18000:80" + - "18001:8001" + + web_service: + build: + context: . 
+ dockerfile: Dockerfile-web-service + networks: + envoymesh: + aliases: + - web_service + expose: + - "80" + ports: + - "18080:80" + +networks: + envoymesh: {} diff --git a/examples/wasm/envoy.yaml b/examples/wasm/envoy.yaml new file mode 100644 index 000000000000..2e5a27eb2041 --- /dev/null +++ b/examples/wasm/envoy.yaml @@ -0,0 +1,94 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.wasm + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + name: "my_plugin" + root_id: "my_root_id" + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {} + vm_config: + runtime: "envoy.wasm.runtime.v8" + vm_id: "my_vm_id" + code: + local: + filename: "/etc/envoy_filter_http_wasm_example.wasm" + configuration: {} + - name: envoy.filters.http.router + typed_config: {} + - name: staticreply + address: + socket_address: + address: 127.0.0.1 + port_value: 8099 + filter_chains: + - filters: + - name: envoy.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + direct_response: + status: 200 + body: + inline_string: "foo\n" + http_filters: + - name: envoy.router + config: {} + clusters: + - name: web_service + connect_timeout: 0.25s + type: static + lb_policy: round_robin + load_assignment: + cluster_name: service1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8099 +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/wasm/envoy_filter_http_wasm_example.cc b/examples/wasm/envoy_filter_http_wasm_example.cc new file mode 100644 index 000000000000..f3eabc3353a5 --- /dev/null +++ b/examples/wasm/envoy_filter_http_wasm_example.cc @@ -0,0 +1,90 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#include "proxy_wasm_intrinsics.h" + +class ExampleRootContext : public RootContext { +public: + explicit ExampleRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {} + + bool onStart(size_t) override; + bool onConfigure(size_t) override; + void onTick() override; +}; + +class ExampleContext : public Context { +public: + explicit ExampleContext(uint32_t id, RootContext* root) : Context(id, root) {} + + void onCreate() override; + FilterHeadersStatus onRequestHeaders(uint32_t headers, bool end_of_stream) override; + FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) override; + FilterHeadersStatus onResponseHeaders(uint32_t headers, bool end_of_stream) override; + FilterDataStatus onResponseBody(size_t body_buffer_length, bool end_of_stream) override; + void onDone() override; + void onLog() 
override; + void onDelete() override; +}; +static RegisterContextFactory register_ExampleContext(CONTEXT_FACTORY(ExampleContext), + ROOT_FACTORY(ExampleRootContext), + "my_root_id"); + +bool ExampleRootContext::onStart(size_t) { + LOG_TRACE("onStart"); + return true; +} + +bool ExampleRootContext::onConfigure(size_t) { + LOG_TRACE("onConfigure"); + proxy_set_tick_period_milliseconds(1000); // 1 sec + return true; +} + +void ExampleRootContext::onTick() { LOG_TRACE("onTick"); } + +void ExampleContext::onCreate() { LOG_WARN(std::string("onCreate " + std::to_string(id()))); } + +FilterHeadersStatus ExampleContext::onRequestHeaders(uint32_t, bool) { + LOG_DEBUG(std::string("onRequestHeaders ") + std::to_string(id())); + auto result = getRequestHeaderPairs(); + auto pairs = result->pairs(); + LOG_INFO(std::string("headers: ") + std::to_string(pairs.size())); + for (auto& p : pairs) { + LOG_INFO(std::string(p.first) + std::string(" -> ") + std::string(p.second)); + } + return FilterHeadersStatus::Continue; +} + +FilterHeadersStatus ExampleContext::onResponseHeaders(uint32_t, bool) { + LOG_DEBUG(std::string("onResponseHeaders ") + std::to_string(id())); + auto result = getResponseHeaderPairs(); + auto pairs = result->pairs(); + LOG_INFO(std::string("headers: ") + std::to_string(pairs.size())); + for (auto& p : pairs) { + LOG_INFO(std::string(p.first) + std::string(" -> ") + std::string(p.second)); + } + addResponseHeader("newheader", "newheadervalue"); + replaceResponseHeader("location", "envoy-wasm"); + return FilterHeadersStatus::Continue; +} + +FilterDataStatus ExampleContext::onRequestBody(size_t body_buffer_length, + bool /* end_of_stream */) { + auto body = getBufferBytes(WasmBufferType::HttpRequestBody, 0, body_buffer_length); + LOG_ERROR(std::string("onRequestBody ") + std::string(body->view())); + return FilterDataStatus::Continue; +} + +FilterDataStatus ExampleContext::onResponseBody(size_t /* body_buffer_length */, + bool /* end_of_stream */) { + setBuffer(WasmBufferType::HttpResponseBody, 0, 3, "foo"); + return FilterDataStatus::Continue; +} + +void ExampleContext::onDone() { LOG_WARN(std::string("onDone " + std::to_string(id()))); } + +void ExampleContext::onLog() { LOG_WARN(std::string("onLog " + std::to_string(id()))); } + +void ExampleContext::onDelete() { LOG_WARN(std::string("onDelete " + std::to_string(id()))); } diff --git a/examples/wasm/verify.sh b/examples/wasm/verify.sh new file mode 100755 index 000000000000..4a4f15bf496c --- /dev/null +++ b/examples/wasm/verify.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +export NAME=wasm + +# shellcheck source=examples/verify-common.sh +. 
"$(dirname "${BASH_SOURCE[0]}")/../verify-common.sh" + + +run_log "Test connection" +responds_with \ + foo \ + http://localhost:8000 + +run_log "Test header" +responds_with_header \ + "newheader: newheadervalue" \ + http://localhost:8000 diff --git a/examples/zipkin-tracing/Dockerfile-frontenvoy b/examples/zipkin-tracing/Dockerfile-frontenvoy index 87040962caf2..cd80edc3ba04 100644 --- a/examples/zipkin-tracing/Dockerfile-frontenvoy +++ b/examples/zipkin-tracing/Dockerfile-frontenvoy @@ -4,4 +4,4 @@ RUN apt-get update && apt-get -q install -y \ curl COPY ./front-envoy-zipkin.yaml /etc/front-envoy.yaml RUN chmod go+r /etc/front-envoy.yaml -CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy +CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"] diff --git a/examples/zipkin-tracing/front-envoy-zipkin.yaml b/examples/zipkin-tracing/front-envoy-zipkin.yaml index 41e864552c3c..232d76aa8728 100644 --- a/examples/zipkin-tracing/front-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/front-envoy-zipkin.yaml @@ -9,13 +9,13 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: true tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v2/spans" collector_endpoint_version: HTTP_JSON diff --git a/examples/zipkin-tracing/service1-envoy-zipkin.yaml b/examples/zipkin-tracing/service1-envoy-zipkin.yaml index fe7318366db6..7b592306ecb5 100644 --- a/examples/zipkin-tracing/service1-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service1-envoy-zipkin.yaml @@ -9,12 +9,12 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v2/spans" collector_endpoint_version: HTTP_JSON @@ -45,12 +45,12 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v2/spans" collector_endpoint_version: HTTP_JSON diff --git a/examples/zipkin-tracing/service2-envoy-zipkin.yaml b/examples/zipkin-tracing/service2-envoy-zipkin.yaml index ceebbcf60917..bc929c5b2c2e 100644 --- a/examples/zipkin-tracing/service2-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service2-envoy-zipkin.yaml @@ -9,12 +9,12 @@ 
static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager tracing: provider: name: envoy.tracers.zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v2/spans" collector_endpoint_version: HTTP_JSON diff --git a/generated_api_shadow/bazel/envoy_http_archive.bzl b/generated_api_shadow/bazel/envoy_http_archive.bzl index 13b98f770619..15fd65b2af27 100644 --- a/generated_api_shadow/bazel/envoy_http_archive.bzl +++ b/generated_api_shadow/bazel/envoy_http_archive.bzl @@ -10,8 +10,7 @@ def envoy_http_archive(name, locations, **kwargs): # This repository has already been defined, probably because the user # wants to override the version. Do nothing. return - loc_key = kwargs.pop("repository_key", name) - location = locations[loc_key] + location = locations[name] # HTTP tarball at a given URL. Add a BUILD file if requested. http_archive( diff --git a/generated_api_shadow/bazel/external_deps.bzl b/generated_api_shadow/bazel/external_deps.bzl new file mode 100644 index 000000000000..588879c4bd0a --- /dev/null +++ b/generated_api_shadow/bazel/external_deps.bzl @@ -0,0 +1,140 @@ +load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locations_spec") + +# Envoy dependencies may be annotated with the following attributes: +DEPENDENCY_ANNOTATIONS = [ + # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID + # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See + # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements + # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned. + # This attribute is optional for components with use categories listed in the + # USE_CATEGORIES_WITH_CPE_OPTIONAL + "cpe", + + # A list of extensions when 'use_category' contains 'dataplane_ext' or 'observability_ext'. + "extensions", + + # Additional dependencies loaded transitively via this dependency that are not tracked in + # Envoy (see the external dependency at the given version for information). + "implied_untracked_deps", + + # When the dependency was last updated in Envoy. + "last_updated", + + # Project metadata. + "project_desc", + "project_name", + "project_url", + + # List of the categories describing how the dependency is being used. This attribute is used + # for automatic tracking of security posture of Envoy's dependencies. + # Possible values are documented in the USE_CATEGORIES list below. + # This attribute is mandatory for each dependecy. + "use_category", + + # The dependency version. This may be either a tagged release (preferred) + # or git SHA (as an exception when no release tagged version is suitable). + "version", +] + +# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed +# to be declared. +USE_CATEGORIES = [ + # This dependency is used in API protos. + "api", + # This dependency is used in build process. + "build", + # This dependency is used to process xDS requests. + "controlplane", + # This dependency is used in processing downstream or upstream requests (core). 
+ "dataplane_core", + # This dependency is used in processing downstream or upstream requests (extensions). + "dataplane_ext", + # This dependecy is used for logging, metrics or tracing (core). It may process unstrusted input. + "observability_core", + # This dependecy is used for logging, metrics or tracing (extensions). It may process unstrusted input. + "observability_ext", + # This dependency does not handle untrusted data and is used for various utility purposes. + "other", + # This dependency is used only in tests. + "test_only", + # Documentation generation + "docs", + # Developer tools (not used in build or docs) + "devtools", +] + +# Components with these use categories are not required to specify the 'cpe' +# and 'last_updated' annotation. +USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] + +def _fail_missing_attribute(attr, key): + fail("The '%s' attribute must be defined for external dependecy " % attr + key) + +# Method for verifying content of the repository location specifications. +# +# We also remove repository metadata attributes so that further consumers, e.g. +# http_archive, are not confused by them. +def load_repository_locations(repository_locations_spec): + locations = {} + for key, location in load_repository_locations_spec(repository_locations_spec).items(): + mutable_location = dict(location) + locations[key] = mutable_location + + if "sha256" not in location or len(location["sha256"]) == 0: + _fail_missing_attribute("sha256", key) + + if "project_name" not in location: + _fail_missing_attribute("project_name", key) + + if "project_desc" not in location: + _fail_missing_attribute("project_desc", key) + + if "project_url" not in location: + _fail_missing_attribute("project_url", key) + project_url = location["project_url"] + if not project_url.startswith("https://") and not project_url.startswith("http://"): + fail("project_url must start with https:// or http://: " + project_url) + + if "version" not in location: + _fail_missing_attribute("version", key) + + if "use_category" not in location: + _fail_missing_attribute("use_category", key) + use_category = location["use_category"] + + if "dataplane_ext" in use_category or "observability_ext" in use_category: + if "extensions" not in location: + _fail_missing_attribute("extensions", key) + + if "last_updated" not in location: + _fail_missing_attribute("last_updated", key) + last_updated = location["last_updated"] + + # Starlark doesn't have regexes. + if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-": + fail("last_updated must match YYYY-DD-MM: " + last_updated) + + if "cpe" in location: + cpe = location["cpe"] + + # Starlark doesn't have regexes. + cpe_components = len(cpe.split(":")) + + # We allow cpe:2.3:a:foo:*:* and cpe:2.3.:a:foo:bar:* only. + cpe_components_valid = (cpe_components == 6) + cpe_matches = (cpe == "N/A" or (cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and cpe_components_valid)) + if not cpe_matches: + fail("CPE must match cpe:2.3:a:::*: " + cpe) + elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]: + _fail_missing_attribute("cpe", key) + + for category in location["use_category"]: + if category not in USE_CATEGORIES: + fail("Unknown use_category value '" + category + "' for dependecy " + key) + + # Remove any extra annotations that we add, so that we don't confuse http_archive etc. 
+ for annotation in DEPENDENCY_ANNOTATIONS: + if annotation in mutable_location: + mutable_location.pop(annotation) + + return locations diff --git a/generated_api_shadow/bazel/repositories.bzl b/generated_api_shadow/bazel/repositories.bzl index a64e733cf74a..a12a0ea98b3a 100644 --- a/generated_api_shadow/bazel/repositories.bzl +++ b/generated_api_shadow/bazel/repositories.bzl @@ -1,40 +1,43 @@ load(":envoy_http_archive.bzl", "envoy_http_archive") -load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") +load(":external_deps.bzl", "load_repository_locations") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") -def api_dependencies(): +REPOSITORY_LOCATIONS = load_repository_locations(REPOSITORY_LOCATIONS_SPEC) + +# Use this macro to reference any HTTP archive from bazel/repository_locations.bzl. +def external_http_archive(name, **kwargs): envoy_http_archive( - "bazel_skylib", + name, locations = REPOSITORY_LOCATIONS, + **kwargs ) - envoy_http_archive( - "com_envoyproxy_protoc_gen_validate", - locations = REPOSITORY_LOCATIONS, + +def api_dependencies(): + external_http_archive( + name = "bazel_skylib", ) - envoy_http_archive( + external_http_archive( + name = "com_envoyproxy_protoc_gen_validate", + ) + external_http_archive( name = "com_google_googleapis", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "com_github_cncf_udpa", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "prometheus_metrics_model", - locations = REPOSITORY_LOCATIONS, build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT, ) - envoy_http_archive( + external_http_archive( name = "opencensus_proto", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "rules_proto", - locations = REPOSITORY_LOCATIONS, ) - envoy_http_archive( + external_http_archive( name = "com_github_openzipkin_zipkinapi", - locations = REPOSITORY_LOCATIONS, build_file_content = ZIPKINAPI_BUILD_CONTENT, ) diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index 2f0fdc723dbb..bdcf31e867d2 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -1,66 +1,91 @@ -BAZEL_SKYLIB_RELEASE = "1.0.3" -BAZEL_SKYLIB_SHA256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c" - -OPENCENSUS_PROTO_RELEASE = "0.3.0" -OPENCENSUS_PROTO_SHA256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0" - -PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 -PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" - -GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 -GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" - -PROMETHEUS_GIT_SHA = "60555c9708c786597e6b07bf846d0dc5c2a46f54" # Jun 23, 2020 -PROMETHEUS_SHA = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e" - -UDPA_RELEASE = "0.0.1" -UDPA_SHA256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8" - -ZIPKINAPI_RELEASE = "0.2.2" -ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" - -RULES_PROTO_GIT_SHA = "40298556293ae502c66579620a7ce867d5f57311" # Aug 17, 2020 -RULES_PROTO_SHA256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5" - -REPOSITORY_LOCATIONS = dict( +# This should match the schema defined in external_deps.bzl. 
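The `load_repository_locations` helper added in `external_deps.bzl` above enforces this metadata schema without regexes, and `repository_locations_utils.bzl` (added later in this change) interpolates the `{version}` placeholders that the spec entries use in `strip_prefix` and `urls`. A minimal sketch of both steps in plain Python (not Starlark), using a hypothetical dependency entry:

```python
# Sketch only: mirrors the shape checks in external_deps.bzl and the {version}
# interpolation in repository_locations_utils.bzl. "example_dep" is hypothetical.

def last_updated_ok(value):
    # Starlark has no regexes, so the check is length plus dash positions (YYYY-MM-DD).
    return len(value) == 10 and value[4] == "-" and value[7] == "-"

def cpe_ok(cpe):
    # "N/A" is allowed; otherwise only cpe:2.3:a:<vendor>:<product>:* shaped IDs pass.
    return cpe == "N/A" or (
        cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and len(cpe.split(":")) == 6
    )

def format_version(s, version):
    return s.format(
        version=version,
        dash_version=version.replace(".", "-"),
        underscore_version=version.replace(".", "_"),
    )

example_dep = {
    "project_name": "Example dependency",
    "project_desc": "Hypothetical entry, for illustration only",
    "project_url": "https://github.com/example/example-dep",
    "version": "1.2.3",
    "sha256": "0" * 64,
    "strip_prefix": "example-dep-{version}",
    "urls": ["https://github.com/example/example-dep/archive/v{version}.tar.gz"],
    "use_category": ["dataplane_ext"],
    "extensions": ["envoy.filters.http.example"],
    "last_updated": "2020-09-23",
    "cpe": "cpe:2.3:a:example:example-dep:*",
}

assert last_updated_ok(example_dep["last_updated"])
assert cpe_ok(example_dep["cpe"])

# Interpolation, as done by load_repository_locations_spec():
url = format_version(example_dep["urls"][0], example_dep["version"])
print(url)  # https://github.com/example/example-dep/archive/v1.2.3.tar.gz
```

The real entries immediately below (bazel_skylib, protoc-gen-validate, and so on) follow exactly this shape.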
+REPOSITORY_LOCATIONS_SPEC = dict( bazel_skylib = dict( - sha256 = BAZEL_SKYLIB_SHA256, - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib-" + BAZEL_SKYLIB_RELEASE + ".tar.gz"], + project_name = "bazel-skylib", + project_desc = "Common useful functions and rules for Bazel", + project_url = "https://github.com/bazelbuild/bazel-skylib", + version = "1.0.3", + sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], + last_updated = "2020-08-27", + use_category = ["api"], ), com_envoyproxy_protoc_gen_validate = dict( - sha256 = PGV_SHA256, - strip_prefix = "protoc-gen-validate-" + PGV_GIT_SHA, - urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"], + project_name = "protoc-gen-validate (PGV)", + project_desc = "protoc plugin to generate polyglot message validators", + project_url = "https://github.com/envoyproxy/protoc-gen-validate", + version = "278964a8052f96a2f514add0298098f63fb7f47f", + sha256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8", + strip_prefix = "protoc-gen-validate-{version}", + urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz"], + last_updated = "2020-06-09", + use_category = ["api"], + ), + com_github_cncf_udpa = dict( + project_name = "Universal Data Plane API", + project_desc = "Universal Data Plane API Working Group (UDPA-WG)", + project_url = "https://github.com/cncf/udpa", + version = "0.0.1", + sha256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8", + strip_prefix = "udpa-{version}", + urls = ["https://github.com/cncf/udpa/archive/v{version}.tar.gz"], + last_updated = "2020-09-23", + use_category = ["api"], + ), + com_github_openzipkin_zipkinapi = dict( + project_name = "Zipkin API", + project_desc = "Zipkin's language independent model and HTTP Api Definitions", + project_url = "https://github.com/openzipkin/zipkin-api", + version = "0.2.2", + sha256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b", + strip_prefix = "zipkin-api-{version}", + urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"], + last_updated = "2020-09-23", + use_category = ["api"], ), com_google_googleapis = dict( # TODO(dio): Consider writing a Starlark macro for importing Google API proto. 
- sha256 = GOOGLEAPIS_SHA, - strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, - urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"], + project_name = "Google APIs", + project_desc = "Public interface definitions of Google APIs", + project_url = "https://github.com/googleapis/googleapis", + version = "82944da21578a53b74e547774cf62ed31a05b841", + sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405", + strip_prefix = "googleapis-{version}", + urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"], + last_updated = "2019-12-02", + use_category = ["api"], ), - com_github_cncf_udpa = dict( - sha256 = UDPA_SHA256, - strip_prefix = "udpa-" + UDPA_RELEASE, - urls = ["https://github.com/cncf/udpa/archive/v" + UDPA_RELEASE + ".tar.gz"], + opencensus_proto = dict( + project_name = "OpenCensus Proto", + project_desc = "Language Independent Interface Types For OpenCensus", + project_url = "https://github.com/census-instrumentation/opencensus-proto", + version = "0.3.0", + sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0", + strip_prefix = "opencensus-proto-{version}/src", + urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"], + last_updated = "2020-06-20", + use_category = ["api"], ), prometheus_metrics_model = dict( - sha256 = PROMETHEUS_SHA, - strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA, - urls = ["https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz"], - ), - opencensus_proto = dict( - sha256 = OPENCENSUS_PROTO_SHA256, - strip_prefix = "opencensus-proto-" + OPENCENSUS_PROTO_RELEASE + "/src", - urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v" + OPENCENSUS_PROTO_RELEASE + ".tar.gz"], + project_name = "Prometheus client model", + project_desc = "Data model artifacts for Prometheus", + project_url = "https://github.com/prometheus/client_model", + version = "60555c9708c786597e6b07bf846d0dc5c2a46f54", + sha256 = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e", + strip_prefix = "client_model-{version}", + urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], + last_updated = "2020-06-23", + use_category = ["api"], ), rules_proto = dict( - sha256 = RULES_PROTO_SHA256, - strip_prefix = "rules_proto-" + RULES_PROTO_GIT_SHA + "", - urls = ["https://github.com/bazelbuild/rules_proto/archive/" + RULES_PROTO_GIT_SHA + ".tar.gz"], - ), - com_github_openzipkin_zipkinapi = dict( - sha256 = ZIPKINAPI_SHA256, - strip_prefix = "zipkin-api-" + ZIPKINAPI_RELEASE, - urls = ["https://github.com/openzipkin/zipkin-api/archive/" + ZIPKINAPI_RELEASE + ".tar.gz"], + project_name = "Protobuf Rules for Bazel", + project_desc = "Protocol buffer rules for Bazel", + project_url = "https://github.com/bazelbuild/rules_proto", + version = "40298556293ae502c66579620a7ce867d5f57311", + sha256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5", + strip_prefix = "rules_proto-{version}", + urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], + last_updated = "2020-08-17", + use_category = ["api"], ), ) diff --git a/generated_api_shadow/bazel/repository_locations_utils.bzl b/generated_api_shadow/bazel/repository_locations_utils.bzl new file mode 100644 index 000000000000..3b984e1bc580 --- /dev/null +++ 
b/generated_api_shadow/bazel/repository_locations_utils.bzl @@ -0,0 +1,20 @@ +def _format_version(s, version): + return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_")) + +# Generate a "repository location specification" from raw repository +# specification. The information should match the format required by +# external_deps.bzl. This function mostly does interpolation of {version} in +# the repository info fields. This code should be capable of running in both +# Python and Starlark. +def load_repository_locations_spec(repository_locations_spec): + locations = {} + for key, location in repository_locations_spec.items(): + mutable_location = dict(location) + locations[key] = mutable_location + + # Fixup with version information. + if "version" in location: + if "strip_prefix" in location: + mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"]) + mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]] + return locations diff --git a/generated_api_shadow/envoy/admin/v3/BUILD b/generated_api_shadow/envoy/admin/v3/BUILD index 4163de8e0aba..38eadcb09fea 100644 --- a/generated_api_shadow/envoy/admin/v3/BUILD +++ b/generated_api_shadow/envoy/admin/v3/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/admin/v2alpha:pkg", "//envoy/annotations:pkg", "//envoy/config/bootstrap/v3:pkg", + "//envoy/config/cluster/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "//envoy/type/v3:pkg", diff --git a/generated_api_shadow/envoy/admin/v3/certs.proto b/generated_api_shadow/envoy/admin/v3/certs.proto index 158c8aead28f..5580bb5ef17d 100644 --- a/generated_api_shadow/envoy/admin/v3/certs.proto +++ b/generated_api_shadow/envoy/admin/v3/certs.proto @@ -34,11 +34,19 @@ message Certificate { repeated CertificateDetails cert_chain = 2; } -// [#next-free-field: 7] +// [#next-free-field: 8] message CertificateDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CertificateDetails"; + message OcspDetails { + // Indicates the time from which the OCSP response is valid. + google.protobuf.Timestamp valid_from = 1; + + // Indicates the time at which the OCSP response expires. + google.protobuf.Timestamp expiration = 2; + } + // Path of the certificate. string path = 1; @@ -56,6 +64,9 @@ message CertificateDetails { // Indicates the time at which the certificate expires. google.protobuf.Timestamp expiration_time = 6; + + // Details related to the OCSP response associated with this certificate, if any. + OcspDetails ocsp_details = 7; } message SubjectAlternateName { diff --git a/generated_api_shadow/envoy/admin/v3/clusters.proto b/generated_api_shadow/envoy/admin/v3/clusters.proto index fc05c8a10de2..8eeaec20becc 100644 --- a/generated_api_shadow/envoy/admin/v3/clusters.proto +++ b/generated_api_shadow/envoy/admin/v3/clusters.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.admin.v3; import "envoy/admin/v3/metrics.proto"; +import "envoy/config/cluster/v3/circuit_breaker.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/health_check.proto"; @@ -28,7 +29,7 @@ message Clusters { } // Details an individual cluster's current status. 
-// [#next-free-field: 6] +// [#next-free-field: 7] message ClusterStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus"; @@ -76,6 +77,9 @@ message ClusterStatus { // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent local_origin_success_rate_ejection_threshold = 5; + + // :ref:`Circuit breaking ` settings of the cluster. + config.cluster.v3.CircuitBreakers circuit_breakers = 6; } // Current state of a particular host. diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index 320bc022a5d6..c7a19453b882 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.admin.v3; +import "envoy/config/core/v3/base.proto"; + import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; @@ -17,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Proto representation of the value returned by /server_info, containing // server version/server status information. -// [#next-free-field: 7] +// [#next-free-field: 8] message ServerInfo { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ServerInfo"; @@ -52,9 +54,12 @@ message ServerInfo { // Command line options the server is currently running with. CommandLineOptions command_line_options = 6; + + // Populated node identity of this server. + config.core.v3.Node node = 7; } -// [#next-free-field: 35] +// [#next-free-field: 37] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -178,6 +183,12 @@ message CommandLineOptions { // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; + // See :option:`--socket-path` for details. + string socket_path = 35; + + // See :option:`--socket-mode` for details. + uint32 socket_mode = 36; + uint64 hidden_envoy_deprecated_max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; diff --git a/generated_api_shadow/envoy/admin/v3/tap.proto b/generated_api_shadow/envoy/admin/v3/tap.proto index ca7ab4405a9b..934170b2deea 100644 --- a/generated_api_shadow/envoy/admin/v3/tap.proto +++ b/generated_api_shadow/envoy/admin/v3/tap.proto @@ -21,7 +21,7 @@ message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; // The tap configuration to load. 
config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; diff --git a/generated_api_shadow/envoy/admin/v4alpha/BUILD b/generated_api_shadow/envoy/admin/v4alpha/BUILD index f2cb1a2a70c0..28f1e7d8c821 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/BUILD +++ b/generated_api_shadow/envoy/admin/v4alpha/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/admin/v3:pkg", "//envoy/annotations:pkg", "//envoy/config/bootstrap/v4alpha:pkg", + "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/tap/v4alpha:pkg", "//envoy/type/v3:pkg", diff --git a/generated_api_shadow/envoy/admin/v4alpha/certs.proto b/generated_api_shadow/envoy/admin/v4alpha/certs.proto index 585b09bccf4c..0dd868f71fa6 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/certs.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/certs.proto @@ -34,10 +34,21 @@ message Certificate { repeated CertificateDetails cert_chain = 2; } -// [#next-free-field: 7] +// [#next-free-field: 8] message CertificateDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails"; + message OcspDetails { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.CertificateDetails.OcspDetails"; + + // Indicates the time from which the OCSP response is valid. + google.protobuf.Timestamp valid_from = 1; + + // Indicates the time at which the OCSP response expires. + google.protobuf.Timestamp expiration = 2; + } + // Path of the certificate. string path = 1; @@ -55,6 +66,9 @@ message CertificateDetails { // Indicates the time at which the certificate expires. google.protobuf.Timestamp expiration_time = 6; + + // Details related to the OCSP response associated with this certificate, if any. + OcspDetails ocsp_details = 7; } message SubjectAlternateName { diff --git a/generated_api_shadow/envoy/admin/v4alpha/clusters.proto b/generated_api_shadow/envoy/admin/v4alpha/clusters.proto index 9056262cae86..10d920976930 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/clusters.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/clusters.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.admin.v4alpha; import "envoy/admin/v4alpha/metrics.proto"; +import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/health_check.proto"; @@ -28,7 +29,7 @@ message Clusters { } // Details an individual cluster's current status. -// [#next-free-field: 6] +// [#next-free-field: 7] message ClusterStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus"; @@ -76,6 +77,9 @@ message ClusterStatus { // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent local_origin_success_rate_ejection_threshold = 5; + + // :ref:`Circuit breaking ` settings of the cluster. + config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6; } // Current state of a particular host. 
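The `ServerInfo` additions in this change (a populated `node` identity plus the `socket_path`/`socket_mode` command line options) surface through the admin `/server_info` endpoint that this proto mirrors. A minimal sketch of reading the new field, assuming a reachable admin listener on `localhost:8001` (an assumption; the wasm example config earlier in this change happens to use that port):

```python
# Sketch only: fetch /server_info from a running Envoy admin listener and show
# the newly added "node" identity. The localhost:8001 address is an assumption.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8001/server_info") as resp:
    info = json.load(resp)

print(info.get("version"))
# "node" is the field added by this change; older builds simply omit it.
print(json.dumps(info.get("node", {}), indent=2))
```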
diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto index 3f3570af0111..6f56978d49fe 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.admin.v4alpha; +import "envoy/config/core/v4alpha/base.proto"; + import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; @@ -17,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // Proto representation of the value returned by /server_info, containing // server version/server status information. -// [#next-free-field: 7] +// [#next-free-field: 8] message ServerInfo { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo"; @@ -52,9 +54,12 @@ message ServerInfo { // Command line options the server is currently running with. CommandLineOptions command_line_options = 6; + + // Populated node identity of this server. + config.core.v4alpha.Node node = 7; } -// [#next-free-field: 35] +// [#next-free-field: 37] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -178,4 +183,10 @@ message CommandLineOptions { // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; + + // See :option:`--socket-path` for details. + string socket_path = 35; + + // See :option:`--socket-mode` for details. + uint32 socket_mode = 36; } diff --git a/generated_api_shadow/envoy/admin/v4alpha/tap.proto b/generated_api_shadow/envoy/admin/v4alpha/tap.proto index 039dfcfeb812..e89259380418 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/tap.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/tap.proto @@ -21,7 +21,7 @@ message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; // The tap configuration to load. config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; diff --git a/generated_api_shadow/envoy/api/v2/cluster.proto b/generated_api_shadow/envoy/api/v2/cluster.proto index d1a50fbdb91e..fab95f71b763 100644 --- a/generated_api_shadow/envoy/api/v2/cluster.proto +++ b/generated_api_shadow/envoy/api/v2/cluster.proto @@ -352,6 +352,10 @@ message Cluster { // This header isn't sanitized by default, so enabling this feature allows HTTP clients to // route traffic to arbitrary hosts and/or ports, which may have serious security // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. bool use_http_header = 1; } @@ -677,10 +681,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated core.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. 
+ // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. diff --git a/generated_api_shadow/envoy/api/v2/route/route_components.proto b/generated_api_shadow/envoy/api/v2/route/route_components.proto index 339c7bcbc53a..c1e84a5618a7 100644 --- a/generated_api_shadow/envoy/api/v2/route/route_components.proto +++ b/generated_api_shadow/envoy/api/v2/route/route_components.proto @@ -756,6 +756,10 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string cluster_header = 2 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; @@ -866,6 +870,10 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string auto_host_rewrite_header = 29 [ (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, (udpa.annotations.field_migrate).rename = "host_rewrite_header" diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index f1a8c29a4921..3d8f3bb8e16e 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -162,7 +162,7 @@ message RuntimeFilter { // Runtime key to get an optional overridden numerator for use in the // *percent_sampled* field. If found in runtime, this value will replace the // default numerator. - string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; // The default sampling percentage. If not specified, defaults to 0% with // denominator of 100. @@ -252,6 +252,7 @@ message ResponseFlagFilter { in: "UMSDR" in: "RFCF" in: "NFCF" + in: "DT" } } }]; diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto index bd4bcd48c4b4..0714b614c41d 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -164,7 +164,7 @@ message RuntimeFilter { // Runtime key to get an optional overridden numerator for use in the // *percent_sampled* field. If found in runtime, this value will replace the // default numerator. - string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; // The default sampling percentage. If not specified, defaults to 0% with // denominator of 100. 
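The ResponseFlagFilter hunks above allow the new `DT` (duration timeout) response flag in access log filtering. A minimal access log sketch that only records requests carrying that flag; the file logger and output path are illustrative:

.. code-block:: yaml

  access_log:
  - name: envoy.access_loggers.file
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
      path: /dev/stdout
    filter:
      response_flag_filter:
        flags: ["DT"]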
@@ -253,6 +253,7 @@ message ResponseFlagFilter { in: "UMSDR" in: "RFCF" in: "NFCF" + in: "DT" } } }]; diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto index da88dce786ae..30c276f24276 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto @@ -169,6 +169,9 @@ message Bootstrap { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 20; } diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 27c69eae991b..cce2dceb72d8 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -40,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 27] +// [#next-free-field: 28] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -174,7 +174,13 @@ message Bootstrap { }]; // Optional watchdog configuration. - Watchdog watchdog = 8; + // This is for a single watchdog configuration for the entire system. + // Deprecated in favor of *watchdogs* which has finer granularity. + Watchdog watchdog = 8 [deprecated = true]; + + // Optional watchdogs configuration. + // This is used for specifying different watchdogs for the different subsystems. + Watchdogs watchdogs = 27; // Configuration for an external tracing provider. // @@ -226,6 +232,9 @@ message Bootstrap { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 20; // Specifies optional bootstrap extensions to be instantiated at startup time. @@ -337,6 +346,17 @@ message ClusterManager { core.v3.ApiConfigSource load_stats_config = 4; } +// Allows you to specify different watchdog configs for different subsystems. +// This allows finer tuned policies for the watchdog. If a subsystem is omitted +// the default values for that system will be used. +message Watchdogs { + // Watchdog for the main thread. + Watchdog main_thread_watchdog = 1; + + // Watchdog for the worker threads. + Watchdog worker_watchdog = 2; +} + // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. @@ -487,7 +507,7 @@ message RuntimeLayer { // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. 
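The bootstrap hunks above deprecate the single `watchdog` field in favor of a `watchdogs` message with per-subsystem settings. A minimal bootstrap fragment, with thresholds chosen purely for illustration:

.. code-block:: yaml

  watchdogs:
    main_thread_watchdog:
      miss_timeout: 0.2s
      megamiss_timeout: 1s
    worker_watchdog:
      miss_timeout: 0.2s
      megamiss_timeout: 1s

Omitting either sub-message falls back to the default watchdog values for that subsystem, per the comment on `Watchdogs`.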
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof layer_specifier { option (validate.required) = true; diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index bf70a52e9d1d..57f37e5ff6ee 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -39,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 27] +// [#next-free-field: 28] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -175,7 +175,13 @@ message Bootstrap { }]; // Optional watchdog configuration. - Watchdog watchdog = 8; + // This is for a single watchdog configuration for the entire system. + // Deprecated in favor of *watchdogs* which has finer granularity. + Watchdog hidden_envoy_deprecated_watchdog = 8 [deprecated = true]; + + // Optional watchdogs configuration. + // This is used for specifying different watchdogs for the different subsystems. + Watchdogs watchdogs = 27; // Configuration for an external tracing provider. // @@ -227,6 +233,9 @@ message Bootstrap { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 20; // Specifies optional bootstrap extensions to be instantiated at startup time. @@ -335,6 +344,20 @@ message ClusterManager { core.v4alpha.ApiConfigSource load_stats_config = 4; } +// Allows you to specify different watchdog configs for different subsystems. +// This allows finer tuned policies for the watchdog. If a subsystem is omitted +// the default values for that system will be used. +message Watchdogs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Watchdogs"; + + // Watchdog for the main thread. + Watchdog main_thread_watchdog = 1; + + // Watchdog for the worker threads. + Watchdog worker_watchdog = 2; +} + // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. @@ -489,7 +512,7 @@ message RuntimeLayer { // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof layer_specifier { option (validate.required) = true; diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 7560baed3434..bc39aaa8799e 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -170,7 +170,7 @@ message Cluster { "envoy.api.v2.Cluster.CustomClusterType"; // The type of the cluster to instantiate. The name must match a supported cluster type. 
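Several of the DNS-related notes in this diff hinge on the `envoy.restart_features.use_apple_api_for_dns_lookups` runtime flag, and the RuntimeLayer hunks above tighten the `name` validation to `min_len`. A bootstrap runtime-layer fragment showing where such a flag would be set (layer names are illustrative):

.. code-block:: yaml

  layered_runtime:
    layers:
    - name: static_layer_0
      static_layer:
        envoy.restart_features.use_apple_api_for_dns_lookups: false
    - name: admin_layer_0
      admin_layer: {}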
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. @@ -436,6 +436,10 @@ message Cluster { // This header isn't sanitized by default, so enabling this feature allows HTTP clients to // route traffic to arbitrary hosts and/or ports, which may have serious security // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. bool use_http_header = 1; } @@ -612,7 +616,32 @@ message Cluster { // // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can // harm latency more than the prefetching helps. - google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + + // Indicates how many many streams (rounded up) can be anticipated across a cluster for each + // stream, useful for low QPS services. This is currently supported for a subset of + // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). + // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // cluster, doing best effort predictions of what upstream would be picked next and + // pre-establishing a connection. + // + // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // cluster, one to the second on the assumption there will be a follow-up stream. + // + // Prefetching will be limited to one prefetch per configured upstream in the cluster. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight, so during warm up and in steady state if a connection + // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // connection establishment. + // + // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, + // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. + google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } reserved 12, 15; @@ -673,7 +702,7 @@ message Cluster { // :ref:`statistics ` if :ref:`alt_stat_name // ` is not provided. // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An optional alternative to the cluster name to be used while emitting stats. // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be @@ -802,10 +831,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated core.v3.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] 
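The prefetch hunk above splits `prefetch_ratio` into `per_upstream_prefetch_ratio` and adds `predictive_prefetch_ratio`, both bounded to [1.0, 3.0]. A rough sketch of a cluster using both, assuming the enclosing prefetch policy message is exposed on the cluster as a `prefetch_policy` field (that field name is not shown in this hunk, so treat it as hypothetical):

.. code-block:: yaml

  clusters:
  - name: backend
    type: STRICT_DNS
    connect_timeout: 1s
    lb_policy: ROUND_ROBIN
    prefetch_policy:                  # hypothetical field name; not shown in this hunk
      per_upstream_prefetch_ratio: 1.5
      predictive_prefetch_ratio: 2.0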
// Always use TCP queries instead of UDP queries for DNS lookups. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. diff --git a/generated_api_shadow/envoy/config/cluster/v3/filter.proto b/generated_api_shadow/envoy/config/cluster/v3/filter.proto index af3116ec26eb..74f4a1137dab 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/filter.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/filter.proto @@ -21,7 +21,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 55915350dced..d83b54cabeb4 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -172,7 +172,7 @@ message Cluster { "envoy.config.cluster.v3.Cluster.CustomClusterType"; // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. @@ -442,6 +442,10 @@ message Cluster { // This header isn't sanitized by default, so enabling this feature allows HTTP clients to // route traffic to arbitrary hosts and/or ports, which may have serious security // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. bool use_http_header = 1; } @@ -622,7 +626,32 @@ message Cluster { // // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can // harm latency more than the prefetching helps. - google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + + // Indicates how many many streams (rounded up) can be anticipated across a cluster for each + // stream, useful for low QPS services. This is currently supported for a subset of + // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). + // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // cluster, doing best effort predictions of what upstream would be picked next and + // pre-establishing a connection. + // + // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // cluster, one to the second on the assumption there will be a follow-up stream. + // + // Prefetching will be limited to one prefetch per configured upstream in the cluster. 
+ // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight, so during warm up and in steady state if a connection + // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // connection establishment. + // + // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, + // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. + google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } reserved 12, 15, 7, 11, 35; @@ -685,7 +714,7 @@ message Cluster { // :ref:`statistics ` if :ref:`alt_stat_name // ` is not provided. // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An optional alternative to the cluster name to be used while emitting stats. // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be @@ -814,10 +843,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated core.v4alpha.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple' API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto index eb825fdeb6d5..5a4a4facbd81 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto @@ -21,7 +21,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/generated_api_shadow/envoy/config/core/v3/address.proto b/generated_api_shadow/envoy/config/core/v3/address.proto index 5102c2d57591..8228450eb93c 100644 --- a/generated_api_shadow/envoy/config/core/v3/address.proto +++ b/generated_api_shadow/envoy/config/core/v3/address.proto @@ -24,12 +24,24 @@ message Pipe { // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; // The mode for the Pipe. Not applicable for abstract sockets. 
uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; } +// [#not-implemented-hide:] The address represents an envoy internal listener. +// TODO(lambdai): Make this address available for listener and endpoint. +// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. +message EnvoyInternalAddress { + oneof address_name_specifier { + option (validate.required) = true; + + // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. + string server_listener_name = 1; + } +} + // [#next-free-field: 7] message SocketAddress { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress"; @@ -52,7 +64,7 @@ message SocketAddress { // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_bytes: 1}]; + string address = 2 [(validate.rules).string = {min_len: 1}]; oneof port_specifier { option (validate.required) = true; @@ -129,6 +141,9 @@ message Address { SocketAddress socket_address = 1; Pipe pipe = 2; + + // [#not-implemented-hide:] + EnvoyInternalAddress envoy_internal_address = 3; } } @@ -138,7 +153,7 @@ message CidrRange { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange"; // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Length of prefix, e.g. 0, 32. google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto index 472b6b7d91d3..35b015710e5c 100644 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ b/generated_api_shadow/envoy/config/core/v3/base.proto @@ -235,7 +235,16 @@ message RuntimeUInt32 { uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; +} + +// Runtime derived percentage with a default when not specified. +message RuntimePercent { + // Default value if runtime value is not available. + type.v3.Percent default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived double with a default when not specified. @@ -246,7 +255,7 @@ message RuntimeDouble { double default_value = 1; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived bool with a default when not specified. @@ -260,7 +269,7 @@ message RuntimeFeatureFlag { // Runtime key to get value for comparison. This value is used if defined. The boolean value must // be represented via its // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Header name/value pair. @@ -270,7 +279,7 @@ message HeaderValue { // Header name. 
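The new `RuntimePercent` message above follows the same pattern as `RuntimeUInt32` and `RuntimeDouble`: a default plus a runtime key that overrides it. A fragment of what a field of this type would look like in YAML; the runtime key is illustrative:

.. code-block:: yaml

  default_value:
    value: 25.0
  runtime_key: my_feature.enabled_percent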
string key = 1 [(validate.rules).string = - {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Header value. // @@ -310,13 +319,13 @@ message DataSource { option (validate.required) = true; // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; + string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; + string inline_string = 3 [(validate.rules).string = {min_len: 1}]; } } @@ -343,7 +352,7 @@ message RemoteDataSource { HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; + string sha256 = 2 [(validate.rules).string = {min_len: 1}]; // Retry policy for fetching remote data. RetryPolicy retry_policy = 3; @@ -373,7 +382,7 @@ message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto b/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto index 800d7b5332a0..e79ec24e0201 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto @@ -22,7 +22,7 @@ message GrpcMethodList { "envoy.api.v2.core.GrpcMethodList.Service"; // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The names of the gRPC methods in this service. repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index 552817ffd06f..fb05f3b73a5f 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -35,13 +35,13 @@ message GrpcService { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`transport_socket // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. 
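The `DataSource` hunks above only swap `min_bytes` for `min_len`, but the message itself is a oneof: exactly one of `filename`, `inline_bytes`, or `inline_string` must be set. For example, a TLS certificate supplied partly from disk and partly inline (paths and key material are illustrative placeholders):

.. code-block:: yaml

  tls_certificates:
  - certificate_chain:
      filename: /etc/envoy/certs/cert.pem
    private_key:
      inline_string: "<PEM key material elided>"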
string authority = 2 [(validate.rules).string = - {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] @@ -158,10 +158,10 @@ message GrpcService { // The path of subject token, a security token that represents the // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; // The path of actor token, a security token that represents the identity // of the acting party. The acting party is authorized to use the @@ -228,7 +228,7 @@ message GrpcService { // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string target_uri = 1 [(validate.rules).string = {min_len: 1}]; ChannelCredentials channel_credentials = 2; @@ -245,7 +245,7 @@ message GrpcService { // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel @@ -284,8 +284,10 @@ message GrpcService { // request. google.protobuf.Duration timeout = 3; - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + // Additional metadata to include in streams initiated to the GrpcService. This can be used for + // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to + // be injected. For more information, including details on header value syntax, see the + // documentation on :ref:`custom request headers + // `. repeated HeaderValue initial_metadata = 5; } diff --git a/generated_api_shadow/envoy/config/core/v3/health_check.proto b/generated_api_shadow/envoy/config/core/v3/health_check.proto index 05af0a8cef06..5f8fd325aa95 100644 --- a/generated_api_shadow/envoy/config/core/v3/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v3/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 24] +// [#next-free-field: 25] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -67,7 +67,7 @@ message HealthCheck { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_bytes: 1}]; + string text = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; @@ -87,9 +87,8 @@ message HealthCheck { // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. 
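The expanded `initial_metadata` comment above points at the custom request header docs; the `x-foo-bar: baz-key` pair below is taken from that comment, while the cluster name and timeout are illustrative. A minimal `GrpcService` fragment with injected metadata:

.. code-block:: yaml

  grpc_service:
    envoy_grpc:
      cluster_name: ext_authz_cluster
    timeout: 0.25s
    initial_metadata:
    - key: x-foo-bar
      value: baz-key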
- string path = 2 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string path = 2 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -184,7 +183,7 @@ message HealthCheck { "envoy.api.v2.core.HealthCheck.CustomHealthCheck"; // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. @@ -284,6 +283,21 @@ message HealthCheck { // The default value for "no traffic interval" is 60 seconds. google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + // The "no traffic healthy interval" is a special health check interval that + // is used for hosts that are currently passing active health checking + // (including new hosts) when the cluster has received no traffic. + // + // This is useful for when we want to send frequent health checks with + // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once + // a host in the cluster is marked as healthy. + // + // Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. + // + // If no_traffic_healthy_interval is not set, it will default to the + // no traffic interval and send that interval regardless of health state. + google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. diff --git a/generated_api_shadow/envoy/config/core/v3/http_uri.proto b/generated_api_shadow/envoy/config/core/v3/http_uri.proto index 42bcd4f61572..5d1fc239e07e 100644 --- a/generated_api_shadow/envoy/config/core/v3/http_uri.proto +++ b/generated_api_shadow/envoy/config/core/v3/http_uri.proto @@ -27,7 +27,7 @@ message HttpUri { // // uri: https://www.googleapis.com/oauth2/v1/certs // - string uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string uri = 1 [(validate.rules).string = {min_len: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or @@ -45,7 +45,7 @@ message HttpUri { // // cluster: jwks_cluster // - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
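The health check hunk above introduces `no_traffic_healthy_interval`, letting a cluster probe aggressively while unused hosts are still unproven and then back off once they pass. A sketch of a cluster health check combining the two no-traffic intervals; the durations and thresholds are illustrative:

.. code-block:: yaml

  health_checks:
  - timeout: 1s
    interval: 5s
    no_traffic_interval: 10s
    no_traffic_healthy_interval: 60s
    unhealthy_threshold: 3
    healthy_threshold: 2
    http_health_check:
      path: /healthcheck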
diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index 3e20f3b449ae..17a6955d6851 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v3; +import "envoy/type/v3/percent.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -177,7 +179,27 @@ message Http1ProtocolOptions { google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; } -// [#next-free-field: 15] +message KeepaliveSettings { + // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + google.protobuf.Duration interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // How long to wait for a response to a keepalive PING. If a response is not received within this + // time period, the connection will be aborted. + google.protobuf.Duration timeout = 2 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // A random jitter amount as a percentage of interval that will be added to each interval. + // A value of zero means there will be no jitter. + // The default value is 15%. + type.v3.Percent interval_jitter = 3; +} + +// [#next-free-field: 16] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http2ProtocolOptions"; @@ -345,6 +367,10 @@ message Http2ProtocolOptions { // `_ for // standardized identifiers. repeated SettingsParameter custom_settings_parameters = 13; + + // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + // does not respond within the configured timeout, the connection will be aborted. + KeepaliveSettings connection_keepalive = 15; } // [#not-implemented-hide:] diff --git a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto index 6c129707b2e2..10d99b878bdd 100644 --- a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto +++ b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto @@ -23,17 +23,20 @@ message SubstitutionFormatString { // Specify a format with command operators to form a text string. // Its details is described in :ref:`format string`. // - // .. code-block:: + // For example, setting ``text_format`` like below, // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // The following plain text will be created: + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // .. code-block:: + // generates plain text similar to: // - // upstream connect error:204:path=/foo + // .. code-block:: text // - string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + // upstream connect error:503:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_len: 1}]; // Specify a format with command operators to form a JSON string. // Its details is described in :ref:`format dictionary`. @@ -41,11 +44,12 @@ message SubstitutionFormatString { // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). // See the documentation for a specific command operator for details. // - // .. 
code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" // // The following JSON object would be created: // @@ -65,4 +69,15 @@ message SubstitutionFormatString { // empty string, so that empty values are omitted entirely. // * for ``json_format`` the keys with null values are omitted in the output structure. bool omit_empty_values = 3; + + // Specify a *content_type* field. + // If this field is not set then ``text/plain`` is used for *text_format* and + // ``application/json`` is used for *json_format*. + // + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // content_type: "text/html; charset=UTF-8" + // + string content_type = 4; } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/address.proto b/generated_api_shadow/envoy/config/core/v4alpha/address.proto index ffade4bed75b..6ae82359504e 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/address.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/address.proto @@ -24,12 +24,27 @@ message Pipe { // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; // The mode for the Pipe. Not applicable for abstract sockets. uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; } +// [#not-implemented-hide:] The address represents an envoy internal listener. +// TODO(lambdai): Make this address available for listener and endpoint. +// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. +message EnvoyInternalAddress { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.EnvoyInternalAddress"; + + oneof address_name_specifier { + option (validate.required) = true; + + // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. + string server_listener_name = 1; + } +} + // [#next-free-field: 7] message SocketAddress { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress"; @@ -52,7 +67,7 @@ message SocketAddress { // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_bytes: 1}]; + string address = 2 [(validate.rules).string = {min_len: 1}]; oneof port_specifier { option (validate.required) = true; @@ -129,6 +144,9 @@ message Address { SocketAddress socket_address = 1; Pipe pipe = 2; + + // [#not-implemented-hide:] + EnvoyInternalAddress envoy_internal_address = 3; } } @@ -138,7 +156,7 @@ message CidrRange { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange"; // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Length of prefix, e.g. 0, 32. 
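The Http2ProtocolOptions hunk earlier in this diff adds `connection_keepalive` backed by the new `KeepaliveSettings` message (interval and timeout are required and must be at least 1ms; jitter defaults to 15%). A minimal sketch, shown here on a cluster's `http2_protocol_options` as one place that message is accepted; the specific values are illustrative:

.. code-block:: yaml

  http2_protocol_options:
    connection_keepalive:
      interval: 30s
      timeout: 5s
      interval_jitter:
        value: 15.0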
google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/base.proto b/generated_api_shadow/envoy/config/core/v4alpha/base.proto index af53aa402a47..03fcc5a461e0 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/base.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/base.proto @@ -236,7 +236,19 @@ message RuntimeUInt32 { uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; +} + +// Runtime derived percentage with a default when not specified. +message RuntimePercent { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RuntimePercent"; + + // Default value if runtime value is not available. + type.v3.Percent default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived double with a default when not specified. @@ -247,7 +259,7 @@ message RuntimeDouble { double default_value = 1; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Runtime derived bool with a default when not specified. @@ -261,7 +273,7 @@ message RuntimeFeatureFlag { // Runtime key to get value for comparison. This value is used if defined. The boolean value must // be represented via its // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } // Header name/value pair. @@ -271,7 +283,7 @@ message HeaderValue { // Header name. string key = 1 [(validate.rules).string = - {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Header value. // @@ -311,13 +323,13 @@ message DataSource { option (validate.required) = true; // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; + string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; + string inline_string = 3 [(validate.rules).string = {min_len: 1}]; } } @@ -344,7 +356,7 @@ message RemoteDataSource { HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; + string sha256 = 2 [(validate.rules).string = {min_len: 1}]; // Retry policy for fetching remote data. RetryPolicy retry_policy = 3; @@ -380,7 +392,7 @@ message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. 
diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto index a4a7be077b27..371ea32c10f3 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto @@ -23,7 +23,7 @@ message GrpcMethodList { "envoy.config.core.v3.GrpcMethodList.Service"; // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The names of the gRPC methods in this service. repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto index 51f11fa1f346..9ea35b456470 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto @@ -35,13 +35,13 @@ message GrpcService { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`transport_socket // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. string authority = 2 [(validate.rules).string = - {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] @@ -160,10 +160,10 @@ message GrpcService { // The path of subject token, a security token that represents the // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; + string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; // The path of actor token, a security token that represents the identity // of the acting party. The acting party is authorized to use the @@ -236,7 +236,7 @@ message GrpcService { // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string target_uri = 1 [(validate.rules).string = {min_len: 1}]; ChannelCredentials channel_credentials = 2; @@ -253,7 +253,7 @@ message GrpcService { // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel @@ -292,8 +292,10 @@ message GrpcService { // request. google.protobuf.Duration timeout = 3; - // Additional metadata to include in streams initiated to the GrpcService. 
- // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + // Additional metadata to include in streams initiated to the GrpcService. This can be used for + // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to + // be injected. For more information, including details on header value syntax, see the + // documentation on :ref:`custom request headers + // `. repeated HeaderValue initial_metadata = 5; } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto index 39badc334b01..2761b856a3d7 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 24] +// [#next-free-field: 25] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; @@ -67,7 +67,7 @@ message HealthCheck { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_bytes: 1}]; + string text = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; @@ -91,9 +91,8 @@ message HealthCheck { // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. - string path = 2 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string path = 2 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -187,7 +186,7 @@ message HealthCheck { reserved "config"; // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. @@ -285,6 +284,21 @@ message HealthCheck { // The default value for "no traffic interval" is 60 seconds. google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + // The "no traffic healthy interval" is a special health check interval that + // is used for hosts that are currently passing active health checking + // (including new hosts) when the cluster has received no traffic. + // + // This is useful for when we want to send frequent health checks with + // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once + // a host in the cluster is marked as healthy. + // + // Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. + // + // If no_traffic_healthy_interval is not set, it will default to the + // no traffic interval and send that interval regardless of health state. + google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. 
diff --git a/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto b/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto index e88a9aa7d7df..ae1c0c9a3d4e 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto @@ -27,7 +27,7 @@ message HttpUri { // // uri: https://www.googleapis.com/oauth2/v1/certs // - string uri = 1 [(validate.rules).string = {min_bytes: 1}]; + string uri = 1 [(validate.rules).string = {min_len: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or @@ -45,7 +45,7 @@ message HttpUri { // // cluster: jwks_cluster // - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto index 44314f61fbae..885972885ebc 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v4alpha; +import "envoy/type/v3/percent.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -177,7 +179,30 @@ message Http1ProtocolOptions { google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; } -// [#next-free-field: 15] +message KeepaliveSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.KeepaliveSettings"; + + // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + google.protobuf.Duration interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // How long to wait for a response to a keepalive PING. If a response is not received within this + // time period, the connection will be aborted. + google.protobuf.Duration timeout = 2 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // A random jitter amount as a percentage of interval that will be added to each interval. + // A value of zero means there will be no jitter. + // The default value is 15%. + type.v3.Percent interval_jitter = 3; +} + +// [#next-free-field: 16] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions"; @@ -345,6 +370,10 @@ message Http2ProtocolOptions { // `_ for // standardized identifiers. repeated SettingsParameter custom_settings_parameters = 13; + + // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + // does not respond within the configured timeout, the connection will be aborted. 
+ KeepaliveSettings connection_keepalive = 15; } // [#not-implemented-hide:] diff --git a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto index ffff2fe3e754..e996bcbc0cf6 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto @@ -27,17 +27,20 @@ message SubstitutionFormatString { // Specify a format with command operators to form a text string. // Its details is described in :ref:`format string`. // - // .. code-block:: + // For example, setting ``text_format`` like below, // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // The following plain text will be created: + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // .. code-block:: + // generates plain text similar to: // - // upstream connect error:204:path=/foo + // .. code-block:: text // - string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + // upstream connect error:503:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_len: 1}]; // Specify a format with command operators to form a JSON string. // Its details is described in :ref:`format dictionary`. @@ -45,11 +48,12 @@ message SubstitutionFormatString { // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). // See the documentation for a specific command operator for details. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" // // The following JSON object would be created: // @@ -69,4 +73,15 @@ message SubstitutionFormatString { // empty string, so that empty values are omitted entirely. // * for ``json_format`` the keys with null values are omitted in the output structure. bool omit_empty_values = 3; + + // Specify a *content_type* field. + // If this field is not set then ``text/plain`` is used for *text_format* and + // ``application/json`` is used for *json_format*. + // + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // content_type: "text/html; charset=UTF-8" + // + string content_type = 4; } diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto index 7233d5f9561a..3572b7b0af21 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto @@ -46,7 +46,7 @@ message ClusterLoadAssignment { "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_bytes: 1}]; + string category = 1 [(validate.rules).string = {min_len: 1}]; // Percentage of traffic that should be dropped for the category. type.v3.FractionalPercent drop_percentage = 2; @@ -105,7 +105,7 @@ message ClusterLoadAssignment { // ` value if specified // in the cluster :ref:`EdsClusterConfig // `. 
- string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // List of endpoints to load balance to. repeated LocalityLbEndpoints endpoints = 2; diff --git a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto index 3f067737ec25..7140ca05afc7 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto @@ -129,14 +129,14 @@ message ClusterStats { "envoy.api.v2.endpoint.ClusterStats.DroppedRequests"; // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_bytes: 1}]; + string category = 1 [(validate.rules).string = {min_len: 1}]; // Total number of deliberately dropped requests for the category. uint64 dropped_count = 2; } // The name of the cluster. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // The eds_cluster_config service_name of the cluster. // It's possible that two clusters send the same service_name to EDS, diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto index d08b5462fd88..98035c05d45a 100644 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -48,17 +48,14 @@ message CacheConfig { // Config specific to the cache storage implementation. google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - // [#not-implemented-hide:] - // - // - // List of allowed *Vary* headers. + // List of matching rules that defines allowed *Vary* headers. // // The *vary* response header holds a list of header names that affect the // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't in + // response's *vary* header mentions any header names that aren't matched by any rules in // *allowed_vary_headers*, that response will not be cached. // // During lookup, *allowed_vary_headers* controls what request headers will be diff --git a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index 29aa8380191b..436bb6bf4616 100644 --- a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -51,6 +51,10 @@ message PerRouteConfig { // :ref:`HCM host rewrite header ` // given that the value set here would be used for DNS lookups whereas the value set in the HCM // would be used for host header forwarding which is not the desired outcome. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
string auto_host_rewrite_header = 2 [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; } diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 06b13acb2f63..c05032df21a4 100644 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -586,6 +586,10 @@ message ScopedRoutes { } // The name of the header field to extract the value from. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The element separator (e.g., ';' separates 'a;b;c;d'). diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto b/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto index eeb5d93ec689..e2e9c7da4833 100644 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto +++ b/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto @@ -24,7 +24,7 @@ message AwsIamConfig { // of the Grpc endpoint. // // Example: appmesh - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 1 [(validate.rules).string = {min_len: 1}]; // The `region `_ hosting the Grpc // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index d57b12950535..7376e2e2b716 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -33,10 +33,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Listener list collections. Entries are *Listener* resources or references. // [#not-implemented-hide:] message ListenerCollection { - udpa.core.v1.CollectionEntry entries = 1; + repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -114,6 +114,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index 6e2b82dc6b4d..0d073197cabd 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -31,7 +31,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. @@ -66,6 +66,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // @@ -299,7 +311,7 @@ message ListenerFilter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto index 753f6d733cc0..fbc65d0880f3 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -36,10 +36,10 @@ message ListenerCollection { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.ListenerCollection"; - udpa.core.v1.CollectionEntry entries = 1; + repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -119,6 +119,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto index 61babe8e622f..0c75f92b4027 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto @@ -32,7 +32,7 @@ message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. @@ -65,6 +65,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // @@ -307,7 +319,7 @@ message ListenerFilter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. diff --git a/generated_api_shadow/envoy/config/metrics/v2/stats.proto b/generated_api_shadow/envoy/config/metrics/v2/stats.proto index c6113bf5a5d3..62afcf56e4e7 100644 --- a/generated_api_shadow/envoy/config/metrics/v2/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v2/stats.proto @@ -201,7 +201,7 @@ message TagSpecifier { // // { // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\.((.+?)\.)" + // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted @@ -218,11 +218,11 @@ message TagSpecifier { // [ // { // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\.((.*?)\.)" + // "regex": "^http\\.((.*?)\\.)" // } // ] // diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto index 8b66a83a55e6..2d53df66f9c1 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/stats.proto @@ -242,7 +242,7 @@ message TagSpecifier { // // { // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\.((.+?)\.)" + // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted @@ -259,11 +259,11 @@ message TagSpecifier { // [ // { // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\.((.*?)\.)" + // "regex": "^http\\.((.*?)\\.)" // } // ] // diff --git 
a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto index 6265118cf9b8..bd37875c0bf9 100644 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto @@ -244,7 +244,7 @@ message TagSpecifier { // // { // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\.((.+?)\.)" + // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted @@ -261,11 +261,11 @@ message TagSpecifier { // [ // { // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\.((.*?)\.)" + // "regex": "^http\\.((.*?)\\.)" // } // ] // diff --git a/generated_api_shadow/envoy/config/overload/v3/overload.proto b/generated_api_shadow/envoy/config/overload/v3/overload.proto index 7c32906d142b..2be633f0ad1b 100644 --- a/generated_api_shadow/envoy/config/overload/v3/overload.proto +++ b/generated_api_shadow/envoy/config/overload/v3/overload.proto @@ -33,7 +33,7 @@ message ResourceMonitor { // ` // * :ref:`envoy.resource_monitors.injected_resource // ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Configuration for the resource monitor being instantiated. oneof config_type { @@ -67,7 +67,7 @@ message Trigger { "envoy.config.overload.v2alpha.Trigger"; // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof trigger_oneof { option (validate.required) = true; @@ -85,7 +85,7 @@ message OverloadAction { // The name of the overload action. This is just a well-known string that listeners can // use for registering callbacks. Custom overload actions should be named using reverse // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A set of triggers for this action. The state of the action is the maximum // state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index cd47154aa6f4..e203cdcf4e84 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -58,7 +58,7 @@ message VirtualHost { // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. @@ -111,7 +111,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. 
repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of HTTP headers that should be added to each response @@ -126,7 +126,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Indicates that the virtual host has a CORS policy. @@ -262,7 +262,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each request // matching this route. repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a set of headers that will be added to responses to requests @@ -277,7 +277,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Presence of the object defines whether the connection manager's tracing configuration @@ -311,7 +311,7 @@ message WeightedCluster { // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -338,7 +338,9 @@ message WeightedCluster { // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`. - repeated string request_headers_to_remove = 9; + repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Specifies a list of headers to be added to responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`. @@ -352,7 +354,9 @@ message WeightedCluster { // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`. - repeated string response_headers_to_remove = 6; + repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as @@ -441,7 +445,7 @@ message RouteMatch { // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style // upgrades. // This is the only way to match CONNECT requests for HTTP/1.1. 
For HTTP/2, - // where CONNECT requests may have a path, the path matchers will work if + // where Extended CONNECT requests may have a path, the path matchers will work if // there is a path present. // Note that CONNECT support is currently considered alpha in Envoy. // [#comment:TODO(htuch): Replace the above comment with an alpha tag. @@ -557,7 +561,7 @@ message CorsPolicy { [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; } -// [#next-free-field: 36] +// [#next-free-field: 37] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -595,7 +599,7 @@ message RouteAction { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified, all requests to the target cluster will be mirrored. // @@ -627,9 +631,8 @@ message RouteAction { // The name of the request header that will be used to obtain the hash // key. If the request header is not present, no hash will be produced. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If specified, the request header value will be rewritten and used // to produce the hash key. @@ -657,7 +660,7 @@ message RouteAction { // The name of the cookie that will be used to obtain the hash key. If the // cookie is not present and ttl below is not set, no hash will be // produced. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // If specified, a cookie with the TTL will be generated if the cookie is // not present. If the TTL is present and zero, the generated cookie will @@ -684,7 +687,7 @@ message RouteAction { // The name of the URL query parameter that will be used to obtain the hash // key. If the parameter is not present, no hash will be produced. Query // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; } message FilterState { @@ -694,7 +697,7 @@ message RouteAction { // The name of the Object in the per-request filterState, which is an // Envoy::Http::Hashable object. If there is no data associated with the key, // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } oneof policy_specifier { @@ -771,6 +774,32 @@ message RouteAction { ConnectConfig connect_config = 3; } + message MaxStreamDuration { + // Specifies the maximum duration allowed for streams on the route. If not specified, the value + // from the :ref:`max_stream_duration + // ` field in + // :ref:`HttpConnectionManager.common_http_protocol_options + // ` + // is used. If this field is set explicitly to zero, any + // HttpConnectionManager max_stream_duration timeout will be disabled for + // this route. + google.protobuf.Duration max_stream_duration = 1; + + // If present, and the request contains a `grpc-timeout header + // `_, use that value as the + // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. 
+ // If set to 0, the `grpc-timeout` header is used without modification. + google.protobuf.Duration grpc_timeout_header_max = 2; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by + // subtracting the provided duration from the header. This is useful for allowing Envoy to set + // its global timeout to be less than that of the deadline imposed by the calling client, which + // makes it more likely that Envoy will handle the timeout instead of having the call canceled + // by the client. If, after applying the offset, the resulting timeout is zero or negative, + // the stream will timeout immediately. + google.protobuf.Duration grpc_timeout_header_offset = 3; + } + reserved 12, 18, 19, 16, 22, 21; oneof cluster_specifier { @@ -778,7 +807,7 @@ message RouteAction { // Indicates the upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -789,8 +818,12 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -897,6 +930,10 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string host_rewrite_header = 29 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; @@ -977,7 +1014,9 @@ message RouteAction { // limits. By default, if the route configured rate limits, the virtual host // :ref:`rate_limits ` are not applied to the // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; + // + // This field is deprecated. Please use :ref:`vh_rate_limits ` + google.protobuf.BoolValue include_vh_rate_limits = 14 [deprecated = true]; // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to @@ -996,6 +1035,7 @@ message RouteAction { // Indicates that the route has a CORS policy. CorsPolicy cors = 17; + // Deprecated by :ref:`grpc_timeout_header_max ` // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, // or its default value (infinity) instead of @@ -1015,8 +1055,9 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23; + google.protobuf.Duration max_grpc_timeout = 23 [deprecated = true]; + // Deprecated by :ref:`grpc_timeout_header_offset `. 
// If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting // the provided duration from the header. This is useful in allowing Envoy to set its global // timeout to be less than that of the deadline imposed by the calling client, which makes it more @@ -1024,7 +1065,7 @@ message RouteAction { // The offset will only be applied if the provided grpc_timeout is greater than the offset. This // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning // infinity). - google.protobuf.Duration grpc_timeout_offset = 28; + google.protobuf.Duration grpc_timeout_offset = 28 [deprecated = true]; repeated UpgradeConfig upgrade_configs = 25; @@ -1057,6 +1098,9 @@ message RouteAction { // (e.g.: policies are not merged, most internal one becomes the enforced policy). HedgePolicy hedge_policy = 27; + // Specifies the maximum stream duration for this route. + MaxStreamDuration max_stream_duration = 36; + RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true]; } @@ -1074,7 +1118,7 @@ message RetryPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy.RetryPriority"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1087,7 +1131,7 @@ message RetryPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy.RetryHostPredicate"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1117,9 +1161,15 @@ message RetryPolicy { } message ResetHeader { + // The name of the reset header. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + // The format of the reset header. ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; } @@ -1385,7 +1435,7 @@ message Decorator { // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. - string operation = 1 [(validate.rules).string = {min_bytes: 1}]; + string operation = 1 [(validate.rules).string = {min_len: 1}]; // Whether the decorated details should be propagated to the other party. The default is true. google.protobuf.BoolValue propagate = 2; @@ -1456,7 +1506,7 @@ message VirtualCluster { // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. 
- string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(validate.rules).string = {min_len: 1}]; string hidden_envoy_deprecated_pattern = 1 [ deprecated = true, @@ -1472,7 +1522,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1523,12 +1573,11 @@ message RateLimit { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; // If set to true, Envoy skips the descriptor while calling rate limiting service // when header is not present in the request. By default it skips calling the @@ -1557,7 +1606,7 @@ message RateLimit { "envoy.api.v2.route.RateLimit.Action.GenericKey"; // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // An optional key to use in the descriptor entry. If not set it defaults // to 'generic_key' as the descriptor key. @@ -1574,7 +1623,7 @@ message RateLimit { "envoy.api.v2.route.RateLimit.Action.HeaderValueMatch"; // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a @@ -1590,14 +1639,18 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. @@ -1608,6 +1661,35 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. 
+ string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1630,7 +1712,14 @@ message RateLimit { HeaderValueMatch header_value_match = 6; // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`metadata ` field + DynamicMetaData dynamic_metadata = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Rate limit on metadata. + MetaData metadata = 8; } } @@ -1711,7 +1800,7 @@ message HeaderMatcher { // Specifies the name of the header in the request. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { @@ -1746,7 +1835,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + string prefix_match = 9 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1754,7 +1843,7 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; + string suffix_match = 10 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on whether the header value contains // the given value or not. @@ -1763,7 +1852,7 @@ message HeaderMatcher { // Examples: // // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - string contains_match = 12 [(validate.rules).string = {min_bytes: 1}]; + string contains_match = 12 [(validate.rules).string = {min_len: 1}]; string hidden_envoy_deprecated_regex_match = 5 [ deprecated = true, @@ -1790,7 +1879,7 @@ message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; + string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. diff --git a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto b/generated_api_shadow/envoy/config/route/v3/scoped_route.proto index d6611b0b1d06..b7e3aa66e07f 100644 --- a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto +++ b/generated_api_shadow/envoy/config/route/v3/scoped_route.proto @@ -108,12 +108,12 @@ message ScopedRouteConfiguration { bool on_demand = 4; // The name assigned to the routing scope. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` associated // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; // The key to match against. Key key = 3 [(validate.rules).message = {required: true}]; diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 5666ab800bd4..12c56dd834a4 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -59,7 +59,7 @@ message VirtualHost { // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. @@ -112,7 +112,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of HTTP headers that should be added to each response @@ -127,7 +127,7 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Indicates that the virtual host has a CORS policy. @@ -262,7 +262,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each request // matching this route. repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a set of headers that will be added to responses to requests @@ -277,7 +277,7 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Presence of the object defines whether the connection manager's tracing configuration @@ -311,7 +311,7 @@ message WeightedCluster { // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // An integer between 0 and :ref:`total_weight // `. 
When a request matches the route, @@ -338,7 +338,9 @@ message WeightedCluster { // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. - repeated string request_headers_to_remove = 9; + repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Specifies a list of headers to be added to responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. @@ -352,7 +354,9 @@ message WeightedCluster { // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. - repeated string response_headers_to_remove = 6; + repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as @@ -442,7 +446,7 @@ message RouteMatch { // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style // upgrades. // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, - // where CONNECT requests may have a path, the path matchers will work if + // where Extended CONNECT requests may have a path, the path matchers will work if // there is a path present. // Note that CONNECT support is currently considered alpha in Envoy. // [#comment:TODO(htuch): Replace the above comment with an alpha tag. @@ -547,7 +551,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 36] +// [#next-free-field: 37] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -589,7 +593,7 @@ message RouteAction { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified, all requests to the target cluster will be mirrored. // @@ -618,9 +622,8 @@ message RouteAction { // The name of the request header that will be used to obtain the hash // key. If the request header is not present, no hash will be produced. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If specified, the request header value will be rewritten and used // to produce the hash key. @@ -648,7 +651,7 @@ message RouteAction { // The name of the cookie that will be used to obtain the hash key. If the // cookie is not present and ttl below is not set, no hash will be // produced. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // If specified, a cookie with the TTL will be generated if the cookie is // not present. If the TTL is present and zero, the generated cookie will @@ -675,7 +678,7 @@ message RouteAction { // The name of the URL query parameter that will be used to obtain the hash // key. 
If the parameter is not present, no hash will be produced. Query // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; } message FilterState { @@ -685,7 +688,7 @@ message RouteAction { // The name of the Object in the per-request filterState, which is an // Envoy::Http::Hashable object. If there is no data associated with the key, // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } oneof policy_specifier { @@ -765,6 +768,35 @@ message RouteAction { ConnectConfig connect_config = 3; } + message MaxStreamDuration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.MaxStreamDuration"; + + // Specifies the maximum duration allowed for streams on the route. If not specified, the value + // from the :ref:`max_stream_duration + // ` field in + // :ref:`HttpConnectionManager.common_http_protocol_options + // ` + // is used. If this field is set explicitly to zero, any + // HttpConnectionManager max_stream_duration timeout will be disabled for + // this route. + google.protobuf.Duration max_stream_duration = 1; + + // If present, and the request contains a `grpc-timeout header + // `_, use that value as the + // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. + // If set to 0, the `grpc-timeout` header is used without modification. + google.protobuf.Duration grpc_timeout_header_max = 2; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by + // subtracting the provided duration from the header. This is useful for allowing Envoy to set + // its global timeout to be less than that of the deadline imposed by the calling client, which + // makes it more likely that Envoy will handle the timeout instead of having the call canceled + // by the client. If, after applying the offset, the resulting timeout is zero or negative, + // the stream will timeout immediately. + google.protobuf.Duration grpc_timeout_header_offset = 3; + } + reserved 12, 18, 19, 16, 22, 21, 10; reserved "request_mirror_policy"; @@ -774,7 +806,7 @@ message RouteAction { // Indicates the upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -785,8 +817,12 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -893,6 +929,10 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. 
+ // + // .. note:: + // + // If the header appears multiple times only the first value is used. string host_rewrite_header = 29 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; @@ -973,7 +1013,9 @@ message RouteAction { // limits. By default, if the route configured rate limits, the virtual host // :ref:`rate_limits ` are not applied to the // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; + // + // This field is deprecated. Please use :ref:`vh_rate_limits ` + google.protobuf.BoolValue hidden_envoy_deprecated_include_vh_rate_limits = 14 [deprecated = true]; // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to @@ -992,6 +1034,7 @@ message RouteAction { // Indicates that the route has a CORS policy. CorsPolicy cors = 17; + // Deprecated by :ref:`grpc_timeout_header_max ` // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, // or its default value (infinity) instead of @@ -1011,8 +1054,9 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23; + google.protobuf.Duration hidden_envoy_deprecated_max_grpc_timeout = 23 [deprecated = true]; + // Deprecated by :ref:`grpc_timeout_header_offset `. // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting // the provided duration from the header. This is useful in allowing Envoy to set its global // timeout to be less than that of the deadline imposed by the calling client, which makes it more @@ -1020,7 +1064,7 @@ message RouteAction { // The offset will only be applied if the provided grpc_timeout is greater than the offset. This // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning // infinity). - google.protobuf.Duration grpc_timeout_offset = 28; + google.protobuf.Duration hidden_envoy_deprecated_grpc_timeout_offset = 28 [deprecated = true]; repeated UpgradeConfig upgrade_configs = 25; @@ -1053,6 +1097,9 @@ message RouteAction { // it'll take precedence over the virtual host level hedge policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). HedgePolicy hedge_policy = 27; + + // Specifies the maximum stream duration for this route. + MaxStreamDuration max_stream_duration = 36; } // HTTP retry :ref:`architecture overview `. @@ -1073,7 +1120,7 @@ message RetryPolicy { reserved "config"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1088,7 +1135,7 @@ message RetryPolicy { reserved "config"; - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; oneof config_type { google.protobuf.Any typed_config = 3; @@ -1119,9 +1166,15 @@ message RetryPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RetryPolicy.ResetHeader"; + // The name of the reset header. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + // The format of the reset header. ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; } @@ -1390,7 +1443,7 @@ message Decorator { // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. - string operation = 1 [(validate.rules).string = {min_bytes: 1}]; + string operation = 1 [(validate.rules).string = {min_len: 1}]; // Whether the decorated details should be propagated to the other party. The default is true. google.protobuf.BoolValue propagate = 2; @@ -1466,14 +1519,14 @@ message VirtualCluster { // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(validate.rules).string = {min_len: 1}]; } // Global rate limiting :ref:`architecture overview `. message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1524,12 +1577,11 @@ message RateLimit { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; // If set to true, Envoy skips the descriptor while calling rate limiting service // when header is not present in the request. By default it skips calling the @@ -1558,7 +1610,7 @@ message RateLimit { "envoy.config.route.v3.RateLimit.Action.GenericKey"; // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // An optional key to use in the descriptor entry. If not set it defaults // to 'generic_key' as the descriptor key. @@ -1575,7 +1627,7 @@ message RateLimit { "envoy.config.route.v3.RateLimit.Action.HeaderValueMatch"; // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a @@ -1591,17 +1643,21 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. 
code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. @@ -1612,6 +1668,38 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.MetaData"; + + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1634,7 +1722,14 @@ message RateLimit { HeaderValueMatch header_value_match = 6; // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`metadata ` field + DynamicMetaData hidden_envoy_deprecated_dynamic_metadata = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Rate limit on metadata. + MetaData metadata = 8; } } @@ -1724,7 +1819,7 @@ message HeaderMatcher { // Specifies the name of the header in the request. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { @@ -1759,7 +1854,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + string prefix_match = 9 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1767,7 +1862,7 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; + string suffix_match = 10 [(validate.rules).string = {min_len: 1}]; // If specified, header match will be performed based on whether the header value contains // the given value or not. 
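To illustrate the new *metadata* rate limit action above (the replacement for the deprecated *dynamic_metadata* action), a minimal sketch of a rate limit that builds a descriptor entry from dynamic metadata. It assumes the usual *MetadataKey* shape of a namespace (*key*) plus a *path* of segment keys; the namespace, path, and descriptor values are hypothetical.

.. code-block:: yaml

    rate_limits:
    - actions:
      - metadata:
          # Key emitted for this descriptor entry.
          descriptor_key: client_tier
          # Where to look in the metadata: namespace plus a path of segment keys.
          metadata_key:
            key: example.filter.namespace
            path:
            - key: tier
          # Used when the metadata key is absent; if unset, no descriptor is generated.
          default_value: standard
          # DYNAMIC queries dynamic metadata, ROUTE_ENTRY queries route entry metadata.
          source: DYNAMIC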
@@ -1776,7 +1871,7 @@ message HeaderMatcher { // Examples: // // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - string contains_match = 12 [(validate.rules).string = {min_bytes: 1}]; + string contains_match = 12 [(validate.rules).string = {min_len: 1}]; } // If specified, the match result will be inverted before checking. Defaults to false. @@ -1801,7 +1896,7 @@ message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; + string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. diff --git a/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto b/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto index 33fc756a60a4..0704ceacbbac 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto @@ -108,12 +108,12 @@ message ScopedRouteConfiguration { bool on_demand = 4; // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v4alpha.DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; // The key to match against. Key key = 3 [(validate.rules).message = {required: true}]; diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto index 42783115f871..a8324a6ebc1a 100644 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ b/generated_api_shadow/envoy/config/tap/v3/common.proto @@ -261,7 +261,7 @@ message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto index d18ba1db94c1..05b8fe0ac0dd 100644 --- a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto @@ -262,7 +262,7 @@ message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). 
- string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC diff --git a/generated_api_shadow/envoy/config/trace/v3/datadog.proto b/generated_api_shadow/envoy/config/trace/v3/datadog.proto index f1fe3e666125..c101ab2f03c9 100644 --- a/generated_api_shadow/envoy/config/trace/v3/datadog.proto +++ b/generated_api_shadow/envoy/config/trace/v3/datadog.proto @@ -22,8 +22,8 @@ message DatadogConfig { "envoy.config.trace.v2.DatadogConfig"; // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The name used for the service when traces are generated by envoy. - string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 2 [(validate.rules).string = {min_len: 1}]; } diff --git a/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto b/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto index fb372da8c52a..c28106871542 100644 --- a/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto +++ b/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto @@ -28,7 +28,7 @@ message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. - string library = 1 [(validate.rules).string = {min_bytes: 1}]; + string library = 1 [(validate.rules).string = {min_len: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. diff --git a/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto b/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto index 6470a70de43d..23d0efc47a38 100644 --- a/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto +++ b/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto @@ -48,7 +48,7 @@ message Tracing { // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. // See the trace drivers for examples: diff --git a/generated_api_shadow/envoy/config/trace/v3/lightstep.proto b/generated_api_shadow/envoy/config/trace/v3/lightstep.proto index 0e0b60b5bddb..0b7be7c4e609 100644 --- a/generated_api_shadow/envoy/config/trace/v3/lightstep.proto +++ b/generated_api_shadow/envoy/config/trace/v3/lightstep.proto @@ -38,11 +38,11 @@ message LightstepConfig { } // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // File containing the access token to the `LightStep // `_ API. - string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + string access_token_file = 2 [(validate.rules).string = {min_len: 1}]; // Propagation modes to use by LightStep's tracer. 
repeated PropagationMode propagation_modes = 3 diff --git a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto b/generated_api_shadow/envoy/config/trace/v3/zipkin.proto index 10cf6d4ec3c4..b31f697b16ca 100644 --- a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto +++ b/generated_api_shadow/envoy/config/trace/v3/zipkin.proto @@ -49,12 +49,12 @@ message ZipkinConfig { // The cluster manager cluster that hosts the Zipkin collectors. Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster // resources `. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When // using a standard Zipkin installation, the API endpoint is typically // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new // trace instance. The default value is false, which will result in a 64 bit trace id being used. diff --git a/generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto b/generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto index 663886a97bb4..ea918ec2bff5 100644 --- a/generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto +++ b/generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto @@ -52,7 +52,7 @@ message Tracing { // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. // See the trace drivers for examples: diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto index c16b5be1ff0e..af7edab5836a 100644 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 23] +// [#next-free-field: 24] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -272,6 +272,9 @@ message ResponseFlags { // Indicates that a filter configuration is not available. bool no_filter_config_found = 22; + + // Indicates that request or connection exceeded the downstream connection duration. + bool duration_timeout = 23; } // Properties of a negotiated TLS connection. diff --git a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto b/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto index ae1ad4c94d17..f87cd1582b09 100644 --- a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto +++ b/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto @@ -88,10 +88,10 @@ message OutlierDetectionEvent { google.protobuf.UInt64Value secs_since_last_action = 3; // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 4 [(validate.rules).string = {min_len: 1}]; // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. 
- string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; + string upstream_url = 5 [(validate.rules).string = {min_len: 1}]; // The action that took place. Action action = 6 [(validate.rules).enum = {defined_only: true}]; diff --git a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto index 88b195b92b3d..2b0f9d888f46 100644 --- a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto +++ b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto @@ -40,7 +40,7 @@ message HealthCheckEvent { config.core.v3.Address host = 2; - string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; + string cluster_name = 3 [(validate.rules).string = {min_len: 1}]; oneof event { option (validate.required) = true; diff --git a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto index 354ad69fca66..4398403b7ed0 100644 --- a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto +++ b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto @@ -86,7 +86,8 @@ message DnsTable { // This message defines a service selection record returned for a service query in a domain message DnsService { // The name of the service without the protocol or domain name - string service_name = 1; + string service_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The service protocol. This can be specified as a string or the numeric value of the protocol DnsServiceProtocol protocol = 2; diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto index 140ca4489c20..f142cfa7bf8c 100644 --- a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto +++ b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto @@ -95,7 +95,8 @@ message DnsTable { "envoy.data.dns.v3.DnsTable.DnsService"; // The name of the service without the protocol or domain name - string service_name = 1; + string service_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The service protocol. This can be specified as a string or the numeric value of the protocol DnsServiceProtocol protocol = 2; diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto index de33623c207f..f17a2e7f4ca9 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto @@ -27,7 +27,7 @@ message FileAccessLog { "envoy.config.accesslog.v2.FileAccessLog"; // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; oneof access_log_format { // Access log :ref:`format string`. diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto index c2a2c753f5bb..afdcfd9e5011 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto @@ -27,7 +27,7 @@ message FileAccessLog { "envoy.extensions.access_loggers.file.v3.FileAccessLog"; // A path to a local file to which to write the access log entries. 
- string path = 1 [(validate.rules).string = {min_bytes: 1}]; + string path = 1 [(validate.rules).string = {min_len: 1}]; oneof access_log_format { // Access log :ref:`format string`. diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto index 4996a877a9c6..968dfbeec016 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -62,7 +62,7 @@ message CommonGrpcAccessLogConfig { // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier // `. This allows the // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string log_name = 1 [(validate.rules).string = {min_len: 1}]; // The gRPC service for the access log service. config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto index cd9db5906436..413743a203f0 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -12,9 +12,12 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [[#not-implemented-hide:] +// [#protodoc-title: Wasm access log] +// [#extension: envoy.access_loggers.wasm] + // Custom configuration for an :ref:`AccessLog ` -// that calls into a WASM VM. +// that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm* +// AccessLog. message WasmAccessLog { envoy.extensions.wasm.v3.PluginConfig config = 1; } diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 79cd583486ac..5579cc16bd97 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -37,7 +37,7 @@ message DnsCacheConfig { // configurations with the same name *must* otherwise have the same settings when referenced // from different configuration components. Configuration will fail to load if this is not // the case. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The DNS lookup family to use during resolution. // @@ -98,5 +98,8 @@ message DnsCacheConfig { // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. + // Setting this value causes failure if the + // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during + // server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 8; } diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 9255deb4b64d..30efa6026218 100644 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -69,10 +69,10 @@ message RateLimitDescriptor { "envoy.api.v2.ratelimit.RateLimitDescriptor.Entry"; // Descriptor key. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; // Descriptor value. - string value = 2 [(validate.rules).string = {min_bytes: 1}]; + string value = 2 [(validate.rules).string = {min_len: 1}]; } // Override rate limit to apply to this descriptor instead of the limit diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto index 68e80dad76b4..aa7ae8264757 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto @@ -64,5 +64,5 @@ message AdminConfig { // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; } diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto index 536f13d049c3..efa7744e357f 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto @@ -65,5 +65,5 @@ message AdminConfig { // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string config_id = 1 [(validate.rules).string = {min_len: 1}]; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 8dd851f4020a..c524e022e859 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -51,10 +51,11 @@ message GradientControllerConfig { "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig." "MinimumRTTCalculationParams"; - // The time interval between recalculating the minimum request round-trip time. + // The time interval between recalculating the minimum request round-trip time. Has to be + // positive. 
google.protobuf.Duration interval = 1 [(validate.rules).duration = { required: true - gt {} + gte {nanos: 1000000} }]; // The number of requests to aggregate/sample during the minRTT recalculation window before diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto index 6f01c88885f4..c77d93762099 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto @@ -23,6 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Admission Control] // [#extension: envoy.filters.http.admission_control] +// [#next-free-field: 6] message AdmissionControl { // Default method of specifying what constitutes a successful request. All status codes that // indicate a successful request must be explicitly specified if not relying on the default @@ -75,16 +76,23 @@ message AdmissionControl { } // The sliding time window over which the success rate is calculated. The window is rounded to the - // nearest second. Defaults to 120s. + // nearest second. Defaults to 30s. google.protobuf.Duration sampling_window = 3; // Rejection probability is defined by the formula:: // - // max(0, (rq_count - aggression_coefficient * rq_success_count) / (rq_count + 1)) + // max(0, (rq_count - rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression) // - // The coefficient dictates how aggressively the admission controller will throttle requests as - // the success rate drops. Lower values will cause throttling to kick in at higher success rates - // and result in more aggressive throttling. Any values less than 1.0, will be set to 1.0. If the - // message is unspecified, the coefficient is 2.0. - config.core.v3.RuntimeDouble aggression_coefficient = 4; + // The aggression dictates how heavily the admission controller will throttle requests upon SR + // dropping at or below the threshold. A value of 1 will result in a linear increase in + // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. If the + // message is unspecified, the aggression is 1.0. See `the admission control documentation + // `_ + // for a diagram illustrating this. + config.core.v3.RuntimeDouble aggression = 4; + + // Dictates the success rate at which the rejection probability is non-zero. As success rate drops + // below this threshold, rejection probability will increase. Any success rate above the threshold + // results in a rejection probability of 0. Defaults to 95%. + config.core.v3.RuntimePercent sr_threshold = 5; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto index b80bc1b82108..6a516b430028 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto @@ -25,13 +25,13 @@ message AwsRequestSigning { // of the HTTP endpoint. // // Example: s3 - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 1 [(validate.rules).string = {min_len: 1}]; // The `region `_ hosting the HTTP // endpoint. 
// // Example: us-west-2 - string region = 2 [(validate.rules).string = {min_bytes: 1}]; + string region = 2 [(validate.rules).string = {min_len: 1}]; // Indicates that before signing headers, the host header will be swapped with // this value. If not set or empty, the original host header value diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto index f78b1d24ac2c..9260abe94a96 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -52,17 +52,14 @@ message CacheConfig { // Config specific to the cache storage implementation. google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - // [#not-implemented-hide:] - // - // - // List of allowed *Vary* headers. + // List of matching rules that defines allowed *Vary* headers. // // The *vary* response header holds a list of header names that affect the // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't in + // response's *vary* header mentions any header names that aren't matched by any rules in // *allowed_vary_headers*, that response will not be cached. // // During lookup, *allowed_vary_headers* controls what request headers will be diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto index 19921edb0310..ad9bb4c639a4 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -52,17 +52,14 @@ message CacheConfig { // Config specific to the cache storage implementation. google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - // [#not-implemented-hide:] - // - // - // List of allowed *Vary* headers. + // List of matching rules that defines allowed *Vary* headers. // // The *vary* response header holds a list of header names that affect the // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't in + // response's *vary* header mentions any header names that aren't matched by any rules in // *allowed_vary_headers*, that response will not be cached. // // During lookup, *allowed_vary_headers* controls what request headers will be diff --git a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto new file mode 100644 index 000000000000..7952f9b3d448 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.cdn_loop.v3alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha"; +option java_outer_classname = "CdnLoopProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP CDN-Loop Filter] +// [#extension: envoy.filters.http.cdn_loop] + +// CDN-Loop Header filter config. See the :ref:`configuration overview +// ` for more information. +message CdnLoopConfig { + // The CDN identifier to use for loop checks and to append to the + // CDN-Loop header. + // + // RFC 8586 calls this the cdn-id. The cdn-id can either be a + // pseudonym or hostname the CDN is in control of. + // + // cdn_id must not be empty. + string cdn_id = 1 [(validate.rules).string = {min_len: 1}]; + + // The maximum allowed count of cdn_id in the downstream CDN-Loop + // request header. + // + // The default of 0 means a request can transit the CdnLoopFilter + // once. A value of 1 means that a request can transit the + // CdnLoopFilter twice and so on. + uint32 max_allowed_occurrences = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto index b8a2525dbf54..70dd21a324b3 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto @@ -55,6 +55,10 @@ message PerRouteConfig { // :ref:`HCM host rewrite header ` // given that the value set here would be used for DNS lookups whereas the value set in the HCM // would be used for host header forwarding which is not the desired outcome. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
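A minimal sketch of the new CDN-Loop filter added above, using the *envoy.filters.http.cdn_loop* extension name declared in the proto; the *cdn_id* value is hypothetical.

.. code-block:: yaml

    http_filters:
    - name: envoy.filters.http.cdn_loop
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig
        # The cdn-id appended to, and counted in, the CDN-Loop request header.
        cdn_id: example-cdn.net
        # 0 (the default) allows one transit of this filter; 1 allows two, and so on.
        max_allowed_occurrences: 0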
string host_rewrite_header = 2; } } diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 0c99cb6997f8..b1ab3989f20e 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_uri.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; @@ -23,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 13] +// [#next-free-field: 15] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -99,6 +100,10 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. + type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14; + // Specifies whether to deny the requests, when the filter is disabled. // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to determine whether to deny request for @@ -114,6 +119,23 @@ message ExtAuthz { // :ref:`certificate`. bool include_peer_certificate = 10; + // Optional additional prefix to use when emitting statistics. This allows to distinguish + // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example: + // + // .. code-block:: yaml + // + // http_filters: + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. + // + string stat_prefix = 13; + bool hidden_envoy_deprecated_use_alpha = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } @@ -133,6 +155,13 @@ message BufferSettings { // The authorization request will be dispatched and no 413 HTTP error will be returned by the // filter. bool allow_partial_message = 2; + + // If true, the body sent to the external authorization service is set with raw bytes, it sets + // the :ref:`raw_body` + // field of HTTP request attribute context. Otherwise, :ref:` + // body` will be filled + // with UTF-8 string request body. + bool pack_as_bytes = 3; } // HttpService is used for raw HTTP communication between the filter and the authorization service. @@ -242,11 +271,7 @@ message ExtAuthzPerRoute { } } -// Extra settings for the check request. You can use this to provide extra context for the -// external authorization server on specific virtual hosts \ routes. 
For example, adding a context -// extension on the virtual host level can give the ext-authz server information on what virtual -// host is used without needing to parse the host header. If CheckSettings is specified in multiple -// per-filter-configs, they will be merged in order, and the result will be used. +// Extra settings for the check request. message CheckSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.CheckSettings"; @@ -254,6 +279,12 @@ message CheckSettings { // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` // + // You can use this to provide extra context for the external authorization server on specific + // virtual hosts/routes. For example, adding a context extension on the virtual host level can + // give the ext-authz server information on what virtual host is used without needing to parse the + // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged + // in order, and the result will be used. + // // Merge semantics for this field are such that keys from more specific configs override. // // .. note:: @@ -261,4 +292,8 @@ message CheckSettings { // These settings are only applied to a filter configured with a // :ref:`grpc_service`. map context_extensions = 1; + + // When set to true, disable the configured :ref:`with_request_body + // ` for a route. + bool disable_request_body_buffering = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index 05ced9299258..ec8854f5d1be 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/http_status.proto"; @@ -23,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 13] +// [#next-free-field: 15] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; @@ -103,6 +104,10 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. + type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14; + // Specifies whether to deny the requests, when the filter is disabled. // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to determine whether to deny request for @@ -117,6 +122,23 @@ message ExtAuthz { // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 10; + + // Optional additional prefix to use when emitting statistics. 
This allows to distinguish + // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example: + // + // .. code-block:: yaml + // + // http_filters: + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. + // - name: envoy.filters.http.ext_authz + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. + // + string stat_prefix = 13; } // Configuration for buffering the request data. @@ -134,6 +156,13 @@ message BufferSettings { // The authorization request will be dispatched and no 413 HTTP error will be returned by the // filter. bool allow_partial_message = 2; + + // If true, the body sent to the external authorization service is set with raw bytes, it sets + // the :ref:`raw_body` + // field of HTTP request attribute context. Otherwise, :ref:` + // body` will be filled + // with UTF-8 string request body. + bool pack_as_bytes = 3; } // HttpService is used for raw HTTP communication between the filter and the authorization service. @@ -243,11 +272,7 @@ message ExtAuthzPerRoute { } } -// Extra settings for the check request. You can use this to provide extra context for the -// external authorization server on specific virtual hosts \ routes. For example, adding a context -// extension on the virtual host level can give the ext-authz server information on what virtual -// host is used without needing to parse the host header. If CheckSettings is specified in multiple -// per-filter-configs, they will be merged in order, and the result will be used. +// Extra settings for the check request. message CheckSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; @@ -255,6 +280,12 @@ message CheckSettings { // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` // + // You can use this to provide extra context for the external authorization server on specific + // virtual hosts/routes. For example, adding a context extension on the virtual host level can + // give the ext-authz server information on what virtual host is used without needing to parse the + // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged + // in order, and the result will be used. + // // Merge semantics for this field are such that keys from more specific configs override. // // .. note:: @@ -262,4 +293,8 @@ message CheckSettings { // These settings are only applied to a filter configured with a // :ref:`grpc_service`. map context_extensions = 1; + + // When set to true, disable the configured :ref:`with_request_body + // ` for a route. 
+ bool disable_request_body_buffering = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto index 85d7cbe1cecd..b2c4ad2ee681 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto @@ -23,7 +23,7 @@ message FilterConfig { // The content-type to pass to the upstream when the gRPC bridge filter is applied. // The filter will also validate that the upstream responds with the same content type. - string content_type = 1 [(validate.rules).string = {min_bytes: 1}]; + string content_type = 1 [(validate.rules).string = {min_len: 1}]; // If true, Envoy will assume that the upstream doesn't understand gRPC frames and // strip the gRPC frame from the request, and add it back in to the response. This will diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index ace7c535069a..5e399790a7ec 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -56,7 +56,7 @@ message Config { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; // The value to pair with the given key. // diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index 0d7c814584dc..5b06f1e78556 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -55,7 +55,7 @@ message Config { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; oneof value_type { // The value to pair with the given key. diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 39fe6187f64f..5588961bf512 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -62,7 +62,7 @@ message JwtProvider { // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // - string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + string issuer = 1 [(validate.rules).string = {min_len: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, @@ -220,7 +220,7 @@ message JwtHeader { // The HTTP header name. 
string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the @@ -414,7 +414,7 @@ message FilterStateRule { "envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule"; // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto index 302cf7253dde..12d4fa5fe1d3 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -62,7 +62,7 @@ message JwtProvider { // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // - string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + string issuer = 1 [(validate.rules).string = {min_len: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, @@ -220,7 +220,7 @@ message JwtHeader { // The HTTP header name. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the @@ -414,7 +414,7 @@ message FilterStateRule { "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. diff --git a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD new file mode 100644 index 000000000000..ad2fc9a9a84f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto new file mode 100644 index 000000000000..94f21edd3eed --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.local_ratelimit.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/v3/http_status.proto"; +import "envoy/type/v3/token_bucket.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3"; +option java_outer_classname = "LocalRateLimitProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Local Rate limit] +// Local Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.http.local_ratelimit] + +// [#next-free-field: 7] +message LocalRateLimit { + // The human readable prefix to use when emitting stats. + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // This field allows for a custom HTTP response status code to the downstream client when + // the request has been rate limited. + // Defaults to 429 (TooManyRequests). + // + // .. note:: + // If this is set to < 400, 429 will be used instead. + type.v3.HttpStatus status = 2; + + // The token bucket configuration to use for rate limiting requests that are processed by this + // filter. Each request processed by the filter consumes a single token. If the token is available, + // the request will be allowed. If no tokens are available, the request will receive the configured + // rate limit status. + // + // .. note:: + // It's fine for the token bucket to be unset for the global configuration since the rate limit + // can be applied at a the virtual host or route level. Thus, the token bucket must be set + // for the per route configuration otherwise the config will be rejected. + // + // .. note:: + // When using per route configuration, the bucket becomes unique to that route. + // + // .. note:: + // In the current implementation the token bucket's :ref:`fill_interval + // ` must be >= 50ms to avoid too aggressive + // refills. + type.v3.TokenBucket token_bucket = 3; + + // If set, this will enable -- but not necessarily enforce -- the rate limit for the given + // fraction of requests. + // Defaults to 0% of requests for safety. + config.core.v3.RuntimeFractionalPercent filter_enabled = 4; + + // If set, this will enforce the rate limit decisions for the given fraction of requests. + // + // Note: this only applies to the fraction of enabled requests. + // + // Defaults to 0% of requests for safety. + config.core.v3.RuntimeFractionalPercent filter_enforced = 5; + + // Specifies a list of HTTP headers that should be added to each response for requests that + // have been rate limited. 
+ repeated config.core.v3.HeaderValueOption response_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 10}]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto index fc348c2365cd..1636c01ab1c7 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto @@ -25,7 +25,7 @@ message Lua { // further loads code from disk if desired. Note that if JSON configuration is used, the code must // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. - string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; + string inline_code = 1 [(validate.rules).string = {min_len: 1}]; // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute // `. The Lua source codes can be diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto index 53678996de6c..e4be64167ed2 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto @@ -26,7 +26,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message OAuth2Credentials { // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. - string client_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string client_id = 1 [(validate.rules).string = {min_len: 1}]; // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. transport_sockets.tls.v3.SdsSecretConfig token_secret = 2 @@ -50,7 +50,7 @@ message OAuth2Config { config.core.v3.HttpUri token_endpoint = 1; // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Credentials used for OAuth. OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; @@ -60,7 +60,7 @@ message OAuth2Config { // documentation on :ref:`custom request headers `. // // This URI should not contain any query parameters. - string redirect_uri = 4 [(validate.rules).string = {min_bytes: 1}]; + string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. type.matcher.v3.PathMatcher redirect_path_matcher = 5 diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto index 547a3060e16b..ee51e1f96099 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto @@ -29,7 +29,7 @@ message OAuth2Credentials { "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials"; // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. 
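As a usage sketch for the *envoy.filters.http.local_ratelimit* filter defined above: a configuration that admits 100 requests per second and both enables and enforces the limit for all traffic. It assumes the standard *TokenBucket* (*max_tokens*, *tokens_per_fill*, *fill_interval*) and *RuntimeFractionalPercent* (*runtime_key*, *default_value*) layouts; the stat prefix and runtime keys are hypothetical.

.. code-block:: yaml

    http_filters:
    - name: envoy.filters.http.local_ratelimit
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit
        stat_prefix: http_local_rate_limiter
        # 100 tokens refilled once per second; fill_interval must be >= 50ms.
        token_bucket:
          max_tokens: 100
          tokens_per_fill: 100
          fill_interval: 1s
        # Both fractions default to 0% for safety, so enable and enforce explicitly.
        filter_enabled:
          runtime_key: local_rate_limit_enabled
          default_value:
            numerator: 100
            denominator: HUNDRED
        filter_enforced:
          runtime_key: local_rate_limit_enforced
          default_value:
            numerator: 100
            denominator: HUNDRED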
- string client_id = 1 [(validate.rules).string = {min_bytes: 1}]; + string client_id = 1 [(validate.rules).string = {min_len: 1}]; // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2 @@ -56,7 +56,7 @@ message OAuth2Config { config.core.v4alpha.HttpUri token_endpoint = 1; // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Credentials used for OAuth. OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; @@ -66,7 +66,7 @@ message OAuth2Config { // documentation on :ref:`custom request headers `. // // This URI should not contain any query parameters. - string redirect_uri = 4 [(validate.rules).string = {min_bytes: 1}]; + string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5 diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 781fddc1939c..bc58e7f9b2e1 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 9] +// [#next-free-field: 10] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; @@ -34,7 +34,7 @@ message RateLimit { } // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string = {min_bytes: 1}]; + string domain = 1 [(validate.rules).string = {min_len: 1}]; // Specifies the rate limit configurations to be applied with the same // stage number. If not set, the default stage number is 0. @@ -60,7 +60,6 @@ message RateLimit { // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. - // Defaults to false. bool failure_mode_deny = 5; // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead @@ -99,4 +98,25 @@ message RateLimit { // Disabled by default. XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 [(validate.rules).enum = {defined_only: true}]; + + // Disables emitting the :ref:`x-envoy-ratelimited` header + // in case of rate limiting (i.e. 429 responses). + // Having this header not present potentially makes the request retriable. + bool disable_x_envoy_ratelimited_header = 9; +} + +message RateLimitPerRoute { + enum VhRateLimitsOptions { + // Use the virtual host rate limits unless the route has a rate limit policy. + OVERRIDE = 0; + + // Use the virtual host rate limits even if the route has a rate limit policy. + INCLUDE = 1; + + // Ignore the virtual host rate limits even if the route does not have a rate limit policy. 
+ IGNORE = 2; + } + + // Specifies if the rate limit filter should include the virtual host rate limits. + VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto b/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto index 0ea335a414fa..f9bc9cceceb9 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto @@ -24,7 +24,7 @@ message Squash { "envoy.config.filter.http.squash.v2.Squash"; // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // When the filter requests the Squash server to create a DebugAttachment, it will use this // structure as template for the body of the request. It can contain reference to environment diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto index a812992a5b84..55eba141f45f 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -13,7 +13,10 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [[#not-implemented-hide:] +// [#protodoc-title: Wasm] +// [#extension: envoy.filters.http.wasm] +// Wasm :ref:`configuration overview `. + message Wasm { // General Plugin configuration. envoy.extensions.wasm.v3.PluginConfig config = 1; diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 8fd0c63d0c82..fb8047d391e9 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -24,7 +24,7 @@ message ProxyProtocol { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; } // A Rule defines what metadata to apply when a header is present or missing. diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index b3af267a77ad..2ed14c7f0e23 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -30,11 +30,11 @@ message ClientSSLAuth { // of principals. The service must support the expected :ref:`REST API // `. string auth_api_cluster = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The prefix to use when emitting :ref:`statistics // `. 
- string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; // Time in milliseconds between principal refreshes from the // authentication service. Default is 60000 (60s). The actual fetch time diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto index 749708880d71..646f053ca9b6 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto @@ -37,7 +37,7 @@ message DubboProxy { "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy"; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Configure the protocol used. ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; @@ -62,7 +62,7 @@ message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto index 4894c7693fd7..30499c27f6f0 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto @@ -37,7 +37,7 @@ message DubboProxy { "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Configure the protocol used. ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; @@ -62,7 +62,7 @@ message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. 
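The rate limit filter hunks above add a ``disable_x_envoy_ratelimited_header`` field and a new ``RateLimitPerRoute`` message. As a purely illustrative aid (not part of the patch), a hedged YAML sketch of how these fields could be wired up is shown here; the filter name, the ``typed_per_filter_config`` placement and the type URLs are assumptions derived from the package names in this diff and common Envoy conventions.

.. code-block:: yaml

  # Hypothetical listener filter entry using the new field from this diff.
  http_filters:
  - name: envoy.filters.http.ratelimit
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
      domain: edge_proxy                        # must be non-empty (min_len: 1)
      disable_x_envoy_ratelimited_header: true  # new field 9: omit x-envoy-ratelimited on 429s

  # Hypothetical per-route override using the new RateLimitPerRoute message.
  typed_per_filter_config:
    envoy.filters.http.ratelimit:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimitPerRoute
      vh_rate_limits: INCLUDE                   # also apply the virtual host rate limits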
diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD index a4e298b42619..a5c5b57b7227 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD @@ -8,6 +8,7 @@ api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/ext_authz/v2:pkg", + "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index 50161f1cb92b..78f4167ccc33 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -4,6 +4,7 @@ package envoy.extensions.filters.network.ext_authz.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -23,13 +24,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. -// [#next-free-field: 6] +// [#next-free-field: 7] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The external authorization gRPC service configuration. // The default timeout is set to 200ms by this filter. @@ -51,4 +52,8 @@ message ExtAuthz { // version of Check{Request,Response} used on the wire. config.core.v3.ApiVersion transport_api_version = 5 [(validate.rules).enum = {defined_only: true}]; + + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. + type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD new file mode 100644 index 000000000000..6d146b1c64d1 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/network/ext_authz/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto new file mode 100644 index 000000000000..f877a3ed8502 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.ext_authz.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha"; +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Network External Authorization ] +// The network layer external authorization service configuration +// :ref:`configuration overview `. +// [#extension: envoy.filters.network.ext_authz] + +// External Authorization filter calls out to an external service over the +// gRPC Authorization API defined by +// :ref:`CheckRequest `. +// A failed check will cause this filter to close the TCP connection. +// [#next-free-field: 7] +message ExtAuthz { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.ext_authz.v3.ExtAuthz"; + + // The prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // The external authorization gRPC service configuration. + // The default timeout is set to 200ms by this filter. + config.core.v4alpha.GrpcService grpc_service = 2; + + // The filter's behaviour in case the external authorization service does + // not respond back. When it is set to true, Envoy will also allow traffic in case of + // communication failure between authorization service and the proxy. + // Defaults to false. + bool failure_mode_allow = 3; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 4; + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of Check{Request,Response} used on the wire. + config.core.v4alpha.ApiVersion transport_api_version = 5 + [(validate.rules).enum = {defined_only: true}]; + + // Specifies if the filter is enabled with metadata matcher. + // If this field is not specified, the filter will be enabled for all requests. 
+ type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index d3bd19a7abc0..d26ce2ffee96 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -254,7 +254,7 @@ message HttpConnectionManager { // The human readable prefix to use when emitting statistics for the // connection manager. See the :ref:`statistics documentation ` for // more information. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; @@ -576,27 +576,29 @@ message LocalReplyConfig { // The configuration to form response body from the :ref:`command operators ` // and to specify response content type as one of: plain/text or application/json. // - // Example one: plain/text body_format. + // Example one: "plain/text" ``body_format``. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // The following response body in `plain/text` format will be generated for a request with + // The following response body in "plain/text" format will be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. // - // .. code-block:: + // .. code-block:: text // // upstream connect error:503:path=/foo // - // Example two: application/json body_format. + // Example two: "application/json" ``body_format``. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% - // path: $REQ(:path)% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // path: "%REQ(:path)%" // // The following response body in "application/json" format would be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. @@ -721,14 +723,18 @@ message ScopedRoutes { // If an element contains no separator, the whole element is parsed as key and the // fragment value is an empty string. // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_bytes: 1}]; + string separator = 1 [(validate.rules).string = {min_len: 1}]; // The key to match on. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; } // The name of the header field to extract the value from. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string name = 1 [(validate.rules).string = {min_len: 1}]; // The element separator (e.g., ';' separates 'a;b;c;d'). // Default: empty string. This causes the entirety of the header field to be extracted. 
@@ -762,7 +768,7 @@ message ScopedRoutes { } // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The algorithm to use for constructing a scope key for each request. ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; @@ -810,15 +816,15 @@ message HttpFilter { // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. oneof config_type { + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. google.protobuf.Any typed_config = 4; // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // In case of a failure and without the default configuration, the HTTP listener responds with code 500. // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). config.core.v3.ExtensionConfigSource config_discovery = 5; diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 9db92927ebbe..ceb7f4a65a1f 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -251,7 +251,7 @@ message HttpConnectionManager { // The human readable prefix to use when emitting statistics for the // connection manager. See the :ref:`statistics documentation ` for // more information. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; @@ -573,27 +573,29 @@ message LocalReplyConfig { // The configuration to form response body from the :ref:`command operators ` // and to specify response content type as one of: plain/text or application/json. // - // Example one: plain/text body_format. + // Example one: "plain/text" ``body_format``. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" // - // The following response body in `plain/text` format will be generated for a request with + // The following response body in "plain/text" format will be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. // - // .. code-block:: + // .. code-block:: text // // upstream connect error:503:path=/foo // - // Example two: application/json body_format. 
+ // Example two: "application/json" ``body_format``. // - // .. code-block:: + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString // - // json_format: - // status: %RESPONSE_CODE% - // message: %LOCAL_REPLY_BODY% - // path: $REQ(:path)% + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // path: "%REQ(:path)%" // // The following response body in "application/json" format would be generated for a request with // local reply body of "upstream connection error", response_code=503 and path=/foo. @@ -721,14 +723,18 @@ message ScopedRoutes { // If an element contains no separator, the whole element is parsed as key and the // fragment value is an empty string. // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_bytes: 1}]; + string separator = 1 [(validate.rules).string = {min_len: 1}]; // The key to match on. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; + string key = 2 [(validate.rules).string = {min_len: 1}]; } // The name of the header field to extract the value from. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string name = 1 [(validate.rules).string = {min_len: 1}]; // The element separator (e.g., ';' separates 'a;b;c;d'). // Default: empty string. This causes the entirety of the header field to be extracted. @@ -762,7 +768,7 @@ message ScopedRoutes { } // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // The algorithm to use for constructing a scope key for each request. ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; @@ -813,15 +819,15 @@ message HttpFilter { // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. oneof config_type { + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. google.protobuf.Any typed_config = 4; // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // In case of a failure and without the default configuration, the HTTP listener responds with code 500. // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). 
config.core.v4alpha.ExtensionConfigSource config_discovery = 5; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto index 497e688f4c3d..0fac07427d0c 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto @@ -20,5 +20,5 @@ message KafkaBroker { "envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker"; // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto index 027bc0e3fc98..37eb8c62d0e2 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto @@ -24,7 +24,7 @@ message LocalRateLimit { // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The token bucket configuration to use for rate limiting connections that are processed by the // filter's filter chain. Each incoming connection processed by the filter consumes a single diff --git a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto index 7bd17600d145..ebdfb6f2fcc0 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto @@ -17,13 +17,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // MongoDB :ref:`configuration overview `. // [#extension: envoy.filters.network.mongo_proxy] +// [#next-free-field: 6] message MongoProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.mongo_proxy.v2.MongoProxy"; // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The optional path to use for writing Mongo access logs. If not access log // path is specified no access logs will be written. Note that access log is @@ -39,4 +40,9 @@ message MongoProxy { // Flag to specify whether :ref:`dynamic metadata // ` should be emitted. Defaults to false. bool emit_dynamic_metadata = 4; + + // List of commands to emit metrics for. Defaults to "delete", "insert", and "update". + // Note that metrics will not be emitted for "find" commands, since those are considered + // queries, and metrics for those are emitted under a dedicated "query" namespace. 
+ repeated string commands = 5; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto index 663449b27035..9dfdb14d3f11 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto @@ -21,7 +21,7 @@ message MySQLProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. // If the access log field is empty, access logs will not be written. diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto index b92d3cee2541..2fcdda846b6a 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto @@ -26,10 +26,10 @@ message RateLimit { "envoy.config.filter.network.rate_limit.v2.RateLimit"; // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string = {min_bytes: 1}]; + string domain = 2 [(validate.rules).string = {min_len: 1}]; // The rate limit descriptor list to use in the rate limit service request. repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3 diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto index e62f7b4c419e..6b8d3b0181b9 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto @@ -46,7 +46,7 @@ message RBAC { config.rbac.v3.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto index 8452a89822c1..a1508997df62 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto @@ -46,7 +46,7 @@ message RBAC { config.rbac.v4alpha.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. 
When used in diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index a537435a46e9..dce9f7962f66 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -140,7 +140,7 @@ message RedisProxy { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified or the runtime key is not present, all requests to the target cluster // will be mirrored. @@ -162,7 +162,7 @@ message RedisProxy { bool remove_prefix = 2; // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 3 [(validate.rules).string = {min_len: 1}]; // Indicates that the route has a request mirroring policy. repeated RequestMirrorPolicy request_mirror_policy = 4; @@ -208,7 +208,7 @@ message RedisProxy { } // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Network settings for the connection pool to the upstream clusters. ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto index ee77ab909592..e29a3d10af0a 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message RocketmqProxy { // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is specified in this property. RouteConfiguration route_config = 2; diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto index 5fe5d33ffacf..899debcbde7b 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto @@ -48,7 +48,7 @@ message RouteMatch { message RouteAction { // Indicates the upstream cluster to which the request should be routed. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. 
config.core.v3.Metadata metadata_match = 2; diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto index a765734e66db..cbc66fcd9979 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto @@ -25,7 +25,7 @@ message RocketmqProxy { "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is specified in this property. RouteConfiguration route_config = 2; diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto index 995e8bcb05e3..0925afef833d 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto @@ -60,7 +60,7 @@ message RouteAction { "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; // Indicates the upstream cluster to which the request should be routed. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. config.core.v4alpha.Metadata metadata_match = 2; diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto index 27d187ed2c33..82a9c31c4410 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto @@ -23,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] -// [#next-free-field: 13] +// [#next-free-field: 14] message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy"; @@ -40,7 +40,7 @@ message TcpProxy { "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the @@ -68,7 +68,7 @@ message TcpProxy { "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig"; // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + string hostname = 1 [(validate.rules).string = {min_len: 1}]; } message DeprecatedV1 { @@ -97,7 +97,7 @@ message TcpProxy { // The prefix to use when emitting :ref:`statistics // `. 
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof cluster_specifier { option (validate.required) = true; @@ -156,5 +156,11 @@ message TcpProxy { // will be proxied upstream as per usual. TunnelingConfig tunneling_config = 12; + // The maximum duration of a connection. The duration is defined as the period since a connection + // was established. If not set, there is no max duration. When max_downstream_connection_duration + // is reached the connection will be closed. Duration must be at least 1ms. + google.protobuf.Duration max_downstream_connection_duration = 13 + [(validate.rules).duration = {gte {nanos: 1000000}}]; + DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto index 1857f2abcd4e..9a2f395176b1 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] -// [#next-free-field: 13] +// [#next-free-field: 14] message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; @@ -39,7 +39,7 @@ message TcpProxy { "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the @@ -67,7 +67,7 @@ message TcpProxy { "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + string hostname = 1 [(validate.rules).string = {min_len: 1}]; } reserved 6; @@ -76,7 +76,7 @@ message TcpProxy { // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof cluster_specifier { option (validate.required) = true; @@ -134,4 +134,10 @@ message TcpProxy { // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload // will be proxied upstream as per usual. TunnelingConfig tunneling_config = 12; + + // The maximum duration of a connection. The duration is defined as the period since a connection + // was established. If not set, there is no max duration. When max_downstream_connection_duration + // is reached the connection will be closed. Duration must be at least 1ms. 
+ google.protobuf.Duration max_downstream_connection_duration = 13 + [(validate.rules).duration = {gte {nanos: 1000000}}]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto index 4fc3289ae33d..c93b4d1e8e5a 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto @@ -25,7 +25,7 @@ message RateLimit { "envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit"; // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_bytes: 1}]; + string domain = 1 [(validate.rules).string = {min_len: 1}]; // Specifies the rate limit configuration stage. Each configured rate limit filter performs a // rate limit check using descriptors configured in the diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index b7afc4f0b803..f00b0e6983d1 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -91,7 +91,7 @@ message RouteAction { // Indicates a single upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -103,9 +103,8 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string cluster_header = 6 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in @@ -138,7 +137,7 @@ message WeightedCluster { "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto index cc8e89439420..88f7b013fec7 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto @@ -72,7 +72,7 @@ message ThriftProxy { ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics. 
- string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is static and is specified in this property. RouteConfiguration route_config = 4; @@ -95,7 +95,7 @@ message ThriftFilter { // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto index 374cc131ddf8..b73a78c4f2cc 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -91,7 +91,7 @@ message RouteAction { // Indicates a single upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -103,9 +103,8 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; + string cluster_header = 6 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in @@ -138,7 +137,7 @@ message WeightedCluster { "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto index 6bf055da3ce6..b75d0e39eaf2 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto @@ -72,7 +72,7 @@ message ThriftProxy { ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is static and is specified in this property. 
RouteConfiguration route_config = 4; @@ -99,7 +99,7 @@ message ThriftFilter { // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto index 131582762b59..0c1ac6af440e 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -13,7 +13,10 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [[#not-implemented-hide:] +// [#protodoc-title: Wasm] +// [#extension: envoy.filters.network.wasm] +// Wasm :ref:`configuration overview `. + message Wasm { // General Plugin configuration. envoy.extensions.wasm.v3.PluginConfig config = 1; diff --git a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto index a90f777d79ec..eb2c202c58f1 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto @@ -23,7 +23,7 @@ message ZooKeeperProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. // If the access log field is empty, access logs will not be written. diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto index d3f6123548f8..1e986434f777 100644 --- a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto @@ -35,13 +35,13 @@ message UdpProxyConfig { } // The stat prefix used when emitting UDP proxy filter stats. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; // The upstream cluster to connect to. - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD new file mode 100644 index 000000000000..c37174bdefc4 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto new file mode 100644 index 000000000000..3fc5dae91795 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.extensions.stat_sinks.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Wasm] +// Wasm :ref:`configuration overview `. +// [#extension: envoy.stat_sinks.wasm] + +message Wasm { + // General Plugin configuration. + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto b/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto index 94359ce837bf..f41c8added21 100644 --- a/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto +++ b/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto @@ -20,8 +20,8 @@ message DatadogConfig { "envoy.config.trace.v3.DatadogConfig"; // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The name used for the service when traces are generated by envoy. - string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string service_name = 2 [(validate.rules).string = {min_len: 1}]; } diff --git a/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto b/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto index d311304a3ddf..21455a974d3b 100644 --- a/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto +++ b/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto @@ -25,7 +25,7 @@ message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. - string library = 1 [(validate.rules).string = {min_bytes: 1}]; + string library = 1 [(validate.rules).string = {min_len: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. diff --git a/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto b/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto index 93ea47ba6a10..d7e306754dc9 100644 --- a/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto +++ b/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto @@ -35,11 +35,11 @@ message LightstepConfig { } // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // File containing the access token to the `LightStep // `_ API. 
- string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + string access_token_file = 2 [(validate.rules).string = {min_len: 1}]; // Propagation modes to use by LightStep's tracer. repeated PropagationMode propagation_modes = 3 diff --git a/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto b/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto index 4207546ff701..a33aae4bc57e 100644 --- a/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto +++ b/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto @@ -47,12 +47,12 @@ message ZipkinConfig { // The cluster manager cluster that hosts the Zipkin collectors. Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster // resources `. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When // using a standard Zipkin installation, the API endpoint is typically // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new // trace instance. The default value is false, which will result in a 64 bit trace id being used. diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto index 6c001be1c746..85a8c66d0c0e 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto @@ -22,7 +22,7 @@ message Alts { // The location of a handshaker service, this is usually 169.254.169.254:8080 // on GCE. - string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}]; + string handshaker_service = 1 [(validate.rules).string = {min_len: 1}]; // The acceptable service accounts from peer, peers not in the list will be rejected in the // handshake validation step. If empty, no validation will be performed. diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto index c6c2ee9798d6..687226574d29 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto @@ -6,7 +6,6 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; @@ -16,9 +15,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Upstream Proxy Protocol] // [#extension: envoy.transport_sockets.upstream_proxy_protocol] -// [#not-implemented-hide:] + // Configuration for PROXY protocol socket message ProxyProtocolUpstreamTransport { + // The PROXY protocol settings config.core.v3.ProxyProtocolConfig config = 1; // The underlying transport socket being wrapped. 
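The hunk above drops the [#not-implemented-hide:] marker from ``ProxyProtocolUpstreamTransport``, so the message is now documented. A hedged YAML sketch of a cluster wrapping another transport socket with it follows; the extension name is taken from the [#extension:] annotation in this diff, while the inner raw_buffer socket and the ``V2`` version value are illustrative assumptions.

.. code-block:: yaml

  # Hypothetical cluster snippet: send PROXY protocol v2 on upstream connections.
  transport_socket:
    name: envoy.transport_sockets.upstream_proxy_protocol
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.transport_sockets.proxy_protocol.v3.ProxyProtocolUpstreamTransport
      config:
        version: V2              # the PROXY protocol settings (envoy.config.core.v3.ProxyProtocolConfig)
      transport_socket:          # the underlying transport socket being wrapped
        name: envoy.transport_sockets.raw_buffer
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.transport_sockets.raw_buffer.v3.RawBuffer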
diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto index df5334dfe72a..dee271f310d8 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -116,7 +116,7 @@ message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string provider_name = 1 [(validate.rules).string = {min_len: 1}]; // Private key method provider specific configuration. oneof config_type { @@ -150,7 +150,9 @@ message TlsCertificate { // TLS private key is not password encrypted. config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - // [#not-implemented-hide:] + // The OCSP response to be stapled with this certificate during the handshake. + // The response must be DER-encoded and may only be provided via ``filename`` or + // ``inline_bytes``. The response may pertain to only one certificate. config.core.v3.DataSource ocsp_staple = 4; // [#not-implemented-hide:] @@ -204,6 +206,8 @@ message CertificateValidationContext { ACCEPT_UNTRUSTED = 1; } + reserved 5; + // TLS certificate data containing certificate authority certificates to use in verifying // a presented peer certificate (e.g. server certificate for clusters or client certificate // for listeners). If not specified and a peer certificate is presented it will not be @@ -259,7 +263,7 @@ message CertificateValidationContext { // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. @@ -288,7 +292,7 @@ message CertificateValidationContext { // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative name matchers. Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified matches. @@ -310,9 +314,6 @@ message CertificateValidationContext { // `. repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - // [#not-implemented-hide:] Must present signed certificate time-stamp. 
google.protobuf.BoolValue require_signed_certificate_timestamp = 6; diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto index 80c68a56f5ce..f25370c3c9f6 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -12,6 +12,7 @@ import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "SecretProto"; @@ -33,7 +34,10 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + string name = 1 [ + (validate.rules).string = {min_len: 1}, + (udpa.annotations.field_migrate).oneof_promotion = "name_specifier" + ]; // Resource locator for SDS. This is mutually exclusive to *name*. // [#not-implemented-hide:] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto index f746f3d2f1cf..e11b2691978c 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -54,11 +54,33 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 8] +// [#next-free-field: 9] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.DownstreamTlsContext"; + enum OcspStaplePolicy { + // OCSP responses are optional. If an OCSP response is absent + // or expired, the associated certificate will be used for + // connections without an OCSP staple. + LENIENT_STAPLING = 0; + + // OCSP responses are optional. If an OCSP response is absent, + // the associated certificate will be used without an + // OCSP staple. If a response is provided but is expired, + // the associated certificate will not be used for + // subsequent connections. If no suitable certificate is found, + // the connection is rejected. + STRICT_STAPLING = 1; + + // OCSP responses are required. Configuration will fail if + // a certificate is provided without an OCSP response. If a + // response expires, the associated certificate will not be + // used connections. If no suitable certificate is found, the + // connection is rejected. + MUST_STAPLE = 2; + } + // Common TLS context settings. CommonTlsContext common_tls_context = 1; @@ -96,6 +118,11 @@ message DownstreamTlsContext { lt {seconds: 4294967296} gte {} }]; + + // Config for whether to use certificates if they do not have + // an accompanying OCSP response or if the response expires at runtime. + // Defaults to LENIENT_STAPLING + OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; } // TLS context shared by both client and server TLS contexts. 
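
For orientation, the `ocsp_staple` and `ocsp_staple_policy` fields introduced above combine in a listener's downstream TLS transport socket roughly as follows. This is a minimal sketch in Envoy's YAML config form; the certificate and staple file paths are illustrative assumptions, and only the field names and enum values come from the messages changed in this diff.

transport_socket:
  name: envoy.transport_sockets.tls
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
    # Fail configuration if a certificate is provided without an OCSP response;
    # LENIENT_STAPLING is the default policy.
    ocsp_staple_policy: MUST_STAPLE
    common_tls_context:
      tls_certificates:
        - certificate_chain: { filename: /etc/envoy/certs/server.crt }    # illustrative path
          private_key: { filename: /etc/envoy/certs/server.key }          # illustrative path
          ocsp_staple: { filename: /etc/envoy/certs/server-ocsp.der }     # DER-encoded OCSP response
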
@@ -108,7 +135,7 @@ message CommonTlsContext { message CertificateProvider { // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Provider specific config. // Note: an implementation is expected to dedup multiple instances of the same config diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index 589dd17b543a..3608f93ffe30 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -121,7 +121,7 @@ message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + string provider_name = 1 [(validate.rules).string = {min_len: 1}]; // Private key method provider specific configuration. oneof config_type { @@ -153,7 +153,9 @@ message TlsCertificate { // TLS private key is not password encrypted. config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - // [#not-implemented-hide:] + // The OCSP response to be stapled with this certificate during the handshake. + // The response must be DER-encoded and may only be provided via ``filename`` or + // ``inline_bytes``. The response may pertain to only one certificate. config.core.v4alpha.DataSource ocsp_staple = 4; // [#not-implemented-hide:] @@ -207,7 +209,7 @@ message CertificateValidationContext { ACCEPT_UNTRUSTED = 1; } - reserved 4; + reserved 4, 5; reserved "verify_subject_alt_name"; @@ -266,7 +268,7 @@ message CertificateValidationContext { // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. @@ -295,7 +297,7 @@ message CertificateValidationContext { // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative name matchers. Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified matches. @@ -317,9 +319,6 @@ message CertificateValidationContext { // `. repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - // [#not-implemented-hide:] Must present signed certificate time-stamp. 
google.protobuf.BoolValue require_signed_certificate_timestamp = 6; diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto index 11306f21415a..9848eaadef0b 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -11,6 +11,7 @@ import "udpa/core/v1/resource_locator.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; option java_outer_classname = "SecretProto"; @@ -35,7 +36,7 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Resource locator for SDS. This is mutually exclusive to *name*. // [#not-implemented-hide:] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto index 44963f687073..6a49cb352ec4 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -53,11 +53,33 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 8] +// [#next-free-field: 9] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + enum OcspStaplePolicy { + // OCSP responses are optional. If an OCSP response is absent + // or expired, the associated certificate will be used for + // connections without an OCSP staple. + LENIENT_STAPLING = 0; + + // OCSP responses are optional. If an OCSP response is absent, + // the associated certificate will be used without an + // OCSP staple. If a response is provided but is expired, + // the associated certificate will not be used for + // subsequent connections. If no suitable certificate is found, + // the connection is rejected. + STRICT_STAPLING = 1; + + // OCSP responses are required. Configuration will fail if + // a certificate is provided without an OCSP response. If a + // response expires, the associated certificate will not be + // used connections. If no suitable certificate is found, the + // connection is rejected. + MUST_STAPLE = 2; + } + // Common TLS context settings. CommonTlsContext common_tls_context = 1; @@ -95,6 +117,11 @@ message DownstreamTlsContext { lt {seconds: 4294967296} gte {} }]; + + // Config for whether to use certificates if they do not have + // an accompanying OCSP response or if the response expires at runtime. + // Defaults to LENIENT_STAPLING + OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; } // TLS context shared by both client and server TLS contexts. @@ -111,7 +138,7 @@ message CommonTlsContext { // opaque name used to specify certificate instances or types. 
For example, "ROOTCA" to specify // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Provider specific config. // Note: an implementation is expected to dedup multiple instances of the same config diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto index c6b02364aa2d..44e207172c9b 100644 --- a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto +++ b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto @@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.generic.v3; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; -option java_outer_classname = "GenericConnectionPoolProto"; +option java_outer_classname = "GenericConnectionPoolProtoOuterClass"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto index e4c2d6ff9b84..8318f3c666d9 100644 --- a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto +++ b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto @@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.http.v3; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; -option java_outer_classname = "HttpConnectionPoolProto"; +option java_outer_classname = "HttpConnectionPoolProtoOuterClass"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto index 5bc8734cb3f7..7c1d633432e9 100644 --- a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto +++ b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto @@ -5,7 +5,7 @@ package envoy.extensions.upstreams.http.tcp.v3; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; -option java_outer_classname = "TcpConnectionPoolProto"; +option java_outer_classname = "TcpConnectionPoolProtoOuterClass"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto index 26f458214466..b42fb75a0bf7 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto @@ -16,8 +16,8 @@ option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] +// [#extension: envoy.bootstrap.wasm] -// [[#not-implemented-hide:] // Configuration for a Wasm VM. 
// [#next-free-field: 7] message VmConfig { @@ -29,7 +29,7 @@ message VmConfig { string vm_id = 1; // The Wasm runtime type (either "v8" or "null" for code compiled into Envoy). - string runtime = 2 [(validate.rules).string = {min_bytes: 1}]; + string runtime = 2 [(validate.rules).string = {min_len: 1}]; // The Wasm code that Envoy will execute. config.core.v3.AsyncDataSource code = 3; @@ -51,7 +51,6 @@ message VmConfig { bool nack_on_code_cache_miss = 6; } -// [[#not-implemented-hide:] // Base Configuration for Wasm Plugins e.g. filters and services. // [#next-free-field: 6] message PluginConfig { @@ -66,9 +65,9 @@ message PluginConfig { string root_id = 2; // Configuration for finding or starting VM. - oneof vm_config { - VmConfig inline_vm_config = 3; - // In the future add referential VM configurations. + oneof vm { + VmConfig vm_config = 3; + // TODO: add referential VM configurations. } // Filter/service configuration used to configure or reconfigure a plugin @@ -86,7 +85,6 @@ message PluginConfig { bool fail_open = 5; } -// [[#not-implemented-hide:] // WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService // ` This opaque configuration will be used to create a Wasm Service. message WasmService { diff --git a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto index 02636d0fb25f..fb2369089151 100644 --- a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto +++ b/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto @@ -23,7 +23,7 @@ message ProfileActionConfig { google.protobuf.Duration profile_duration = 1; // File path to the directory to output profiles. - string profile_path = 2 [(validate.rules).string = {min_bytes: 1}]; + string profile_path = 2 [(validate.rules).string = {min_len: 1}]; // Limits the max number of profiles that can be generated by this action // over its lifetime to avoid filling the disk. diff --git a/generated_api_shadow/envoy/service/accesslog/v3/als.proto b/generated_api_shadow/envoy/service/accesslog/v3/als.proto index 3f5e37325cc5..5421c2304918 100644 --- a/generated_api_shadow/envoy/service/accesslog/v3/als.proto +++ b/generated_api_shadow/envoy/service/accesslog/v3/als.proto @@ -50,7 +50,7 @@ message StreamAccessLogsMessage { // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. - string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string log_name = 2 [(validate.rules).string = {min_len: 1}]; } // Wrapper for batches of HTTP access log entries. diff --git a/generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto b/generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto index 4edb5eade0f2..e7e96583fd2c 100644 --- a/generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto +++ b/generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto @@ -50,7 +50,7 @@ message StreamAccessLogsMessage { // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. - string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string log_name = 2 [(validate.rules).string = {min_len: 1}]; } // Wrapper for batches of HTTP access log entries. 
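
To illustrate the `PluginConfig` oneof change above (the former `inline_vm_config` field is now `vm_config` inside the `vm` oneof), a hypothetical Wasm plugin configuration fragment in YAML form might look like the following; the plugin name, root id, and code path are assumptions for illustration, and the runtime values are the ones quoted in the `VmConfig` comment.

# envoy.extensions.wasm.v3.PluginConfig, e.g. as carried by a Wasm filter or service config
name: example_plugin                          # illustrative
root_id: example_root                         # illustrative
vm_config:                                    # renamed from inline_vm_config in this change
  runtime: v8                                 # "v8", or "null" for code compiled into Envoy
  code:
    local:
      filename: /etc/envoy/example_plugin.wasm   # illustrative path
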
diff --git a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto b/generated_api_shadow/envoy/service/auth/v2/external_auth.proto index 0f580fe7dc34..7dbfd3556968 100644 --- a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v2/external_auth.proto @@ -43,7 +43,8 @@ message DeniedHttpResponse { type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers - // to the downstream client. + // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. repeated api.v2.core.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data @@ -55,9 +56,10 @@ message DeniedHttpResponse { message OkHttpResponse { // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`, - // the filter will append the correspondent header value to the matched request header. Note that - // by Leaving `append` as false, the filter will either add a new header, or override an existing + // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. By setting the `append` field to `true`, + // the filter will append the correspondent header value to the matched request header. + // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated api.v2.core.HeaderValueOption headers = 2; } diff --git a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto index 3c4fe0af665e..cdf3ee9f96e4 100644 --- a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto +++ b/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto @@ -97,7 +97,7 @@ message AttributeContext { // This message defines attributes for an HTTP request. // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 12] + // [#next-free-field: 13] message HttpRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.AttributeContext.HttpRequest"; @@ -145,6 +145,12 @@ message AttributeContext { // The HTTP request body. string body = 11; + + // The HTTP request body in bytes. This is used instead of + // :ref:`body ` when + // :ref:`pack_as_bytes ` + // is set to true. + bytes raw_body = 12; } // The source of a network activity, such as starting a TCP connection. diff --git a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto index 317d83abe485..9e2bf8fccd5b 100644 --- a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto @@ -50,7 +50,8 @@ message DeniedHttpResponse { type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers - // to the downstream client. + // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. 
repeated config.core.v3.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data @@ -59,18 +60,37 @@ message DeniedHttpResponse { } // HTTP attributes for an OK response. +// [#next-free-field: 6] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`, - // the filter will append the correspondent header value to the matched request header. Note that - // by Leaving `append` as false, the filter will either add a new header, or override an existing + // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. By setting the `append` field to `true`, + // the filter will append the correspondent header value to the matched request header. + // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v3.HeaderValueOption headers = 2; + // HTTP entity headers to remove from the original request before dispatching + // it to the upstream. This allows the authorization service to act on auth + // related headers (like `Authorization`), process them, and consume them. + // Under this model, the upstream will either receive the request (if it's + // authorized) or not receive it (if it's not), but will not see headers + // containing authorization credentials. + // + // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as + // the header `Host`, may not be removed as that would make the request + // malformed. If mentioned in `headers_to_remove` these special headers will + // be ignored. + // + // When using the HTTP service this must instead be set by the HTTP + // authorization service as a comma separated list like so: + // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. + repeated string headers_to_remove = 5; + // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata // `. Until it is removed, // setting this field overrides :ref:`CheckResponse.dynamic_metadata diff --git a/generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto index 24f728c7adef..a1bf9c9c62cb 100644 --- a/generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto +++ b/generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto @@ -97,7 +97,7 @@ message AttributeContext { // This message defines attributes for an HTTP request. // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 12] + // [#next-free-field: 13] message HttpRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.AttributeContext.HttpRequest"; @@ -145,6 +145,12 @@ message AttributeContext { // The HTTP request body. string body = 11; + + // The HTTP request body in bytes. This is used instead of + // :ref:`body ` when + // :ref:`pack_as_bytes ` + // is set to true. + bytes raw_body = 12; } // The source of a network activity, such as starting a TCP connection. 
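
To make the new `headers_to_remove` behavior above concrete, a gRPC authorization server allowing a request might return a `CheckResponse` along these lines (shown in the YAML form of the proto JSON mapping; the header names are illustrative assumptions, while `headers` and `headers_to_remove` come from the `OkHttpResponse` message in this diff):

# envoy.service.auth.v3.CheckResponse from an external authorization server
status:
  code: 0                                          # OK, the request is allowed
ok_response:
  headers:
    - header: { key: x-auth-user, value: alice }   # added before dispatch to the upstream
      append: false                                # defaults to false in this message
  headers_to_remove:
    - authorization                                # consumed by the auth service, not forwarded upstream

When the filter talks to an HTTP authorization service instead, the comment above notes that the same effect is requested via a comma separated ``x-envoy-auth-headers-to-remove`` response header rather than this field.
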
diff --git a/generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto b/generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto index 2cac45ddc6ae..dbb8dd61b301 100644 --- a/generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto @@ -50,7 +50,8 @@ message DeniedHttpResponse { type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers - // to the downstream client. + // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. repeated config.core.v4alpha.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data @@ -59,18 +60,37 @@ message DeniedHttpResponse { } // HTTP attributes for an OK response. +// [#next-free-field: 6] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.OkHttpResponse"; // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`, - // the filter will append the correspondent header value to the matched request header. Note that - // by Leaving `append` as false, the filter will either add a new header, or override an existing + // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to + // false when used in this message. By setting the `append` field to `true`, + // the filter will append the correspondent header value to the matched request header. + // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v4alpha.HeaderValueOption headers = 2; + // HTTP entity headers to remove from the original request before dispatching + // it to the upstream. This allows the authorization service to act on auth + // related headers (like `Authorization`), process them, and consume them. + // Under this model, the upstream will either receive the request (if it's + // authorized) or not receive it (if it's not), but will not see headers + // containing authorization credentials. + // + // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as + // the header `Host`, may not be removed as that would make the request + // malformed. If mentioned in `headers_to_remove` these special headers will + // be ignored. + // + // When using the HTTP service this must instead be set by the HTTP + // authorization service as a comma separated list like so: + // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. + repeated string headers_to_remove = 5; + // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata // `. 
Until it is removed, // setting this field overrides :ref:`CheckResponse.dynamic_metadata diff --git a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto index 652355b707e3..d0b703312346 100644 --- a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto +++ b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto @@ -16,7 +16,7 @@ option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: ExtensionConfigDS] +// [#protodoc-title: Extension Config Discovery Service (ECDS)] // Return extension configurations. service ExtensionConfigDiscoveryService { diff --git a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto b/generated_api_shadow/envoy/service/runtime/v3/rtds.proto index b12844233883..796b6fac24e6 100644 --- a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto +++ b/generated_api_shadow/envoy/service/runtime/v3/rtds.proto @@ -52,7 +52,7 @@ message Runtime { // Runtime resource name. This makes the Runtime a self-describing xDS // resource. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; google.protobuf.Struct layer = 2; } diff --git a/generated_api_shadow/envoy/service/status/v3/csds.proto b/generated_api_shadow/envoy/service/status/v3/csds.proto index 23f1352bf489..8e81dcdd2bff 100644 --- a/generated_api_shadow/envoy/service/status/v3/csds.proto +++ b/generated_api_shadow/envoy/service/status/v3/csds.proto @@ -9,6 +9,7 @@ import "envoy/type/matcher/v3/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -21,9 +22,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client Status Discovery Service (CSDS)] // CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. In the -// future, it can potentially be used as an interface to get the current -// state directly from the client. +// an xDS-compliant client from the management server's point of view. It can +// also be used to get the current xDS states directly from the client. service ClientStatusDiscoveryService { rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { } @@ -34,7 +34,7 @@ service ClientStatusDiscoveryService { } } -// Status of a config. +// Status of a config from a management server view. enum ConfigStatus { // Status info is not available/unknown. UNKNOWN = 0; @@ -49,10 +49,30 @@ enum ConfigStatus { // ACK/NACK. STALE = 3; - // Management server has sent the config to client but received NACK. + // Management server has sent the config to client but received NACK. The + // attached config dump will be the latest config (the rejected one), since + // it is the persisted version in the management server. ERROR = 4; } +// Config status from a client-side view. +enum ClientConfigStatus { + // Config status is not available/unknown. + CLIENT_UNKNOWN = 0; + + // Client requested the config but hasn't received any config from management + // server yet. + CLIENT_REQUESTED = 1; + + // Client received the config and replied with ACK. 
+ CLIENT_ACKED = 2; + + // Client received the config and replied with NACK. Notably, the attached + // config dump is not the NACKed version, but the most recent accepted one. If + // no config is accepted yet, the attached config dump will be empty. + CLIENT_NACKED = 3; +} + // Request for client status of clients identified by a list of NodeMatchers. message ClientStatusRequest { option (udpa.annotations.versioning).previous_message_type = @@ -67,12 +87,20 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 7] +// [#next-free-field: 8] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.PerXdsConfig"; - ConfigStatus status = 1; + // Config status generated by management servers. Will not be present if the + // CSDS server is an xDS client. + ConfigStatus status = 1 [(udpa.annotations.field_migrate).oneof_promotion = "status_config"]; + + // Client config status is populated by xDS clients. Will not be present if + // the CSDS server is an xDS server. No matter what the client config status + // is, xDS clients should always dump the most recent accepted xDS config. + ClientConfigStatus client_status = 7 + [(udpa.annotations.field_migrate).oneof_promotion = "status_config"]; oneof per_xds_config { admin.v3.ListenersConfigDump listener_config = 2; diff --git a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto index 37758954cadb..e1556de8b913 100644 --- a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto +++ b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto @@ -21,9 +21,8 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Client Status Discovery Service (CSDS)] // CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. In the -// future, it can potentially be used as an interface to get the current -// state directly from the client. +// an xDS-compliant client from the management server's point of view. It can +// also be used to get the current xDS states directly from the client. service ClientStatusDiscoveryService { rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { } @@ -34,7 +33,7 @@ service ClientStatusDiscoveryService { } } -// Status of a config. +// Status of a config from a management server view. enum ConfigStatus { // Status info is not available/unknown. UNKNOWN = 0; @@ -49,10 +48,30 @@ enum ConfigStatus { // ACK/NACK. STALE = 3; - // Management server has sent the config to client but received NACK. + // Management server has sent the config to client but received NACK. The + // attached config dump will be the latest config (the rejected one), since + // it is the persisted version in the management server. ERROR = 4; } +// Config status from a client-side view. +enum ClientConfigStatus { + // Config status is not available/unknown. + CLIENT_UNKNOWN = 0; + + // Client requested the config but hasn't received any config from management + // server yet. + CLIENT_REQUESTED = 1; + + // Client received the config and replied with ACK. + CLIENT_ACKED = 2; + + // Client received the config and replied with NACK. Notably, the attached + // config dump is not the NACKed version, but the most recent accepted one. If + // no config is accepted yet, the attached config dump will be empty. 
+ CLIENT_NACKED = 3; +} + // Request for client status of clients identified by a list of NodeMatchers. message ClientStatusRequest { option (udpa.annotations.versioning).previous_message_type = @@ -67,12 +86,21 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 7] +// [#next-free-field: 8] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.PerXdsConfig"; - ConfigStatus status = 1; + oneof status_config { + // Config status generated by management servers. Will not be present if the + // CSDS server is an xDS client. + ConfigStatus status = 1; + + // Client config status is populated by xDS clients. Will not be present if + // the CSDS server is an xDS server. No matter what the client config status + // is, xDS clients should always dump the most recent accepted xDS config. + ClientConfigStatus client_status = 7; + } oneof per_xds_config { admin.v4alpha.ListenersConfigDump listener_config = 2; diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/BUILD b/generated_api_shadow/envoy/service/tap/v2alpha/BUILD index 267aeaa0efab..8e0561a169c5 100644 --- a/generated_api_shadow/envoy/service/tap/v2alpha/BUILD +++ b/generated_api_shadow/envoy/service/tap/v2alpha/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ - "//envoy/api/v2:pkg", "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "//envoy/data/tap/v2alpha:pkg", diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/tapds.proto b/generated_api_shadow/envoy/service/tap/v2alpha/tapds.proto deleted file mode 100644 index 81b9cb0e447b..000000000000 --- a/generated_api_shadow/envoy/service/tap/v2alpha/tapds.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v2alpha; - -import "envoy/api/v2/discovery.proto"; -import "envoy/service/tap/v2alpha/common.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; -option java_outer_classname = "TapdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap discovery service] - -// [#not-implemented-hide:] Tap discovery service. -service TapDiscoveryService { - rpc StreamTapConfigs(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaTapConfigs(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchTapConfigs(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:tap_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name -// The filter TapDS config references this name. -message TapResource { - // The name of the tap configuration. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Tap config to apply - TapConfig config = 2; -} diff --git a/generated_api_shadow/envoy/service/tap/v3/BUILD b/generated_api_shadow/envoy/service/tap/v3/BUILD index 0aa82fa145be..5ee1ce553f48 100644 --- a/generated_api_shadow/envoy/service/tap/v3/BUILD +++ b/generated_api_shadow/envoy/service/tap/v3/BUILD @@ -8,9 +8,7 @@ api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/tap/v3:pkg", "//envoy/data/tap/v3:pkg", - "//envoy/service/discovery/v3:pkg", "//envoy/service/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/service/tap/v3/tapds.proto b/generated_api_shadow/envoy/service/tap/v3/tapds.proto deleted file mode 100644 index 51393d6e14c7..000000000000 --- a/generated_api_shadow/envoy/service/tap/v3/tapds.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v3; - -import "envoy/config/tap/v3/common.proto"; -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v3"; -option java_outer_classname = "TapdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap discovery service] - -// [#not-implemented-hide:] Tap discovery service. -service TapDiscoveryService { - rpc StreamTapConfigs(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaTapConfigs(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchTapConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:tap_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name -// The filter TapDS config references this name. -message TapResource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.TapResource"; - - // The name of the tap configuration. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Tap config to apply - config.tap.v3.TapConfig config = 2; -} diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/BUILD b/generated_api_shadow/envoy/service/tap/v4alpha/BUILD index 8e407d4f61e3..cb89a6907d9a 100644 --- a/generated_api_shadow/envoy/service/tap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/service/tap/v4alpha/BUILD @@ -8,9 +8,7 @@ api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v4alpha:pkg", "//envoy/data/tap/v3:pkg", - "//envoy/service/discovery/v4alpha:pkg", "//envoy/service/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto b/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto deleted file mode 100644 index a041beea2697..000000000000 --- a/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v4alpha; - -import "envoy/config/tap/v4alpha/common.proto"; -import "envoy/service/discovery/v4alpha/discovery.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; -option java_outer_classname = "TapdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap discovery service] - -// [#not-implemented-hide:] Tap discovery service. -service TapDiscoveryService { - rpc StreamTapConfigs(stream discovery.v4alpha.DiscoveryRequest) - returns (stream discovery.v4alpha.DiscoveryResponse) { - } - - rpc DeltaTapConfigs(stream discovery.v4alpha.DeltaDiscoveryRequest) - returns (stream discovery.v4alpha.DeltaDiscoveryResponse) { - } - - rpc FetchTapConfigs(discovery.v4alpha.DiscoveryRequest) - returns (discovery.v4alpha.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:tap_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name -// The filter TapDS config references this name. -message TapResource { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.TapResource"; - - // The name of the tap configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Tap config to apply - config.tap.v4alpha.TapConfig config = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/metadata.proto b/generated_api_shadow/envoy/type/matcher/metadata.proto index 2cbc602564c5..ed58d04adb02 100644 --- a/generated_api_shadow/envoy/type/matcher/metadata.proto +++ b/generated_api_shadow/envoy/type/matcher/metadata.proto @@ -83,12 +83,12 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. 
repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/generated_api_shadow/envoy/type/matcher/regex.proto b/generated_api_shadow/envoy/type/matcher/regex.proto index b23c0bff3075..6c499235bbe2 100644 --- a/generated_api_shadow/envoy/type/matcher/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/regex.proto @@ -48,7 +48,7 @@ message RegexMatcher { } // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_bytes: 1}]; + string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/generated_api_shadow/envoy/type/matcher/string.proto b/generated_api_shadow/envoy/type/matcher/string.proto index 431043e00ec1..499eaf21775f 100644 --- a/generated_api_shadow/envoy/type/matcher/string.proto +++ b/generated_api_shadow/envoy/type/matcher/string.proto @@ -34,7 +34,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -42,7 +42,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. // The regex grammar is defined `here diff --git a/generated_api_shadow/envoy/type/matcher/struct.proto b/generated_api_shadow/envoy/type/matcher/struct.proto index f65b1d121845..10d4672e0622 100644 --- a/generated_api_shadow/envoy/type/matcher/struct.proto +++ b/generated_api_shadow/envoy/type/matcher/struct.proto @@ -72,7 +72,7 @@ message StructMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } diff --git a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto index 65ec4f47ffff..a7184ee98050 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto @@ -89,12 +89,12 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto index 6087c6f90fad..f5913c460c46 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/regex.proto @@ -54,7 +54,7 @@ message RegexMatcher { } // The regex match string. The string must be supported by the configured engine. 
- string regex = 2 [(validate.rules).string = {min_bytes: 1}]; + string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/generated_api_shadow/envoy/type/matcher/v3/string.proto b/generated_api_shadow/envoy/type/matcher/v3/string.proto index 574b65ee4a18..2ab13a672083 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/string.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/string.proto @@ -37,7 +37,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -45,7 +45,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; @@ -56,7 +56,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_bytes: 1}]; + string contains = 7 [(validate.rules).string = {min_len: 1}]; string hidden_envoy_deprecated_regex = 4 [ deprecated = true, diff --git a/generated_api_shadow/envoy/type/matcher/v3/struct.proto b/generated_api_shadow/envoy/type/matcher/v3/struct.proto index b88d7b11bc2a..c753d07a5c0a 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/struct.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/struct.proto @@ -78,7 +78,7 @@ message StructMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto index 8abe14e7b667..35af650391ff 100644 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto @@ -90,12 +90,12 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto index f64614728733..2d0e90028d31 100644 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto @@ -54,7 +54,7 @@ message RegexMatcher { } // The regex match string. The string must be supported by the configured engine. 
- string regex = 2 [(validate.rules).string = {min_bytes: 1}]; + string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto index fc17946fe3b5..1bc0118ced9b 100644 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto @@ -42,7 +42,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -50,7 +50,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; @@ -61,7 +61,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_bytes: 1}]; + string contains = 7 [(validate.rules).string = {min_len: 1}]; } // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto index 643cc5a47570..328ac555bd81 100644 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto @@ -79,7 +79,7 @@ message StructMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } diff --git a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto b/generated_api_shadow/envoy/type/metadata/v3/metadata.proto index ddcce6882057..b971d8debbe5 100644 --- a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto +++ b/generated_api_shadow/envoy/type/metadata/v3/metadata.proto @@ -49,13 +49,13 @@ message MetadataKey { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The key name of Metadata to retrieve the Struct from the metadata. // Typically, it represents a builtin subsystem or custom extension. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; + string key = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. It can be a prefix or a full path, // e.g. 
``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, diff --git a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto b/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto index 42518ead59d1..bcebe5779ba1 100644 --- a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto +++ b/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto @@ -26,7 +26,7 @@ message CustomTag { "envoy.type.tracing.v2.CustomTag.Literal"; // Static literal value to populate the tag value. - string value = 1 [(validate.rules).string = {min_bytes: 1}]; + string value = 1 [(validate.rules).string = {min_len: 1}]; } // Environment type custom tag with environment name and default value. @@ -35,7 +35,7 @@ message CustomTag { "envoy.type.tracing.v2.CustomTag.Environment"; // Environment variable name to obtain the value to populate the tag value. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; + string name = 1 [(validate.rules).string = {min_len: 1}]; // When the environment variable is not found, // the tag value will be populated with this default value if specified, @@ -50,7 +50,7 @@ message CustomTag { // Header name to obtain the value to populate the tag value. string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // When the header does not exist, // the tag value will be populated with this default value if specified, @@ -80,7 +80,7 @@ message CustomTag { } // Used to populate the tag name. - string tag = 1 [(validate.rules).string = {min_bytes: 1}]; + string tag = 1 [(validate.rules).string = {min_len: 1}]; // Used to specify what kind of custom tag. oneof type { diff --git a/generated_api_shadow/envoy/watchdog/v3alpha/BUILD b/generated_api_shadow/envoy/watchdog/v3alpha/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/generated_api_shadow/envoy/watchdog/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto b/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto new file mode 100644 index 000000000000..3f47fddaa77e --- /dev/null +++ b/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.watchdog.v3alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; +option java_outer_classname = "AbortActionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.] + +// A GuardDogAction that will terminate the process by killing the +// stuck thread. This would allow easier access to the call stack of the stuck +// thread since we would run signal handlers on that thread. By default +// this will be registered to run as the last watchdog action on KILL and +// MULTIKILL events if those are enabled. 
+message AbortActionConfig { + // How long to wait for the thread to respond to the thread kill function + // before killing the process from this action. This is a blocking action. + // By default this is 5 seconds. + google.protobuf.Duration wait_duration = 1; +} diff --git a/include/abi/wasm/proxy_wasm_common.h b/include/abi/wasm/proxy_wasm_common.h deleted file mode 100644 index 626e2d2838c6..000000000000 --- a/include/abi/wasm/proxy_wasm_common.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Common enumerations available to WASM modules and shared with sandbox. - */ -// NOLINT(namespace-envoy) - -#pragma once - -#include - -enum class WasmResult : uint32_t { - Ok = 0, - // The result could not be found, e.g. a provided key did not appear in a table. - NotFound = 1, - // An argument was bad, e.g. did not not conform to the required range. - BadArgument = 2, - // Data could not be serialized. - SerializationFailure = 3, - // Data could not be parsed. - ParseFailure = 4, - // A provided expression (e.g. "foo.bar") was illegal or unrecognized. - BadExpression = 5, - // A provided memory range was not legal. - InvalidMemoryAccess = 6, - // Data was requested from an empty container. - Empty = 7, - // The provided value did not match that of the stored data. - CompareAndSwapMismatch = 8, - // Returned result was unexpected, e.g. of the incorrect size. - ResultMismatch = 9, - // Internal failure: trying check logs of the surrounding system. - InternalFailure = 10, - // The connection/stream/pipe was broken/closed unexpectedly. - BrokenConnection = 11, -}; - -#define _CASE(_e) \ - case WasmResult::_e: \ - return #_e -inline std::string toString(WasmResult r) { - switch (r) { - _CASE(Ok); - _CASE(NotFound); - _CASE(BadArgument); - _CASE(SerializationFailure); - _CASE(ParseFailure); - _CASE(BadExpression); - _CASE(InvalidMemoryAccess); - _CASE(Empty); - _CASE(CompareAndSwapMismatch); - _CASE(ResultMismatch); - _CASE(InternalFailure); - _CASE(BrokenConnection); - } -} -#undef _CASE diff --git a/include/abi/wasm/proxy_wasm_exports.h b/include/abi/wasm/proxy_wasm_exports.h deleted file mode 100644 index f2536a8c51a4..000000000000 --- a/include/abi/wasm/proxy_wasm_exports.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Proxy-WASM ABI. - */ -// NOLINT(namespace-envoy) - -#pragma once - -#include -#include - -// -// ABI functions export from the VM to the host for calls from the host into the VM. -// -// These will typically be implemented by a language specific SDK which will provide an API on top -// of this ABI e.g. the C++ SDK provides a proxy_wasm_api.h implementation of the API on top of -// this ABI. -// -// The Wasm VM can only access memory in the VM. Consequently, all data must be passed as integral -// call parameters or by the host allocating memory in the VM which is then owned by the Wasm code. -// For consistency and to enable diverse Wasm languages (e.g. languages with GC), the ABI uses a -// single mechanism for allocating memory in the VM and requires that all memory allocations be -// explicitly requested by calls from the VM and that the Wasm code then owns the allocated memory. -// - -// Non-stream calls. - -/** - * Called when the VM starts by the first plugin to use the VM. - * @param root_context_id is an identifier for one or more related plugins. - * @param vm_configuration_size is the size of any configuration available via - * proxy_get_configuration during the lifetime of this call. - * @return non-zero on success and zero on failure (e.g. bad configuration). 
- */ -enum class WasmOnVmStartResult : uint32_t { - Ok = 0, - BadConfiguration = 1, -}; -extern "C" WasmOnVmStartResult proxy_on_vm_start(uint32_t root_context_id, - uint32_t vm_configuration_size); - -/** - * Can be called to validate a configuration (e.g. from bootstrap or xDS) both before - * proxy_on_start() to verify the VM configuration or after proxy_on_start() to verify a plugin - * configuration. - * @param root_context_id is a unique identifier for the configuration verification context. - * @param configuration_size is the size of any configuration available via - * proxy_get_configuration(). - * @return non-zero on success and zero on failure (i.e. bad configuration). - */ -enum class WasmOnValidateConfigurationResult : uint32_t { - Ok = 0, - BadConfiguration = 1, -}; -extern "C" WasmOnValidateConfigurationResult -proxy_validate_configuration(uint32_t root_context_id, uint32_t configuration_size); -/** - * Called when a plugin loads or when plugin configuration changes dynamically. - * @param root_context_id is an identifier for one or more related plugins. - * @param plugin_configuration_size is the size of any configuration available via - * proxy_get_configuration(). - * @return non-zero on success and zero on failure (e.g. bad configuration). - */ -enum class WasmOnConfigureResult : uint32_t { - Ok = 0, - BadConfiguration = 1, -} -extern "C" WasmOnConfigureResult proxy_on_configure(uint32_t root_context_id, - uint32_t plugin_configuration_size); - -// Stream calls. - -/** - * Called when a request, stream or other ephemeral context is created. - * @param context_id is an identifier of the ephemeral context. - * @param configuration_size is the size of any configuration available via - * proxy_get_configuration(). - */ -extern "C" void proxy_on_context_create(uint32_t context_id, uint32_t root_context_id); - -// Stream and Non-stream calls. - -/** - * For stream contexts, called when the stream has completed. Note: if applicable proxy_on_log() is - * called after proxy_on_done() and before proxy_on_delete(). For root contexts, proxy_on_done() is - * called when the VM is going to shutdown. - * @param context_id is an identifier the context. - * @return non-zero to indicate that this context is done. Stream contexts must return non-zero. - * Root contexts may return zero to defer the VM shutdown and the proxy_on_delete call until after a - * future proxy_done() call by the root context. - */ -enum class WasmOnDoneResult : uint32_t { - Done = 0, - NotDone = 1, -} -extern "C" WasmOnDoneResult proxy_on_done(uint32_t context_id); - -/** - * Called when the context is being deleted and will no longer receive any more calls. - * @param context_id is an identifier the context. - */ -extern "C" void proxy_on_delete(uint32_t context_id); diff --git a/include/abi/wasm/proxy_wasm_imports.h b/include/abi/wasm/proxy_wasm_imports.h deleted file mode 100644 index 634142003502..000000000000 --- a/include/abi/wasm/proxy_wasm_imports.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Proxy-WASM ABI. - */ -// NOLINT(namespace-envoy) - -#pragma once - -#include -#include - -// -// ABI functions imported from the host into the VM for calls from the VM to the host. -// - -// Configuration and Status - -/** - * Called from the VM to get any configuration. 
Valid only when in proxy_on_start() (where it will - * return a VM configuration), proxy_on_configure() (where it will return a plugin configuration) or - * in proxy_validate_configuration() (where it will return a VM configuration before - * proxy_on_start() has been called and a plugin configuration after). - * @param start is the offset of the first byte to retrieve. - * @param length is the number of the bytes to retrieve. If start + length exceeds the number of - * bytes available then configuration_size will be set to the number of bytes returned. - * @param configuration_ptr a pointer to a location which will be filled with either nullptr (if no - * configuration is available) or a pointer to a allocated block containing the configuration - * bytes. - * @param configuration_size a pointer to a location containing the size (or zero) of any returned - * configuration byte block. - * @return a WasmResult: OK, InvalidMemoryAccess. Note: if OK is returned *configuration_ptr may - * be nullptr. - */ -extern "C" WasmResult proxy_get_configuration((uint32_t start, uint32_t length, - const char** configuration_ptr, size_t* configuration_size); - -// Logging -// -// level: trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5 - -/** - * Called from the VM to log a message. - * @param level is one of trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5. - * @param log_message is a pointer to a message to log. - * @param log_message_size is the size of the message. Messages need not have a newline or be null - * terminated. - * @return a WasmResult: OK, InvalidMemoryAccess. - */ -enum class WasmLogLevel : uint32_t { - Trace = 0, Debug = 1, Info = 2, Warning = 3, Error = 4, Critical = 5, -} -extern "C" WasmResult proxy_log(WasmLogLevel level, const char* log_message, size_t log_message_size); - -// System - -/** - * Called from the VM by a root context after returning zero from proxy_on_done() to indicate that - * the root context is now done and the proxy_on_delete can be called and the VM shutdown and - * deleted. - * @return a WasmResult: OK, NotFound (if the caller did not previous return zero from - * proxy_on_done()). - */ -extern "C" WasmResult proxy_done(); diff --git a/include/envoy/api/BUILD b/include/envoy/api/BUILD index 6855cc6b8688..2d9c47ddc0b6 100644 --- a/include/envoy/api/BUILD +++ b/include/envoy/api/BUILD @@ -12,6 +12,7 @@ envoy_cc_library( name = "api_interface", hdrs = ["api.h"], deps = [ + "//include/envoy/common:random_generator_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/filesystem:filesystem_interface", "//include/envoy/server:process_context_interface", diff --git a/include/envoy/api/api.h b/include/envoy/api/api.h index de98f1c7cf39..e9b3506c0312 100644 --- a/include/envoy/api/api.h +++ b/include/envoy/api/api.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/common/time.h" #include "envoy/event/dispatcher.h" #include "envoy/filesystem/filesystem.h" @@ -60,6 +61,11 @@ class Api { */ virtual const Stats::Scope& rootScope() PURE; + /** + * @return a reference to the RandomGenerator. 
+ */ + virtual Random::RandomGenerator& randomGenerator() PURE; + /** * @return an optional reference to the ProcessContext */ diff --git a/include/envoy/buffer/BUILD b/include/envoy/buffer/BUILD index d16a2fe36505..499ec8605a2e 100644 --- a/include/envoy/buffer/BUILD +++ b/include/envoy/buffer/BUILD @@ -16,7 +16,6 @@ envoy_cc_library( ], deps = [ "//include/envoy/api:os_sys_calls_interface", - "//include/envoy/network:io_handle_interface", "//source/common/common:byte_order_lib", "//source/common/common:utility_lib", ], diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index ab2d9af281ac..3ab150504ccd 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -9,7 +9,6 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/common/pure.h" -#include "envoy/network/io_handle.h" #include "common/common/byte_order.h" #include "common/common/utility.h" @@ -193,15 +192,6 @@ class Instance { */ virtual void move(Instance& rhs, uint64_t length) PURE; - /** - * Read from a file descriptor directly into the buffer. - * @param io_handle supplies the io handle to read from. - * @param max_length supplies the maximum length to read. - * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes - * read if successful, or err_ = some IoError for failure. If call failed, rc_ shouldn't be used. - */ - virtual Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) PURE; - /** * Reserve space in the buffer. * @param length supplies the amount of space to reserve. @@ -246,15 +236,6 @@ class Instance { */ virtual std::string toString() const PURE; - /** - * Write the buffer out to a file descriptor. - * @param io_handle supplies the io_handle to write to. - * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes - * written if successful, or err_ = some IoError for failure. If call failed, rc_ shouldn't be - * used. - */ - virtual Api::IoCallUint64Result write(Network::IoHandle& io_handle) PURE; - /** * Copy an integer out of the buffer. * @param start supplies the buffer index to start copying from. @@ -283,7 +264,7 @@ class Instance { * deduced from the size of the type T */ template - T peekInt(uint64_t start = 0) { + T peekInt(uint64_t start = 0) const { static_assert(Size <= sizeof(T), "requested size is bigger than integer being read"); if (length() < start + Size) { @@ -319,7 +300,7 @@ class Instance { * @param start supplies the buffer index to start copying from. * @param Size how many bytes to read out of the buffer. */ - template T peekLEInt(uint64_t start = 0) { + template T peekLEInt(uint64_t start = 0) const { return peekInt(start); } @@ -328,7 +309,7 @@ class Instance { * @param start supplies the buffer index to start copying from. * @param Size how many bytes to read out of the buffer. */ - template T peekBEInt(uint64_t start = 0) { + template T peekBEInt(uint64_t start = 0) const { return peekInt(start); } diff --git a/include/envoy/common/conn_pool.h b/include/envoy/common/conn_pool.h index 0619e7e32479..c80e80db21ed 100644 --- a/include/envoy/common/conn_pool.h +++ b/include/envoy/common/conn_pool.h @@ -50,8 +50,8 @@ class Instance { using DrainedCb = std::function; /** - * Register a callback that gets called when the connection pool is fully drained. No actual - * draining is done. 
The owner of the connection pool is responsible for not creating any + * Register a callback that gets called when the connection pool is fully drained and kicks + * off a drain. The owner of the connection pool is responsible for not creating any * new streams. */ virtual void addDrainedCallback(DrainedCb cb) PURE; @@ -68,6 +68,14 @@ class Instance { * @return Upstream::HostDescriptionConstSharedPtr the host for which connections are pooled. */ virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; + + /** + * Prefetches an upstream connection, if existing connections do not meet both current and + * anticipated load. + * + * @return true if a connection was prefetched, false otherwise. + */ + virtual bool maybePrefetch(float prefetch_ratio) PURE; }; enum class PoolFailureReason { diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index 3bc541286362..7357544f156f 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -31,6 +31,7 @@ #undef GetMessage #undef interface #undef TRUE +#undef IGNORE #include #include @@ -61,6 +62,7 @@ typedef ptrdiff_t ssize_t; typedef uint32_t mode_t; typedef SOCKET os_fd_t; +typedef HANDLE filesystem_os_id_t; // NOLINT(modernize-use-using) typedef unsigned int sa_family_t; @@ -116,6 +118,7 @@ struct msghdr { #define IPV6_RECVPKTINFO IPV6_PKTINFO #endif +#define INVALID_HANDLE INVALID_HANDLE_VALUE #define SOCKET_VALID(sock) ((sock) != INVALID_SOCKET) #define SOCKET_INVALID(sock) ((sock) == INVALID_SOCKET) #define SOCKET_FAILURE(rc) ((rc) == SOCKET_ERROR) @@ -143,6 +146,9 @@ struct msghdr { #define SOCKET_ERROR_INVAL WSAEINVAL #define SOCKET_ERROR_ADDR_IN_USE WSAEADDRINUSE +#define HANDLE_ERROR_PERM ERROR_ACCESS_DENIED +#define HANDLE_ERROR_INVALID ERROR_INVALID_HANDLE + namespace Platform { constexpr absl::string_view null_device_path{"NUL"}; } @@ -206,7 +212,9 @@ constexpr absl::string_view null_device_path{"NUL"}; #endif typedef int os_fd_t; +typedef int filesystem_os_id_t; // NOLINT(modernize-use-using) +#define INVALID_HANDLE -1 #define INVALID_SOCKET -1 #define SOCKET_VALID(sock) ((sock) >= 0) #define SOCKET_INVALID(sock) ((sock) == -1) @@ -231,6 +239,10 @@ typedef int os_fd_t; #define SOCKET_ERROR_INVAL EINVAL #define SOCKET_ERROR_ADDR_IN_USE EADDRINUSE +// Mapping POSIX file errors to common error names +#define HANDLE_ERROR_PERM EACCES +#define HANDLE_ERROR_INVALID EBADF + namespace Platform { constexpr absl::string_view null_device_path{"/dev/null"}; } @@ -243,7 +255,7 @@ constexpr absl::string_view null_device_path{"/dev/null"}; // Therefore, we decided to remove the Android check introduced here in // https://github.com/envoyproxy/envoy/pull/10120. If someone out there encounters problems with // this please bring up in Envoy's slack channel #envoy-udp-quic-dev. -#if defined(__linux__) +#if defined(__linux__) || defined(__EMSCRIPTEN__) #define ENVOY_MMSG_MORE 1 #else #define ENVOY_MMSG_MORE 0 diff --git a/include/envoy/common/random_generator.h b/include/envoy/common/random_generator.h index 90fb1b7c1543..f778d909a49b 100644 --- a/include/envoy/common/random_generator.h +++ b/include/envoy/common/random_generator.h @@ -46,6 +46,18 @@ class RandomGenerator { * for example, 7c25513b-0466-4558-a64c-12c6704f37ed */ virtual std::string uuid() PURE; + + /** + * @return a random boolean value, with probability `p` equaling true. 
+ */ + bool bernoulli(float p) { + if (p <= 0) { + return false; + } else if (p >= 1) { + return true; + } + return random() < static_cast(p * static_cast(max())); + } }; using RandomGeneratorPtr = std::unique_ptr; diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 6c268d1076b0..43725cb30233 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -107,6 +107,9 @@ class GrpcMux { virtual void requestOnDemandUpdate(const std::string& type_url, const std::set& for_update) PURE; + + using TypeUrlMap = absl::flat_hash_map; + static TypeUrlMap& typeUrlMap() { MUTABLE_CONSTRUCT_ON_FIRST_USE(TypeUrlMap, {}); } }; using GrpcMuxPtr = std::unique_ptr; diff --git a/include/envoy/config/subscription_factory.h b/include/envoy/config/subscription_factory.h index eb08360e7dda..ff6dd4d9642f 100644 --- a/include/envoy/config/subscription_factory.h +++ b/include/envoy/config/subscription_factory.h @@ -28,6 +28,27 @@ class SubscriptionFactory { absl::string_view type_url, Stats::Scope& scope, SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder) PURE; + + /** + * Collection subscription factory interface for UDPA URLs. + * + * @param collection_locator collection resource locator. + * @param config envoy::config::core::v3::ConfigSource for authority resolution. + * @param type_url type URL for the resources inside the collection. + * @param scope stats scope for any stats tracked by the subscription. + * @param callbacks the callbacks needed by all [Collection]Subscription objects, to deliver + * config updates. The callbacks must not result in the deletion of the + * CollectionSubscription object. + * @param resource_decoder how incoming opaque resource objects are to be decoded. + * + * @return SubscriptionPtr subscription object corresponding for collection_locator. + */ + virtual SubscriptionPtr + collectionSubscriptionFromUrl(const udpa::core::v1::ResourceLocator& collection_locator, + const envoy::config::core::v3::ConfigSource& config, + absl::string_view type_url, Stats::Scope& scope, + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) PURE; }; } // namespace Config diff --git a/include/envoy/event/BUILD b/include/envoy/event/BUILD index ad215d6cc133..f4caa201ff14 100644 --- a/include/envoy/event/BUILD +++ b/include/envoy/event/BUILD @@ -40,6 +40,15 @@ envoy_cc_library( hdrs = ["file_event.h"], ) +envoy_cc_library( + name = "range_timer_interface", + hdrs = ["range_timer.h"], + deps = [ + ":timer_interface", + "//include/envoy/common:time_interface", + ], +) + envoy_cc_library( name = "schedulable_cb_interface", hdrs = ["schedulable_cb.h"], diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 9fcf55707c99..0e7865b656d8 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -161,7 +161,7 @@ class Dispatcher { * @param cb supplies the udp listener callbacks to invoke for listener events. * @return Network::ListenerPtr a new listener that is owned by the caller. */ - virtual Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr&& socket, + virtual Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb) PURE; /** * Allocates a timer. @see Timer for docs on how to use the timer. 
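The `bernoulli(float p)` helper added to `Random::RandomGenerator` above returns true with probability `p`, treating `p <= 0` as always-false and `p >= 1` as always-true. A minimal caller-side sketch (not part of this patch; the `shouldSampleRequest` helper and the 5% rate are hypothetical, and only the documented `bernoulli()` interface is assumed):

#include "envoy/common/random_generator.h"

namespace Envoy {

// Hypothetical helper: decide whether to sample a request at a given rate
// using the new RandomGenerator::bernoulli() API. A generator reference would
// typically come from Api::Api::randomGenerator(), also added in this change.
inline bool shouldSampleRequest(Random::RandomGenerator& random, float sample_rate = 0.05f) {
  // bernoulli() already maps p <= 0 to false and p >= 1 to true, so disabled
  // (0.0) and always-on (1.0) sampling need no special casing by the caller.
  return random.bernoulli(sample_rate);
}

} // namespace Envoy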
diff --git a/include/envoy/event/range_timer.h b/include/envoy/event/range_timer.h new file mode 100644 index 000000000000..a4e431047dcc --- /dev/null +++ b/include/envoy/event/range_timer.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" +#include "envoy/event/timer.h" + +namespace Envoy { +namespace Event { + +/** + * An abstract event timer that can be scheduled for a timeout within a range. The actual timeout + * used is left up to individual implementations. + */ +class RangeTimer { +public: + virtual ~RangeTimer() = default; + + /** + * Disable a pending timeout without destroying the underlying timer. + */ + virtual void disableTimer() PURE; + + /** + * Enable a pending timeout within the given range. If a timeout is already pending, it will be + * reset to the new timeout. + * + * @param min_ms supplies the minimum duration of the alarm in milliseconds. + * @param max_ms supplies the maximum duration of the alarm in milliseconds. + * @param object supplies an optional scope for the duration of the alarm. + */ + virtual void enableTimer(std::chrono::milliseconds min_ms, std::chrono::milliseconds max_ms, + const ScopeTrackedObject* object = nullptr) PURE; + + /** + * Return whether the timer is currently armed. + */ + virtual bool enabled() PURE; +}; + +using RangeTimerPtr = std::unique_ptr; + +} // namespace Event +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/event/timer.h b/include/envoy/event/timer.h index c02a6a648b65..f09c98752575 100644 --- a/include/envoy/event/timer.h +++ b/include/envoy/event/timer.h @@ -40,7 +40,7 @@ class Timer { * @param ms supplies the duration of the alarm in milliseconds. * @param object supplies an optional scope for the duration of the alarm. */ - virtual void enableTimer(const std::chrono::milliseconds& ms, + virtual void enableTimer(std::chrono::milliseconds ms, const ScopeTrackedObject* object = nullptr) PURE; /** @@ -50,7 +50,7 @@ class Timer { * @param us supplies the duration of the alarm in microseconds. * @param object supplies an optional scope for the duration of the alarm. */ - virtual void enableHRTimer(const std::chrono::microseconds& us, + virtual void enableHRTimer(std::chrono::microseconds us, const ScopeTrackedObject* object = nullptr) PURE; /** * Return whether the timer is currently armed. diff --git a/include/envoy/grpc/async_client.h b/include/envoy/grpc/async_client.h index b2005723fab2..bba3d106df00 100644 --- a/include/envoy/grpc/async_client.h +++ b/include/envoy/grpc/async_client.h @@ -187,6 +187,7 @@ class RawAsyncClient { }; using RawAsyncClientPtr = std::unique_ptr; +using RawAsyncClientSharedPtr = std::shared_ptr; } // namespace Grpc } // namespace Envoy diff --git a/include/envoy/http/async_client.h b/include/envoy/http/async_client.h index 066ccb04e716..5121a3c7b969 100644 --- a/include/envoy/http/async_client.h +++ b/include/envoy/http/async_client.h @@ -6,6 +6,7 @@ #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/event/dispatcher.h" #include "envoy/http/message.h" +#include "envoy/stream_info/stream_info.h" #include "envoy/tracing/http_tracer.h" #include "common/protobuf/protobuf.h" @@ -168,6 +169,13 @@ class AsyncClient { virtual ~AsyncClient() = default; + /** + * A context from the caller of an async client. + */ + struct ParentContext { + const StreamInfo::StreamInfo* stream_info; + }; + /** * A structure to hold the options for AsyncStream object. 
*/ @@ -193,6 +201,10 @@ class AsyncClient { hash_policy = v; return *this; } + StreamOptions& setParentContext(const ParentContext& v) { + parent_context = v; + return *this; + } // For gmock test bool operator==(const StreamOptions& src) const { @@ -215,6 +227,9 @@ class AsyncClient { // Provides the hash policy for hashing load balancing strategies. Protobuf::RepeatedPtrField hash_policy; + + // Provides parent context. Currently, this holds stream info from the caller. + ParentContext parent_context; }; /** @@ -242,6 +257,10 @@ class AsyncClient { StreamOptions::setHashPolicy(v); return *this; } + RequestOptions& setParentContext(const ParentContext& v) { + StreamOptions::setParentContext(v); + return *this; + } RequestOptions& setParentSpan(Tracing::Span& parent_span) { parent_span_ = &parent_span; return *this; diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 877313e5f849..5e977c8a7d09 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -251,7 +251,9 @@ enum class StreamResetReason { // If the stream was locally reset due to connection termination. ConnectionTermination, // The stream was reset because of a resource overflow. - Overflow + Overflow, + // Either there was an early TCP error for a CONNECT request or the peer reset with CONNECT_ERROR + ConnectError }; /** diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index 440792171783..dbb5cddd3387 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -32,10 +32,6 @@ enum class FilterHeadersStatus { // FilterDataStatus::Continue from decodeData()/encodeData() or calling // continueDecoding()/continueEncoding() MUST be called if continued filter iteration is desired. StopIteration, - // Continue headers iteration to remaining filters, but ignore any subsequent data or trailers. - // This results in creating a header only request/response. - // This status MUST NOT be returned by decodeHeaders() when end_stream is set to true. - ContinueAndEndStream, // Continue headers iteration to remaining filters, but delay ending the stream. This status MUST // NOT be returned when end_stream is already set to false. // @@ -416,8 +412,10 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { * * @param headers supplies the headers to be encoded. * @param end_stream supplies whether this is a header only request/response. + * @param details supplies the details of why this response was sent. */ - virtual void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) PURE; + virtual void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, + absl::string_view details) PURE; /** * Called with data to be encoded, optionally indicating end of stream. @@ -526,12 +524,28 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { }; /** - * Common base class for both decoder and encoder filters. + * Common base class for both decoder and encoder filters. Functions here are related to the + * lifecycle of a filter. Currently the life cycle is as follows: + * - All filters receive onStreamComplete() + * - All log handlers receive log() + * - All filters receive onDestroy() + * + * This means: + * - onStreamComplete can be used to make state changes that are intended to appear in the access + * logs (like streamInfo().dynamicMetadata() or streamInfo().filterState()). + * - onDestroy is used to cleanup all pending filter resources like pending http requests and + * timers. 
*/ class StreamFilterBase { public: virtual ~StreamFilterBase() = default; + /** + * This routine is called before the access log handlers' log() is called. Filters can use this + * callback to enrich the data passed in to the log handlers. + */ + virtual void onStreamComplete() {} + /** * This routine is called prior to a filter being destroyed. This may happen after normal stream * finish (both downstream and upstream) or due to reset. Every filter is responsible for making @@ -699,6 +713,27 @@ class StreamEncoderFilterCallbacks : public virtual StreamFilterCallbacks { */ virtual ResponseTrailerMap& addEncodedTrailers() PURE; + /** + * Attempts to create a locally generated response using the provided response_code and body_text + * parameters. If the request was a gRPC request the local reply will be encoded as a gRPC + * response with a 200 HTTP response code and grpc-status and grpc-message headers mapped from the + * provided parameters. + * + * If a response has already started (e.g. if the router calls sendSendLocalReply after encoding + * headers) this will either ship the reply directly to the downstream codec, or reset the stream. + * + * @param response_code supplies the HTTP response code. + * @param body_text supplies the optional body text which is sent using the text/plain content + * type, or encoded in the grpc-message header. + * @param modify_headers supplies an optional callback function that can modify the + * response headers. + * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with. + * @param details a string detailing why this local reply was sent. + */ + virtual void sendLocalReply(Code response_code, absl::string_view body_text, + std::function modify_headers, + const absl::optional grpc_status, + absl::string_view details) PURE; /** * Adds new metadata to be encoded. * diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 358b642ed4d1..26b6bdf926b6 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -505,12 +505,31 @@ class HeaderMap { */ virtual uint64_t byteSize() const PURE; + /** + * This is a wrapper for the return result from get(). It avoids a copy when translating from + * non-const HeaderEntry to const HeaderEntry and only provides const access to the result. + */ + using NonConstGetResult = absl::InlinedVector; + class GetResult { + public: + GetResult() = default; + explicit GetResult(NonConstGetResult&& result) : result_(std::move(result)) {} + void operator=(GetResult&& rhs) noexcept { result_ = std::move(rhs.result_); } + + bool empty() const { return result_.empty(); } + size_t size() const { return result_.size(); } + const HeaderEntry* operator[](size_t i) const { return result_[i]; } + + private: + NonConstGetResult result_; + }; + /** * Get a header by key. * @param key supplies the header key. - * @return the header entry if it exists otherwise nullptr. + * @return all header entries matching the key. 
*/ - virtual const HeaderEntry* get(const LowerCaseString& key) const PURE; + virtual GetResult get(const LowerCaseString& key) const PURE; // aliases to make iterate() and iterateReverse() callbacks easier to read enum class Iterate { Continue, Break }; diff --git a/include/envoy/http/message.h b/include/envoy/http/message.h index 7fa0c8c910a9..426bb0f225fe 100644 --- a/include/envoy/http/message.h +++ b/include/envoy/http/message.h @@ -22,10 +22,9 @@ template class Message { virtual HeaderType& headers() PURE; /** - * @return Buffer::InstancePtr& the message body, if any. Callers are free to reallocate, remove, - * etc. the body. + * @return Buffer::Instance the message body, if any. Callers are free to modify the body. */ - virtual Buffer::InstancePtr& body() PURE; + virtual Buffer::Instance& body() PURE; /** * @return TrailerType* the message trailers, if any. diff --git a/include/envoy/local_info/local_info.h b/include/envoy/local_info/local_info.h index 379db4c4de0b..7c4a0f5b4795 100644 --- a/include/envoy/local_info/local_info.h +++ b/include/envoy/local_info/local_info.h @@ -22,22 +22,22 @@ class LocalInfo { virtual Network::Address::InstanceConstSharedPtr address() const PURE; /** - * Human readable zone name. E.g., "us-east-1a". + * @return the human readable zone name. E.g., "us-east-1a". */ virtual const std::string& zoneName() const PURE; /** - * Human readable cluster name. E.g., "eta". + * @return the human readable cluster name. E.g., "eta". */ virtual const std::string& clusterName() const PURE; /** - * Human readable individual node name. E.g., "i-123456". + * @return the human readable individual node name. E.g., "i-123456". */ virtual const std::string& nodeName() const PURE; /** - * v2 API Node protobuf. This is the full node identity presented to management servers. + * @return the full node identity presented to management servers. */ virtual const envoy::config::core::v3::Node& node() const PURE; }; diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h index edea7108a7b9..bd28205bd630 100644 --- a/include/envoy/network/address.h +++ b/include/envoy/network/address.h @@ -102,23 +102,37 @@ class Ip { }; /** - * Interface for a generic Pipe address + * Interface for a generic Pipe address. */ class Pipe { public: virtual ~Pipe() = default; /** - * @return abstract namespace flag + * @return abstract namespace flag. */ virtual bool abstractNamespace() const PURE; /** - * @return pipe mode + * @return pipe mode. */ virtual mode_t mode() const PURE; }; -enum class Type { Ip, Pipe }; +/** + * Interface for a generic internal address. + */ +class EnvoyInternalAddress { +public: + virtual ~EnvoyInternalAddress() = default; + + /** + * @return The unique id of the internal address. If the address represents the destination + * internal listener, the address id is that listener name. + */ + virtual const std::string& addressId() const PURE; +}; + +enum class Type { Ip, Pipe, EnvoyInternal }; /** * Interface for all network addresses. @@ -167,12 +181,19 @@ class Instance { virtual const Pipe* pipe() const PURE; /** - * @return the underlying structure wherein the address is stored + * @return the envoy internal address information IFF type() == + * Type::EnvoyInternal, otherwise nullptr. + */ + virtual const EnvoyInternalAddress* envoyInternalAddress() const PURE; + + /** + * @return the underlying structure wherein the address is stored. Return nullptr if the address + * type is internal address. 
*/ virtual const sockaddr* sockAddr() const PURE; /** - * @return length of the address container + * @return length of the address container. */ virtual socklen_t sockAddrLen() const PURE; @@ -182,7 +203,7 @@ class Instance { virtual Type type() const PURE; /** - * @return SocketInterface to be used with the address + * @return SocketInterface to be used with the address. */ virtual const Network::SocketInterface& socketInterface() const PURE; }; diff --git a/include/envoy/network/connection.h b/include/envoy/network/connection.h index 3eb74e8f64de..b486f614ed96 100644 --- a/include/envoy/network/connection.h +++ b/include/envoy/network/connection.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -307,6 +308,13 @@ class Connection : public Event::DeferredDeletable, public FilterManager { * occurred an empty string is returned. */ virtual absl::string_view transportFailureReason() const PURE; + + /** + * @return absl::optional An optional of the most recent round-trip + * time of the connection. If the platform does not support this, then an empty optional is + * returned. + */ + virtual absl::optional lastRoundTripTime() const PURE; }; using ConnectionPtr = std::unique_ptr; diff --git a/include/envoy/network/connection_balancer.h b/include/envoy/network/connection_balancer.h index 98e424df4610..16a5f199639e 100644 --- a/include/envoy/network/connection_balancer.h +++ b/include/envoy/network/connection_balancer.h @@ -65,7 +65,7 @@ class ConnectionBalancer { pickTargetHandler(BalancedConnectionHandler& current_handler) PURE; }; -using ConnectionBalancerPtr = std::unique_ptr; +using ConnectionBalancerSharedPtr = std::shared_ptr; } // namespace Network } // namespace Envoy diff --git a/include/envoy/network/connection_handler.h b/include/envoy/network/connection_handler.h index 58f672c04641..c42cc290cd61 100644 --- a/include/envoy/network/connection_handler.h +++ b/include/envoy/network/connection_handler.h @@ -52,6 +52,13 @@ class ConnectionHandler { */ virtual void removeListeners(uint64_t listener_tag) PURE; + /** + * Get the ``UdpListenerCallbacks`` associated with ``listener_tag``. This will be + * absl::nullopt for non-UDP listeners and for ``listener_tag`` values that have already been + * removed. + */ + virtual UdpListenerCallbacksOptRef getUdpListenerCallbacks(uint64_t listener_tag) PURE; + /** * Remove the filter chains and the connections in the listener. All connections owned * by the filter chains will be closed. Once all the connections are destroyed(connections @@ -87,6 +94,12 @@ class ConnectionHandler { */ virtual void enableListeners() PURE; + /** + * Set the fraction of connections the listeners should reject. + * @param reject_fraction a value between 0 (reject none) and 1 (reject all). + */ + virtual void setListenerRejectFraction(float reject_fraction) PURE; + /** * @return the stat prefix used for per-handler stats. */ @@ -126,6 +139,22 @@ class ConnectionHandler { }; using ActiveListenerPtr = std::unique_ptr; + + /** + * Used by ConnectionHandler to manage UDP listeners. + */ + class ActiveUdpListener : public virtual ActiveListener, public Network::UdpListenerCallbacks { + public: + ~ActiveUdpListener() override = default; + + /** + * Returns the worker index that ``data`` should be delivered to. The return value must be in + * the range [0, concurrency). 
+ */ + virtual uint32_t destination(const Network::UdpRecvData& data) const PURE; + }; + + using ActiveUdpListenerPtr = std::unique_ptr; }; using ConnectionHandlerPtr = std::unique_ptr; @@ -140,15 +169,16 @@ class ActiveUdpListenerFactory { /** * Creates an ActiveUdpListener object and a corresponding UdpListener * according to given config. + * @param worker_index The index of the worker this listener is being created on. * @param parent is the owner of the created ActiveListener objects. * @param dispatcher is used to create actual UDP listener. * @param config provides information needed to create ActiveUdpListener and * UdpListener objects. * @return the ActiveUdpListener created. */ - virtual ConnectionHandler::ActiveListenerPtr - createActiveUdpListener(ConnectionHandler& parent, Event::Dispatcher& disptacher, - Network::ListenerConfig& config) PURE; + virtual ConnectionHandler::ActiveUdpListenerPtr + createActiveUdpListener(uint32_t worker_index, ConnectionHandler& parent, + Event::Dispatcher& dispatcher, Network::ListenerConfig& config) PURE; /** * @return true if the UDP passing through listener doesn't form stateful connections. @@ -159,4 +189,4 @@ class ActiveUdpListenerFactory { using ActiveUdpListenerFactoryPtr = std::unique_ptr; } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index 1348998d3d9e..f11855014a3e 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/api/io_error.h" @@ -14,6 +15,7 @@ namespace Envoy { namespace Buffer { struct RawSlice; +class Instance; } // namespace Buffer namespace Event { @@ -65,6 +67,15 @@ class IoHandle { virtual Api::IoCallUint64Result readv(uint64_t max_length, Buffer::RawSlice* slices, uint64_t num_slice) PURE; + /** + * Read from a io handle directly into buffer. + * @param buffer supplies the buffer to read into. + * @param max_length supplies the maximum length to read. + * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes + * read if successful, or err_ = some IoError for failure. If call failed, rc_ shouldn't be used. + */ + virtual Api::IoCallUint64Result read(Buffer::Instance& buffer, uint64_t max_length) PURE; + /** * Write the data in slices out. * @param slices points to the location of data to be written. @@ -74,6 +85,15 @@ class IoHandle { */ virtual Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) PURE; + /** + * Write the buffer out to a file descriptor. + * @param buffer supplies the buffer to write to. + * @return a IoCallUint64Result with err_ = nullptr and rc_ = the number of bytes + * written if successful, or err_ = some IoError for failure. If call failed, rc_ shouldn't be + * used. + */ + virtual Api::IoCallUint64Result write(Buffer::Instance& buffer) PURE; + /** * Send a message to the address. * @param slices points to the location of data to be sent. @@ -223,10 +243,7 @@ class IoHandle { virtual Api::SysCallIntResult setBlocking(bool blocking) PURE; /** - * Get domain used by underlying socket (see man 2 socket) - * @param domain updated to the underlying socket's domain if call is successful - * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call - * is successful, errno_ shouldn't be used. 
+ * @return the domain used by underlying socket (see man 2 socket) */ virtual absl::optional domain() PURE; @@ -258,6 +275,13 @@ class IoHandle { * Shut down part of a full-duplex connection (see man 2 shutdown) */ virtual Api::SysCallIntResult shutdown(int how) PURE; + + /** + * @return absl::optional An optional of the most recent round-trip + * time of the connection. If the platform does not support this, then an empty optional is + * returned. + */ + virtual absl::optional lastRoundTripTime() PURE; }; using IoHandlePtr = std::unique_ptr; diff --git a/include/envoy/network/listen_socket.h b/include/envoy/network/listen_socket.h index bc0c736589ee..e6f9cef2f20d 100644 --- a/include/envoy/network/listen_socket.h +++ b/include/envoy/network/listen_socket.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -92,6 +93,13 @@ class ConnectionSocket : public virtual Socket { * @return requested server name (e.g. SNI in TLS), if any. */ virtual absl::string_view requestedServerName() const PURE; + + /** + * @return absl::optional An optional of the most recent round-trip + * time of the connection. If the platform does not support this, then an empty optional is + * returned. + */ + virtual absl::optional lastRoundTripTime() PURE; }; using ConnectionSocketPtr = std::unique_ptr; diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index da708aa95d35..4401df6cc20c 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -20,6 +20,10 @@ namespace Envoy { namespace Network { class ActiveUdpListenerFactory; +class UdpListenerWorkerRouter; + +using UdpListenerWorkerRouterOptRef = + absl::optional>; /** * ListenSocketFactory is a member of ListenConfig to provide listen socket. @@ -137,11 +141,17 @@ class ListenerConfig { virtual ActiveUdpListenerFactory* udpListenerFactory() PURE; /** - * @return factory pointer if writing on UDP socket, otherwise return - * nullptr. + * @return factory if writing on UDP socket, otherwise return + * nullopt. */ virtual UdpPacketWriterFactoryOptRef udpPacketWriterFactory() PURE; + /** + * @return the ``UdpListenerWorkerRouter`` for this listener. This will + * be non-empty iff this is a UDP listener. + */ + virtual UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() PURE; + /** * @return traffic direction of the listener. */ @@ -187,10 +197,14 @@ class TcpListenerCallbacks { */ virtual void onAccept(ConnectionSocketPtr&& socket) PURE; + enum class RejectCause { + GlobalCxLimit, + OverloadAction, + }; /** * Called when a new connection is rejected. */ - virtual void onReject() PURE; + virtual void onReject(RejectCause cause) PURE; }; /** @@ -246,7 +260,7 @@ class UdpListenerCallbacks { * * @param data UdpRecvData from the underlying socket. */ - virtual void onData(UdpRecvData& data) PURE; + virtual void onData(UdpRecvData&& data) PURE; /** * Called when the underlying socket is ready for read, before onData() is @@ -278,8 +292,26 @@ class UdpListenerCallbacks { * UdpListenerCallback */ virtual UdpPacketWriter& udpPacketWriter() PURE; + + /** + * Returns the index of this worker, in the range of [0, concurrency). + */ + virtual uint32_t workerIndex() const PURE; + + /** + * Called whenever data is received on the underlying udp socket, on + * the destination worker for the datagram according to ``destination()``. + */ + virtual void onDataWorker(Network::UdpRecvData&& data) PURE; + + /** + * Posts ``data`` to be delivered on this worker. 
+ */ + virtual void post(Network::UdpRecvData&& data) PURE; }; +using UdpListenerCallbacksOptRef = absl::optional>; + /** * An abstract socket listener. Free the listener to stop listening on the socket. */ @@ -296,6 +328,12 @@ class Listener { * Enable accepting new connections. */ virtual void enable() PURE; + + /** + * Set the fraction of incoming connections that will be closed immediately + * after being opened. + */ + virtual void setRejectFraction(float reject_fraction) PURE; }; using ListenerPtr = std::unique_ptr; @@ -337,9 +375,44 @@ class UdpListener : public virtual Listener { * @return the error code of the underlying flush api. */ virtual Api::IoCallUint64Result flush() PURE; + + /** + * Make this listener readable at the beginning of the next event loop. + * + * @note: it may become readable during the current loop if feature + * ``envoy.reloadable_features.activate_fds_next_event_loop`` is disabled. + */ + virtual void activateRead() PURE; }; using UdpListenerPtr = std::unique_ptr; +/** + * Handles delivering datagrams to the correct worker. + */ +class UdpListenerWorkerRouter { +public: + virtual ~UdpListenerWorkerRouter() = default; + + /** + * Registers a worker's callbacks for this listener. This worker must accept + * packets until it calls ``unregisterWorker``. + */ + virtual void registerWorkerForListener(UdpListenerCallbacks& listener) PURE; + + /** + * Unregisters a worker's callbacks for this listener. + */ + virtual void unregisterWorkerForListener(UdpListenerCallbacks& listener) PURE; + + /** + * Deliver ``data`` to the correct worker by calling ``onDataWorker()`` + * or ``post()`` on one of the registered workers. + */ + virtual void deliver(uint32_t dest_worker_index, UdpRecvData&& data) PURE; +}; + +using UdpListenerWorkerRouterPtr = std::unique_ptr; + } // namespace Network } // namespace Envoy diff --git a/include/envoy/network/socket_interface.h b/include/envoy/network/socket_interface.h index 702819ce3b4a..1bc922fa3303 100644 --- a/include/envoy/network/socket_interface.h +++ b/include/envoy/network/socket_interface.h @@ -32,13 +32,6 @@ class SocketInterface { virtual IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) const PURE; - /** - * Wrap socket file descriptor in IoHandle - * @param fd socket file descriptor to be wrapped - * @return @ref Network::IoHandlePtr that wraps the socket file descriptor - */ - virtual IoHandlePtr socket(os_fd_t fd) PURE; - /** * Returns true if the given family is supported on this machine. * @param domain the IP family. @@ -49,7 +42,7 @@ class SocketInterface { using SocketInterfacePtr = std::unique_ptr; /** - * Create IoHandle for given address + * Create IoHandle for given address. * @param type type of socket to be requested * @param addr address that is gleaned for address type, version and socket interface name * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor @@ -60,4 +53,4 @@ static inline IoHandlePtr ioHandleForAddr(Socket::Type type, } } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index aaac1ff40be6..b4b3be9f3c37 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -795,6 +795,22 @@ class RouteEntry : public ResponseEntry { */ virtual absl::optional idleTimeout() const PURE; + /** + * @return optional the route's maximum stream duration. 
+ */ + virtual absl::optional maxStreamDuration() const PURE; + + /** + * @return optional the max grpc-timeout this route will allow. + */ + virtual absl::optional grpcTimeoutHeaderMax() const PURE; + + /** + * @return optional the delta between grpc-timeout and enforced grpc + * timeout. + */ + virtual absl::optional grpcTimeoutHeaderOffset() const PURE; + /** * @return absl::optional the maximum allowed timeout value derived * from 'grpc-timeout' header of a gRPC request. Non-present value disables use of 'grpc-timeout' diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 35737b6d0745..72aa2e90fb28 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -63,11 +63,6 @@ class Snapshot : public ThreadLocal::ThreadLocalObject { using OverrideLayerConstPtr = std::unique_ptr; - /** - * Updates deprecated feature use stats. - */ - virtual void countDeprecatedFeatureUse() const PURE; - /** * Returns true if a deprecated feature is allowed. * @@ -265,6 +260,11 @@ class Loader { * @return Stats::Scope& the root scope. */ virtual Stats::Scope& getRootScope() PURE; + + /** + * Updates deprecated feature use stats. + */ + virtual void countDeprecatedFeatureUse() const PURE; }; using LoaderPtr = std::unique_ptr; diff --git a/include/envoy/server/configuration.h b/include/envoy/server/configuration.h index e6d02a12cb9c..d10e27e72c4d 100644 --- a/include/envoy/server/configuration.h +++ b/include/envoy/server/configuration.h @@ -90,9 +90,14 @@ class Main { virtual std::chrono::milliseconds statsFlushInterval() const PURE; /** - * @return const Watchdog& the configuration of the watchdog. + * @return const Watchdog& the configuration of the main thread watchdog. */ - virtual const Watchdog& watchdogConfig() const PURE; + virtual const Watchdog& mainThreadWatchdogConfig() const PURE; + + /** + * @return const Watchdog& the configuration of the worker watchdog. + */ + virtual const Watchdog& workerWatchdogConfig() const PURE; }; /** diff --git a/include/envoy/server/factory_context.h b/include/envoy/server/factory_context.h index 08f67e31cc3b..7d496a6d2eb4 100644 --- a/include/envoy/server/factory_context.h +++ b/include/envoy/server/factory_context.h @@ -64,11 +64,6 @@ class CommonFactoryContext { */ virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; - /** - * @return RandomGenerator& the random generator for the server. - */ - virtual Envoy::Random::RandomGenerator& random() PURE; - /** * @return Runtime::Loader& the singleton runtime loader for the server. */ diff --git a/include/envoy/server/guarddog_config.h b/include/envoy/server/guarddog_config.h index 4c51778a7eb3..e89a3f5308c7 100644 --- a/include/envoy/server/guarddog_config.h +++ b/include/envoy/server/guarddog_config.h @@ -9,6 +9,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/protobuf/message_validator.h" #include "envoy/server/guarddog.h" +#include "envoy/stats/scope.h" #include "common/protobuf/protobuf.h" @@ -19,6 +20,8 @@ namespace Configuration { struct GuardDogActionFactoryContext { Api::Api& api_; Event::Dispatcher& dispatcher_; // not owned (this is the guard dog's dispatcher) + Stats::Scope& stats_; // not owned (this is the server's stats scope) + absl::string_view guarddog_name_; }; class GuardDogAction { @@ -27,13 +30,14 @@ class GuardDogAction { /** * Callback function for when the GuardDog observes an event. * @param event the event the GuardDog observes. 
- * @param thread_ltt_pairs pairs of the relevant thread to the event, and the - * last time touched (LTT) of those threads with their watchdog. + * @param thread_last_checkin_pairs pair of the relevant thread to the event, and the + * last check in time of those threads with their watchdog. * @param now the current time. */ - virtual void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, - const std::vector>& thread_ltt_pairs, - MonotonicTime now) PURE; + virtual void + run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, + const std::vector>& thread_last_checkin_pairs, + MonotonicTime now) PURE; }; using GuardDogActionPtr = std::unique_ptr; diff --git a/include/envoy/server/health_checker_config.h b/include/envoy/server/health_checker_config.h index 5bb6344b8907..00584f517651 100644 --- a/include/envoy/server/health_checker_config.h +++ b/include/envoy/server/health_checker_config.h @@ -24,11 +24,6 @@ class HealthCheckerFactoryContext { */ virtual Envoy::Runtime::Loader& runtime() PURE; - /** - * @return RandomGenerator& the random generator for the server. - */ - virtual Envoy::Random::RandomGenerator& random() PURE; - /** * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used * for all singleton processing. diff --git a/include/envoy/server/instance.h b/include/envoy/server/instance.h index c2d294ac3cb8..8bba884e8b47 100644 --- a/include/envoy/server/instance.h +++ b/include/envoy/server/instance.h @@ -135,11 +135,6 @@ class Instance { */ virtual const Options& options() PURE; - /** - * @return RandomGenerator& the random generator for the server. - */ - virtual Random::RandomGenerator& random() PURE; - /** * @return Runtime::Loader& the singleton runtime loader for the server. */ diff --git a/include/envoy/server/listener_manager.h b/include/envoy/server/listener_manager.h index ea7062313bde..86e5548f730c 100644 --- a/include/envoy/server/listener_manager.h +++ b/include/envoy/server/listener_manager.h @@ -58,8 +58,10 @@ class ListenerComponentFactory { /** * @return an LDS API provider. * @param lds_config supplies the management server configuration. + * @param lds_resources_locator udpa::core::v1::ResourceLocator for listener collection. */ - virtual LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) PURE; + virtual LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator* lds_resources_locator) PURE; /** * Creates a socket. @@ -167,8 +169,10 @@ class ListenerManager { * during server initialization because the listener manager is created prior to several core * pieces of the server existing. * @param lds_config supplies the management server configuration. + * @param lds_resources_locator udpa::core::v1::ResourceLocator for listener collection. */ - virtual void createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) PURE; + virtual void createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator* lds_resources_locator) PURE; /** * @param state the type of listener to be returned (defaults to ACTIVE), states can be OR'd diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 546e0f9d8e80..362857899bc2 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -254,6 +254,16 @@ class Options { * @return CommandLineOptionsPtr the protobuf representation of the options. 
*/ virtual CommandLineOptionsPtr toCommandLineOptions() const PURE; + + /** + * @return the path of socket file. + */ + virtual const std::string& socketPath() const PURE; + + /** + * @return the mode of socket file. + */ + virtual mode_t socketMode() const PURE; }; } // namespace Server diff --git a/include/envoy/server/overload_manager.h b/include/envoy/server/overload_manager.h index 77c6c21c097d..8f5914fe34f3 100644 --- a/include/envoy/server/overload_manager.h +++ b/include/envoy/server/overload_manager.h @@ -62,6 +62,10 @@ class OverloadActionNameValues { // Overload action to stop accepting new connections. const std::string StopAcceptingConnections = "envoy.overload_actions.stop_accepting_connections"; + // Overload action to reject (accept and then close) new connections. + const std::string RejectIncomingConnections = + "envoy.overload_actions.reject_incoming_connections"; + // Overload action to try to shrink the heap by releasing free memory. const std::string ShrinkHeap = "envoy.overload_actions.shrink_heap"; }; diff --git a/include/envoy/server/transport_socket_config.h b/include/envoy/server/transport_socket_config.h index e08405f9b4ab..faee6f2728d9 100644 --- a/include/envoy/server/transport_socket_config.h +++ b/include/envoy/server/transport_socket_config.h @@ -64,11 +64,6 @@ class TransportSocketFactoryContext { */ virtual Event::Dispatcher& dispatcher() PURE; - /** - * @return RandomGenerator& the random generator for the server. - */ - virtual Envoy::Random::RandomGenerator& random() PURE; - /** * @return the server-wide stats store. */ diff --git a/include/envoy/server/worker.h b/include/envoy/server/worker.h index c2f273044150..9d6ed578cfbe 100644 --- a/include/envoy/server/worker.h +++ b/include/envoy/server/worker.h @@ -100,11 +100,12 @@ class WorkerFactory { virtual ~WorkerFactory() = default; /** + * @param index supplies the index of the worker, in the range of [0, concurrency). * @param overload_manager supplies the server's overload manager. * @param worker_name supplies the name of the worker, used for per-worker stats. * @return WorkerPtr a new worker. */ - virtual WorkerPtr createWorker(OverloadManager& overload_manager, + virtual WorkerPtr createWorker(uint32_t index, OverloadManager& overload_manager, const std::string& worker_name) PURE; }; diff --git a/include/envoy/ssl/context.h b/include/envoy/ssl/context.h index dfb01db046b9..d7b9248e8b77 100644 --- a/include/envoy/ssl/context.h +++ b/include/envoy/ssl/context.h @@ -6,6 +6,8 @@ #include "envoy/admin/v3/certs.pb.h" #include "envoy/common/pure.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Ssl { @@ -32,6 +34,12 @@ class Context { * @return certificate details conforming to proto admin.v2alpha.certs. */ virtual std::vector getCertChainInformation() const PURE; + + /** + * @return the number of seconds in this context until the next OCSP response will + * expire, or `absl::nullopt` if no OCSP responses exist. 
+ */ + virtual absl::optional secondsUntilFirstOcspResponseExpires() const PURE; }; using ContextSharedPtr = std::shared_ptr; diff --git a/include/envoy/ssl/context_config.h b/include/envoy/ssl/context_config.h index 487e0b619f92..675bddde27de 100644 --- a/include/envoy/ssl/context_config.h +++ b/include/envoy/ssl/context_config.h @@ -123,11 +123,23 @@ class ServerContextConfig : public virtual ContextConfig { std::array aes_key_; // AES256 key size, in bytes }; + enum class OcspStaplePolicy { + LenientStapling, + StrictStapling, + MustStaple, + }; + /** * @return True if client certificate is required, false otherwise. */ virtual bool requireClientCertificate() const PURE; + /** + * @return OcspStaplePolicy The rule for determining whether to staple OCSP + * responses on new connections. + */ + virtual OcspStaplePolicy ocspStaplePolicy() const PURE; + /** * @return The keys to use for encrypting and decrypting session tickets. * The first element is used for encrypting new tickets, and all elements diff --git a/include/envoy/ssl/context_manager.h b/include/envoy/ssl/context_manager.h index 77f6ead17c8c..d35f044474d7 100644 --- a/include/envoy/ssl/context_manager.h +++ b/include/envoy/ssl/context_manager.h @@ -47,6 +47,12 @@ class ContextManager { * context manager. */ virtual PrivateKeyMethodManager& privateKeyMethodManager() PURE; + + /** + * @return the number of seconds until the next OCSP response being managed will + * expire, or `absl::nullopt` if no OCSP responses exist. + */ + virtual absl::optional secondsUntilFirstOcspResponseExpires() const PURE; }; using ContextManagerPtr = std::unique_ptr; diff --git a/include/envoy/ssl/handshaker.h b/include/envoy/ssl/handshaker.h index 42d20601071b..517c933bf21e 100644 --- a/include/envoy/ssl/handshaker.h +++ b/include/envoy/ssl/handshaker.h @@ -30,6 +30,12 @@ class HandshakeCallbacks { * A callback which will be executed at most once upon handshake failure. */ virtual void onFailure() PURE; + + /** + * Returns a pointer to the transportSocketCallbacks struct, or nullptr if + * unset. + */ + virtual Network::TransportSocketCallbacks* transportSocketCallbacks() PURE; }; /** diff --git a/include/envoy/ssl/tls_certificate_config.h b/include/envoy/ssl/tls_certificate_config.h index 882d40fe133d..06113a9f33df 100644 --- a/include/envoy/ssl/tls_certificate_config.h +++ b/include/envoy/ssl/tls_certificate_config.h @@ -50,6 +50,17 @@ class TlsCertificateConfig { * password was inlined. */ virtual const std::string& passwordPath() const PURE; + + /** + * @return a byte vector of ocsp response. + */ + virtual const std::vector& ocspStaple() const PURE; + + /** + * @return path of the ocsp response file for this certificate or "" if the + * ocsp response was inlined. + */ + virtual const std::string& ocspStaplePath() const PURE; }; using TlsCertificateConfigPtr = std::unique_ptr; diff --git a/include/envoy/stream_info/filter_state.h b/include/envoy/stream_info/filter_state.h index ca16c0614fa8..612dc6994ec5 100644 --- a/include/envoy/stream_info/filter_state.h +++ b/include/envoy/stream_info/filter_state.h @@ -101,6 +101,13 @@ class FilterState { return *result; } + /** + * @param data_name the name of the data being looked up (mutable/readonly). + * @return a const reference to the stored data. + * An exception will be thrown if the data does not exist. + */ + virtual const Object* getDataReadOnlyGeneric(absl::string_view data_name) const PURE; + /** * @param data_name the name of the data being looked up (mutable only). 
* @return a non-const reference to the stored data if and only if the @@ -154,7 +161,6 @@ class FilterState { virtual FilterStateSharedPtr parent() const PURE; protected: - virtual const Object* getDataReadOnlyGeneric(absl::string_view data_name) const PURE; virtual Object* getDataMutableGeneric(absl::string_view data_name) PURE; }; diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index c4944ca29f1d..858ff0f0952f 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -78,8 +78,10 @@ enum ResponseFlag { ResponseFromCacheFilter = 0x100000, // Filter config was not received within the permitted warming deadline. NoFilterConfigFound = 0x200000, + // Request or connection exceeded the downstream connection duration. + DurationTimeout = 0x400000, // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG. - LastFlag = NoFilterConfigFound + LastFlag = DurationTimeout }; /** @@ -111,17 +113,17 @@ struct ResponseCodeDetailValues { const std::string StreamIdleTimeout = "stream_idle_timeout"; // The per-stream max duration timeout was exceeded. const std::string MaxDurationTimeout = "max_duration_timeout"; - // The per-stream total request timeout was exceeded + // The per-stream total request timeout was exceeded. const std::string RequestOverallTimeout = "request_overall_timeout"; // The request was rejected due to the Overload Manager reaching configured resource limits. const std::string Overload = "overload"; // The HTTP/1.0 or HTTP/0.9 request was rejected due to HTTP/1.0 support not being configured. const std::string LowVersion = "low_version"; - // The request was rejected due to the Host: or :authority field missing + // The request was rejected due to a missing Host: or :authority field. const std::string MissingHost = "missing_host_header"; // The request was rejected due to x-envoy-* headers failing strict header validation. const std::string InvalidEnvoyRequestHeaders = "request_headers_failed_strict_check"; - // The request was rejected due to the Path or :path header field missing. + // The request was rejected due to a missing Path or :path header field. const std::string MissingPath = "missing_path_rejected"; // The request was rejected due to using an absolute path on a route not supporting them. const std::string AbsolutePath = "absolute_path_rejected"; @@ -144,9 +146,9 @@ struct ResponseCodeDetailValues { const std::string MaintenanceMode = "maintenance_mode"; // The request was rejected by the router filter because there was no healthy upstream found. const std::string NoHealthyUpstream = "no_healthy_upstream"; - // The upstream response timed out + // The upstream response timed out. const std::string UpstreamTimeout = "upstream_response_timeout"; - // The final upstream try timed out + // The final upstream try timed out. const std::string UpstreamPerTryTimeout = "upstream_per_try_timeout"; // The request was destroyed because of user defined max stream duration. const std::string UpstreamMaxStreamDurationReached = "upstream_max_stream_duration_reached"; @@ -158,7 +160,7 @@ struct ResponseCodeDetailValues { // indicates that original "success" headers may have been sent downstream // despite the subsequent failure. const std::string LateUpstreamReset = "upstream_reset_after_response_started"; - // The connection is rejected due to no matching filter chain. + // The request was rejected due to no matching filter chain. 
const std::string FilterChainNotFound = "filter_chain_not_found"; // The client disconnected unexpectedly. const std::string DownstreamRemoteDisconnect = "downstream_remote_disconnect"; @@ -170,6 +172,8 @@ struct ResponseCodeDetailValues { const std::string AdminFilterResponse = "admin_filter_response"; // The original stream was replaced with an internal redirect. const std::string InternalRedirect = "internal_redirect"; + // Changes or additions to details should be reflected in + // docs/root/configuration/http/http_conn_man/response_code_details_details.rst }; using ResponseCodeDetails = ConstSingleton; @@ -232,6 +236,13 @@ class StreamInfo { */ virtual void setResponseCodeDetails(absl::string_view rc_details) PURE; + /** + * @param connection_termination_details the termination details string to set for this + * connection. + */ + virtual void + setConnectionTerminationDetails(absl::string_view connection_termination_details) PURE; + /** * @param response_flags the response_flags to intersect with. * @return true if the intersection of the response_flags argument and the currently set response @@ -283,6 +294,11 @@ class StreamInfo { */ virtual const absl::optional& responseCodeDetails() const PURE; + /** + * @return the termination details of the connection. + */ + virtual const absl::optional& connectionTerminationDetails() const PURE; + /** * @return the time that the first byte of the request was received. */ @@ -577,6 +593,16 @@ class StreamInfo { * @return A shared pointer to the request ID utils for this stream */ virtual Http::RequestIDExtensionSharedPtr getRequestIDExtension() const PURE; + + /** + * @return Connection ID of the downstream connection, or unset if not available. + **/ + virtual absl::optional connectionID() const PURE; + + /** + * @param id Connection ID of the downstream connection. + **/ + virtual void setConnectionID(uint64_t id) PURE; }; } // namespace StreamInfo diff --git a/include/envoy/thread_local/thread_local.h b/include/envoy/thread_local/thread_local.h index 683617634a20..5c4374aff8ea 100644 --- a/include/envoy/thread_local/thread_local.h +++ b/include/envoy/thread_local/thread_local.h @@ -16,6 +16,14 @@ namespace ThreadLocal { class ThreadLocalObject { public: virtual ~ThreadLocalObject() = default; + + /** + * Return the object casted to a concrete type. See getTyped() below for comments on the casts. + */ + template T& asType() { + ASSERT(dynamic_cast(this) != nullptr); + return *static_cast(this); + } }; using ThreadLocalObjectSharedPtr = std::shared_ptr; @@ -54,27 +62,15 @@ class Slot { return *static_cast(get().get()); } - /** - * Run a callback on all registered threads. - * @param cb supplies the callback to run. - */ - virtual void runOnAllThreads(Event::PostCb cb) PURE; - - /** - * Run a callback on all registered threads with a barrier. A shutdown initiated during the - * running of the PostCBs may prevent all_threads_complete_cb from being called. - * @param cb supplies the callback to run on each thread. - * @param all_threads_complete_cb supplies the callback to run on main thread after cb has - * been run on all registered threads. - */ - virtual void runOnAllThreads(Event::PostCb cb, Event::PostCb all_threads_complete_cb) PURE; - /** * Set thread local data on all threads previously registered via registerThread(). * @param initializeCb supplies the functor that will be called *on each thread*. The functor * returns the thread local object which is then stored. The storage is via * a shared_ptr. 
Thus, this is a flexible mechanism that can be used to share * the same data across all threads or to share different data on each thread. + * + * NOTE: The initialize callback is not supposed to capture the Slot, or its owner. As the owner + * may be destructed in main thread before the update_cb gets called in a worker thread. */ using InitializeCb = std::function; virtual void set(InitializeCb cb) PURE; diff --git a/include/envoy/upstream/cluster_factory.h b/include/envoy/upstream/cluster_factory.h index 68bbff008baf..019e87a51d23 100644 --- a/include/envoy/upstream/cluster_factory.h +++ b/include/envoy/upstream/cluster_factory.h @@ -79,11 +79,6 @@ class ClusterFactoryContext { */ virtual AccessLog::AccessLogManager& logManager() PURE; - /** - * @return RandomGenerator& the random generator for the server. - */ - virtual Random::RandomGenerator& random() PURE; - /** * @return Runtime::Loader& the singleton runtime loader for the server. */ diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index 8389eb94a96d..5939092a371b 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -370,7 +370,6 @@ class ClusterInfoFactory { ClusterManager& cm_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Random::RandomGenerator& random_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; ProtobufMessage::ValidationVisitor& validation_visitor_; diff --git a/include/envoy/upstream/load_balancer.h b/include/envoy/upstream/load_balancer.h index 031daffc8ad2..fe8bb7e73b92 100644 --- a/include/envoy/upstream/load_balancer.h +++ b/include/envoy/upstream/load_balancer.h @@ -99,6 +99,14 @@ class LoadBalancer { * is missing and use sensible defaults. */ virtual HostConstSharedPtr chooseHost(LoadBalancerContext* context) PURE; + + /** + * Returns a best effort prediction of the next host to be picked, or nullptr if not predictable. + * Advances with subsequent calls, so while the first call will return the next host to be picked, + * a subsequent call will return the second host to be picked. + * @param context supplies the context which is used in host selection. + */ + virtual HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) PURE; }; using LoadBalancerPtr = std::unique_ptr; diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 3f727e318d43..127df14c923a 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -735,7 +735,12 @@ class ClusterInfo { /** * @return how many streams should be anticipated per each current stream. */ - virtual float prefetchRatio() const PURE; + virtual float perUpstreamPrefetchRatio() const PURE; + + /** + * @return how many streams should be anticipated per each current stream. + */ + virtual float peekaheadRatio() const PURE; /** * @return soft limit on size of the cluster's connections read and write buffers. 
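The peekAnotherHost() addition above pairs with the prefetch ratios introduced in upstream.h: prefetch logic can peek at upcoming picks to open upstream connections ahead of demand, and later chooseHost() calls must then hand back those same hosts in order. Below is a minimal sketch of that contract using a toy round-robin balancer over plain std::string hosts instead of Envoy's HostConstSharedPtr/LoadBalancerContext types; it is illustrative only, not the real implementation.

#include <cstddef>
#include <deque>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy stand-ins for Envoy's host types; illustrative only.
using Host = std::string;
using HostConstSharedPtr = std::shared_ptr<const Host>;

class ToyRoundRobinBalancer {
public:
  explicit ToyRoundRobinBalancer(std::vector<HostConstSharedPtr> hosts)
      : hosts_(std::move(hosts)) {}

  // Analogue of chooseHost(): consume a previously peeked host if one exists,
  // otherwise advance the round-robin cursor.
  HostConstSharedPtr chooseHost() {
    if (!peeked_.empty()) {
      HostConstSharedPtr host = peeked_.front();
      peeked_.pop_front();
      return host;
    }
    return pickNext();
  }

  // Analogue of peekAnotherHost(): best-effort prediction of the next pick.
  // Each call advances, so two peeks predict the next two picks.
  HostConstSharedPtr peekAnotherHost() {
    if (hosts_.empty()) {
      return nullptr; // Not predictable.
    }
    HostConstSharedPtr host = pickNext();
    peeked_.push_back(host);
    return host;
  }

private:
  HostConstSharedPtr pickNext() {
    if (hosts_.empty()) {
      return nullptr;
    }
    HostConstSharedPtr host = hosts_[index_ % hosts_.size()];
    ++index_;
    return host;
  }

  std::vector<HostConstSharedPtr> hosts_;
  std::deque<HostConstSharedPtr> peeked_;
  std::size_t index_{0};
};

int main() {
  ToyRoundRobinBalancer lb({std::make_shared<const Host>("10.0.0.1"),
                            std::make_shared<const Host>("10.0.0.2")});
  // Peek twice (e.g. to prefetch two upstream connections)...
  std::cout << "peek: " << *lb.peekAnotherHost() << "\n"; // 10.0.0.1
  std::cout << "peek: " << *lb.peekAnotherHost() << "\n"; // 10.0.0.2
  // ...then the real picks return the same hosts, in order.
  std::cout << "pick: " << *lb.chooseHost() << "\n"; // 10.0.0.1
  std::cout << "pick: " << *lb.chooseHost() << "\n"; // 10.0.0.2
}

A real implementation also has to invalidate or drop peeked hosts when cluster membership changes between the peek and the pick; the sketch ignores that, which is why the interface only promises a best-effort prediction.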
diff --git a/repokitteh.star b/repokitteh.star index f71320677c31..bf5919628aff 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -3,8 +3,8 @@ pin("github.com/repokitteh/modules", "4ee2ed0c3622aad7fcddc04cb5dc866e44a541e6") use("github.com/repokitteh/modules/assign.star") use("github.com/repokitteh/modules/review.star") use("github.com/repokitteh/modules/wait.star") -use("github.com/repokitteh/modules/circleci.star", secret_token=get_secret('circle_token')) use("github.com/envoyproxy/envoy/ci/repokitteh/modules/azure_pipelines.star", secret_token=get_secret('azp_token')) +use("github.com/envoyproxy/envoy/ci/repokitteh/modules/newcontributor.star") use( "github.com/envoyproxy/envoy/ci/repokitteh/modules/ownerscheck.star", paths=[ @@ -27,14 +27,15 @@ use( "path": "api/envoy/", }, { - "owner": "envoyproxy/dependency-watchers", + "owner": "envoyproxy/dependency-shepherds!", "path": - "(bazel/repository_locations\.bzl)|(api/bazel/repository_locations\.bzl)|(.*/requirements\.txt)", + "(bazel/.*repos.*\.bzl)|(bazel/dependency_imports\.bzl)|(api/bazel/.*\.bzl)|(.*/requirements\.txt)", + "label": "deps", + "github_status_label": "any dependency change", }, ], ) -alias('retest-circle', 'retry-circle') alias('retest', 'retry-azp') def _backport(): diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index 447f951bc2f8..3163921b4800 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -300,7 +300,8 @@ InstanceSharedPtr AccessLogFactory::fromProto(const envoy::config::accesslog::v3 Server::Configuration::FactoryContext& context) { FilterPtr filter; if (config.has_filter()) { - filter = FilterFactory::fromProto(config.filter(), context.runtime(), context.random(), + filter = FilterFactory::fromProto(config.filter(), context.runtime(), + context.api().randomGenerator(), context.messageValidationVisitor()); } diff --git a/source/common/api/api_impl.cc b/source/common/api/api_impl.cc index 8efbbb34dadc..59477ca089cd 100644 --- a/source/common/api/api_impl.cc +++ b/source/common/api/api_impl.cc @@ -11,9 +11,10 @@ namespace Api { Impl::Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store, Event::TimeSystem& time_system, Filesystem::Instance& file_system, - const ProcessContextOptRef& process_context) + Random::RandomGenerator& random_generator, const ProcessContextOptRef& process_context) : thread_factory_(thread_factory), store_(store), time_system_(time_system), - file_system_(file_system), process_context_(process_context) {} + file_system_(file_system), random_generator_(random_generator), + process_context_(process_context) {} Event::DispatcherPtr Impl::allocateDispatcher(const std::string& name) { return std::make_unique(name, *this, time_system_); diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index 0096da46ec4b..89dde910abd3 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -18,7 +18,7 @@ namespace Api { class Impl : public Api { public: Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store, Event::TimeSystem& time_system, - Filesystem::Instance& file_system, + Filesystem::Instance& file_system, Random::RandomGenerator& random_generator, const ProcessContextOptRef& process_context = absl::nullopt); // Api::Api @@ -29,6 +29,7 @@ class Impl : public Api { Filesystem::Instance& fileSystem() override { return file_system_; } TimeSource& timeSource() 
override { return time_system_; } const Stats::Scope& rootScope() override { return store_; } + Random::RandomGenerator& randomGenerator() override { return random_generator_; } ProcessContextOptRef processContext() override { return process_context_; } private: @@ -36,6 +37,7 @@ class Impl : public Api { Stats::Store& store_; Event::TimeSystem& time_system_; Filesystem::Instance& file_system_; + Random::RandomGenerator& random_generator_; ProcessContextOptRef process_context_; }; diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 0b92c7a426f5..556faf73d638 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -327,24 +327,6 @@ void OwnedImpl::move(Instance& rhs, uint64_t length) { other.postProcess(); } -Api::IoCallUint64Result OwnedImpl::read(Network::IoHandle& io_handle, uint64_t max_length) { - if (max_length == 0) { - return Api::ioCallUint64ResultNoError(); - } - constexpr uint64_t MaxSlices = 2; - RawSlice slices[MaxSlices]; - const uint64_t num_slices = reserve(max_length, slices, MaxSlices); - Api::IoCallUint64Result result = io_handle.readv(max_length, slices, num_slices); - uint64_t bytes_to_commit = result.ok() ? result.rc_ : 0; - ASSERT(bytes_to_commit <= max_length); - for (uint64_t i = 0; i < num_slices; i++) { - slices[i].len_ = std::min(slices[i].len_, static_cast(bytes_to_commit)); - bytes_to_commit -= slices[i].len_; - } - commit(slices, num_slices); - return result; -} - uint64_t OwnedImpl::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) { if (num_iovecs == 0 || length == 0) { return 0; @@ -516,16 +498,6 @@ bool OwnedImpl::startsWith(absl::string_view data) const { NOT_REACHED_GCOVR_EXCL_LINE; } -Api::IoCallUint64Result OwnedImpl::write(Network::IoHandle& io_handle) { - constexpr uint64_t MaxSlices = 16; - RawSliceVector slices = getRawSlices(MaxSlices); - Api::IoCallUint64Result result = io_handle.writev(slices.begin(), slices.size()); - if (result.ok() && result.rc_ > 0) { - drain(static_cast(result.rc_)); - } - return result; -} - OwnedImpl::OwnedImpl() = default; OwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); } diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index f5cea7650421..cfbaadd82323 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -6,7 +6,6 @@ #include #include "envoy/buffer/buffer.h" -#include "envoy/network/io_handle.h" #include "common/common/assert.h" #include "common/common/non_copyable.h" @@ -570,11 +569,9 @@ class OwnedImpl : public LibEventInstance { void* linearize(uint32_t size) override; void move(Instance& rhs) override; void move(Instance& rhs, uint64_t length) override; - Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override; bool startsWith(absl::string_view data) const override; - Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; std::string toString() const override; // LibEventInstance diff --git a/source/common/buffer/watermark_buffer.cc b/source/common/buffer/watermark_buffer.cc index 9d566be1965d..0503266085b7 100644 --- a/source/common/buffer/watermark_buffer.cc +++ b/source/common/buffer/watermark_buffer.cc @@ -57,24 +57,12 @@ SliceDataPtr WatermarkBuffer::extractMutableFrontSlice() { return result; } 
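These deletions remove the socket-I/O entry points from the buffer classes themselves: OwnedImpl::read()/write() go away, and the WatermarkBuffer overrides below did nothing beyond re-checking watermarks after delegating to them. For reference, here is a standalone sketch of the reserve/readv/commit pattern the deleted OwnedImpl::read() implemented, simplified to a single slice and written against a plain POSIX file descriptor and a std::string rather than Envoy's IoHandle and slice types. The helper name is hypothetical and the code is illustrative only.

#include <sys/uio.h> // readv
#include <unistd.h>

#include <cstdint>
#include <string>
#include <vector>

// Illustrative only: reserve scratch space, read into it with readv(), then
// commit only the bytes that were actually read.
ssize_t readIntoBuffer(int fd, std::string& buffer, uint64_t max_length) {
  if (max_length == 0) {
    return 0;
  }
  // "Reserve" stand-in for OwnedImpl::reserve().
  std::vector<char> scratch(max_length);
  iovec slice{scratch.data(), scratch.size()};

  const ssize_t rc = ::readv(fd, &slice, 1);
  if (rc <= 0) {
    return rc; // error (rc < 0, see errno) or EOF (rc == 0)
  }
  // "Commit" stand-in: keep only what was read.
  buffer.append(scratch.data(), static_cast<std::size_t>(rc));
  return rc;
}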
-Api::IoCallUint64Result WatermarkBuffer::read(Network::IoHandle& io_handle, uint64_t max_length) { - Api::IoCallUint64Result result = OwnedImpl::read(io_handle, max_length); - checkHighAndOverflowWatermarks(); - return result; -} - uint64_t WatermarkBuffer::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) { uint64_t bytes_reserved = OwnedImpl::reserve(length, iovecs, num_iovecs); checkHighAndOverflowWatermarks(); return bytes_reserved; } -Api::IoCallUint64Result WatermarkBuffer::write(Network::IoHandle& io_handle) { - Api::IoCallUint64Result result = OwnedImpl::write(io_handle); - checkLowWatermark(); - return result; -} - void WatermarkBuffer::appendSliceForTest(const void* data, uint64_t size) { OwnedImpl::appendSliceForTest(data, size); checkHighAndOverflowWatermarks(); diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index de44822a56ab..23c7c32854d2 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -35,9 +35,7 @@ class WatermarkBuffer : public OwnedImpl { void move(Instance& rhs) override; void move(Instance& rhs, uint64_t length) override; SliceDataPtr extractMutableFrontSlice() override; - Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; - Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; void postProcess() override { checkLowWatermark(); } void appendSliceForTest(const void* data, uint64_t size) override; void appendSliceForTest(absl::string_view data) override; diff --git a/source/common/chromium_url/BUILD b/source/common/chromium_url/BUILD new file mode 100644 index 000000000000..2d4acb348765 --- /dev/null +++ b/source/common/chromium_url/BUILD @@ -0,0 +1,28 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "chromium_url", + srcs = [ + "url_canon.cc", + "url_canon_internal.cc", + "url_canon_path.cc", + "url_canon_stdstring.cc", + ], + hdrs = [ + "envoy_shim.h", + "url_canon.h", + "url_canon_internal.h", + "url_canon_stdstring.h", + "url_parse.h", + "url_parse_internal.h", + ], + deps = ["//source/common/common:assert_lib"], +) diff --git a/source/common/chromium_url/LICENSE b/source/common/chromium_url/LICENSE new file mode 100644 index 000000000000..a32e00ce6be3 --- /dev/null +++ b/source/common/chromium_url/LICENSE @@ -0,0 +1,27 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/source/common/chromium_url/README.md b/source/common/chromium_url/README.md new file mode 100644 index 000000000000..32e251c82d4d --- /dev/null +++ b/source/common/chromium_url/README.md @@ -0,0 +1,16 @@ +This is a manually minified variant of +https://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz, +providing just the parts needed for `url::CanonicalizePath()`. This is intended +to support a security release fix for CVE-2019-9901. Long term we need this to +be moved to absl or QUICHE for upgrades and long-term support. + +Some specific transforms of interest: +* The namespace `url` was changed to `chromium_url`. +* `url_parse.h` is minified to just `Component` and flattened back into the URL + directory. It does not contain any non-Chromium authored code any longer and + so does not have a separate LICENSE. +* `envoy_shim.h` adapts various macros to the Envoy context. +* Anything not reachable from `url::CanonicalizePath()` has been dropped. +* Header include paths have changed as needed. +* BUILD was manually written. +* Various clang-tidy and format fixes. diff --git a/source/common/chromium_url/envoy_shim.h b/source/common/chromium_url/envoy_shim.h new file mode 100644 index 000000000000..2b7443926c1f --- /dev/null +++ b/source/common/chromium_url/envoy_shim.h @@ -0,0 +1,17 @@ +#pragma once + +#include "common/common/assert.h" + +// This is a minimal Envoy adaptation layer for the Chromium URL library. +// NOLINT(namespace-envoy) + +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete + +#define EXPORT_TEMPLATE_DECLARE(x) +#define EXPORT_TEMPLATE_DEFINE(x) +#define COMPONENT_EXPORT(x) + +#define DCHECK(x) ASSERT(x) +#define NOTREACHED() NOT_REACHED_GCOVR_EXCL_LINE diff --git a/source/common/chromium_url/url_canon.cc b/source/common/chromium_url/url_canon.cc new file mode 100644 index 000000000000..b9ad1b829726 --- /dev/null +++ b/source/common/chromium_url/url_canon.cc @@ -0,0 +1,16 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
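As the README above notes, the only entry point this snapshot needs to expose is chromium_url::CanonicalizePath(), declared in url_canon.h with the Component and StdStringCanonOutput helpers added in this directory. The following usage sketch normalizes a path such as "/a/./b/../c" to "/a/c"; the wrapper name normalizePath is hypothetical, and it assumes the minified Component type keeps Chromium's default and (begin, len) constructors.

#include <iostream>
#include <string>

#include "common/chromium_url/url_canon.h"
#include "common/chromium_url/url_canon_stdstring.h"

// Hypothetical wrapper, illustrative only.
bool normalizePath(const std::string& original, std::string& canonical) {
  chromium_url::Component in_path(0, static_cast<int>(original.size()));
  chromium_url::Component out_path;
  chromium_url::StdStringCanonOutput output(&canonical);
  const bool ok =
      chromium_url::CanonicalizePath(original.c_str(), in_path, &output, &out_path);
  output.Complete(); // Trim the backing string to the bytes actually written.
  return ok;
}

int main() {
  std::string canonical;
  if (normalizePath("/a/./b/../c", canonical)) {
    std::cout << canonical << "\n"; // expected: /a/c
  }
}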
+ +#include "common/chromium_url/url_canon.h" + +#include "common/chromium_url/envoy_shim.h" + +namespace chromium_url { + +template class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT; + +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon.h b/source/common/chromium_url/url_canon.h new file mode 100644 index 000000000000..0280de643ac8 --- /dev/null +++ b/source/common/chromium_url/url_canon.h @@ -0,0 +1,186 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_CANON_H_ +#define URL_URL_CANON_H_ + +#include +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_parse.h" + +namespace chromium_url { + +// Canonicalizer output ------------------------------------------------------- + +// Base class for the canonicalizer output, this maintains a buffer and +// supports simple resizing and append operations on it. +// +// It is VERY IMPORTANT that no virtual function calls be made on the common +// code path. We only have two virtual function calls, the destructor and a +// resize function that is called when the existing buffer is not big enough. +// The derived class is then in charge of setting up our buffer which we will +// manage. +template class CanonOutputT { +public: + CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {} + virtual ~CanonOutputT() = default; + + // Implemented to resize the buffer. This function should update the buffer + // pointer to point to the new buffer, and any old data up to |cur_len_| in + // the buffer must be copied over. + // + // The new size |sz| must be larger than buffer_len_. + virtual void Resize(int sz) = 0; + + // Accessor for returning a character at a given position. The input offset + // must be in the valid range. + inline T at(int offset) const { return buffer_[offset]; } + + // Sets the character at the given position. The given position MUST be less + // than the length(). + inline void set(int offset, T ch) { buffer_[offset] = ch; } + + // Returns the number of characters currently in the buffer. + inline int length() const { return cur_len_; } + + // Returns the current capacity of the buffer. The length() is the number of + // characters that have been declared to be written, but the capacity() is + // the number that can be written without reallocation. If the caller must + // write many characters at once, it can make sure there is enough capacity, + // write the data, then use set_size() to declare the new length(). + int capacity() const { return buffer_len_; } + + // Called by the user of this class to get the output. The output will NOT + // be NULL-terminated. Call length() to get the + // length. + const T* data() const { return buffer_; } + T* data() { return buffer_; } + + // Shortens the URL to the new length. Used for "backing up" when processing + // relative paths. This can also be used if an external function writes a lot + // of data to the buffer (when using the "Raw" version below) beyond the end, + // to declare the new length. + // + // This MUST NOT be used to expand the size of the buffer beyond capacity(). + void set_length(int new_len) { cur_len_ = new_len; } + + // This is the most performance critical function, since it is called for + // every character. 
+ void push_back(T ch) { + // In VC2005, putting this common case first speeds up execution + // dramatically because this branch is predicted as taken. + if (cur_len_ < buffer_len_) { + buffer_[cur_len_] = ch; + cur_len_++; + return; + } + + // Grow the buffer to hold at least one more item. Hopefully we won't have + // to do this very often. + if (!Grow(1)) + return; + + // Actually do the insertion. + buffer_[cur_len_] = ch; + cur_len_++; + } + + // Appends the given string to the output. + void Append(const T* str, int str_len) { + if (cur_len_ + str_len > buffer_len_) { + if (!Grow(cur_len_ + str_len - buffer_len_)) + return; + } + for (int i = 0; i < str_len; i++) + buffer_[cur_len_ + i] = str[i]; + cur_len_ += str_len; + } + + void ReserveSizeIfNeeded(int estimated_size) { + // Reserve a bit extra to account for escaped chars. + if (estimated_size > buffer_len_) + Resize(estimated_size + 8); + } + +protected: + // Grows the given buffer so that it can fit at least |min_additional| + // characters. Returns true if the buffer could be resized, false on OOM. + bool Grow(int min_additional) { + static const int kMinBufferLen = 16; + int new_len = (buffer_len_ == 0) ? kMinBufferLen : buffer_len_; + do { + if (new_len >= (1 << 30)) // Prevent overflow below. + return false; + new_len *= 2; + } while (new_len < buffer_len_ + min_additional); + Resize(new_len); + return true; + } + + T* buffer_; + int buffer_len_; + + // Used characters in the buffer. + int cur_len_; +}; + +// Simple implementation of the CanonOutput using new[]. This class +// also supports a static buffer so if it is allocated on the stack, most +// URLs can be canonicalized with no heap allocations. +template class RawCanonOutputT : public CanonOutputT { +public: + RawCanonOutputT() : CanonOutputT() { + this->buffer_ = fixed_buffer_; + this->buffer_len_ = fixed_capacity; + } + ~RawCanonOutputT() override { + if (this->buffer_ != fixed_buffer_) + delete[] this->buffer_; + } + + void Resize(int sz) override { + T* new_buf = new T[sz]; + memcpy(new_buf, this->buffer_, sizeof(T) * (this->cur_len_ < sz ? this->cur_len_ : sz)); + if (this->buffer_ != fixed_buffer_) + delete[] this->buffer_; + this->buffer_ = new_buf; + this->buffer_len_ = sz; + } + +protected: + T fixed_buffer_[fixed_capacity]; +}; + +// Explicitly instantiate commonly used instantiations. +extern template class EXPORT_TEMPLATE_DECLARE(COMPONENT_EXPORT(URL)) CanonOutputT; + +// Normally, all canonicalization output is in narrow characters. We support +// the templates so it can also be used internally if a wide buffer is +// required. +using CanonOutput = CanonOutputT; + +template +class RawCanonOutput : public RawCanonOutputT {}; + +// Path. If the input does not begin in a slash (including if the input is +// empty), we'll prepend a slash to the path to make it canonical. +// +// The 8-bit version assumes UTF-8 encoding, but does not verify the validity +// of the UTF-8 (i.e., you can have invalid UTF-8 sequences, invalid +// characters, etc.). Normally, URLs will come in as UTF-16, so this isn't +// an issue. Somebody giving us an 8-bit path is responsible for generating +// the path that the server expects (we'll escape high-bit characters), so +// if something is invalid, it's their problem. 
+COMPONENT_EXPORT(URL) +bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, + Component* out_path); + +} // namespace chromium_url + +#endif // URL_URL_CANON_H_ diff --git a/source/common/chromium_url/url_canon_internal.cc b/source/common/chromium_url/url_canon_internal.cc new file mode 100644 index 000000000000..38c932cad5b4 --- /dev/null +++ b/source/common/chromium_url/url_canon_internal.cc @@ -0,0 +1,295 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "common/chromium_url/url_canon_internal.h" + +namespace chromium_url { + +// See the header file for this array's declaration. +const unsigned char kSharedCharTypeTable[0x100] = { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x00 - 0x0f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x10 - 0x1f + 0, // 0x20 ' ' (escape spaces in queries) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x21 ! + 0, // 0x22 " + 0, // 0x23 # (invalid in query since it marks the ref) + CHAR_QUERY | CHAR_USERINFO, // 0x24 $ + CHAR_QUERY | CHAR_USERINFO, // 0x25 % + CHAR_QUERY | CHAR_USERINFO, // 0x26 & + 0, // 0x27 ' (Try to prevent XSS.) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x28 ( + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x29 ) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2a * + CHAR_QUERY | CHAR_USERINFO, // 0x2b + + CHAR_QUERY | CHAR_USERINFO, // 0x2c , + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2d - + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x2e . + CHAR_QUERY, // 0x2f / + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x30 0 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x31 1 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x32 2 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x33 3 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x34 4 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x35 5 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x36 6 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x37 7 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x38 8 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x39 9 + CHAR_QUERY, // 0x3a : + CHAR_QUERY, // 0x3b ; + 0, // 0x3c < (Try to prevent certain types of XSS.) + CHAR_QUERY, // 0x3d = + 0, // 0x3e > (Try to prevent certain types of XSS.) + CHAR_QUERY, // 0x3f ? 
+ CHAR_QUERY, // 0x40 @ + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x41 A + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x42 B + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x43 C + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x44 D + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x45 E + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x46 F + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x47 G + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x48 H + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x49 I + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4a J + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4b K + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4c L + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4d M + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4e N + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4f O + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x50 P + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x51 Q + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x52 R + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x53 S + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x54 T + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x55 U + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x56 V + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x57 W + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x58 X + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x59 Y + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5a Z + CHAR_QUERY, // 0x5b [ + CHAR_QUERY, // 0x5c '\' + CHAR_QUERY, // 0x5d ] + CHAR_QUERY, // 0x5e ^ + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5f _ + CHAR_QUERY, // 0x60 ` + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x61 a + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x62 b + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x63 c + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x64 d + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x65 e + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x66 f + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x67 g + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x68 h + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x69 i + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6a j + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6b k + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6c l + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6d m + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6e n + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6f o + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x70 p + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x71 q + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x72 r + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x73 s + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x74 t + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x75 u + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x76 v + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x77 w + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x78 x + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x79 y + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7a z + CHAR_QUERY, // 0x7b { + CHAR_QUERY, // 0x7c | + CHAR_QUERY, // 0x7d } + CHAR_QUERY | 
CHAR_USERINFO | CHAR_COMPONENT, // 0x7e ~ + 0, // 0x7f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x80 - 0x8f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x90 - 0x9f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xa0 - 0xaf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xb0 - 0xbf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xc0 - 0xcf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xd0 - 0xdf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xe0 - 0xef + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xf0 - 0xff +}; + +const char kHexCharLookup[0x10] = { + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', +}; + +const char kCharToHexLookup[8] = { + 0, // 0x00 - 0x1f + '0', // 0x20 - 0x3f: digits 0 - 9 are 0x30 - 0x39 + 'A' - 10, // 0x40 - 0x5f: letters A - F are 0x41 - 0x46 + 'a' - 10, // 0x60 - 0x7f: letters a - f are 0x61 - 0x66 + 0, // 0x80 - 0x9F + 0, // 0xA0 - 0xBF + 0, // 0xC0 - 0xDF + 0, // 0xE0 - 0xFF +}; + +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h new file mode 100644 index 000000000000..8c405b49814a --- /dev/null +++ b/source/common/chromium_url/url_canon_internal.h @@ -0,0 +1,204 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_CANON_INTERNAL_H_ +#define URL_URL_CANON_INTERNAL_H_ + +// This file is intended to be included in another C++ file where the character +// types are defined. This allows us to write mostly generic code, but not have +// template bloat because everything is inlined when anybody calls any of our +// functions. + +#include +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_canon.h" + +namespace chromium_url { + +// Character type handling ----------------------------------------------------- + +// Bits that identify different character types. These types identify different +// bits that are set for each 8-bit character in the kSharedCharTypeTable. +enum SharedCharTypes { + // Characters that do not require escaping in queries. Characters that do + // not have this flag will be escaped; see url_canon_query.cc + CHAR_QUERY = 1, + + // Valid in the username/password field. + CHAR_USERINFO = 2, + + // Valid in a IPv4 address (digits plus dot and 'x' for hex). + CHAR_IPV4 = 4, + + // Valid in an ASCII-representation of a hex digit (as in %-escaped). + CHAR_HEX = 8, + + // Valid in an ASCII-representation of a decimal digit. + CHAR_DEC = 16, + + // Valid in an ASCII-representation of an octal digit. + CHAR_OCT = 32, + + // Characters that do not require escaping in encodeURIComponent. Characters + // that do not have this flag will be escaped; see url_util.cc. + CHAR_COMPONENT = 64, +}; + +// This table contains the flags in SharedCharTypes for each 8-bit character. +// Some canonicalization functions have their own specialized lookup table. 
+// For those with simple requirements, we have collected the flags in one +// place so there are fewer lookup tables to load into the CPU cache. +// +// Using an unsigned char type has a small but measurable performance benefit +// over using a 32-bit number. +extern const unsigned char kSharedCharTypeTable[0x100]; + +// More readable wrappers around the character type lookup table. +inline bool IsCharOfType(unsigned char c, SharedCharTypes type) { + return !!(kSharedCharTypeTable[c] & type); +} +inline bool IsQueryChar(unsigned char c) { return IsCharOfType(c, CHAR_QUERY); } +inline bool IsIPv4Char(unsigned char c) { return IsCharOfType(c, CHAR_IPV4); } +inline bool IsHexChar(unsigned char c) { return IsCharOfType(c, CHAR_HEX); } +inline bool IsComponentChar(unsigned char c) { return IsCharOfType(c, CHAR_COMPONENT); } + +// Maps the hex numerical values 0x0 to 0xf to the corresponding ASCII digit +// that will be used to represent it. +COMPONENT_EXPORT(URL) extern const char kHexCharLookup[0x10]; + +// This lookup table allows fast conversion between ASCII hex letters and their +// corresponding numerical value. The 8-bit range is divided up into 8 +// regions of 0x20 characters each. Each of the three character types (numbers, +// uppercase, lowercase) falls into different regions of this range. The table +// contains the amount to subtract from characters in that range to get at +// the corresponding numerical value. +// +// See HexDigitToValue for the lookup. +extern const char kCharToHexLookup[8]; + +// Assumes the input is a valid hex digit! Call IsHexChar before using this. +inline unsigned char HexCharToValue(unsigned char c) { return c - kCharToHexLookup[c / 0x20]; } + +// Indicates if the given character is a dot or dot equivalent, returning the +// number of characters taken by it. This will be one for a literal dot, 3 for +// an escaped dot. If the character is not a dot, this will return 0. +template inline int IsDot(const CHAR* spec, int offset, int end) { + if (spec[offset] == '.') { + return 1; + } else if (spec[offset] == '%' && offset + 3 <= end && spec[offset + 1] == '2' && + (spec[offset + 2] == 'e' || spec[offset + 2] == 'E')) { + // Found "%2e" + return 3; + } + return 0; +} + +// Write a single character, escaped, to the output. This always escapes: it +// does no checking that thee character requires escaping. +// Escaping makes sense only 8 bit chars, so code works in all cases of +// input parameters (8/16bit). +template +inline void AppendEscapedChar(UINCHAR ch, CanonOutputT* output) { + output->push_back('%'); + output->push_back(kHexCharLookup[(ch >> 4) & 0xf]); + output->push_back(kHexCharLookup[ch & 0xf]); +} + +// UTF-8 functions ------------------------------------------------------------ + +// Generic To-UTF-8 converter. This will call the given append method for each +// character that should be appended, with the given output method. Wrappers +// are provided below for escaped and non-escaped versions of this. +// +// The char_value must have already been checked that it's a valid Unicode +// character. 
+template +inline void DoAppendUTF8(unsigned char_value, Output* output) { + if (char_value <= 0x7f) { + Appender(static_cast(char_value), output); + } else if (char_value <= 0x7ff) { + // 110xxxxx 10xxxxxx + Appender(static_cast(0xC0 | (char_value >> 6)), output); + Appender(static_cast(0x80 | (char_value & 0x3f)), output); + } else if (char_value <= 0xffff) { + // 1110xxxx 10xxxxxx 10xxxxxx + Appender(static_cast(0xe0 | (char_value >> 12)), output); + Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); + Appender(static_cast(0x80 | (char_value & 0x3f)), output); + } else if (char_value <= 0x10FFFF) { // Max Unicode code point. + // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + Appender(static_cast(0xf0 | (char_value >> 18)), output); + Appender(static_cast(0x80 | ((char_value >> 12) & 0x3f)), output); + Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); + Appender(static_cast(0x80 | (char_value & 0x3f)), output); + } else { + // Invalid UTF-8 character (>20 bits). + NOTREACHED(); + } +} + +// Helper used by AppendUTF8Value below. We use an unsigned parameter so there +// are no funny sign problems with the input, but then have to convert it to +// a regular char for appending. +inline void AppendCharToOutput(unsigned char ch, CanonOutput* output) { + output->push_back(static_cast(ch)); +} + +// Writes the given character to the output as UTF-8. This does NO checking +// of the validity of the Unicode characters; the caller should ensure that +// the value it is appending is valid to append. +inline void AppendUTF8Value(unsigned char_value, CanonOutput* output) { + DoAppendUTF8(char_value, output); +} + +// Writes the given character to the output as UTF-8, escaping ALL +// characters (even when they are ASCII). This does NO checking of the +// validity of the Unicode characters; the caller should ensure that the value +// it is appending is valid to append. +inline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) { + DoAppendUTF8(char_value, output); +} + +// Given a '%' character at |*begin| in the string |spec|, this will decode +// the escaped value and put it into |*unescaped_value| on success (returns +// true). On failure, this will return false, and will not write into +// |*unescaped_value|. +// +// |*begin| will be updated to point to the last character of the escape +// sequence so that when called with the index of a for loop, the next time +// through it will point to the next character to be considered. On failure, +// |*begin| will be unchanged. +inline bool Is8BitChar(char /*c*/) { + return true; // this case is specialized to avoid a warning +} + +template +inline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* unescaped_value) { + if (*begin + 3 > end || !Is8BitChar(spec[*begin + 1]) || !Is8BitChar(spec[*begin + 2])) { + // Invalid escape sequence because there's not enough room, or the + // digits are not ASCII. + return false; + } + + unsigned char first = static_cast(spec[*begin + 1]); + unsigned char second = static_cast(spec[*begin + 2]); + if (!IsHexChar(first) || !IsHexChar(second)) { + // Invalid hex digits, fail. + return false; + } + + // Valid escape sequence. 
+ *unescaped_value = (HexCharToValue(first) << 4) + HexCharToValue(second); + *begin += 2; + return true; +} + +} // namespace chromium_url + +#endif // URL_URL_CANON_INTERNAL_H_ diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc new file mode 100644 index 000000000000..22587c0ab8a1 --- /dev/null +++ b/source/common/chromium_url/url_canon_path.cc @@ -0,0 +1,413 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include + +#include "common/chromium_url/url_canon.h" +#include "common/chromium_url/url_canon_internal.h" +#include "common/chromium_url/url_parse_internal.h" + +namespace chromium_url { + +namespace { + +enum CharacterFlags { + // Pass through unchanged, whether escaped or unescaped. This doesn't + // actually set anything so you can't OR it to check, it's just to make the + // table below more clear when neither ESCAPE or UNESCAPE is set. + PASS = 0, + + // This character requires special handling in DoPartialPath. Doing this test + // first allows us to filter out the common cases of regular characters that + // can be directly copied. + SPECIAL = 1, + + // This character must be escaped in the canonical output. Note that all + // escaped chars also have the "special" bit set so that the code that looks + // for this is triggered. Not valid with PASS or ESCAPE + ESCAPE_BIT = 2, + ESCAPE = ESCAPE_BIT | SPECIAL, + + // This character must be unescaped in canonical output. Not valid with + // ESCAPE or PASS. We DON'T set the SPECIAL flag since if we encounter these + // characters unescaped, they should just be copied. + UNESCAPE = 4, + + // This character is disallowed in URLs. Note that the "special" bit is also + // set to trigger handling. + INVALID_BIT = 8, + INVALID = INVALID_BIT | SPECIAL, +}; + +// This table contains one of the above flag values. Note some flags are more +// than one bits because they also turn on the "special" flag. Special is the +// only flag that may be combined with others. +// +// This table is designed to match exactly what IE does with the characters. +// +// Dot is even more special, and the escaped version is handled specially by +// IsDot. Therefore, we don't need the "escape" flag, and even the "unescape" +// bit is never handled (we just need the "special") bit. +const unsigned char kPathCharLookup[0x100] = { + // NULL control chars... + INVALID, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, + // control chars... + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, + // ' ' ! " # $ % & ' ( ) * + // + , - . / + ESCAPE, PASS, ESCAPE, ESCAPE, PASS, ESCAPE, PASS, PASS, PASS, PASS, PASS, PASS, PASS, UNESCAPE, + SPECIAL, PASS, + // 0 1 2 3 4 5 6 7 8 9 : + // ; < = > ? 
+ UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, PASS, PASS, ESCAPE, PASS, ESCAPE, ESCAPE, + // @ A B C D E F G H I J + // K L M N O + PASS, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + // P Q R S T U V W X Y Z + // [ \ ] ^ _ + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, PASS, ESCAPE, PASS, ESCAPE, UNESCAPE, + // ` a b c d e f g h i j + // k l m n o + ESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + // p q r s t u v w x y z + // { | } ~ + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, ESCAPE, ESCAPE, ESCAPE, UNESCAPE, ESCAPE, + // ...all the high-bit characters are escaped + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE}; + +enum DotDisposition { + // The given dot is just part of a filename and is not special. + NOT_A_DIRECTORY, + + // The given dot is the current directory. + DIRECTORY_CUR, + + // The given dot is the first of a double dot that should take us up one. + DIRECTORY_UP +}; + +// When the path resolver finds a dot, this function is called with the +// character following that dot to see what it is. The return value +// indicates what type this dot is (see above). This code handles the case +// where the dot is at the end of the input. +// +// |*consumed_len| will contain the number of characters in the input that +// express what we found. +// +// If the input is "../foo", |after_dot| = 1, |end| = 6, and +// at the end, |*consumed_len| = 2 for the "./" this function consumed. The +// original dot length should be handled by the caller. +template +DotDisposition ClassifyAfterDot(const CHAR* spec, int after_dot, int end, int* consumed_len) { + if (after_dot == end) { + // Single dot at the end. + *consumed_len = 0; + return DIRECTORY_CUR; + } + if (IsURLSlash(spec[after_dot])) { + // Single dot followed by a slash. + *consumed_len = 1; // Consume the slash + return DIRECTORY_CUR; + } + + int second_dot_len = IsDot(spec, after_dot, end); + if (second_dot_len) { + int after_second_dot = after_dot + second_dot_len; + if (after_second_dot == end) { + // Double dot at the end. 
+ *consumed_len = second_dot_len; + return DIRECTORY_UP; + } + if (IsURLSlash(spec[after_second_dot])) { + // Double dot followed by a slash. + *consumed_len = second_dot_len + 1; + return DIRECTORY_UP; + } + } + + // The dots are followed by something else, not a directory. + *consumed_len = 0; + return NOT_A_DIRECTORY; +} + +// Rewinds the output to the previous slash. It is assumed that the output +// ends with a slash and this doesn't count (we call this when we are +// appending directory paths, so the previous path component has and ending +// slash). +// +// This will stop at the first slash (assumed to be at position +// |path_begin_in_output| and not go any higher than that. Some web pages +// do ".." too many times, so we need to handle that brokenness. +// +// It searches for a literal slash rather than including a backslash as well +// because it is run only on the canonical output. +// +// The output is guaranteed to end in a slash when this function completes. +void BackUpToPreviousSlash(int path_begin_in_output, CanonOutput* output) { + DCHECK(output->length() > 0); + + int i = output->length() - 1; + DCHECK(output->at(i) == '/'); + if (i == path_begin_in_output) + return; // We're at the first slash, nothing to do. + + // Now back up (skipping the trailing slash) until we find another slash. + i--; + while (output->at(i) != '/' && i > path_begin_in_output) + i--; + + // Now shrink the output to just include that last slash we found. + output->set_length(i + 1); +} + +// Looks for problematic nested escape sequences and escapes the output as +// needed to ensure they can't be misinterpreted. +// +// Our concern is that in input escape sequence that's invalid because it +// contains nested escape sequences might look valid once those are unescaped. +// For example, "%%300" is not a valid escape sequence, but after unescaping the +// inner "%30" this becomes "%00" which is valid. Leaving this in the output +// string can result in callers re-canonicalizing the string and unescaping this +// sequence, thus resulting in something fundamentally different than the +// original input here. This can cause a variety of problems. +// +// This function is called after we've just unescaped a sequence that's within +// two output characters of a previous '%' that we know didn't begin a valid +// escape sequence in the input string. We look for whether the output is going +// to turn into a valid escape sequence, and if so, convert the initial '%' into +// an escaped "%25" so the output can't be misinterpreted. +// +// |spec| is the input string we're canonicalizing. +// |next_input_index| is the index of the next unprocessed character in |spec|. +// |input_len| is the length of |spec|. +// |last_invalid_percent_index| is the index in |output| of a previously-seen +// '%' character. The caller knows this '%' character isn't followed by a valid +// escape sequence in the input string. +// |output| is the canonicalized output thus far. The caller guarantees this +// ends with a '%' followed by one or two characters, and the '%' is the one +// pointed to by |last_invalid_percent_index|. The last character in the string +// was just unescaped. 
+template +void CheckForNestedEscapes(const CHAR* spec, int next_input_index, int input_len, + int last_invalid_percent_index, CanonOutput* output) { + const int length = output->length(); + const char last_unescaped_char = output->at(length - 1); + + // If |output| currently looks like "%c", we need to try appending the next + // input character to see if this will result in a problematic escape + // sequence. Note that this won't trigger on the first nested escape of a + // two-escape sequence like "%%30%30" -- we'll allow the conversion to + // "%0%30" -- but the second nested escape will be caught by this function + // when it's called again in that case. + const bool append_next_char = last_invalid_percent_index == length - 2; + if (append_next_char) { + // If the input doesn't contain a 7-bit character next, this case won't be a + // problem. + if ((next_input_index == input_len) || (spec[next_input_index] >= 0x80)) + return; + output->push_back(static_cast(spec[next_input_index])); + } + + // Now output ends like "%cc". Try to unescape this. + int begin = last_invalid_percent_index; + unsigned char temp; + if (DecodeEscaped(output->data(), &begin, output->length(), &temp)) { + // New escape sequence found. Overwrite the characters following the '%' + // with "25", and push_back() the one or two characters that were following + // the '%' when we were called. + if (!append_next_char) + output->push_back(output->at(last_invalid_percent_index + 1)); + output->set(last_invalid_percent_index + 1, '2'); + output->set(last_invalid_percent_index + 2, '5'); + output->push_back(last_unescaped_char); + } else if (append_next_char) { + // Not a valid escape sequence, but we still need to undo appending the next + // source character so the caller can process it normally. + output->set_length(length); + } +} + +// Appends the given path to the output. It assumes that if the input path +// starts with a slash, it should be copied to the output. If no path has +// already been appended to the output (the case when not resolving +// relative URLs), the path should begin with a slash. +// +// If there are already path components (this mode is used when appending +// relative paths for resolving), it assumes that the output already has +// a trailing slash and that if the input begins with a slash, it should be +// copied to the output. +// +// We do not collapse multiple slashes in a row to a single slash. It seems +// no web browsers do this, and we don't want incompatibilities, even though +// it would be correct for most systems. +template +bool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_output, + CanonOutput* output) { + int end = path.end(); + + // We use this variable to minimize the amount of work done when unescaping -- + // we'll only call CheckForNestedEscapes() when this points at one of the last + // couple of characters in |output|. + int last_invalid_percent_index = INT_MIN; + + bool success = true; + for (int i = path.begin; i < end; i++) { + UCHAR uch = static_cast(spec[i]); + // Chromium UTF8 logic is unneeded, as the missing templated result + // refers only to char const* (single-byte) characters at this time. + // This only trips up MSVC, since linux gcc seems to optimize it away. + // Indention is to avoid gratuitous diffs to origin source + { + unsigned char out_ch = static_cast(uch); + unsigned char flags = kPathCharLookup[out_ch]; + if (flags & SPECIAL) { + // Needs special handling of some sort. 
+ int dotlen; + if ((dotlen = IsDot(spec, i, end)) > 0) { + // See if this dot was preceded by a slash in the output. We + // assume that when canonicalizing paths, they will always + // start with a slash and not a dot, so we don't have to + // bounds check the output. + // + // Note that we check this in the case of dots so we don't have to + // special case slashes. Since slashes are much more common than + // dots, this actually increases performance measurably (though + // slightly). + DCHECK(output->length() > path_begin_in_output); + if (output->length() > path_begin_in_output && output->at(output->length() - 1) == '/') { + // Slash followed by a dot, check to see if this is means relative + int consumed_len; + switch (ClassifyAfterDot(spec, i + dotlen, end, &consumed_len)) { + case NOT_A_DIRECTORY: + // Copy the dot to the output, it means nothing special. + output->push_back('.'); + i += dotlen - 1; + break; + case DIRECTORY_CUR: // Current directory, just skip the input. + i += dotlen + consumed_len - 1; + break; + case DIRECTORY_UP: + BackUpToPreviousSlash(path_begin_in_output, output); + i += dotlen + consumed_len - 1; + break; + } + } else { + // This dot is not preceded by a slash, it is just part of some + // file name. + output->push_back('.'); + i += dotlen - 1; + } + + } else if (out_ch == '\\') { + // Convert backslashes to forward slashes + output->push_back('/'); + + } else if (out_ch == '%') { + // Handle escape sequences. + unsigned char unescaped_value; + if (DecodeEscaped(spec, &i, end, &unescaped_value)) { + // Valid escape sequence, see if we keep, reject, or unescape it. + // Note that at this point DecodeEscape() will have advanced |i| to + // the last character of the escape sequence. + char unescaped_flags = kPathCharLookup[unescaped_value]; + + if (unescaped_flags & UNESCAPE) { + // This escaped value shouldn't be escaped. Try to copy it. + output->push_back(unescaped_value); + // If we just unescaped a value within 2 output characters of the + // '%' from a previously-detected invalid escape sequence, we + // might have an input string with problematic nested escape + // sequences; detect and fix them. + if (last_invalid_percent_index >= (output->length() - 3)) { + CheckForNestedEscapes(spec, i + 1, end, last_invalid_percent_index, output); + } + } else { + // Either this is an invalid escaped character, or it's a valid + // escaped character we should keep escaped. In the first case we + // should just copy it exactly and remember the error. In the + // second we also copy exactly in case the server is sensitive to + // changing the case of any hex letters. + output->push_back('%'); + output->push_back(static_cast(spec[i - 1])); + output->push_back(static_cast(spec[i])); + if (unescaped_flags & INVALID_BIT) + success = false; + } + } else { + // Invalid escape sequence. IE7+ rejects any URLs with such + // sequences, while other browsers pass them through unchanged. We + // use the permissive behavior. + // TODO(brettw): Consider testing IE's strict behavior, which would + // allow removing the code to handle nested escapes above. + last_invalid_percent_index = output->length(); + output->push_back('%'); + } + + } else if (flags & INVALID_BIT) { + // For NULLs, etc. fail. + AppendEscapedChar(out_ch, output); + success = false; + + } else if (flags & ESCAPE_BIT) { + // This character should be escaped. + AppendEscapedChar(out_ch, output); + } + } else { + // Nothing special about this character, just append it. 
+ output->push_back(out_ch); + } + } + } + return success; +} + +template +bool DoPath(const CHAR* spec, const Component& path, CanonOutput* output, Component* out_path) { + bool success = true; + out_path->begin = output->length(); + if (path.len > 0) { + // Write out an initial slash if the input has none. If we just parse a URL + // and then canonicalize it, it will of course have a slash already. This + // check is for the replacement and relative URL resolving cases of file + // URLs. + if (!IsURLSlash(spec[path.begin])) + output->push_back('/'); + + success = DoPartialPath(spec, path, out_path->begin, output); + } else { + // No input, canonical path is a slash. + output->push_back('/'); + } + out_path->len = output->length() - out_path->begin; + return success; +} + +} // namespace + +bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, + Component* out_path) { + return DoPath(spec, path, output, out_path); +} + +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.cc b/source/common/chromium_url/url_canon_stdstring.cc new file mode 100644 index 000000000000..0c61831e5f1a --- /dev/null +++ b/source/common/chromium_url/url_canon_stdstring.cc @@ -0,0 +1,33 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "common/chromium_url/url_canon_stdstring.h" + +namespace chromium_url { + +StdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) { + cur_len_ = static_cast(str_->size()); // Append to existing data. + buffer_ = str_->empty() ? NULL : &(*str_)[0]; + buffer_len_ = static_cast(str_->size()); +} + +StdStringCanonOutput::~StdStringCanonOutput() { + // Nothing to do, we don't own the string. +} + +void StdStringCanonOutput::Complete() { + str_->resize(cur_len_); + buffer_len_ = cur_len_; +} + +void StdStringCanonOutput::Resize(int sz) { + str_->resize(sz); + buffer_ = str_->empty() ? NULL : &(*str_)[0]; + buffer_len_ = sz; +} + +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.h b/source/common/chromium_url/url_canon_stdstring.h new file mode 100644 index 000000000000..e14d6c22e74e --- /dev/null +++ b/source/common/chromium_url/url_canon_stdstring.h @@ -0,0 +1,58 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_CANON_STDSTRING_H_ +#define URL_URL_CANON_STDSTRING_H_ + +// This header file defines a canonicalizer output method class for STL +// strings. Because the canonicalizer tries not to be dependent on the STL, +// we have segregated it here. + +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_canon.h" + +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete + +namespace chromium_url { + +// Write into a std::string given in the constructor. This object does not own +// the string itself, and the user must ensure that the string stays alive +// throughout the lifetime of this object. 
+// +// The given string will be appended to; any existing data in the string will +// be preserved. +// +// Note that when canonicalization is complete, the string will likely have +// unused space at the end because we make the string very big to start out +// with (by |initial_size|). This ends up being important because resize +// operations are slow, and because the base class needs to write directly +// into the buffer. +// +// Therefore, the user should call Complete() before using the string that +// this class wrote into. +class COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput { +public: + StdStringCanonOutput(std::string* str); + ~StdStringCanonOutput() override; + + // Must be called after writing has completed but before the string is used. + void Complete(); + + void Resize(int sz) override; + +protected: + std::string* str_; + DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput); +}; + +} // namespace chromium_url + +#endif // URL_URL_CANON_STDSTRING_H_ diff --git a/source/common/chromium_url/url_parse.h b/source/common/chromium_url/url_parse.h new file mode 100644 index 000000000000..b840af60438d --- /dev/null +++ b/source/common/chromium_url/url_parse.h @@ -0,0 +1,49 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_PARSE_H_ +#define URL_PARSE_H_ + +namespace chromium_url { + +// Component ------------------------------------------------------------------ + +// Represents a substring for URL parsing. +struct Component { + Component() : begin(0), len(-1) {} + + // Normal constructor: takes an offset and a length. + Component(int b, int l) : begin(b), len(l) {} + + int end() const { return begin + len; } + + // Returns true if this component is valid, meaning the length is given. Even + // valid components may be empty to record the fact that they exist. + bool is_valid() const { return (len != -1); } + + // Returns true if the given component is specified on false, the component + // is either empty or invalid. + bool is_nonempty() const { return (len > 0); } + + void reset() { + begin = 0; + len = -1; + } + + bool operator==(const Component& other) const { return begin == other.begin && len == other.len; } + + int begin; // Byte offset in the string of this component. + int len; // Will be -1 if the component is unspecified. +}; + +// Helper that returns a component created with the given begin and ending +// points. The ending point is non-inclusive. +inline Component MakeRange(int begin, int end) { return Component(begin, end - begin); } + +} // namespace chromium_url + +#endif // URL_PARSE_H_ diff --git a/source/common/chromium_url/url_parse_internal.h b/source/common/chromium_url/url_parse_internal.h new file mode 100644 index 000000000000..0ca47bc48846 --- /dev/null +++ b/source/common/chromium_url/url_parse_internal.h @@ -0,0 +1,18 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_PARSE_INTERNAL_H_ +#define URL_URL_PARSE_INTERNAL_H_ + +namespace chromium_url { + +// We treat slashes and backslashes the same for IE compatibility. 
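A small illustration of the Component semantics defined in url_parse.h above; the values are arbitrary and the snippet is only a sketch:

chromium_url::Component unset;                                 // begin = 0, len = -1
chromium_url::Component empty(5, 0);                           // present in the string, but empty
chromium_url::Component range = chromium_url::MakeRange(1, 4); // begin = 1, len = 3

// unset.is_valid() == false, unset.is_nonempty() == false
// empty.is_valid() == true,  empty.is_nonempty() == false
// range.is_valid() == true,  range.is_nonempty() == true, range.end() == 4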
+inline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\'; } + +} // namespace chromium_url + +#endif // URL_URL_PARSE_INTERNAL_H_ diff --git a/source/common/common/fancy_logger.cc b/source/common/common/fancy_logger.cc index a6c1ff215de0..3c28414cbf40 100644 --- a/source/common/common/fancy_logger.cc +++ b/source/common/common/fancy_logger.cc @@ -90,6 +90,15 @@ void FancyContext::setAllFancyLoggers(spdlog::level::level_enum level) } } +FancyLogLevelMap FancyContext::getAllFancyLogLevelsForTest() ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + FancyLogLevelMap log_levels; + absl::ReaderMutexLock l(&fancy_log_lock_); + for (const auto& it : *fancy_log_map_) { + log_levels[it.first] = it.second->level(); + } + return log_levels; +} + void FancyContext::initSink() { spdlog::sink_ptr sink = Logger::Registry::getSink(); Logger::DelegatingLogSinkSharedPtr sp = std::static_pointer_cast(sink); diff --git a/source/common/common/fancy_logger.h b/source/common/common/fancy_logger.h index 3b563217cd9d..bd3e37d2dbd4 100644 --- a/source/common/common/fancy_logger.h +++ b/source/common/common/fancy_logger.h @@ -13,6 +13,7 @@ namespace Envoy { using SpdLoggerSharedPtr = std::shared_ptr; using FancyMap = absl::flat_hash_map; using FancyMapPtr = std::shared_ptr; +using FancyLogLevelMap = absl::flat_hash_map; /** * Stores the lock and functions used by Fancy Logger's macro so that we don't need to declare @@ -55,6 +56,12 @@ class FancyContext { */ void setAllFancyLoggers(spdlog::level::level_enum level) ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + /** + * Obtain a map from logger key to log level. Useful for testing, e.g. in macros such as + * EXPECT_LOG_CONTAINS_ALL_OF_HELPER. + */ + FancyLogLevelMap getAllFancyLogLevelsForTest() ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + private: /** * Initializes sink for the initialization of loggers, needed only in benchmark test. @@ -97,8 +104,10 @@ FancyContext& getFancyContext(); ::Envoy::getFancyContext().initFancyLogger(FANCY_KEY, flogger); \ local_flogger = flogger.load(std::memory_order_relaxed); \ } \ - local_flogger->log(spdlog::source_loc{__FILE__, __LINE__, __func__}, \ - ENVOY_SPDLOG_LEVEL(LEVEL), __VA_ARGS__); \ + if (ENVOY_LOG_COMP_LEVEL(*local_flogger, LEVEL)) { \ + local_flogger->log(spdlog::source_loc{__FILE__, __LINE__, __func__}, \ + ENVOY_SPDLOG_LEVEL(LEVEL), __VA_ARGS__); \ + } \ } while (0) /** diff --git a/source/common/common/fmt.h b/source/common/common/fmt.h index 1c37d0cf32b6..fe808a2139a1 100644 --- a/source/common/common/fmt.h +++ b/source/common/common/fmt.h @@ -12,8 +12,8 @@ namespace fmt { // Provide an implementation of formatter for fmt::format that allows absl::string_view to be // formatted with the same format specifiers available to std::string. -// TODO(zuercher): Once absl::string_view is replaced with std::string_view, this can be removed -// as fmtlib handles std::string_view natively. +// TODO(zuercher): Once absl::string_view is replaced with the std type, this can be removed +// as fmtlib handles string_view natively. 
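For context, a minimal sketch of what the formatter specialization below enables; the function name and values here are illustrative only:

#include <string>

#include "common/common/fmt.h"

#include "absl/strings/string_view.h"

std::string formatClusterName() {
  const absl::string_view name = "cluster_0";
  // With the specialization, absl::string_view accepts the same format
  // specifiers as std::string, e.g. alignment and width.
  return fmt::format("[{:>12}]", name); // "[   cluster_0]"
}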
// NOLINTNEXTLINE(readability-identifier-naming) template <> struct formatter : formatter { auto format(absl::string_view absl_string_view, fmt::format_context& ctx) -> decltype(ctx.out()) { diff --git a/source/common/common/logger.h b/source/common/common/logger.h index f30dd283e99c..c22b29b873db 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -339,7 +339,7 @@ template class Loggable { #define ENVOY_SPDLOG_LEVEL(LEVEL) \ (static_cast(Envoy::Logger::Logger::LEVEL)) -#define ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL) (ENVOY_SPDLOG_LEVEL(LEVEL) >= LOGGER.level()) +#define ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL) (ENVOY_SPDLOG_LEVEL(LEVEL) >= (LOGGER).level()) // Compare levels before invoking logger. This is an optimization to avoid // executing expressions computing log contents when they would be suppressed. @@ -355,12 +355,17 @@ template class Loggable { #define ENVOY_LOG_CHECK_LEVEL(LEVEL) ENVOY_LOG_COMP_LEVEL(ENVOY_LOGGER(), LEVEL) /** - * Convenience macro to log to a user-specified logger. - * Maps directly to ENVOY_LOG_COMP_AND_LOG - it could contain macro logic itself, without - * redirection, but left in case various implementations are required in the future (based on log - * level for example). + * Convenience macro to log to a user-specified logger. When fancy logging is used, the specific + * logger is ignored and instead the file-specific logger is used. */ -#define ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ...) ENVOY_LOG_COMP_AND_LOG(LOGGER, LEVEL, ##__VA_ARGS__) +#define ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ...) \ + do { \ + if (Envoy::Logger::Context::useFancyLogger()) { \ + FANCY_LOG(LEVEL, ##__VA_ARGS__); \ + } else { \ + ENVOY_LOG_COMP_AND_LOG(LOGGER, LEVEL, ##__VA_ARGS__); \ + } \ + } while (0) /** * Convenience macro to get logger. @@ -393,69 +398,89 @@ template class Loggable { /** * Command line options for log macros: use Fancy Logger or not. */ -#define ENVOY_LOG(LEVEL, ...) \ - do { \ - if (Envoy::Logger::Context::useFancyLogger()) { \ - FANCY_LOG(LEVEL, ##__VA_ARGS__); \ - } else { \ - ENVOY_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__); \ - } \ - } while (0) +#define ENVOY_LOG(LEVEL, ...) ENVOY_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__) -#define ENVOY_LOG_FIRST_N(LEVEL, N, ...) \ +#define ENVOY_LOG_FIRST_N_TO_LOGGER(LOGGER, LEVEL, N, ...) \ do { \ - if (ENVOY_LOG_COMP_LEVEL(ENVOY_LOGGER(), LEVEL)) { \ + if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) { \ static auto* countdown = new std::atomic(); \ if (countdown->fetch_add(1) < N) { \ - ENVOY_LOG(LEVEL, ##__VA_ARGS__); \ + ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__); \ } \ } \ } while (0) -#define ENVOY_LOG_ONCE(LEVEL, ...) \ - do { \ - ENVOY_LOG_FIRST_N(LEVEL, 1, ##__VA_ARGS__); \ - } while (0) +#define ENVOY_LOG_FIRST_N(LEVEL, N, ...) \ + ENVOY_LOG_FIRST_N_TO_LOGGER(ENVOY_LOGGER(), LEVEL, N, ##__VA_ARGS__) -#define ENVOY_LOG_EVERY_NTH(LEVEL, N, ...) \ +#define ENVOY_LOG_FIRST_N_MISC(LEVEL, N, ...) \ + ENVOY_LOG_FIRST_N_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, N, ##__VA_ARGS__) + +#define ENVOY_LOG_ONCE_TO_LOGGER(LOGGER, LEVEL, ...) \ + ENVOY_LOG_FIRST_N_TO_LOGGER(LOGGER, LEVEL, 1, ##__VA_ARGS__) + +#define ENVOY_LOG_ONCE(LEVEL, ...) ENVOY_LOG_ONCE_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__) + +#define ENVOY_LOG_ONCE_MISC(LEVEL, ...) \ + ENVOY_LOG_ONCE_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, ##__VA_ARGS__) + +#define ENVOY_LOG_EVERY_NTH_TO_LOGGER(LOGGER, LEVEL, N, ...) 
\ do { \ - if (ENVOY_LOG_COMP_LEVEL(ENVOY_LOGGER(), LEVEL)) { \ + if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) { \ static auto* count = new std::atomic(); \ if ((count->fetch_add(1) % N) == 0) { \ - ENVOY_LOG(LEVEL, ##__VA_ARGS__); \ + ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__); \ } \ } \ } while (0) -#define ENVOY_LOG_EVERY_POW_2(LEVEL, ...) \ +#define ENVOY_LOG_EVERY_NTH(LEVEL, N, ...) \ + ENVOY_LOG_EVERY_NTH_TO_LOGGER(ENVOY_LOGGER(), LEVEL, N, ##__VA_ARGS__) + +#define ENVOY_LOG_EVERY_NTH_MISC(LEVEL, N, ...) \ + ENVOY_LOG_EVERY_NTH_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, N, ##__VA_ARGS__) + +#define ENVOY_LOG_EVERY_POW_2_TO_LOGGER(LOGGER, LEVEL, ...) \ do { \ - if (ENVOY_LOG_COMP_LEVEL(ENVOY_LOGGER(), LEVEL)) { \ + if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) { \ static auto* count = new std::atomic(); \ if (std::bitset<64>(1 /* for the first hit*/ + count->fetch_add(1)).count() == 1) { \ - ENVOY_LOG(LEVEL, ##__VA_ARGS__); \ + ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__); \ } \ } \ } while (0) +#define ENVOY_LOG_EVERY_POW_2(LEVEL, ...) \ + ENVOY_LOG_EVERY_POW_2_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__) + +#define ENVOY_LOG_EVERY_POW_2_MISC(LEVEL, ...) \ + ENVOY_LOG_EVERY_POW_2_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, ##__VA_ARGS__) + // This is to get us to pass the format check. We reference a real-world time source here. // We'd have to introduce a singleton for a time source here, and consensus was that avoiding // that is preferable. using t_logclock = std::chrono::steady_clock; // NOLINT -#define ENVOY_LOG_PERIODIC(LEVEL, CHRONO_DURATION, ...) \ +#define ENVOY_LOG_PERIODIC_TO_LOGGER(LOGGER, LEVEL, CHRONO_DURATION, ...) \ do { \ - if (ENVOY_LOG_COMP_LEVEL(ENVOY_LOGGER(), LEVEL)) { \ + if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) { \ static auto* last_hit = new std::atomic(); \ auto last = last_hit->load(); \ const auto now = t_logclock::now().time_since_epoch().count(); \ if ((now - last) > \ std::chrono::duration_cast(CHRONO_DURATION).count() && \ last_hit->compare_exchange_strong(last, now)) { \ - ENVOY_LOG(LEVEL, ##__VA_ARGS__); \ + ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__); \ } \ } \ } while (0) +#define ENVOY_LOG_PERIODIC(LEVEL, CHRONO_DURATION, ...) \ + ENVOY_LOG_PERIODIC_TO_LOGGER(ENVOY_LOGGER(), LEVEL, CHRONO_DURATION, ##__VA_ARGS__) + +#define ENVOY_LOG_PERIODIC_MISC(LEVEL, CHRONO_DURATION, ...) 
\ + ENVOY_LOG_PERIODIC_TO_LOGGER(GET_MISC_LOGGER(), LEVEL, CHRONO_DURATION, ##__VA_ARGS__) + #define ENVOY_FLUSH_LOG() \ do { \ if (Envoy::Logger::Context::useFancyLogger()) { \ diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index efdbb3ee29d5..0c72f30c816e 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -309,7 +309,7 @@ bool StringUtil::findToken(absl::string_view source, absl::string_view delimiter absl::string_view key_token, bool trim_whitespace) { const auto tokens = splitToken(source, delimiters, trim_whitespace); if (trim_whitespace) { - for (const auto token : tokens) { + for (const auto& token : tokens) { if (key_token == trim(token)) { return true; } diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 9b51dbb38b03..61d0bf4b4445 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -14,6 +14,7 @@ envoy_cc_library( hdrs = ["api_type_oracle.h"], deps = [ "//source/common/protobuf", + "//source/common/protobuf:type_util_lib", "@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto", ], ) @@ -66,6 +67,7 @@ envoy_cc_library( deps = [ "//include/envoy/config:subscription_interface", "//source/common/protobuf:utility_lib", + "@com_github_cncf_udpa//udpa/core/v1:pkg_cc_proto", ], ) @@ -303,11 +305,13 @@ envoy_cc_library( ":http_subscription_lib", ":new_grpc_mux_lib", ":type_to_endpoint_lib", + ":udpa_resource_lib", ":utility_lib", "//include/envoy/config:subscription_factory_interface", "//include/envoy/config:subscription_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/common:minimal_logger_lib", + "//source/common/http:utility_lib", "//source/common/protobuf", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/config/api_type_oracle.cc b/source/common/config/api_type_oracle.cc index 161ecf058610..f6feba09828a 100644 --- a/source/common/config/api_type_oracle.cc +++ b/source/common/config/api_type_oracle.cc @@ -31,5 +31,15 @@ ApiTypeOracle::getEarlierVersionMessageTypeName(const std::string& message_type) } return absl::nullopt; } + +const absl::optional ApiTypeOracle::getEarlierTypeUrl(const std::string& type_url) { + const std::string type{TypeUtil::typeUrlToDescriptorFullName(type_url)}; + absl::optional old_type = ApiTypeOracle::getEarlierVersionMessageTypeName(type); + if (old_type.has_value()) { + return TypeUtil::descriptorFullNameToTypeUrl(old_type.value()); + } + return {}; +} + } // namespace Config } // namespace Envoy diff --git a/source/common/config/api_type_oracle.h b/source/common/config/api_type_oracle.h index cd0c5971ee52..ab5e75b113d7 100644 --- a/source/common/config/api_type_oracle.h +++ b/source/common/config/api_type_oracle.h @@ -1,7 +1,9 @@ #pragma once #include "common/protobuf/protobuf.h" +#include "common/protobuf/type_util.h" +#include "absl/strings/string_view.h" #include "absl/types/optional.h" namespace Envoy { @@ -22,6 +24,8 @@ class ApiTypeOracle { static const absl::optional getEarlierVersionMessageTypeName(const std::string& message_type); + + static const absl::optional getEarlierTypeUrl(const std::string& type_url); }; } // namespace Config diff --git a/source/common/config/datasource.h b/source/common/config/datasource.h index 4b3ccdb17ffd..c3969ee7f450 100644 --- a/source/common/config/datasource.h +++ b/source/common/config/datasource.h @@ -3,6 +3,7 @@ #include "envoy/api/api.h" #include "envoy/common/random_generator.h" #include "envoy/config/core/v3/base.pb.h" +#include 
"envoy/event/deferred_deletable.h" #include "envoy/init/manager.h" #include "envoy/upstream/cluster_manager.h" @@ -59,7 +60,8 @@ class LocalAsyncDataProvider { using LocalAsyncDataProviderPtr = std::unique_ptr; -class RemoteAsyncDataProvider : public Config::DataFetcher::RemoteDataFetcherCallback, +class RemoteAsyncDataProvider : public Event::DeferredDeletable, + public Config::DataFetcher::RemoteDataFetcherCallback, public Logger::Loggable { public: RemoteAsyncDataProvider(Upstream::ClusterManager& cm, Init::Manager& manager, diff --git a/source/common/config/decoded_resource_impl.h b/source/common/config/decoded_resource_impl.h index 669878020302..559eef874c5b 100644 --- a/source/common/config/decoded_resource_impl.h +++ b/source/common/config/decoded_resource_impl.h @@ -4,6 +4,8 @@ #include "common/protobuf/utility.h" +#include "udpa/core/v1/collection_entry.pb.h" + namespace Envoy { namespace Config { @@ -28,6 +30,11 @@ class DecodedResourceImpl : public DecodedResource { const envoy::service::discovery::v3::Resource& resource) : DecodedResourceImpl(resource_decoder, resource.name(), resource.aliases(), resource.resource(), resource.has_resource(), resource.version()) {} + DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, + const udpa::core::v1::CollectionEntry::InlineEntry& inline_entry) + : DecodedResourceImpl(resource_decoder, inline_entry.name(), + Protobuf::RepeatedPtrField(), inline_entry.resource(), + true, inline_entry.version()) {} DecodedResourceImpl(ProtobufTypes::MessagePtr resource, const std::string& name, const std::vector& aliases, const std::string& version) : resource_(std::move(resource)), has_resource_(true), name_(name), aliases_(aliases), @@ -64,11 +71,15 @@ struct DecodedResourcesWrapper { const Protobuf::RepeatedPtrField& resources, const std::string& version) { for (const auto& resource : resources) { - owned_resources_.emplace_back(new DecodedResourceImpl(resource_decoder, resource, version)); - refvec_.emplace_back(*owned_resources_.back()); + pushBack(std::make_unique(resource_decoder, resource, version)); } } + void pushBack(Config::DecodedResourcePtr&& resource) { + owned_resources_.push_back(std::move(resource)); + refvec_.emplace_back(*owned_resources_.back()); + } + std::vector owned_resources_; std::vector refvec_; }; diff --git a/source/common/config/filesystem_subscription_impl.cc b/source/common/config/filesystem_subscription_impl.cc index 79d505d76361..13d3829d1cde 100644 --- a/source/common/config/filesystem_subscription_impl.cc +++ b/source/common/config/filesystem_subscription_impl.cc @@ -46,27 +46,34 @@ void FilesystemSubscriptionImpl::configRejected(const EnvoyException& e, callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); } +std::string FilesystemSubscriptionImpl::refreshInternal(ProtobufTypes::MessagePtr* config_update) { + auto owned_message = std::make_unique(); + auto& message = *owned_message; + MessageUtil::loadFromFile(path_, message, validation_visitor_, api_); + *config_update = std::move(owned_message); + const auto decoded_resources = + DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info()); + callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info()); + return message.version_info(); +} + void FilesystemSubscriptionImpl::refresh() { ENVOY_LOG(debug, "Filesystem config refresh for {}", path_); stats_.update_attempt_.inc(); - bool config_update_available = false; - envoy::service::discovery::v3::DiscoveryResponse message; + 
ProtobufTypes::MessagePtr config_update; try { - MessageUtil::loadFromFile(path_, message, validation_visitor_, api_); - config_update_available = true; - const auto decoded_resources = - DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info()); - callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info()); + const std::string version = refreshInternal(&config_update); stats_.update_time_.set(DateUtil::nowToMilliseconds(api_.timeSource())); - stats_.version_.set(HashUtil::xxHash64(message.version_info())); - stats_.version_text_.set(message.version_info()); + stats_.version_.set(HashUtil::xxHash64(version)); + stats_.version_text_.set(version); stats_.update_success_.inc(); - ENVOY_LOG(debug, "Filesystem config update accepted for {}: {}", path_, message.DebugString()); + ENVOY_LOG(debug, "Filesystem config update accepted for {}: {}", path_, + config_update->DebugString()); } catch (const ProtobufMessage::UnknownProtoFieldException& e) { - configRejected(e, message.DebugString()); + configRejected(e, config_update == nullptr ? "" : config_update->DebugString()); } catch (const EnvoyException& e) { - if (config_update_available) { - configRejected(e, message.DebugString()); + if (config_update != nullptr) { + configRejected(e, config_update->DebugString()); } else { ENVOY_LOG(warn, "Filesystem config update failure: {}", e.what()); stats_.update_failure_.inc(); @@ -77,5 +84,58 @@ void FilesystemSubscriptionImpl::refresh() { } } +FilesystemCollectionSubscriptionImpl::FilesystemCollectionSubscriptionImpl( + Event::Dispatcher& dispatcher, absl::string_view path, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) + : FilesystemSubscriptionImpl(dispatcher, path, callbacks, resource_decoder, stats, + validation_visitor, api) {} + +std::string +FilesystemCollectionSubscriptionImpl::refreshInternal(ProtobufTypes::MessagePtr* config_update) { + auto owned_resource_message = std::make_unique(); + auto& resource_message = *owned_resource_message; + MessageUtil::loadFromFile(path_, resource_message, validation_visitor_, api_); + // Dynamically load the collection message. + const std::string collection_type = + std::string(TypeUtil::typeUrlToDescriptorFullName(resource_message.resource().type_url())); + const Protobuf::Descriptor* collection_descriptor = + Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(collection_type); + if (collection_descriptor == nullptr) { + throw EnvoyException(fmt::format("Unknown collection type {}", collection_type)); + } + Protobuf::DynamicMessageFactory dmf; + ProtobufTypes::MessagePtr collection_message; + collection_message.reset(dmf.GetPrototype(collection_descriptor)->New()); + MessageUtil::unpackTo(resource_message.resource(), *collection_message); + const auto* collection_entries_field_descriptor = collection_descriptor->field(0); + // Verify collection message type structure. 
+ if (collection_entries_field_descriptor == nullptr || + collection_entries_field_descriptor->type() != Protobuf::FieldDescriptor::TYPE_MESSAGE || + collection_entries_field_descriptor->message_type()->full_name() != + "udpa.core.v1.CollectionEntry" || + !collection_entries_field_descriptor->is_repeated()) { + throw EnvoyException(fmt::format("Invalid structure for collection type {} in {}", + collection_type, resource_message.DebugString())); + } + const auto* reflection = collection_message->GetReflection(); + const uint32_t num_entries = + reflection->FieldSize(*collection_message, collection_entries_field_descriptor); + DecodedResourcesWrapper decoded_resources; + for (uint32_t i = 0; i < num_entries; ++i) { + udpa::core::v1::CollectionEntry collection_entry; + collection_entry.MergeFrom(reflection->GetRepeatedMessage( + *collection_message, collection_entries_field_descriptor, i)); + // TODO(htuch): implement indirect collection entries. + if (collection_entry.has_inline_entry()) { + decoded_resources.pushBack(std::make_unique( + resource_decoder_, collection_entry.inline_entry())); + } + } + *config_update = std::move(owned_resource_message); + callbacks_.onConfigUpdate(decoded_resources.refvec_, resource_message.version()); + return resource_message.version(); +} + } // namespace Config } // namespace Envoy diff --git a/source/common/config/filesystem_subscription_impl.h b/source/common/config/filesystem_subscription_impl.h index eb23ffdc9330..2a817180d2a8 100644 --- a/source/common/config/filesystem_subscription_impl.h +++ b/source/common/config/filesystem_subscription_impl.h @@ -17,7 +17,7 @@ namespace Config { * lists of xDS resources. */ class FilesystemSubscriptionImpl : public Config::Subscription, - Logger::Loggable { + protected Logger::Loggable { public: FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path, SubscriptionCallbacks& callbacks, @@ -33,7 +33,8 @@ class FilesystemSubscriptionImpl : public Config::Subscription, NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } -private: +protected: + virtual std::string refreshInternal(ProtobufTypes::MessagePtr* config_update); void refresh(); void configRejected(const EnvoyException& e, const std::string& message); @@ -47,5 +48,19 @@ class FilesystemSubscriptionImpl : public Config::Subscription, ProtobufMessage::ValidationVisitor& validation_visitor_; }; +// Currently a FilesystemSubscriptionImpl subclass, but this will need to change when we support +// non-inline collection resources. 
+class FilesystemCollectionSubscriptionImpl : public FilesystemSubscriptionImpl { +public: + FilesystemCollectionSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path, + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, + SubscriptionStats stats, + ProtobufMessage::ValidationVisitor& validation_visitor, + Api::Api& api); + + std::string refreshInternal(ProtobufTypes::MessagePtr* config_update) override; +}; + } // namespace Config } // namespace Envoy diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 2be7cd4fddd7..b415776d6b8e 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -23,7 +23,9 @@ GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), local_info_(local_info), skip_subsequent_node_(skip_subsequent_node), - first_stream_request_(true), transport_api_version_(transport_api_version) { + first_stream_request_(true), transport_api_version_(transport_api_version), + enable_type_url_downgrade_and_upgrade_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.enable_type_url_downgrade_and_upgrade")) { Config::Utility::checkLocalInfo("ads", local_info); } @@ -76,6 +78,9 @@ GrpcMuxWatchPtr GrpcMuxImpl::addWatch(const std::string& type_url, api_state_[type_url].request_.mutable_node()->MergeFrom(local_info_.node()); api_state_[type_url].subscribed_ = true; subscriptions_.emplace_back(type_url); + if (enable_type_url_downgrade_and_upgrade_) { + registerVersionedTypeUrl(type_url); + } } // This will send an updated request on each subscription. @@ -113,19 +118,41 @@ ScopedResume GrpcMuxImpl::pause(const std::vector type_urls) { }); } +void GrpcMuxImpl::registerVersionedTypeUrl(const std::string& type_url) { + TypeUrlMap& type_url_map = typeUrlMap(); + if (type_url_map.find(type_url) != type_url_map.end()) { + return; + } + // If type_url is v3, earlier_type_url will contain v2 type url. + const absl::optional earlier_type_url = ApiTypeOracle::getEarlierTypeUrl(type_url); + // Register v2 to v3 and v3 to v2 type_url mapping in the hash map. + if (earlier_type_url.has_value()) { + type_url_map[earlier_type_url.value()] = type_url; + type_url_map[type_url] = earlier_type_url.value(); + } +} + void GrpcMuxImpl::onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats& control_plane_stats) { - const std::string& type_url = message->type_url(); + std::string type_url = message->type_url(); ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url, message->version_info()); if (message->has_control_plane()) { control_plane_stats.identifier_.set(message->control_plane().identifier()); } + // If this type url is not watched(no subscriber or no watcher), try another version of type url. + if (enable_type_url_downgrade_and_upgrade_ && api_state_.count(type_url) == 0) { + registerVersionedTypeUrl(type_url); + TypeUrlMap& type_url_map = typeUrlMap(); + if (type_url_map.find(type_url) != type_url_map.end()) { + type_url = type_url_map[type_url]; + } + } if (api_state_.count(type_url) == 0) { - ENVOY_LOG(warn, "Ignoring the message for type URL {} as it has no current subscribers.", - type_url); // TODO(yuval-k): This should never happen. 
consider dropping the stream as this is a // protocol violation + ENVOY_LOG(warn, "Ignoring the message for type URL {} as it has no current subscribers.", + type_url); return; } if (api_state_[type_url].watches_.empty()) { @@ -164,10 +191,10 @@ void GrpcMuxImpl::onDiscoveryResponse( OpaqueResourceDecoder& resource_decoder = api_state_[type_url].watches_.front()->resource_decoder_; for (const auto& resource : message->resources()) { - if (type_url != resource.type_url()) { + if (message->type_url() != resource.type_url()) { throw EnvoyException( fmt::format("{} does not match the message-wide type URL {} in DiscoveryResponse {}", - resource.type_url(), type_url, message->DebugString())); + resource.type_url(), message->type_url(), message->DebugString())); } resources.emplace_back( new DecodedResourceImpl(resource_decoder, resource, message->version_info())); diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 06acf1a78ef7..1006b165c5ba 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -20,6 +20,7 @@ #include "common/config/api_version.h" #include "common/config/grpc_stream.h" #include "common/config/utility.h" +#include "common/runtime/runtime_features.h" #include "absl/container/node_hash_map.h" @@ -60,6 +61,7 @@ class GrpcMuxImpl : public GrpcMux, // Config::GrpcStreamCallbacks void onStreamEstablished() override; void onEstablishmentFailure() override; + void registerVersionedTypeUrl(const std::string& type_url); void onDiscoveryResponse(std::unique_ptr&& message, ControlPlaneStats& control_plane_stats) override; @@ -147,6 +149,7 @@ class GrpcMuxImpl : public GrpcMux, // This string is a type URL. std::unique_ptr> request_queue_; const envoy::config::core::v3::ApiVersion transport_api_version_; + bool enable_type_url_downgrade_and_upgrade_; }; using GrpcMuxImplPtr = std::unique_ptr; diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc index 9c4616766fed..534af337b0f7 100644 --- a/source/common/config/http_subscription_impl.cc +++ b/source/common/config/http_subscription_impl.cc @@ -70,10 +70,9 @@ void HttpSubscriptionImpl::createRequest(Http::RequestMessage& request) { stats_.update_attempt_.inc(); request.headers().setReferenceMethod(Http::Headers::get().MethodValues.Post); request.headers().setPath(path_); - request.body() = std::make_unique( - VersionConverter::getJsonStringFromMessage(request_, transport_api_version_)); + request.body().add(VersionConverter::getJsonStringFromMessage(request_, transport_api_version_)); request.headers().setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - request.headers().setContentLength(request.body()->length()); + request.headers().setContentLength(request.body().length()); } void HttpSubscriptionImpl::parseResponse(const Http::ResponseMessage& response) { diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index 4b72f94fb8f1..0015a2689971 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -23,7 +23,9 @@ NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, const LocalInfo::LocalInfo& local_info) : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), - local_info_(local_info), transport_api_version_(transport_api_version) {} + local_info_(local_info), transport_api_version_(transport_api_version), + 
enable_type_url_downgrade_and_upgrade_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.enable_type_url_downgrade_and_upgrade")) {} ScopedResume NewGrpcMuxImpl::pause(const std::string& type_url) { return pause(std::vector{type_url}); @@ -44,12 +46,36 @@ ScopedResume NewGrpcMuxImpl::pause(const std::vector type_urls) { }); } +void NewGrpcMuxImpl::registerVersionedTypeUrl(const std::string& type_url) { + + TypeUrlMap& type_url_map = typeUrlMap(); + if (type_url_map.find(type_url) != type_url_map.end()) { + return; + } + // If type_url is v3, earlier_type_url will contain v2 type url. + absl::optional earlier_type_url = ApiTypeOracle::getEarlierTypeUrl(type_url); + // Register v2 to v3 and v3 to v2 type_url mapping in the hash map. + if (earlier_type_url.has_value()) { + type_url_map[earlier_type_url.value()] = type_url; + type_url_map[type_url] = earlier_type_url.value(); + } +} + void NewGrpcMuxImpl::onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats&) { ENVOY_LOG(debug, "Received DeltaDiscoveryResponse for {} at version {}", message->type_url(), message->system_version_info()); auto sub = subscriptions_.find(message->type_url()); + // If this type url is not watched, try another version type url. + if (enable_type_url_downgrade_and_upgrade_ && sub == subscriptions_.end()) { + const std::string& type_url = message->type_url(); + registerVersionedTypeUrl(type_url); + TypeUrlMap& type_url_map = typeUrlMap(); + if (type_url_map.find(type_url) != type_url_map.end()) { + sub = subscriptions_.find(type_url_map[type_url]); + } + } if (sub == subscriptions_.end()) { ENVOY_LOG(warn, "Dropping received DeltaDiscoveryResponse (with version {}) for non-existent " @@ -107,6 +133,9 @@ GrpcMuxWatchPtr NewGrpcMuxImpl::addWatch(const std::string& type_url, auto entry = subscriptions_.find(type_url); if (entry == subscriptions_.end()) { // We don't yet have a subscription for type_url! Make one! 
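As a concrete illustration of the upgrade/downgrade mapping that registerVersionedTypeUrl() builds (the Cluster pairing below assumes the API type oracle maps the v3 type to its v2 predecessor and is shown only as an example):

// After registerVersionedTypeUrl("type.googleapis.com/envoy.config.cluster.v3.Cluster"),
// typeUrlMap() holds both directions:
//   "type.googleapis.com/envoy.api.v2.Cluster"            -> "type.googleapis.com/envoy.config.cluster.v3.Cluster"
//   "type.googleapis.com/envoy.config.cluster.v3.Cluster" -> "type.googleapis.com/envoy.api.v2.Cluster"
// A response that arrives with either type URL can then be matched to a
// subscription created under the other, which is what the lookups in
// onDiscoveryResponse() above rely on.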
+ if (enable_type_url_downgrade_and_upgrade_) { + registerVersionedTypeUrl(type_url); + } addSubscription(type_url, use_namespace_matching); return addWatch(type_url, resources, callbacks, resource_decoder, use_namespace_matching); } diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 4f549556558f..8e64f3e4399f 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -16,6 +16,7 @@ #include "common/config/pausable_ack_queue.h" #include "common/config/watch_map.h" #include "common/grpc/common.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Config { @@ -48,6 +49,8 @@ class NewGrpcMuxImpl ScopedResume pause(const std::string& type_url) override; ScopedResume pause(const std::vector type_urls) override; + void registerVersionedTypeUrl(const std::string& type_url); + void onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats& control_plane_stats) override; @@ -151,6 +154,8 @@ class NewGrpcMuxImpl const LocalInfo::LocalInfo& local_info_; const envoy::config::core::v3::ApiVersion transport_api_version_; + + const bool enable_type_url_downgrade_and_upgrade_; }; using NewGrpcMuxImplPtr = std::unique_ptr; diff --git a/source/common/config/remote_data_fetcher.cc b/source/common/config/remote_data_fetcher.cc index 2572e0091389..4723402e4955 100644 --- a/source/common/config/remote_data_fetcher.cc +++ b/source/common/config/remote_data_fetcher.cc @@ -44,15 +44,15 @@ void RemoteDataFetcher::onSuccess(const Http::AsyncClient::Request&, const uint64_t status_code = Http::Utility::getResponseStatus(response->headers()); if (status_code == enumToInt(Http::Code::OK)) { ENVOY_LOG(debug, "fetch remote data [uri = {}]: success", uri_.uri()); - if (response->body()) { + if (response->body().length() > 0) { auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get(); - const auto content_hash = Hex::encode(crypto_util.getSha256Digest(*response->body())); + const auto content_hash = Hex::encode(crypto_util.getSha256Digest(response->body())); if (content_hash_ != content_hash) { ENVOY_LOG(debug, "fetch remote data [uri = {}]: data is invalid", uri_.uri()); callback_.onFailure(FailureReason::InvalidData); } else { - callback_.onSuccess(response->body()->toString()); + callback_.onSuccess(response->bodyAsString()); } } else { ENVOY_LOG(debug, "fetch remote data [uri = {}]: body is empty", uri_.uri()); diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 6495688add61..9283654cf419 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -8,7 +8,9 @@ #include "common/config/http_subscription_impl.h" #include "common/config/new_grpc_mux_impl.h" #include "common/config/type_to_endpoint.h" +#include "common/config/udpa_resource.h" #include "common/config/utility.h" +#include "common/http/utility.h" #include "common/protobuf/protobuf.h" namespace Envoy { @@ -16,9 +18,9 @@ namespace Config { SubscriptionFactoryImpl::SubscriptionFactoryImpl( const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Upstream::ClusterManager& cm, Random::RandomGenerator& random, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, Runtime::Loader& runtime) - : local_info_(local_info), dispatcher_(dispatcher), cm_(cm), random_(random), + Upstream::ClusterManager& cm, ProtobufMessage::ValidationVisitor& validation_visitor, + Api::Api& 
api, Runtime::Loader& runtime) + : local_info_(local_info), dispatcher_(dispatcher), cm_(cm), validation_visitor_(validation_visitor), api_(api), runtime_(runtime) {} SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( @@ -28,12 +30,13 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Config::Utility::checkLocalInfo(type_url, local_info_); std::unique_ptr result; SubscriptionStats stats = Utility::generateStats(scope); + auto& runtime_snapshot = runtime_.snapshot(); const auto transport_api_version = config.api_config_source().transport_api_version(); if (transport_api_version == envoy::config::core::v3::ApiVersion::V2 && - runtime_.snapshot().runtimeFeatureEnabled( + runtime_snapshot.runtimeFeatureEnabled( "envoy.reloadable_features.enable_deprecated_v2_api_warning")) { - runtime_.snapshot().countDeprecatedFeatureUse(); + runtime_.countDeprecatedFeatureUse(); ENVOY_LOG(warn, "xDS of version v2 has been deprecated and will be removed in subsequent versions"); } @@ -57,8 +60,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( config.DebugString()); case envoy::config::core::v3::ApiConfigSource::REST: return std::make_unique( - local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_, random_, - Utility::apiConfigSourceRefreshDelay(api_config_source), + local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_, + api_.randomGenerator(), Utility::apiConfigSourceRefreshDelay(api_config_source), Utility::apiConfigSourceRequestTimeout(api_config_source), restMethod(type_url, api_config_source.transport_api_version()), type_url, api_config_source.transport_api_version(), callbacks, resource_decoder, stats, @@ -71,7 +74,7 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( api_config_source, scope, true) ->create(), dispatcher_, sotwGrpcMethod(type_url, api_config_source.transport_api_version()), - api_config_source.transport_api_version(), random_, scope, + api_config_source.transport_api_version(), api_.randomGenerator(), scope, Utility::parseRateLimitSettings(api_config_source), api_config_source.set_node_on_first_message_only()), callbacks, resource_decoder, stats, type_url, dispatcher_, @@ -84,7 +87,7 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( api_config_source, scope, true) ->create(), dispatcher_, deltaGrpcMethod(type_url, api_config_source.transport_api_version()), - api_config_source.transport_api_version(), random_, scope, + api_config_source.transport_api_version(), api_.randomGenerator(), scope, Utility::parseRateLimitSettings(api_config_source), local_info_), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), false); @@ -105,5 +108,28 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( NOT_REACHED_GCOVR_EXCL_LINE; } +SubscriptionPtr SubscriptionFactoryImpl::collectionSubscriptionFromUrl( + const udpa::core::v1::ResourceLocator& collection_locator, + const envoy::config::core::v3::ConfigSource& /*config*/, absl::string_view /*type_url*/, + Stats::Scope& scope, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { + std::unique_ptr result; + SubscriptionStats stats = Utility::generateStats(scope); + + switch (collection_locator.scheme()) { + case udpa::core::v1::ResourceLocator::FILE: { + const std::string path = + Http::Utility::localPathFromFilePath(absl::StrJoin(collection_locator.id(), "/")); + 
Utility::checkFilesystemSubscriptionBackingPath(path, api_); + return std::make_unique( + dispatcher_, path, callbacks, resource_decoder, stats, validation_visitor_, api_); + } + default: + throw EnvoyException(fmt::format("Unsupported collection resource locator: {}", + UdpaResourceIdentifier::encodeUrl(collection_locator))); + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + } // namespace Config } // namespace Envoy diff --git a/source/common/config/subscription_factory_impl.h b/source/common/config/subscription_factory_impl.h index 1241229861d4..6e65a7211f13 100644 --- a/source/common/config/subscription_factory_impl.h +++ b/source/common/config/subscription_factory_impl.h @@ -16,7 +16,7 @@ namespace Config { class SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggable { public: SubscriptionFactoryImpl(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Upstream::ClusterManager& cm, Random::RandomGenerator& random, + Upstream::ClusterManager& cm, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, Runtime::Loader& runtime); @@ -25,12 +25,17 @@ class SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggablecallbacks_.onConfigUpdate({}, resource_to_remove, system_version_info); } + // notify empty update + if (added_resources.empty() && removed_resources.empty()) { + for (auto& cur_watch : wildcard_watches_) { + cur_watch->callbacks_.onConfigUpdate({}, {}, system_version_info); + } + } } void WatchMap::onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) { diff --git a/source/common/config/well_known_names.cc b/source/common/config/well_known_names.cc index a9b1662d20f6..e2677cb742a4 100644 --- a/source/common/config/well_known_names.cc +++ b/source/common/config/well_known_names.cc @@ -82,6 +82,9 @@ TagNameValues::TagNameValues() { // tcp.(.) addRegex(TCP_PREFIX, R"(^tcp\.((.*?)\.)\w+?$)"); + // udp.(.) + addRegex(UDP_PREFIX, R"(^udp\.((.*?)\.)\w+?$)"); + // auth.clientssl.(.) addRegex(CLIENTSSL_PREFIX, R"(^auth\.clientssl\.((.*?)\.)\w+?$)"); diff --git a/source/common/config/well_known_names.h b/source/common/config/well_known_names.h index 30698815f9ba..1d3cd09c51d8 100644 --- a/source/common/config/well_known_names.h +++ b/source/common/config/well_known_names.h @@ -95,6 +95,8 @@ class TagNameValues { const std::string RATELIMIT_PREFIX = "envoy.ratelimit_prefix"; // Stats prefix for the TCP Proxy network filter const std::string TCP_PREFIX = "envoy.tcp_prefix"; + // Stats prefix for the UDP Proxy network filter + const std::string UDP_PREFIX = "envoy.udp_prefix"; // Downstream cluster for the Fault http filter const std::string FAULT_DOWNSTREAM_CLUSTER = "envoy.fault_downstream_cluster"; // Operation name for the Dynamo http filter diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index aff522c9f669..bc5293e99318 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -33,7 +33,7 @@ void ConnPoolImplBase::destructAllConnections() { dispatcher_.clearDeferredDeleteList(); } -bool ConnPoolImplBase::shouldCreateNewConnection() const { +bool ConnPoolImplBase::shouldCreateNewConnection(float global_prefetch_ratio) const { // If the host is not healthy, don't make it do extra work, especially as // upstream selection logic may result in bypassing this upstream entirely. 
// If an Envoy user wants prefetching for degraded upstreams this could be @@ -41,6 +41,17 @@ bool ConnPoolImplBase::shouldCreateNewConnection() const { if (host_->health() != Upstream::Host::Health::Healthy) { return pending_streams_.size() > connecting_stream_capacity_; } + + // If global prefetching is on, and this connection is within the global + // prefetch limit, prefetch. + // We may eventually want to track prefetch_attempts to allow more prefetching for + // heavily weighted upstreams or sticky picks. + if (global_prefetch_ratio > 1.0 && + ((pending_streams_.size() + 1 + num_active_streams_) * global_prefetch_ratio > + (connecting_stream_capacity_ + num_active_streams_))) { + return true; + } + // The number of streams we want to be provisioned for is the number of // pending and active streams times the prefetch ratio. // The number of streams we are (theoretically) provisioned for is the @@ -48,13 +59,13 @@ bool ConnPoolImplBase::shouldCreateNewConnection() const { // // If prefetch ratio is not set, it defaults to 1, and this simplifies to the // legacy value of pending_streams_.size() > connecting_stream_capacity_ - return (pending_streams_.size() + num_active_streams_) * prefetchRatio() > + return (pending_streams_.size() + num_active_streams_) * perUpstreamPrefetchRatio() > (connecting_stream_capacity_ + num_active_streams_); } -float ConnPoolImplBase::prefetchRatio() const { +float ConnPoolImplBase::perUpstreamPrefetchRatio() const { if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_prefetch")) { - return host_->cluster().prefetchRatio(); + return host_->cluster().perUpstreamPrefetchRatio(); } else { return 1.0; } @@ -74,9 +85,9 @@ void ConnPoolImplBase::tryCreateNewConnections() { } } -bool ConnPoolImplBase::tryCreateNewConnection() { +bool ConnPoolImplBase::tryCreateNewConnection(float global_prefetch_ratio) { // There are already enough CONNECTING connections for the number of queued streams. - if (!shouldCreateNewConnection()) { + if (!shouldCreateNewConnection(global_prefetch_ratio)) { return false; } @@ -184,6 +195,10 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) } } +bool ConnPoolImplBase::maybePrefetch(float global_prefetch_ratio) { + return tryCreateNewConnection(global_prefetch_ratio); +} + void ConnPoolImplBase::onUpstreamReady() { while (!pending_streams_.empty() && !ready_clients_.empty()) { ActiveClientPtr& client = ready_clients_.front(); @@ -231,7 +246,7 @@ void ConnPoolImplBase::addDrainedCallbackImpl(Instance::DrainedCb cb) { checkForDrained(); } -void ConnPoolImplBase::closeIdleConnections() { +void ConnPoolImplBase::closeIdleConnectionsForDrainingPool() { // Create a separate list of elements to close to avoid mutate-while-iterating problems. std::list to_close; @@ -253,7 +268,7 @@ void ConnPoolImplBase::closeIdleConnections() { } void ConnPoolImplBase::drainConnectionsImpl() { - closeIdleConnections(); + closeIdleConnectionsForDrainingPool(); // closeIdleConnections() closes all connections in ready_clients_ with no active streams, // so all remaining entries in ready_clients_ are serving streams. 
Move them and all entries @@ -275,7 +290,7 @@ void ConnPoolImplBase::checkForDrained() { return; } - closeIdleConnections(); + closeIdleConnectionsForDrainingPool(); if (pending_streams_.empty() && ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty()) { @@ -394,14 +409,14 @@ void ConnPoolImplBase::purgePendingStreams( bool ConnPoolImplBase::connectingConnectionIsExcess() const { ASSERT(connecting_stream_capacity_ >= connecting_clients_.front()->effectiveConcurrentStreamLimit()); - // If prefetchRatio is one, this simplifies to checking if there would still be sufficient - // connecting stream capacity to serve all pending streams if the most recent client were - // removed from the picture. + // If perUpstreamPrefetchRatio is one, this simplifies to checking if there would still be + // sufficient connecting stream capacity to serve all pending streams if the most recent client + // were removed from the picture. // // If prefetch ratio is set, it also factors in the anticipated load based on both queued streams // and active streams, and makes sure the connecting capacity would still be sufficient to serve // that even with the most recent client removed. - return (pending_streams_.size() + num_active_streams_) * prefetchRatio() <= + return (pending_streams_.size() + num_active_streams_) * perUpstreamPrefetchRatio() <= (connecting_stream_capacity_ - connecting_clients_.front()->effectiveConcurrentStreamLimit() + num_active_streams_); } diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 83369795a5ad..85d8f304719d 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -138,17 +138,21 @@ class ConnPoolImplBase : protected Logger::Loggable { absl::string_view failure_reason, ConnectionPool::PoolFailureReason pool_failure_reason); - // Closes any idle connections. - void closeIdleConnections(); + // Closes any idle connections as this pool is drained. + void closeIdleConnectionsForDrainingPool(); // Changes the state_ of an ActiveClient and moves to the appropriate list. void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, Network::ConnectionEvent event); + // See if the drain process has started and/or completed. void checkForDrained(); void onUpstreamReady(); ConnectionPool::Cancellable* newStream(AttachContext& context); + // Called if this pool is likely to be picked soon, to determine if it's worth + // prefetching a connection. + bool maybePrefetch(float global_prefetch_ratio); virtual ConnectionPool::Cancellable* newPendingStream(AttachContext& context) PURE; @@ -176,7 +180,9 @@ class ConnPoolImplBase : protected Logger::Loggable { // Creates a new connection if there is sufficient demand, it is allowed by resourceManager, or // to avoid starving this pool. - bool tryCreateNewConnection(); + // Demand is determined either by perUpstreamPrefetchRatio() or global_prefetch_ratio + // if this is called by maybePrefetch() + bool tryCreateNewConnection(float global_prefetch_ratio = 0); // A helper function which determines if a canceled pending connection should // be closed as excess or not. @@ -184,9 +190,9 @@ class ConnPoolImplBase : protected Logger::Loggable { // A helper function which determines if a new incoming stream should trigger // connection prefetch. 
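To make the prefetch check concrete with illustrative numbers: with 2 pending streams, 4 active streams, and perUpstreamPrefetchRatio() of 1.5, the desired provisioning is (2 + 4) * 1.5 = 9 streams; if connecting_stream_capacity_ + num_active_streams_ is currently only 3 + 4 = 7, shouldCreateNewConnection() returns true and tryCreateNewConnection() establishes another connection. With the default ratio of 1.0 the comparison reduces to the legacy pending_streams_.size() > connecting_stream_capacity_ check.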
- bool shouldCreateNewConnection() const; + bool shouldCreateNewConnection(float global_prefetch_ratio) const; - float prefetchRatio() const; + float perUpstreamPrefetchRatio() const; const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; diff --git a/source/common/event/BUILD b/source/common/event/BUILD index cf0ded8373e9..e84aeb0cde04 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -30,11 +30,14 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/common:thread_lib", "//source/common/filesystem:watcher_lib", - "//source/common/network:connection_lib", "//source/common/network:dns_lib", + "//source/common/network:connection_lib", "//source/common/network:listener_lib", "//source/common/runtime:runtime_features_lib", - ], + ] + select({ + "//bazel:apple": ["//source/common/network:apple_dns_lib"], + "//conditions:default": [], + }), ) envoy_cc_library( @@ -149,3 +152,14 @@ envoy_cc_library( "//include/envoy/event:dispatcher_interface", ], ) + +envoy_cc_library( + name = "scaled_range_timer_manager", + srcs = ["scaled_range_timer_manager.cc"], + hdrs = ["scaled_range_timer_manager.h"], + deps = [ + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:range_timer_interface", + "//source/common/common:scope_tracker", + ], +) diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index 0c19bddf2db6..6cdbb623b720 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -22,6 +22,7 @@ #include "common/network/dns_impl.h" #include "common/network/tcp_listener_impl.h" #include "common/network/udp_listener_impl.h" +#include "common/runtime/runtime_features.h" #include "event2/event.h" @@ -29,6 +30,10 @@ #include "common/signal/signal_action.h" #endif +#ifdef __APPLE__ +#include "common/network/apple_dns_impl.h" +#endif + namespace Envoy { namespace Event { @@ -121,6 +126,23 @@ Network::DnsResolverSharedPtr DispatcherImpl::createDnsResolver( const std::vector& resolvers, const bool use_tcp_for_dns_lookups) { ASSERT(isThreadSafe()); +#ifdef __APPLE__ + static bool use_apple_api_for_dns_lookups = + Runtime::runtimeFeatureEnabled("envoy.restart_features.use_apple_api_for_dns_lookups"); + if (use_apple_api_for_dns_lookups) { + RELEASE_ASSERT( + resolvers.empty(), + "defining custom resolvers is not possible when using Apple APIs for DNS resolution. " + "Apple's API only allows overriding DNS resolvers via system settings. Delete resolvers " + "config or disable the envoy.restart_features.use_apple_api_for_dns_lookups runtime " + "feature."); + RELEASE_ASSERT(!use_tcp_for_dns_lookups, + "using TCP for DNS lookups is not possible when using Apple APIs for DNS " + "resolution. Apple' API only uses UDP for DNS resolution. 
Use UDP or disable " + "the envoy.restart_features.use_apple_api_for_dns_lookups runtime feature."); + return Network::DnsResolverSharedPtr{new Network::AppleDnsResolverImpl(*this)}; + } +#endif return Network::DnsResolverSharedPtr{ new Network::DnsResolverImpl(*this, resolvers, use_tcp_for_dns_lookups)}; } @@ -140,11 +162,11 @@ Network::ListenerPtr DispatcherImpl::createListener(Network::SocketSharedPtr&& s Network::TcpListenerCallbacks& cb, bool bind_to_port, uint32_t backlog_size) { ASSERT(isThreadSafe()); - return std::make_unique(*this, std::move(socket), cb, bind_to_port, - backlog_size); + return std::make_unique( + *this, api_.randomGenerator(), std::move(socket), cb, bind_to_port, backlog_size); } -Network::UdpListenerPtr DispatcherImpl::createUdpListener(Network::SocketSharedPtr&& socket, +Network::UdpListenerPtr DispatcherImpl::createUdpListener(Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb) { ASSERT(isThreadSafe()); return std::make_unique(*this, std::move(socket), cb, timeSource()); @@ -168,7 +190,7 @@ void DispatcherImpl::deferredDelete(DeferredDeletablePtr&& to_delete) { ASSERT(isThreadSafe()); current_to_delete_->emplace_back(std::move(to_delete)); ENVOY_LOG(trace, "item added to deferred deletion list (size={})", current_to_delete_->size()); - if (1 == current_to_delete_->size()) { + if (current_to_delete_->size() == 1) { deferred_delete_cb_->scheduleCallbackCurrentIteration(); } } diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index c81498c89f79..4b05b355410c 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -62,7 +62,7 @@ class DispatcherImpl : Logger::Loggable, Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket, Network::TcpListenerCallbacks& cb, bool bind_to_port, uint32_t backlog_size) override; - Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr&& socket, + Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb) override; TimerPtr createTimer(TimerCb cb) override; Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) override; diff --git a/source/common/event/file_event_impl.h b/source/common/event/file_event_impl.h index e4044fd25194..cc3e505d788b 100644 --- a/source/common/event/file_event_impl.h +++ b/source/common/event/file_event_impl.h @@ -41,6 +41,5 @@ class FileEventImpl : public FileEvent, ImplBase { // polling and activating new fd events. const bool activate_fd_events_next_event_loop_; }; - } // namespace Event } // namespace Envoy diff --git a/source/common/event/scaled_range_timer_manager.cc b/source/common/event/scaled_range_timer_manager.cc new file mode 100644 index 000000000000..10ac02b713cd --- /dev/null +++ b/source/common/event/scaled_range_timer_manager.cc @@ -0,0 +1,257 @@ +#include "common/event/scaled_range_timer_manager.h" + +#include +#include +#include + +#include "envoy/event/range_timer.h" +#include "envoy/event/timer.h" + +#include "common/common/assert.h" +#include "common/common/scope_tracker.h" + +namespace Envoy { +namespace Event { + +/** + * Implementation of RangeTimer that can be scaled by the backing manager object. 
+ * + * Instances of this class exist in one of 3 states: + * - inactive: not enabled + * - waiting-for-min: enabled, min timeout not elapsed + * - scaling-max: enabled, min timeout elapsed, max timeout not elapsed + * + * The allowed state transitions are: + * - inactive -> waiting-for-min + * - waiting-for-min -> scaling-max | inactive + * - scaling-max -> inactive + * + * Some methods combine multiple state transitions; enableTimer(0, max) on a + * timer in the scaling-max state will logically execute the transition sequence + * [scaling-max -> inactive -> waiting-for-min -> scaling-max] in a single + * method call. The waiting-for-min transitions are elided for efficiency. + */ +class ScaledRangeTimerManager::RangeTimerImpl final : public RangeTimer { +public: + RangeTimerImpl(TimerCb callback, ScaledRangeTimerManager& manager) + : manager_(manager), callback_(std::move(callback)), + min_duration_timer_(manager.dispatcher_.createTimer([this] { onMinTimerComplete(); })) {} + + ~RangeTimerImpl() override { disableTimer(); } + + void disableTimer() override { + struct Dispatch { + Dispatch(RangeTimerImpl& timer) : timer_(timer) {} + RangeTimerImpl& timer_; + void operator()(const Inactive&) {} + void operator()(const WaitingForMin&) { timer_.min_duration_timer_->disableTimer(); } + void operator()(ScalingMax& active) { timer_.manager_.removeTimer(active.handle_); } + }; + absl::visit(Dispatch(*this), state_); + state_.emplace(); + scope_ = nullptr; + } + + void enableTimer(const std::chrono::milliseconds min_ms, const std::chrono::milliseconds max_ms, + const ScopeTrackedObject* scope) override { + disableTimer(); + scope_ = scope; + ENVOY_LOG_MISC(trace, "enableTimer called on {} for ({}ms, {}ms)", static_cast(this), + min_ms.count(), max_ms.count()); + if (min_ms <= std::chrono::milliseconds::zero()) { + // If the duration spread (max - min) is zero, skip over the waiting-for-min and straight to + // the scaling-max state. + auto handle = manager_.activateTimer(max_ms, *this); + state_.emplace(handle); + } else { + state_.emplace(max_ms - min_ms); + min_duration_timer_->enableTimer(min_ms); + } + } + + bool enabled() override { return !absl::holds_alternative(state_); } + + void trigger() { + ASSERT(manager_.dispatcher_.isThreadSafe()); + ASSERT(!absl::holds_alternative(state_)); + ENVOY_LOG_MISC(trace, "RangeTimerImpl triggered: {}", static_cast(this)); + state_.emplace(); + if (scope_ == nullptr) { + callback_(); + } else { + ScopeTrackerScopeState scope(scope_, manager_.dispatcher_); + scope_ = nullptr; + callback_(); + } + } + +private: + struct Inactive {}; + + struct WaitingForMin { + WaitingForMin(std::chrono::milliseconds scalable_duration) + : scalable_duration_(scalable_duration) {} + + // The amount of time between this enabled timer's max and min, which should + // be scaled by the current scale factor. + const std::chrono::milliseconds scalable_duration_; + }; + + struct ScalingMax { + ScalingMax(ScaledRangeTimerManager::ScalingTimerHandle handle) : handle_(handle) {} + + // A handle that can be used to disable the timer. + ScaledRangeTimerManager::ScalingTimerHandle handle_; + }; + + /** + * This is called when the min timer expires, on the dispatcher for the manager. It registers with + * the manager so the duration can be scaled, unless the duration is zero in which case it just + * triggers the callback right away. 
+ */ + void onMinTimerComplete() { + ASSERT(manager_.dispatcher_.isThreadSafe()); + ENVOY_LOG_MISC(info, "min timer complete for {}", static_cast(this)); + ASSERT(absl::holds_alternative(state_)); + const WaitingForMin& waiting = absl::get(state_); + + // This + if (waiting.scalable_duration_ < std::chrono::milliseconds::zero()) { + trigger(); + } else { + state_.emplace(manager_.activateTimer(waiting.scalable_duration_, *this)); + } + } + + ScaledRangeTimerManager& manager_; + const TimerCb callback_; + const TimerPtr min_duration_timer_; + + absl::variant state_; + const ScopeTrackedObject* scope_; +}; + +ScaledRangeTimerManager::ScaledRangeTimerManager(Dispatcher& dispatcher) + : dispatcher_(dispatcher), scale_factor_(1.0) {} + +ScaledRangeTimerManager::~ScaledRangeTimerManager() { + // Scaled timers created by the manager shouldn't outlive it. This is + // necessary but not sufficient to guarantee that. + ASSERT(queues_.empty()); +} + +RangeTimerPtr ScaledRangeTimerManager::createTimer(TimerCb callback) { + return std::make_unique(callback, *this); +} + +void ScaledRangeTimerManager::setScaleFactor(double scale_factor) { + const MonotonicTime now = dispatcher_.approximateMonotonicTime(); + scale_factor_ = DurationScaleFactor(scale_factor); + for (auto& queue : queues_) { + resetQueueTimer(*queue, now); + } +} + +ScaledRangeTimerManager::Queue::Item::Item(RangeTimerImpl& timer, MonotonicTime active_time) + : timer_(timer), active_time_(active_time) {} + +ScaledRangeTimerManager::Queue::Queue(std::chrono::milliseconds duration, + ScaledRangeTimerManager& manager, Dispatcher& dispatcher) + : duration_(duration), + timer_(dispatcher.createTimer([this, &manager] { manager.onQueueTimerFired(*this); })) {} + +ScaledRangeTimerManager::ScalingTimerHandle::ScalingTimerHandle(Queue& queue, + Queue::Iterator iterator) + : queue_(queue), iterator_(iterator) {} + +ScaledRangeTimerManager::DurationScaleFactor::DurationScaleFactor(double value) + : value_(std::max(0.0, std::min(value, 1.0))) {} + +MonotonicTime ScaledRangeTimerManager::computeTriggerTime(const Queue::Item& item, + std::chrono::milliseconds duration, + DurationScaleFactor scale_factor) { + return item.active_time_ + + std::chrono::duration_cast(duration * scale_factor.value()); +} + +ScaledRangeTimerManager::ScalingTimerHandle +ScaledRangeTimerManager::activateTimer(std::chrono::milliseconds duration, + RangeTimerImpl& range_timer) { + // Ensure this is being called on the same dispatcher. + ASSERT(dispatcher_.isThreadSafe()); + + // Find the matching queue for the (max - min) duration of the range timer; if there isn't one, + // create it. + auto it = queues_.find(duration); + if (it == queues_.end()) { + auto queue = std::make_unique(duration, *this, dispatcher_); + it = queues_.emplace(std::move(queue)).first; + } + Queue& queue = **it; + + // Put the timer at the back of the queue. Since the timer has the same maximum duration as all + // the other timers in the queue, and since the activation times are monotonic, the queue stays in + // sorted order. + queue.range_timers_.emplace_back(range_timer, dispatcher_.approximateMonotonicTime()); + if (queue.range_timers_.size() == 1) { + resetQueueTimer(queue, dispatcher_.approximateMonotonicTime()); + } + + return ScalingTimerHandle(queue, --queue.range_timers_.end()); +} + +void ScaledRangeTimerManager::removeTimer(ScalingTimerHandle handle) { + // Ensure this is being called on the same dispatcher. 
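Tying the scaling pieces together: DurationScaleFactor clamps the factor to [0, 1], and computeTriggerTime() applies it only to the scalable remainder (max - min), measured from the moment the min timer fired. A small standalone sketch of that arithmetic follows; the helper name and values are illustrative only, not taken from the patch.

#include <algorithm>
#include <chrono>
#include <iostream>

using MonotonicTime = std::chrono::steady_clock::time_point;

// Same shape as computeTriggerTime(): the (max - min) remainder is multiplied
// by the clamped factor and added to the time the timer became "active".
MonotonicTime triggerTime(MonotonicTime active_time, std::chrono::milliseconds remainder,
                          double scale_factor) {
  const double clamped = std::max(0.0, std::min(scale_factor, 1.0));
  return active_time +
         std::chrono::duration_cast<std::chrono::milliseconds>(remainder * clamped);
}

int main() {
  const MonotonicTime t0 = std::chrono::steady_clock::now();
  const auto remainder = std::chrono::milliseconds(1000); // max - min
  for (double factor : {1.0, 0.5, 0.0, 7.0 /* clamped to 1 */}) {
    const auto delay = std::chrono::duration_cast<std::chrono::milliseconds>(
        triggerTime(t0, remainder, factor) - t0);
    std::cout << factor << " -> fires " << delay.count() << "ms after the min deadline\n";
  }
}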
+ ASSERT(dispatcher_.isThreadSafe()); + + const bool was_front = handle.queue_.range_timers_.begin() == handle.iterator_; + handle.queue_.range_timers_.erase(handle.iterator_); + // Don't keep around empty queues + if (handle.queue_.range_timers_.empty()) { + queues_.erase(handle.queue_); + return; + } + + // The queue's timer tracks the expiration time of the first range timer, so it only needs + // adjusting if the first timer is the one that was removed. + if (was_front) { + resetQueueTimer(handle.queue_, dispatcher_.approximateMonotonicTime()); + } +} + +void ScaledRangeTimerManager::resetQueueTimer(Queue& queue, MonotonicTime now) { + ASSERT(!queue.range_timers_.empty()); + const MonotonicTime trigger_time = + computeTriggerTime(queue.range_timers_.front(), queue.duration_, scale_factor_); + if (trigger_time < now) { + queue.timer_->enableTimer(std::chrono::milliseconds::zero()); + } else { + queue.timer_->enableTimer( + std::chrono::duration_cast(trigger_time - now)); + } +} + +void ScaledRangeTimerManager::onQueueTimerFired(Queue& queue) { + auto& timers = queue.range_timers_; + ASSERT(!timers.empty()); + const MonotonicTime now = dispatcher_.approximateMonotonicTime(); + + // Pop and trigger timers until the one at the front isn't supposed to have expired yet (given the + // current scale factor). + while (!timers.empty() && + computeTriggerTime(timers.front(), queue.duration_, scale_factor_) <= now) { + auto item = std::move(queue.range_timers_.front()); + queue.range_timers_.pop_front(); + item.timer_.trigger(); + } + + if (queue.range_timers_.empty()) { + // Maintain the invariant that queues are never empty. + queues_.erase(queue); + } else { + resetQueueTimer(queue, now); + } +} + +} // namespace Event +} // namespace Envoy diff --git a/source/common/event/scaled_range_timer_manager.h b/source/common/event/scaled_range_timer_manager.h new file mode 100644 index 000000000000..1fbd51c8c86e --- /dev/null +++ b/source/common/event/scaled_range_timer_manager.h @@ -0,0 +1,150 @@ +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/range_timer.h" +#include "envoy/event/timer.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { +namespace Event { + +/** + * Class for creating RangeTimer objects that can be adjusted towards either the minimum or maximum + * of their range by the owner of the manager object. Users of this class can call createTimer() to + * receive a new RangeTimer object that they can then enable or disable at will (but only on the + * same dispatcher), and setScaleFactor() to change the scaling factor. The current scale factor is + * applied to all timers, including those that are created later. + * + * Internally, the manager uses a set of queues to track timers. When an enabled timer reaches its + * min duration, it adds a tracker object to the queue corresponding to the duration (max - min). + * Each queue tracks timers of only a single duration, and uses a real Timer object to schedule the + * expiration of the first timer in the queue. The expectation is that the number of (max - min) + * values used to enable timers is small, so the number of queues is tightly bounded. The + * queue-based implementation depends on that expectation for efficient operation. + */ +class ScaledRangeTimerManager { +public: + explicit ScaledRangeTimerManager(Dispatcher& dispatcher); + ~ScaledRangeTimerManager(); + + /** + * Creates a new range timer backed by the manager. 
The returned timer will be subject to the + * current and future scale factor values set on the manager. All returned timers must be deleted + * before the manager. + */ + RangeTimerPtr createTimer(TimerCb callback); + + /** + * Sets the scale factor for all timers created through this manager. The value should be between + * 0 and 1, inclusive. The scale factor affects the amount of time timers spend in their target + * range. The RangeTimers returned by createTimer will fire after (min + (max - min) * + * scale_factor). This means that a scale factor of 0 causes timers to fire immediately at the min + * duration, a factor of 0.5 causes firing halfway between min and max, and a factor of 1 causes + * firing at max. + */ + void setScaleFactor(double scale_factor); + +private: + class RangeTimerImpl; + + // A queue object that maintains a list of timers with the same (max - min) values. + struct Queue { + struct Item { + Item(RangeTimerImpl& timer, MonotonicTime active_time); + // The timer owned by the caller being kept in the queue. + RangeTimerImpl& timer_; + // The time at which the timer became active (when its min duration expired). + MonotonicTime active_time_; + }; + + // Typedef for convenience. + using Iterator = std::list::iterator; + + Queue(std::chrono::milliseconds duration, ScaledRangeTimerManager& manager, + Dispatcher& dispatcher); + + // The (max - min) value for all timers in range_timers_. + const std::chrono::milliseconds duration_; + + // The list of active timers in this queue. This is implemented as a + // std::list so that the iterators held in ScalingTimerHandle instances are + // not invalidated by removal or insertion of other timers. The timers in + // the list are in sorted order by active_time_ because they are only + // inserted at the end of the list, and the time is monotonically increasing. + std::list range_timers_; + + // A real Timer that tracks the expiration time of the first timer in the queue. This gets + // adjusted + // 1) at queue creation time + // 2) on expiration + // 3) when the scale factor changes + const TimerPtr timer_; + }; + + /** + * An object passed back to RangeTimerImpl that can be used to remove it from its queue. + */ + struct ScalingTimerHandle { + ScalingTimerHandle(Queue& queue, Queue::Iterator iterator); + Queue& queue_; + Queue::Iterator iterator_; + }; + + // A simple wrapper around a float that ensures value() is sane (in the range [0, 1]). + class DurationScaleFactor { + public: + DurationScaleFactor(double value); + double value() const { return value_; } + + private: + double value_; + }; + + struct Hash { + // Magic declaration to allow heterogeneous lookup. + using is_transparent = void; // NOLINT(readability-identifier-naming) + + size_t operator()(const std::chrono::milliseconds duration) const { + return hash_(duration.count()); + } + size_t operator()(const Queue& queue) const { return (*this)(queue.duration_); } + size_t operator()(const std::unique_ptr& queue) const { return (*this)(*queue); } + std::hash hash_; + }; + + struct Eq { + // Magic declaration to allow heterogeneous lookup. 
+ using is_transparent = void; // NOLINT(readability-identifier-naming) + + bool operator()(const std::unique_ptr& lhs, std::chrono::milliseconds rhs) const { + return lhs->duration_ == rhs; + } + bool operator()(const std::unique_ptr& lhs, const Queue& rhs) const { + return (*this)(lhs, rhs.duration_); + } + bool operator()(const std::unique_ptr& lhs, const std::unique_ptr& rhs) const { + return (*this)(lhs, *rhs); + } + }; + + static MonotonicTime computeTriggerTime(const Queue::Item& item, + std::chrono::milliseconds duration, + DurationScaleFactor scale_factor); + + ScalingTimerHandle activateTimer(std::chrono::milliseconds duration, RangeTimerImpl& timer); + + void removeTimer(ScalingTimerHandle handle); + + void resetQueueTimer(Queue& queue, MonotonicTime now); + + void onQueueTimerFired(Queue& queue); + + Dispatcher& dispatcher_; + DurationScaleFactor scale_factor_; + absl::flat_hash_set, Hash, Eq> queues_; +}; + +} // namespace Event +} // namespace Envoy \ No newline at end of file diff --git a/source/common/event/schedulable_cb_impl.cc b/source/common/event/schedulable_cb_impl.cc index 797e5bb004e1..2109af17972e 100644 --- a/source/common/event/schedulable_cb_impl.cc +++ b/source/common/event/schedulable_cb_impl.cc @@ -21,12 +21,18 @@ SchedulableCallbackImpl::SchedulableCallbackImpl(Libevent::BasePtr& libevent, } void SchedulableCallbackImpl::scheduleCallbackCurrentIteration() { + if (enabled()) { + return; + } // event_active directly adds the event to the end of the work queue so it executes in the current // iteration of the event loop. event_active(&raw_event_, EV_TIMEOUT, 0); } void SchedulableCallbackImpl::scheduleCallbackNextIteration() { + if (enabled()) { + return; + } // libevent computes the list of timers to move to the work list after polling for fd events, but // iteration through the work list starts. Zero delay timers added while iterating through the // work list execute on the next iteration of the event loop. 
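The Hash/Eq pair above enables heterogeneous lookup on the flat_hash_set of queue pointers, so the set can be probed with a bare std::chrono::milliseconds key without materializing a throwaway Queue. A trimmed-down sketch of the same pattern, with simplified members and only the overloads needed for this demonstration:

#include <chrono>
#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>

#include "absl/container/flat_hash_set.h"

struct Queue {
  explicit Queue(std::chrono::milliseconds d) : duration_(d) {}
  const std::chrono::milliseconds duration_;
};

struct Hash {
  using is_transparent = void;
  std::size_t operator()(std::chrono::milliseconds d) const {
    return std::hash<long long>{}(static_cast<long long>(d.count()));
  }
  std::size_t operator()(const std::unique_ptr<Queue>& q) const { return (*this)(q->duration_); }
};

struct Eq {
  using is_transparent = void;
  bool operator()(const std::unique_ptr<Queue>& lhs, std::chrono::milliseconds rhs) const {
    return lhs->duration_ == rhs;
  }
  bool operator()(const std::unique_ptr<Queue>& lhs, const std::unique_ptr<Queue>& rhs) const {
    return lhs->duration_ == rhs->duration_;
  }
};

int main() {
  absl::flat_hash_set<std::unique_ptr<Queue>, Hash, Eq> queues;
  queues.insert(std::make_unique<Queue>(std::chrono::milliseconds(250)));
  std::cout << (queues.find(std::chrono::milliseconds(250)) != queues.end()) << "\n"; // 1
  std::cout << (queues.find(std::chrono::milliseconds(500)) != queues.end()) << "\n"; // 0
}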
diff --git a/source/common/event/timer_impl.cc b/source/common/event/timer_impl.cc index 56137dc8b2e3..bfb9b686ea13 100644 --- a/source/common/event/timer_impl.cc +++ b/source/common/event/timer_impl.cc @@ -39,13 +39,13 @@ TimerImpl::TimerImpl(Libevent::BasePtr& libevent, TimerCb cb, Dispatcher& dispat void TimerImpl::disableTimer() { event_del(&raw_event_); } -void TimerImpl::enableTimer(const std::chrono::milliseconds& d, const ScopeTrackedObject* object) { +void TimerImpl::enableTimer(const std::chrono::milliseconds d, const ScopeTrackedObject* object) { timeval tv; TimerUtils::durationToTimeval(d, tv); internalEnableTimer(tv, object); } -void TimerImpl::enableHRTimer(const std::chrono::microseconds& d, +void TimerImpl::enableHRTimer(const std::chrono::microseconds d, const ScopeTrackedObject* object = nullptr) { timeval tv; TimerUtils::durationToTimeval(d, tv); diff --git a/source/common/event/timer_impl.h b/source/common/event/timer_impl.h index cb4e25c6fb4f..f7513dda996c 100644 --- a/source/common/event/timer_impl.h +++ b/source/common/event/timer_impl.h @@ -57,9 +57,8 @@ class TimerImpl : public Timer, ImplBase { // Timer void disableTimer() override; - void enableTimer(const std::chrono::milliseconds& d, const ScopeTrackedObject* scope) override; - void enableHRTimer(const std::chrono::microseconds& us, - const ScopeTrackedObject* object) override; + void enableTimer(std::chrono::milliseconds d, const ScopeTrackedObject* scope) override; + void enableHRTimer(std::chrono::microseconds us, const ScopeTrackedObject* object) override; bool enabled() override; diff --git a/source/common/filesystem/file_shared_impl.cc b/source/common/filesystem/file_shared_impl.cc index 56601badb01c..5c76ea6c29de 100644 --- a/source/common/filesystem/file_shared_impl.cc +++ b/source/common/filesystem/file_shared_impl.cc @@ -1,43 +1,25 @@ #include "common/filesystem/file_shared_impl.h" -#include +#include "common/common/utility.h" namespace Envoy { namespace Filesystem { -Api::IoError::IoErrorCode IoFileError::getErrorCode() const { return IoErrorCode::UnknownError; } - -std::string IoFileError::getErrorDetails() const { - // TODO(sunjayBhatia, wrowe): Disable clang-format until win32 implementation no longer uses POSIX - // subsystem, see https://github.com/envoyproxy/envoy/issues/11655 - // clang-format off - return ::strerror(errno_); - // clang-format on -} - -Api::IoCallBoolResult FileSharedImpl::open(FlagSet in) { - if (isOpen()) { - return resultSuccess(true); +Api::IoError::IoErrorCode IoFileError::getErrorCode() const { + switch (errno_) { + case HANDLE_ERROR_PERM: + return IoErrorCode::Permission; + case HANDLE_ERROR_INVALID: + return IoErrorCode::BadFd; + default: + ENVOY_LOG_MISC(debug, "Unknown error code {} details {}", errno_, getErrorDetails()); + return IoErrorCode::UnknownError; } - - openFile(in); - return fd_ != -1 ? resultSuccess(true) : resultFailure(false, errno); } -Api::IoCallSizeResult FileSharedImpl::write(absl::string_view buffer) { - const ssize_t rc = writeFile(buffer); - return rc != -1 ? resultSuccess(rc) : resultFailure(rc, errno); -}; - -Api::IoCallBoolResult FileSharedImpl::close() { - ASSERT(isOpen()); - - bool success = closeFile(); - fd_ = -1; - return success ? 
resultSuccess(true) : resultFailure(false, errno); -} +std::string IoFileError::getErrorDetails() const { return errorDetails(errno_); } -bool FileSharedImpl::isOpen() const { return fd_ != -1; }; +bool FileSharedImpl::isOpen() const { return fd_ != INVALID_HANDLE; }; std::string FileSharedImpl::path() const { return path_; }; diff --git a/source/common/filesystem/file_shared_impl.h b/source/common/filesystem/file_shared_impl.h index 06e166661287..4b8b623f7fdb 100644 --- a/source/common/filesystem/file_shared_impl.h +++ b/source/common/filesystem/file_shared_impl.h @@ -37,23 +37,15 @@ template Api::IoCallResult resultSuccess(T result) { class FileSharedImpl : public File { public: - FileSharedImpl(std::string path) : fd_(-1), path_(std::move(path)) {} + FileSharedImpl(std::string path) : fd_(INVALID_HANDLE), path_(std::move(path)) {} ~FileSharedImpl() override = default; - // Filesystem::File - Api::IoCallBoolResult open(FlagSet flag) override; - Api::IoCallSizeResult write(absl::string_view buffer) override; - Api::IoCallBoolResult close() override; bool isOpen() const override; std::string path() const override; protected: - virtual void openFile(FlagSet in) PURE; - virtual ssize_t writeFile(absl::string_view buffer) PURE; - virtual bool closeFile() PURE; - - int fd_; + filesystem_os_id_t fd_; const std::string path_; }; diff --git a/source/common/filesystem/posix/filesystem_impl.cc b/source/common/filesystem/posix/filesystem_impl.cc index e24814d0ca70..5f69e98e764b 100644 --- a/source/common/filesystem/posix/filesystem_impl.cc +++ b/source/common/filesystem/posix/filesystem_impl.cc @@ -30,13 +30,26 @@ FileImplPosix::~FileImplPosix() { } } -void FileImplPosix::openFile(FlagSet in) { +Api::IoCallBoolResult FileImplPosix::open(FlagSet in) { + if (isOpen()) { + return resultSuccess(true); + } + const auto flags_and_mode = translateFlag(in); fd_ = ::open(path_.c_str(), flags_and_mode.flags_, flags_and_mode.mode_); + return fd_ != -1 ? resultSuccess(true) : resultFailure(false, errno); } -ssize_t FileImplPosix::writeFile(absl::string_view buffer) { - return ::write(fd_, buffer.data(), buffer.size()); +Api::IoCallSizeResult FileImplPosix::write(absl::string_view buffer) { + const ssize_t rc = ::write(fd_, buffer.data(), buffer.size()); + return rc != -1 ? resultSuccess(rc) : resultFailure(rc, errno); +}; + +Api::IoCallBoolResult FileImplPosix::close() { + ASSERT(isOpen()); + int rc = ::close(fd_); + fd_ = -1; + return (rc != -1) ? 
resultSuccess(true) : resultFailure(false, errno); } FileImplPosix::FlagsAndMode FileImplPosix::translateFlag(FlagSet in) { @@ -62,8 +75,6 @@ FileImplPosix::FlagsAndMode FileImplPosix::translateFlag(FlagSet in) { return {out, mode}; } -bool FileImplPosix::closeFile() { return ::close(fd_) != -1; } - FilePtr InstanceImplPosix::createFile(const std::string& path) { return std::make_unique(path); } diff --git a/source/common/filesystem/posix/filesystem_impl.h b/source/common/filesystem/posix/filesystem_impl.h index 173be8918d33..4f81255be222 100644 --- a/source/common/filesystem/posix/filesystem_impl.h +++ b/source/common/filesystem/posix/filesystem_impl.h @@ -21,13 +21,12 @@ class FileImplPosix : public FileSharedImpl { mode_t mode_ = 0; }; - // Filesystem::FileSharedImpl - FlagsAndMode translateFlag(FlagSet in); - void openFile(FlagSet flags) override; - ssize_t writeFile(absl::string_view buffer) override; - bool closeFile() override; + Api::IoCallBoolResult open(FlagSet flag) override; + Api::IoCallSizeResult write(absl::string_view buffer) override; + Api::IoCallBoolResult close() override; private: + FlagsAndMode translateFlag(FlagSet in); friend class FileSystemImplTest; }; diff --git a/source/common/filesystem/win32/filesystem_impl.cc b/source/common/filesystem/win32/filesystem_impl.cc index 268408e2dd67..cfdb3098fe1e 100644 --- a/source/common/filesystem/win32/filesystem_impl.cc +++ b/source/common/filesystem/win32/filesystem_impl.cc @@ -26,40 +26,67 @@ FileImplWin32::~FileImplWin32() { } } -void FileImplWin32::openFile(FlagSet in) { - const auto flags_and_mode = translateFlag(in); - fd_ = ::open(path_.c_str(), flags_and_mode.flags_, flags_and_mode.pmode_); +Api::IoCallBoolResult FileImplWin32::open(FlagSet in) { + if (isOpen()) { + return resultSuccess(true); + } + + auto flags = translateFlag(in); + fd_ = CreateFileA(path_.c_str(), flags.access_, FILE_SHARE_READ | FILE_SHARE_WRITE, 0, + flags.creation_, 0, NULL); + if (fd_ == INVALID_HANDLE) { + return resultFailure(false, ::GetLastError()); + } + return resultSuccess(true); } -ssize_t FileImplWin32::writeFile(absl::string_view buffer) { - return ::_write(fd_, buffer.data(), buffer.size()); +Api::IoCallSizeResult FileImplWin32::write(absl::string_view buffer) { + DWORD bytes_written; + BOOL result = WriteFile(fd_, buffer.data(), buffer.length(), &bytes_written, NULL); + if (result == 0) { + return resultFailure(-1, ::GetLastError()); + } + return resultSuccess(bytes_written); +}; + +Api::IoCallBoolResult FileImplWin32::close() { + ASSERT(isOpen()); + + BOOL result = CloseHandle(fd_); + fd_ = INVALID_HANDLE; + if (result == 0) { + return resultFailure(false, ::GetLastError()); + } + return resultSuccess(true); } FileImplWin32::FlagsAndMode FileImplWin32::translateFlag(FlagSet in) { - int out = 0; - int pmode = 0; + DWORD access = 0; + DWORD creation = OPEN_EXISTING; + if (in.test(File::Operation::Create)) { - out |= _O_CREAT; - pmode |= _S_IREAD | _S_IWRITE; + creation = OPEN_ALWAYS; + } + + if (in.test(File::Operation::Write)) { + access = GENERIC_WRITE; } + // Order of tests matter here. There reason for that + // is that `FILE_APPEND_DATA` should not be used together + // with `GENERIC_WRITE`. If both of them are used the file + // is not opened in append mode. 
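The ordering caveat in the comment above matters because an access mask that combines append and generic-write loses the append-only behaviour, which is why the code that follows overwrites the write flag rather than OR-ing it when Append is requested. A small sketch of just that selection logic, using stand-in constants rather than the real <windows.h> values:

#include <cstdint>
#include <iostream>

// Stand-ins for GENERIC_READ / GENERIC_WRITE / FILE_APPEND_DATA; only the
// selection logic is of interest here, not the actual bit values.
constexpr uint32_t kRead = 0x1;
constexpr uint32_t kWrite = 0x2;
constexpr uint32_t kAppend = 0x4;

uint32_t accessMask(bool read, bool write, bool append) {
  uint32_t access = 0;
  if (write) {
    access = kWrite;
  }
  // Append replaces (rather than ORs with) the write flag: combining the two
  // would open the file for plain writes and drop append-only semantics.
  if (append) {
    access = kAppend;
  }
  if (read) {
    access |= kRead;
  }
  return access;
}

int main() {
  std::cout << std::hex << accessMask(false, true, true) << "\n"; // 4, not 6
  std::cout << std::hex << accessMask(true, true, false) << "\n"; // 3
}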
if (in.test(File::Operation::Append)) { - out |= _O_APPEND; + access = FILE_APPEND_DATA; } - if (in.test(File::Operation::Read) && in.test(File::Operation::Write)) { - out |= _O_RDWR; - } else if (in.test(File::Operation::Read)) { - out |= _O_RDONLY; - } else if (in.test(File::Operation::Write)) { - out |= _O_WRONLY; + if (in.test(File::Operation::Read)) { + access |= GENERIC_READ; } - return {out, pmode}; + return {access, creation}; } -bool FileImplWin32::closeFile() { return ::_close(fd_) != -1; } - FilePtr InstanceImplWin32::createFile(const std::string& path) { return std::make_unique(path); } @@ -78,11 +105,19 @@ bool InstanceImplWin32::directoryExists(const std::string& path) { } ssize_t InstanceImplWin32::fileSize(const std::string& path) { - struct _stat info; - if (::_stat(path.c_str(), &info) != 0) { + auto fd = CreateFileA(path.c_str(), GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, 0, NULL); + if (fd == INVALID_HANDLE) { + return -1; + } + ssize_t result = 0; + LARGE_INTEGER lFileSize; + BOOL bGetSize = GetFileSizeEx(fd, &lFileSize); + CloseHandle(fd); + if (!bGetSize) { return -1; } - return info.st_size; + result += lFileSize.QuadPart; + return result; } std::string InstanceImplWin32::fileReadToEnd(const std::string& path) { diff --git a/source/common/filesystem/win32/filesystem_impl.h b/source/common/filesystem/win32/filesystem_impl.h index f39b40378d64..95989ceb9e0f 100644 --- a/source/common/filesystem/win32/filesystem_impl.h +++ b/source/common/filesystem/win32/filesystem_impl.h @@ -14,18 +14,17 @@ class FileImplWin32 : public FileSharedImpl { ~FileImplWin32(); protected: + Api::IoCallBoolResult open(FlagSet flag) override; + Api::IoCallSizeResult write(absl::string_view buffer) override; + Api::IoCallBoolResult close() override; + +private: struct FlagsAndMode { - int flags_ = 0; - int pmode_ = 0; + DWORD access_ = 0; + DWORD creation_ = 0; }; - // Filesystem::FileSharedImpl FlagsAndMode translateFlag(FlagSet in); - void openFile(FlagSet in) override; - ssize_t writeFile(absl::string_view buffer) override; - bool closeFile() override; - -private: friend class FileSystemImplTest; }; diff --git a/source/common/formatter/substitution_formatter.cc b/source/common/formatter/substitution_formatter.cc index 57f1798b9c42..9b121db96803 100644 --- a/source/common/formatter/substitution_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -45,7 +45,7 @@ void truncate(std::string& str, absl::optional max_length) { // Matches newline pattern in a StartTimeFormatter format string. 
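The hunk below widens this newline-matching pattern so it also tolerates strftime's E and O modifier flags (for example %En). A quick standalone check of the new expression, for illustration only:

#include <iostream>
#include <regex>

int main() {
  // Same expression as the updated getStartTimeNewlinePattern().
  const std::regex pattern("%[-_0^#]*[1-9]*(E|O)?n");
  for (const char* spec : {"%n", "%5n", "%En", "%On", "%Y"}) {
    std::cout << spec << " -> " << std::regex_search(spec, pattern) << "\n"; // 1 1 1 1 0
  }
}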
const std::regex& getStartTimeNewlinePattern() { - CONSTRUCT_ON_FIRST_USE(std::regex, "%[-_0^#]*[1-9]*n"); + CONSTRUCT_ON_FIRST_USE(std::regex, "%[-_0^#]*[1-9]*(E|O)?n"); } const std::regex& getNewlinePattern() { CONSTRUCT_ON_FIRST_USE(std::regex, "\n"); } @@ -617,6 +617,11 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { [](const StreamInfo::StreamInfo& stream_info) { return stream_info.responseCodeDetails(); }); + } else if (field_name == "CONNECTION_TERMINATION_DETAILS") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.connectionTerminationDetails(); + }); } else if (field_name == "BYTES_SENT") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { return stream_info.bytesSent(); }); @@ -685,6 +690,11 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { StreamInfoAddressFieldExtractor::withoutPort([](const StreamInfo::StreamInfo& stream_info) { return stream_info.downstreamDirectRemoteAddress(); }); + } else if (field_name == "CONNECTION_ID") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.connectionID().value_or(0); + }); } else if (field_name == "REQUESTED_SERVER_NAME") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { @@ -853,13 +863,15 @@ HeaderFormatter::HeaderFormatter(const std::string& main_header, : main_header_(main_header), alternative_header_(alternative_header), max_length_(max_length) {} const Http::HeaderEntry* HeaderFormatter::findHeader(const Http::HeaderMap& headers) const { - const Http::HeaderEntry* header = headers.get(main_header_); + const auto header = headers.get(main_header_); - if (!header && !alternative_header_.get().empty()) { - return headers.get(alternative_header_); + if (header.empty() && !alternative_header_.get().empty()) { + const auto alternate_header = headers.get(alternative_header_); + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially log all header values. + return alternate_header.empty() ? nullptr : alternate_header[0]; } - return header; + return header.empty() ? nullptr : header[0]; } absl::optional HeaderFormatter::format(const Http::HeaderMap& headers) const { diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index 55e4fa75b23b..aaec44e0e03f 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -16,8 +16,9 @@ AsyncClientImpl::AsyncClientImpl(Upstream::ClusterManager& cm, const envoy::config::core::v3::GrpcService& config, TimeSource& time_source) : cm_(cm), remote_cluster_name_(config.envoy_grpc().cluster_name()), - host_name_(config.envoy_grpc().authority()), initial_metadata_(config.initial_metadata()), - time_source_(time_source) {} + host_name_(config.envoy_grpc().authority()), time_source_(time_source), + metadata_parser_( + Router::HeaderParser::configure(config.initial_metadata(), /*append=*/false)) {} AsyncClientImpl::~AsyncClientImpl() { while (!active_streams_.empty()) { @@ -88,10 +89,9 @@ void AsyncStreamImpl::initialize(bool buffer_body_for_retry) { parent_.host_name_.empty() ? parent_.remote_cluster_name_ : parent_.host_name_, service_full_name_, method_name_, options_.timeout); // Fill service-wide initial metadata. 
- for (const auto& header_value : parent_.initial_metadata_) { - headers_message_->headers().addCopy(Http::LowerCaseString(header_value.key()), - header_value.value()); - } + parent_.metadata_parser_->evaluateHeaders(headers_message_->headers(), + options_.parent_context.stream_info); + callbacks_.onCreateInitialMetadata(headers_message_->headers()); stream_->sendHeaders(headers_message_->headers(), false); } diff --git a/source/common/grpc/async_client_impl.h b/source/common/grpc/async_client_impl.h index ae0e2c7782ab..2e7139df209c 100644 --- a/source/common/grpc/async_client_impl.h +++ b/source/common/grpc/async_client_impl.h @@ -10,6 +10,7 @@ #include "common/grpc/codec.h" #include "common/grpc/typed_async_client.h" #include "common/http/async_client_impl.h" +#include "common/router/header_parser.h" namespace Envoy { namespace Grpc { @@ -39,9 +40,9 @@ class AsyncClientImpl final : public RawAsyncClient { const std::string remote_cluster_name_; // The host header value in the http transport. const std::string host_name_; - const Protobuf::RepeatedPtrField initial_metadata_; std::list active_streams_; TimeSource& time_source_; + Router::HeaderParserPtr metadata_parser_; friend class AsyncRequestImpl; friend class AsyncStreamImpl; diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 4322df957916..8b7551d8bea2 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -103,13 +103,14 @@ std::string Common::getGrpcMessage(const Http::ResponseHeaderOrTrailerMap& trail absl::optional Common::getGrpcStatusDetailsBin(const Http::HeaderMap& trailers) { - const Http::HeaderEntry* details_header = trailers.get(Http::Headers::get().GrpcStatusDetailsBin); - if (!details_header) { + const auto details_header = trailers.get(Http::Headers::get().GrpcStatusDetailsBin); + if (details_header.empty()) { return absl::nullopt; } // Some implementations use non-padded base64 encoding for grpc-status-details-bin. - auto decoded_value = Base64::decodeWithoutPadding(details_header->value().getStringView()); + // This is effectively a trusted header so using the first value is fine. 
+ auto decoded_value = Base64::decodeWithoutPadding(details_header[0]->value().getStringView()); if (decoded_value.empty()) { return absl::nullopt; } @@ -161,9 +162,10 @@ Buffer::InstancePtr Common::serializeMessage(const Protobuf::Message& message) { return body; } -std::chrono::milliseconds Common::getGrpcTimeout(const Http::RequestHeaderMap& request_headers) { - std::chrono::milliseconds timeout(0); +absl::optional +Common::getGrpcTimeout(const Http::RequestHeaderMap& request_headers) { const Http::HeaderEntry* header_grpc_timeout_entry = request_headers.GrpcTimeout(); + std::chrono::milliseconds timeout; if (header_grpc_timeout_entry) { uint64_t grpc_timeout; // TODO(dnoe): Migrate to pure string_view (#6580) @@ -172,16 +174,13 @@ std::chrono::milliseconds Common::getGrpcTimeout(const Http::RequestHeaderMap& r if (unit != nullptr && *unit != '\0') { switch (*unit) { case 'H': - timeout = std::chrono::hours(grpc_timeout); - break; + return std::chrono::hours(grpc_timeout); case 'M': - timeout = std::chrono::minutes(grpc_timeout); - break; + return std::chrono::minutes(grpc_timeout); case 'S': - timeout = std::chrono::seconds(grpc_timeout); - break; + return std::chrono::seconds(grpc_timeout); case 'm': - timeout = std::chrono::milliseconds(grpc_timeout); + return std::chrono::milliseconds(grpc_timeout); break; case 'u': timeout = std::chrono::duration_cast( @@ -189,18 +188,18 @@ std::chrono::milliseconds Common::getGrpcTimeout(const Http::RequestHeaderMap& r if (timeout < std::chrono::microseconds(grpc_timeout)) { timeout++; } - break; + return timeout; case 'n': timeout = std::chrono::duration_cast( std::chrono::nanoseconds(grpc_timeout)); if (timeout < std::chrono::nanoseconds(grpc_timeout)) { timeout++; } - break; + return timeout; } } } - return timeout; + return absl::nullopt; } void Common::toGrpcTimeout(const std::chrono::milliseconds& timeout, diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h index cd94fe450568..f76082610f31 100644 --- a/source/common/grpc/common.h +++ b/source/common/grpc/common.h @@ -98,10 +98,11 @@ class Common { * @param request_headers the header map from which to extract the value of 'grpc-timeout' header. * If this header is missing the timeout corresponds to infinity. The header is encoded in * maximum of 8 decimal digits and a char for the unit. - * @return std::chrono::milliseconds the duration in milliseconds. A zero value corresponding to - * infinity is returned if 'grpc-timeout' is missing or malformed. + * @return absl::optional the duration in milliseconds. absl::nullopt + * is returned if 'grpc-timeout' is missing or malformed. */ - static std::chrono::milliseconds getGrpcTimeout(const Http::RequestHeaderMap& request_headers); + static absl::optional + getGrpcTimeout(const Http::RequestHeaderMap& request_headers); /** * Encode 'timeout' into 'grpc-timeout' format in the grpc-timeout header. 
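The grpc-timeout header value is at most eight decimal digits followed by a single unit character (H, M, S, m, u, n). The following is a reduced, standalone approximation of the unit handling in the rewritten getGrpcTimeout() above, returning no value for a missing or malformed header; std::optional stands in for absl::optional, and the digit validation and rounding are simplified relative to the real implementation.

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

std::optional<std::chrono::milliseconds> parseGrpcTimeout(const std::string& value) {
  if (value.size() < 2 || value.size() > 9) { // up to 8 digits plus the unit
    return std::nullopt;
  }
  uint64_t n = 0;
  for (std::size_t i = 0; i + 1 < value.size(); ++i) {
    if (value[i] < '0' || value[i] > '9') {
      return std::nullopt;
    }
    n = n * 10 + static_cast<uint64_t>(value[i] - '0');
  }
  using std::chrono::duration_cast;
  using std::chrono::milliseconds;
  switch (value.back()) {
  case 'H':
    return duration_cast<milliseconds>(std::chrono::hours(n));
  case 'M':
    return duration_cast<milliseconds>(std::chrono::minutes(n));
  case 'S':
    return duration_cast<milliseconds>(std::chrono::seconds(n));
  case 'm':
    return milliseconds(n);
  case 'u': // round sub-millisecond values up, as the patch does
    return duration_cast<milliseconds>(std::chrono::microseconds(n) + std::chrono::microseconds(999));
  case 'n':
    return duration_cast<milliseconds>(std::chrono::nanoseconds(n) + std::chrono::nanoseconds(999999));
  default:
    return std::nullopt;
  }
}

int main() {
  for (const std::string v : {"3S", "250m", "1500u", "bogus"}) {
    auto t = parseGrpcTimeout(v);
    std::cout << v << " -> " << (t ? std::to_string(t->count()) + "ms" : "nullopt") << "\n";
  }
}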
diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index e4b329d3e67e..9239fe4d74c5 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -10,6 +10,7 @@ #include "common/grpc/common.h" #include "common/grpc/google_grpc_creds_impl.h" #include "common/grpc/google_grpc_utils.h" +#include "common/router/header_parser.h" #include "common/tracing/http_tracer_impl.h" #include "grpcpp/support/proto_buffer_reader.h" @@ -79,15 +80,18 @@ GoogleAsyncClientImpl::GoogleAsyncClientImpl(Event::Dispatcher& dispatcher, const envoy::config::core::v3::GrpcService& config, Api::Api& api, const StatNames& stat_names) : dispatcher_(dispatcher), tls_(tls), stat_prefix_(config.google_grpc().stat_prefix()), - initial_metadata_(config.initial_metadata()), scope_(scope), + scope_(scope), per_stream_buffer_limit_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( - config.google_grpc(), per_stream_buffer_limit_bytes, DefaultBufferLimitBytes)) { + config.google_grpc(), per_stream_buffer_limit_bytes, DefaultBufferLimitBytes)), + metadata_parser_( + Router::HeaderParser::configure(config.initial_metadata(), /*append=*/false)) { // We rebuild the channel each time we construct the channel. It appears that the gRPC library is // smart enough to do connection pooling and reuse with identical channel args, so this should // have comparable overhead to what we are doing in Grpc::AsyncClientImpl, i.e. no expensive // new connection implied. std::shared_ptr channel = GoogleGrpcUtils::createChannel(config, api); stub_ = stub_factory.createStub(channel); + scope_->counterFromStatName(stat_names.google_grpc_client_creation_).inc(); // Initialize client stats. // TODO(jmarantz): Capture these names in async_client_manager_impl.cc and // pass in a struct of StatName objects so we don't have to take locks here. @@ -166,12 +170,8 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { : gpr_inf_future(GPR_CLOCK_REALTIME); ctxt_.set_deadline(abs_deadline); // Fill service-wide initial metadata. - for (const auto& header_value : parent_.initial_metadata_) { - ctxt_.AddMetadata(header_value.key(), header_value.value()); - } - // Due to the different HTTP header implementations, we effectively double - // copy headers here. 
auto initial_metadata = Http::RequestHeaderMapImpl::create(); + parent_.metadata_parser_->evaluateHeaders(*initial_metadata, options_.parent_context.stream_info); callbacks_.onCreateInitialMetadata(*initial_metadata); initial_metadata->iterate([this](const Http::HeaderEntry& header) { ctxt_.AddMetadata(std::string(header.key().getStringView()), diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 8e946ce5c0cb..a0e27c5e5efd 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -19,6 +19,7 @@ #include "common/grpc/google_grpc_context.h" #include "common/grpc/stat_names.h" #include "common/grpc/typed_async_client.h" +#include "common/router/header_parser.h" #include "common/tracing/http_tracer_impl.h" #include "absl/container/node_hash_set.h" @@ -197,10 +198,10 @@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable active_streams_; const std::string stat_prefix_; - const Protobuf::RepeatedPtrField initial_metadata_; Stats::ScopeSharedPtr scope_; GoogleAsyncClientStats stats_; uint64_t per_stream_buffer_limit_bytes_; + Router::HeaderParserPtr metadata_parser_; friend class GoogleAsyncClientThreadLocal; friend class GoogleAsyncRequestImpl; diff --git a/source/common/grpc/stat_names.cc b/source/common/grpc/stat_names.cc index 101ddeec84db..3366c45c99cf 100644 --- a/source/common/grpc/stat_names.cc +++ b/source/common/grpc/stat_names.cc @@ -4,7 +4,8 @@ namespace Envoy { namespace Grpc { StatNames::StatNames(Stats::SymbolTable& symbol_table) - : pool_(symbol_table), streams_total_(pool_.add("streams_total")) { + : pool_(symbol_table), streams_total_(pool_.add("streams_total")), + google_grpc_client_creation_(pool_.add("google_grpc_client_creation")) { for (uint32_t i = 0; i <= Status::WellKnownGrpcStatus::MaximumKnown; ++i) { std::string status_str = absl::StrCat(i); streams_closed_[i] = pool_.add(absl::StrCat("streams_closed_", status_str)); diff --git a/source/common/grpc/stat_names.h b/source/common/grpc/stat_names.h index c0dfe03b683b..daa2f7c9aeb9 100644 --- a/source/common/grpc/stat_names.h +++ b/source/common/grpc/stat_names.h @@ -21,6 +21,8 @@ struct StatNames { Stats::StatName streams_total_; std::array streams_closed_; absl::flat_hash_map status_names_; + // Stat name tracking the creation of the Google grpc client. 
+ Stats::StatName google_grpc_client_creation_; }; } // namespace Grpc diff --git a/source/common/grpc/typed_async_client.h b/source/common/grpc/typed_async_client.h index 241926ee4ed7..2905db8f345a 100644 --- a/source/common/grpc/typed_async_client.h +++ b/source/common/grpc/typed_async_client.h @@ -160,6 +160,7 @@ template class AsyncClient /* : public Raw public: AsyncClient() = default; AsyncClient(RawAsyncClientPtr&& client) : client_(std::move(client)) {} + AsyncClient(RawAsyncClientSharedPtr client) : client_(client) {} virtual ~AsyncClient() = default; virtual AsyncRequest* send(const Protobuf::MethodDescriptor& service_method, @@ -192,7 +193,7 @@ template class AsyncClient /* : public Raw void reset() { client_.reset(); } private: - RawAsyncClientPtr client_{}; + RawAsyncClientSharedPtr client_{}; }; } // namespace Grpc diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 580a30ac64bc..726c33322614 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -306,6 +306,7 @@ envoy_cc_library( "//source/common/common:empty_string", "//source/common/common:non_copyable", "//source/common/common:utility_lib", + "//source/common/runtime:runtime_features_lib", "//source/common/singleton:const_singleton", ], ) @@ -370,6 +371,7 @@ envoy_cc_library( external_deps = [ "abseil_node_hash_set", "abseil_optional", + "http_parser", "nghttp2", ], deps = [ @@ -387,7 +389,6 @@ envoy_cc_library( "//source/common/common:enum_to_int", "//source/common/common:utility_lib", "//source/common/grpc:status_lib", - "//source/common/json:json_loader_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", "//source/common/runtime:runtime_features_lib", @@ -407,7 +408,6 @@ envoy_cc_library( ":utility_lib", "//include/envoy/common:regex_interface", "//include/envoy/http:header_map_interface", - "//include/envoy/json:json_object_interface", "//source/common/common:regex_lib", "//source/common/common:utility_lib", "//source/common/protobuf:utility_lib", @@ -421,12 +421,10 @@ envoy_cc_library( name = "path_utility_lib", srcs = ["path_utility.cc"], hdrs = ["path_utility.h"], - external_deps = [ - "abseil_optional", - "googleurl", - ], + external_deps = ["abseil_optional"], deps = [ "//include/envoy/http:header_map_interface", + "//source/common/chromium_url", "//source/common/common:logger_lib", ], ) @@ -463,17 +461,3 @@ envoy_cc_library( "//source/common/common:assert_lib", ], ) - -envoy_cc_library( - name = "url_utility_lib", - srcs = ["url_utility.cc"], - hdrs = ["url_utility.h"], - external_deps = [ - "googleurl", - ], - deps = [ - "//source/common/common:assert_lib", - "//source/common/common:empty_string", - "//source/common/common:utility_lib", - ], -) diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index a1eaecc78fc4..0ebbbdfd12f7 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -94,7 +94,8 @@ AsyncStreamImpl::AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCal // TODO(mattklein123): Correctly set protocol in stream info when we support access logging. 
} -void AsyncStreamImpl::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { +void AsyncStreamImpl::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, + absl::string_view) { ENVOY_LOG(debug, "async http request response headers (end_stream={}):\n{}", end_stream, *headers); ASSERT(!remote_closed_); @@ -256,11 +257,11 @@ AsyncRequestImpl::AsyncRequestImpl(RequestMessagePtr&& request, AsyncClientImpl& void AsyncRequestImpl::initialize() { child_span_->injectContext(request_->headers()); - sendHeaders(request_->headers(), !request_->body()); - if (request_->body()) { + sendHeaders(request_->headers(), request_->body().length() == 0); + if (request_->body().length() != 0) { // It's possible this will be a no-op due to a local response synchronously generated in // sendHeaders; guards handle this within AsyncStreamImpl. - sendData(*request_->body(), true); + sendData(request_->body(), true); } // TODO(mattklein123): Support request trailers. } @@ -282,11 +283,8 @@ void AsyncRequestImpl::onHeaders(ResponseHeaderMapPtr&& headers, bool) { } void AsyncRequestImpl::onData(Buffer::Instance& data, bool) { - if (!response_->body()) { - response_->body() = std::make_unique(); - } streamInfo().addBytesReceived(data.length()); - response_->body()->move(data); + response_->body().move(data); } void AsyncRequestImpl::onTrailers(ResponseTrailerMapPtr&& trailers) { diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 1653599d9f27..8ec5261e3852 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -255,6 +255,15 @@ class AsyncStreamImpl : public AsyncClient::Stream, } } absl::optional idleTimeout() const override { return absl::nullopt; } + absl::optional maxStreamDuration() const override { + return absl::nullopt; + } + absl::optional grpcTimeoutHeaderMax() const override { + return absl::nullopt; + } + absl::optional grpcTimeoutHeaderOffset() const override { + return absl::nullopt; + } absl::optional maxGrpcTimeout() const override { return absl::nullopt; } @@ -366,30 +375,30 @@ class AsyncStreamImpl : public AsyncClient::Stream, std::function modify_headers, const absl::optional grpc_status, absl::string_view details) override { - stream_info_.setResponseCodeDetails(details); if (encoded_response_headers_) { resetStream(); return; } Utility::sendLocalReply( remote_closed_, - Utility::EncodeFunctions{ - nullptr, nullptr, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - encodeHeaders(std::move(headers), end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { - encodeData(data, end_stream); - }}, + Utility::EncodeFunctions{nullptr, nullptr, + [this, modify_headers, &details](ResponseHeaderMapPtr&& headers, + bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + encodeHeaders(std::move(headers), end_stream, details); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + encodeData(data, end_stream); + }}, Utility::LocalReplyData{is_grpc_request_, code, body, grpc_status, is_head_request_}); } // The async client won't pause if sending an Expect: 100-Continue so simply // swallows any incoming encode100Continue. 
void encode100ContinueHeaders(ResponseHeaderMapPtr&&) override {} - void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override; + void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, + absl::string_view details) override; void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(ResponseTrailerMapPtr&& trailers) override; void encodeMetadata(MetadataMapPtr&&) override {} @@ -459,7 +468,7 @@ class AsyncRequestImpl final : public AsyncClient::Request, // The request is already fully buffered. Note that this is only called via the async client's // internal use of the router filter which uses this function for buffering. } - const Buffer::Instance* decodingBuffer() override { return request_->body().get(); } + const Buffer::Instance* decodingBuffer() override { return &request_->body(); } void modifyDecodingBuffer(std::function) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 2353eba5be36..95ff7664bf2d 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -149,7 +149,8 @@ void CodecClient::onData(Buffer::Instance& data) { CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& connection, Upstream::HostDescriptionConstSharedPtr host, - Event::Dispatcher& dispatcher) + Event::Dispatcher& dispatcher, + Random::RandomGenerator& random_generator) : CodecClient(type, std::move(connection), host, dispatcher) { switch (type) { @@ -166,17 +167,10 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne break; } case Type::HTTP2: { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), - Http2::ProdNghttp2SessionFactory::get()); - } else { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), - Http2::ProdNghttp2SessionFactory::get()); - } + codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), random_generator, + host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, + host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get()); break; } case Type::HTTP3: { diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index 895b27473715..5d74eb114ec8 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -4,6 +4,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/event/deferred_deletable.h" #include "envoy/event/timer.h" #include "envoy/http/codec.h" @@ -245,7 +246,8 @@ using CodecClientPtr = std::unique_ptr; class CodecClientProd : public CodecClient { public: CodecClientProd(Type type, Network::ClientConnectionPtr&& connection, - Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher); + Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random_generator); }; } // namespace Http diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index b21b1d78a339..fb87ebfa1fc9 100644 --- a/source/common/http/conn_manager_impl.cc +++ 
b/source/common/http/conn_manager_impl.cc @@ -52,6 +52,17 @@ namespace Envoy { namespace Http { +bool requestWasConnect(const RequestHeaderMapPtr& headers, Protocol protocol) { + if (!headers) { + return false; + } + if (protocol <= Protocol::Http11) { + return HeaderUtility::isConnect(*headers); + } + // All HTTP/2 style upgrades were originally connect requests. + return HeaderUtility::isConnect(*headers) || Utility::isUpgrade(*headers); +} + ConnectionManagerStats ConnectionManagerImpl::generateStats(const std::string& prefix, Stats::Scope& scope) { return ConnectionManagerStats( @@ -177,7 +188,19 @@ void ConnectionManagerImpl::doEndStream(ActiveStream& stream) { // TODO(snowp): This call might not be necessary, try to clean up + remove setter function. stream.filter_manager_.setLocalComplete(); stream.state_.codec_saw_local_complete_ = true; - stream.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + + // Per https://tools.ietf.org/html/rfc7540#section-8.3 if there was an error + // with the TCP connection during a CONNECT request, it should be + // communicated via CONNECT_ERROR + if (requestWasConnect(stream.request_headers_, codec_->protocol()) && + (stream.filter_manager_.streamInfo().hasResponseFlag( + StreamInfo::ResponseFlag::UpstreamConnectionFailure) || + stream.filter_manager_.streamInfo().hasResponseFlag( + StreamInfo::ResponseFlag::UpstreamConnectionTermination))) { + stream.response_encoder_->getStream().resetStream(StreamResetReason::ConnectError); + } else { + stream.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + } reset_stream = true; } @@ -204,6 +227,7 @@ void ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) { stream.filter_manager_.disarmRequestTimeout(); stream.completeRequest(); + stream.filter_manager_.onStreamComplete(); stream.filter_manager_.log(); stream.filter_manager_.destroyFilters(); @@ -244,7 +268,7 @@ void ConnectionManagerImpl::handleCodecError(absl::string_view error) { // GOAWAY. doConnectionClose(Network::ConnectionCloseType::FlushWriteAndDelay, StreamInfo::ResponseFlag::DownstreamProtocolError, - absl::StrCat("codec error: ", error)); + absl::StrCat("codec error:", error)); } void ConnectionManagerImpl::createCodec(Buffer::Instance& data) { @@ -280,8 +304,7 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool const Status status = codec_->dispatch(data); - ASSERT(!isPrematureResponseError(status)); - if (isBufferFloodError(status)) { + if (isBufferFloodError(status) || isInboundFramesWithEmptyPayloadError(status)) { handleCodecError(status.message()); return Network::FilterStatus::StopIteration; } else if (isCodecProtocolError(status)) { @@ -289,6 +312,7 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool handleCodecError(status.message()); return Network::FilterStatus::StopIteration; } + ASSERT(status.ok()); // Processing incoming data may release outbound data so check for closure here as well. checkForDeferredClose(); @@ -440,6 +464,7 @@ void ConnectionManagerImpl::onIdleTimeout() { } } +// TODO(#13142): Add DurationTimeout response flag for HCM. 
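The reset-reason selection above follows RFC 7540 section 8.3: when the downstream request was a CONNECT (or, over HTTP/2, an upgrade, which is carried as CONNECT) and the upstream connection failed or was terminated, the stream is reset with CONNECT_ERROR instead of a generic local reset. A condensed sketch of the request classification, with hypothetical types standing in for the Envoy header utilities:

#include <iostream>
#include <string>

enum class Protocol { Http10, Http11, Http2 };

// Hypothetical stand-in for the header inspection done by HeaderUtility /
// Utility in the hunk above.
struct Request {
  std::string method;
  bool has_upgrade = false;
};

bool requestWasConnect(const Request& request, Protocol protocol) {
  if (protocol <= Protocol::Http11) {
    return request.method == "CONNECT";
  }
  // Per the comment in the hunk, HTTP/2 style upgrades were originally
  // CONNECT requests, so they count for reset-reason purposes.
  return request.method == "CONNECT" || request.has_upgrade;
}

int main() {
  std::cout << requestWasConnect({"GET", true}, Protocol::Http11) << "\n"; // 0
  std::cout << requestWasConnect({"GET", true}, Protocol::Http2) << "\n";  // 1
  std::cout << requestWasConnect({"CONNECT"}, Protocol::Http11) << "\n";   // 1
}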
void ConnectionManagerImpl::onConnectionDurationTimeout() { ENVOY_CONN_LOG(debug, "max connection duration reached", read_callbacks_->connection()); stats_.named_.downstream_cx_max_duration_reached_.inc(); @@ -601,6 +626,9 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect filter_manager_.streamInfo().setDownstreamSslConnection( connection_manager_.read_callbacks_->connection().ssl()); + filter_manager_.streamInfo().setConnectionID( + connection_manager_.read_callbacks_->connection().id()); + if (connection_manager_.config_.streamIdleTimeout().count()) { idle_timeout_ms_ = connection_manager_.config_.streamIdleTimeout(); stream_idle_timer_ = connection_manager_.read_callbacks_->connection().dispatcher().createTimer( @@ -650,12 +678,10 @@ void ConnectionManagerImpl::ActiveStream::completeRequest() { filter_manager_.streamInfo().setResponseFlag( StreamInfo::ResponseFlag::DownstreamConnectionTermination); } - if (connection_manager_.codec_->protocol() < Protocol::Http2) { - // For HTTP/2 there are still some reset cases where details are not set. - // For HTTP/1 there shouldn't be any. Regression-proof this. + // TODO(danzh) bring HTTP/3 to parity here. + if (connection_manager_.codec_->protocol() != Protocol::Http3) { ASSERT(filter_manager_.streamInfo().responseCodeDetails().has_value()); } - connection_manager_.stats_.named_.downstream_rq_active_.dec(); if (filter_manager_.streamInfo().healthCheck()) { connection_manager_.config_.tracingStats().health_check_.inc(); @@ -715,7 +741,8 @@ void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { sendLocalReply(request_headers_ != nullptr && Grpc::Common::isGrpcRequestHeaders(*request_headers_), Http::Code::RequestTimeout, "downstream duration timeout", nullptr, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); + Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, + StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); } else { filter_manager_.streamInfo().setResponseCodeDetails( StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); @@ -838,7 +865,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he filter_manager_.maybeEndDecode(end_stream); // Drop new requests when overloaded as soon as we have decoded the headers. - if (connection_manager_.overload_stop_accepting_requests_ref_.isSaturated()) { + if (connection_manager_.random_generator_.bernoulli( + connection_manager_.overload_stop_accepting_requests_ref_.value())) { // In this one special case, do not create the filter chain. If there is a risk of memory // overload it is more important to avoid unnecessary allocation than to create the filters. filter_manager_.skipFilterChainCreation(); @@ -1136,6 +1164,83 @@ void ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() { void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { refreshCachedRoute(nullptr); } +void ConnectionManagerImpl::ActiveStream::refreshDurationTimeout() { + if (!filter_manager_.streamInfo().route_entry_ || !request_headers_) { + return; + } + auto& route = filter_manager_.streamInfo().route_entry_; + + auto grpc_timeout = Grpc::Common::getGrpcTimeout(*request_headers_); + std::chrono::milliseconds timeout; + bool disable_timer = false; + + if (!grpc_timeout || !route->grpcTimeoutHeaderMax()) { + // Either there is no grpc-timeout header or special timeouts for it are not + // configured. Use stream duration. 
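The overload check above moves from all-or-nothing ("is the action saturated?") to probabilistic rejection. A minimal sketch of the intended semantics, assuming value() reports the action's state in [0, 1] and bernoulli(p) returns true with probability p; the local names are illustrative, not the exact member types.

  const auto pressure = overload_stop_accepting_requests.value();  // e.g. 0.25 under moderate load
  if (random_generator.bernoulli(pressure)) {
    // Reject this request: at 0.25 roughly one in four new streams gets a local reply without a
    // filter chain, instead of rejecting everything only once the action fully saturates.
  }
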
+ if (route->maxStreamDuration()) { + timeout = route->maxStreamDuration().value(); + if (timeout == std::chrono::milliseconds(0)) { + // Explicitly configured 0 means no timeout. + disable_timer = true; + } + } else { + // Fall back to HCM config. If no HCM duration limit exists, disable + // timers set by any prior route configuration. + const auto max_stream_duration = connection_manager_.config_.maxStreamDuration(); + if (max_stream_duration.has_value() && max_stream_duration.value().count()) { + timeout = max_stream_duration.value(); + } else { + disable_timer = true; + } + } + } else { + // Start with the timeout equal to the gRPC timeout header. + timeout = grpc_timeout.value(); + // If there's a valid cap, apply it. + if (timeout > route->grpcTimeoutHeaderMax().value() && + route->grpcTimeoutHeaderMax().value() != std::chrono::milliseconds(0)) { + timeout = route->grpcTimeoutHeaderMax().value(); + } + + // Apply the configured offset. + if (timeout != std::chrono::milliseconds(0) && route->grpcTimeoutHeaderOffset()) { + const auto offset = route->grpcTimeoutHeaderOffset().value(); + if (offset < timeout) { + timeout -= offset; + } else { + timeout = std::chrono::milliseconds(0); + } + } + } + + // Disable any existing timer if configured to do so. + if (disable_timer) { + if (max_stream_duration_timer_) { + max_stream_duration_timer_->disableTimer(); + } + return; + } + + // See how long this stream has been alive, and adjust the timeout + // accordingly. + std::chrono::duration time_used = std::chrono::duration_cast( + connection_manager_.timeSource().monotonicTime() - + filter_manager_.streamInfo().startTimeMonotonic()); + if (timeout > time_used) { + timeout -= time_used; + } else { + timeout = std::chrono::milliseconds(0); + } + + // Finally create (if necessary) and enable the timer. 
+ if (!max_stream_duration_timer_) { + max_stream_duration_timer_ = + connection_manager_.read_callbacks_->connection().dispatcher().createTimer( + [this]() -> void { onStreamMaxDurationReached(); }); + } + max_stream_duration_timer_->enableTimer(timeout); +} + void ConnectionManagerImpl::ActiveStream::refreshCachedRoute(const Router::RouteCallback& cb) { Router::RouteConstSharedPtr route; if (request_headers_ != nullptr) { @@ -1161,6 +1266,7 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedRoute(const Router::Route filter_manager_.streamInfo().setUpstreamClusterInfo(cached_cluster_info_.value()); refreshCachedTracingCustomTags(); + refreshDurationTimeout(); } void ConnectionManagerImpl::ActiveStream::refreshCachedTracingCustomTags() { @@ -1256,7 +1362,8 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& heade bool drain_connection_due_to_overload = false; if (connection_manager_.drain_state_ == DrainState::NotDraining && - connection_manager_.overload_disable_keepalive_ref_.isSaturated()) { + connection_manager_.random_generator_.bernoulli( + connection_manager_.overload_disable_keepalive_ref_.value())) { ENVOY_STREAM_LOG(debug, "disabling keepalive due to envoy overload", *this); if (connection_manager_.codec_->protocol() < Protocol::Http2 || Runtime::runtimeFeatureEnabled( @@ -1390,7 +1497,8 @@ void ConnectionManagerImpl::ActiveStream::onDecoderFilterAboveWriteBufferHighWat connection_manager_.stats_.named_.downstream_flow_control_paused_reading_total_.inc(); } -void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason, absl::string_view) { +void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason reset_reason, + absl::string_view) { // NOTE: This function gets called in all of the following cases: // 1) We TX an app level reset // 2) The codec TX a codec level reset @@ -1399,11 +1507,13 @@ void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason, absl: ENVOY_STREAM_LOG(debug, "stream reset", *this); connection_manager_.stats_.named_.downstream_rq_rx_reset_.inc(); - // If the codec sets its responseDetails(), impute a - // DownstreamProtocolError and propagate the details upwards. + // If the codec sets its responseDetails() for a reason other than peer reset, set a + // DownstreamProtocolError. Either way, propagate details. const absl::string_view encoder_details = response_encoder_->getStream().responseDetails(); - if (!encoder_details.empty()) { + if (!encoder_details.empty() && reset_reason == StreamResetReason::LocalReset) { filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError); + } + if (!encoder_details.empty()) { filter_manager_.streamInfo().setResponseCodeDetails(encoder_details); } diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index df172dac16b2..280f61ab0b2e 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -290,6 +290,7 @@ class ConnectionManagerImpl : Logger::Loggable, Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) override; void refreshCachedTracingCustomTags(); + void refreshDurationTimeout(); // All state for the stream. Put here for readability. 
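A worked example of the timeout selection in refreshDurationTimeout() above, using hypothetical route settings and header values to show the cap, offset, and elapsed-time adjustment.

  // grpc-timeout: 5S                         -> timeout = 5000ms
  // route grpc_timeout_header_max: 3000ms    -> capped  = 3000ms (a configured 0 means "no cap")
  // route grpc_timeout_header_offset: 500ms  -> timeout = 2500ms
  // stream already alive for 400ms           -> timer enabled for 2100ms
  std::chrono::milliseconds timeout(5000);
  timeout = std::min(timeout, std::chrono::milliseconds(3000));
  timeout -= std::chrono::milliseconds(500);
  timeout -= std::chrono::milliseconds(400);
  max_stream_duration_timer_->enableTimer(timeout);  // fires onStreamMaxDurationReached() in 2.1s
  // Without a grpc-timeout header, the route's max_stream_duration (or the HCM limit) is used
  // instead, and an explicitly configured 0 disables the timer entirely.
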
struct State { @@ -435,4 +436,4 @@ class ConnectionManagerImpl : Logger::Loggable, }; } // namespace Http -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index c8ce01993cfa..86c7861bb420 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -44,7 +44,7 @@ std::string ConnectionManagerUtility::determineNextProtocol(Network::Connection& ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( Network::Connection& connection, const Buffer::Instance& data, - ServerConnectionCallbacks& callbacks, Stats::Scope& scope, + ServerConnectionCallbacks& callbacks, Stats::Scope& scope, Random::RandomGenerator& random, Http1::CodecStats::AtomicPtr& http1_codec_stats, Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, @@ -55,11 +55,11 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope); if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { return std::make_unique( - connection, callbacks, stats, http2_options, max_request_headers_kb, + connection, callbacks, stats, random, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { return std::make_unique( - connection, callbacks, stats, http2_options, max_request_headers_kb, + connection, callbacks, stats, random, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } } else { diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index b46a98c2f0a7..768971ffac2d 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -38,7 +38,7 @@ class ConnectionManagerUtility { static ServerConnectionPtr autoCreateCodec(Network::Connection& connection, const Buffer::Instance& data, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, - Http1::CodecStats::AtomicPtr& http1_codec_stats, + Random::RandomGenerator& random, Http1::CodecStats::AtomicPtr& http1_codec_stats, Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 939eed295b01..f24eea263f06 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -51,6 +51,9 @@ class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, Http::ConnectionPool::Callbacks& callbacks) override; + bool maybePrefetch(float ratio) override { + return Envoy::ConnectionPool::ConnPoolImplBase::maybePrefetch(ratio); + } bool hasActiveConnections() const override; // Creates a new PendingStream and enqueues it into the queue. 
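The maybePrefetch() override above simply forwards to the shared pool base. A hedged sketch of how a caller is expected to drive it, assuming each call opens at most one upstream connection when capacity is below the configured ratio of connections to pending plus active streams, and returns whether it did; the 1.5 ratio is an example value.

  // Keep warming connections until the pool reports it has enough capacity in flight.
  while (pool.maybePrefetch(1.5)) {
  }
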
diff --git a/source/common/http/filter_manager.cc b/source/common/http/filter_manager.cc index d81cdc00fdaa..e866101e4a26 100644 --- a/source/common/http/filter_manager.cc +++ b/source/common/http/filter_manager.cc @@ -6,6 +6,7 @@ #include "common/http/header_map_impl.h" #include "common/http/header_utility.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Http { @@ -109,8 +110,7 @@ bool ActiveStreamFilterBase::commonHandleAfter100ContinueHeadersCallback( } bool ActiveStreamFilterBase::commonHandleAfterHeadersCallback(FilterHeadersStatus status, - bool& end_stream, - bool& headers_only) { + bool& end_stream) { ASSERT(!headers_continued_); ASSERT(canIterate()); @@ -124,14 +124,7 @@ bool ActiveStreamFilterBase::commonHandleAfterHeadersCallback(FilterHeadersStatu case FilterHeadersStatus::StopAllIterationAndWatermark: iteration_state_ = IterationState::StopAllWatermark; break; - case FilterHeadersStatus::ContinueAndEndStream: - // Set headers_only to true so we know to end early if necessary, - // but continue filter iteration so we actually write the headers/run the cleanup code. - headers_only = true; - ENVOY_STREAM_LOG(debug, "converting to headers only", parent_); - break; case FilterHeadersStatus::ContinueAndDontEndStream: - headers_only = false; end_stream = false; headers_continued_ = true; ENVOY_STREAM_LOG(debug, "converting to headers and body (body not available yet)", parent_); @@ -340,7 +333,6 @@ void ActiveStreamDecoderFilter::sendLocalReply( Code code, absl::string_view body, std::function modify_headers, const absl::optional grpc_status, absl::string_view details) { - parent_.stream_info_.setResponseCodeDetails(details); parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, grpc_status, details); } @@ -354,7 +346,9 @@ void ActiveStreamDecoderFilter::encode100ContinueHeaders(ResponseHeaderMapPtr&& } } -void ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { +void ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, + absl::string_view details) { + parent_.stream_info_.setResponseCodeDetails(details); parent_.filter_manager_callbacks_.setResponseHeaders(std::move(headers)); parent_.encodeHeaders(nullptr, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream); } @@ -445,13 +439,9 @@ void FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHead for (; entry != decoder_filters_.end(); entry++) { ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders)); state_.filter_call_state_ |= FilterCallState::DecodeHeaders; - (*entry)->end_stream_ = state_.decoding_headers_only_ || - (end_stream && continue_data_entry == decoder_filters_.end()); + (*entry)->end_stream_ = (end_stream && continue_data_entry == decoder_filters_.end()); FilterHeadersStatus status = (*entry)->decodeHeaders(headers, (*entry)->end_stream_); - ASSERT(!(status == FilterHeadersStatus::ContinueAndEndStream && (*entry)->end_stream_), - "Filters should not return FilterHeadersStatus::ContinueAndEndStream from decodeHeaders " - "when end_stream is already true"); ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " "decodeHeaders when end_stream is already false"); @@ -462,12 +452,10 @@ void FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHead (*entry)->decode_headers_called_ = true; - // 
decoding_headers_only_ is set if the filter returns ContinueAndEndStream. - const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback( - status, end_stream, state_.decoding_headers_only_); + const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(status, end_stream); // If this filter ended the stream, decodeComplete() should be called for it. - if ((*entry)->end_stream_ || state_.decoding_headers_only_) { + if ((*entry)->end_stream_) { (*entry)->handle_->decodeComplete(); } @@ -512,11 +500,6 @@ void FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instan ScopeTrackerScopeState scope(&*this, dispatcher_); filter_manager_callbacks_.resetIdleTimer(); - // If we previously decided to decode only the headers, do nothing here. - if (state_.decoding_headers_only_) { - return; - } - // If a response is complete or a reset has been sent, filters do not care about further body // data. Just drop it. if (state_.local_complete_) { @@ -651,11 +634,6 @@ void FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::In MetadataMapVector& FilterManager::addDecodedMetadata() { return *getRequestMetadataMapVector(); } void FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers) { - // If we previously decided to decode only the headers, do nothing here. - if (state_.decoding_headers_only_) { - return; - } - // See decodeData() above for why we check local_complete_ here. if (state_.local_complete_) { return; @@ -758,10 +736,15 @@ FilterManager::commonDecodePrefix(ActiveStreamDecoderFilter* filter, } void FilterManager::sendLocalReply( - bool is_grpc_request, Code code, absl::string_view body, + bool old_was_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, const absl::optional grpc_status, absl::string_view details) { const bool is_head_request = state_.is_head_request_; + bool is_grpc_request = old_was_grpc_request; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.unify_grpc_handling")) { + is_grpc_request = state_.is_grpc_request_; + } + stream_info_.setResponseCodeDetails(details); filter_manager_callbacks_.onLocalReply(code); @@ -781,7 +764,6 @@ void FilterManager::sendLocalReply( // sendDirectLocalReply(code, body, modify_headers, state_.is_head_request_, grpc_status); } else { - stream_info_.setResponseCodeDetails(details); // If we land in this branch, response headers have already been sent to the client. // All we can do at this point is reset the stream. ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. 
Prior headers have already been sent", @@ -806,7 +788,16 @@ void FilterManager::sendLocalReplyViaFilterChain( Utility::sendLocalReply( state_.destroyed_, Utility::EncodeFunctions{ - modify_headers, + [this, modify_headers](ResponseHeaderMap& headers) -> void { + if (streamInfo().route_entry_ && + Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.always_apply_route_header_rules")) { + streamInfo().route_entry_->finalizeResponseHeaders(headers, streamInfo()); + } + if (modify_headers) { + modify_headers(headers); + } + }, [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { // TODO(snowp): This &get() business isn't nice, rework LocalReply and others to accept @@ -840,7 +831,16 @@ void FilterManager::sendDirectLocalReply( Http::Utility::sendLocalReply( state_.destroyed_, Utility::EncodeFunctions{ - modify_headers, + [this, modify_headers](ResponseHeaderMap& headers) -> void { + if (streamInfo().route_entry_ && + Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.always_apply_route_header_rules")) { + streamInfo().route_entry_->finalizeResponseHeaders(headers, streamInfo()); + } + if (modify_headers) { + modify_headers(headers); + } + }, [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().has_value() @@ -856,17 +856,14 @@ void FilterManager::sendDirectLocalReply( state_.non_100_response_headers_encoded_ = true; filter_manager_callbacks_.encodeHeaders(*filter_manager_callbacks_.responseHeaders(), end_stream); + maybeEndEncode(end_stream); }, [&](Buffer::Instance& data, bool end_stream) -> void { filter_manager_callbacks_.encodeData(data, end_stream); maybeEndEncode(end_stream); }}, - Utility::LocalReplyData{ - filter_manager_callbacks_.requestHeaders().has_value() && - Grpc::Common::hasGrpcContentType(filter_manager_callbacks_.requestHeaders()->get()), - code, body, grpc_status, is_head_request}); - maybeEndEncode(state_.local_complete_); + Utility::LocalReplyData{state_.is_grpc_request_, code, body, grpc_status, is_head_request}); } void FilterManager::encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, @@ -929,13 +926,9 @@ void FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHea for (; entry != encoder_filters_.end(); entry++) { ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeHeaders)); state_.filter_call_state_ |= FilterCallState::EncodeHeaders; - (*entry)->end_stream_ = state_.encoding_headers_only_ || - (end_stream && continue_data_entry == encoder_filters_.end()); + (*entry)->end_stream_ = (end_stream && continue_data_entry == encoder_filters_.end()); FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_); - ASSERT(!(status == FilterHeadersStatus::ContinueAndEndStream && (*entry)->end_stream_), - "Filters should not return FilterHeadersStatus::ContinueAndEndStream from encodeHeaders " - "when end_stream is already true"); ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " "encodeHeaders when end_stream is already false"); @@ -946,21 +939,13 @@ void FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHea (*entry)->encode_headers_called_ = true; - // encoding_headers_only_ is set if the filter returns ContinueAndEndStream. 
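With ContinueAndEndStream and the headers-only bookkeeping removed above, the remaining "convert" status is ContinueAndDontEndStream. A hedged sketch of a decoder filter that receives a headers-only request but intends to inject a body later; the class and member names are illustrative.

  Http::FilterHeadersStatus ExampleFilter::decodeHeaders(Http::RequestHeaderMap&, bool end_stream) {
    if (end_stream && will_generate_body_) {
      // Continue iteration, but tell the filter manager the stream no longer ends with these
      // headers; a later addDecodedData()/continueDecoding() call supplies the body.
      return Http::FilterHeadersStatus::ContinueAndDontEndStream;
    }
    return Http::FilterHeadersStatus::Continue;
  }
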
- const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback( - status, end_stream, state_.encoding_headers_only_); + const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(status, end_stream); // If this filter ended the stream, encodeComplete() should be called for it. - if ((*entry)->end_stream_ || state_.encoding_headers_only_) { + if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } - // If we're encoding a headers only response, then mark the local as complete. This ensures - // that we don't attempt to reset the downstream request in doEndStream. - if (state_.encoding_headers_only_) { - state_.local_complete_ = true; - } - if (!continue_iteration) { if (!(*entry)->end_stream_) { maybeContinueEncoding(continue_data_entry); @@ -975,8 +960,7 @@ void FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHea } } - const bool modified_end_stream = state_.encoding_headers_only_ || - (end_stream && continue_data_entry == encoder_filters_.end()); + const bool modified_end_stream = (end_stream && continue_data_entry == encoder_filters_.end()); state_.non_100_response_headers_encoded_ = true; filter_manager_callbacks_.encodeHeaders(headers, modified_end_stream); maybeEndEncode(modified_end_stream); @@ -1055,11 +1039,6 @@ void FilterManager::encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instan FilterIterationStartState filter_iteration_start_state) { filter_manager_callbacks_.resetIdleTimer(); - // If we previously decided to encode only the headers, do nothing here. - if (state_.encoding_headers_only_) { - return; - } - // Filter iteration may start at the current filter. std::list::iterator entry = commonEncodePrefix(filter, end_stream, filter_iteration_start_state); @@ -1111,7 +1090,6 @@ void FilterManager::encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instan } const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end(); - ASSERT(!state_.encoding_headers_only_); filter_manager_callbacks_.encodeData(data, modified_end_stream); maybeEndEncode(modified_end_stream); @@ -1126,11 +1104,6 @@ void FilterManager::encodeTrailers(ActiveStreamEncoderFilter* filter, ResponseTrailerMap& trailers) { filter_manager_callbacks_.resetIdleTimer(); - // If we previously decided to encode only the headers, do nothing here. - if (state_.encoding_headers_only_) { - return; - } - // Filter iteration may start at the current filter. std::list::iterator entry = commonEncodePrefix(filter, true, FilterIterationStartState::CanStartFromCurrent); @@ -1415,6 +1388,14 @@ void ActiveStreamEncoderFilter::modifyEncodingBuffer( callback(*parent_.buffered_response_data_.get()); } +void ActiveStreamEncoderFilter::sendLocalReply( + Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, absl::string_view details) { + parent_.sendLocalReply(parent_.state_.is_grpc_request_, code, body, modify_headers, grpc_status, + details); +} + Http1StreamEncoderOptionsOptRef ActiveStreamEncoderFilter::http1StreamEncoderOptions() { // TODO(mattklein123): At some point we might want to actually wrap this interface but for now // we give the filter direct access to the encoder options. 
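ActiveStreamEncoderFilter::sendLocalReply() above gives encoder filters the same escape hatch decoder filters already had. A hedged sketch of an encoder filter replacing an unacceptable upstream response; the class, the isAcceptable() helper, and the details string are illustrative, and this assumes response headers have not yet been committed to the wire.

  Http::FilterHeadersStatus ExampleEncoderFilter::encodeHeaders(Http::ResponseHeaderMap& headers,
                                                                bool) {
    if (!isAcceptable(headers)) {
      encoder_callbacks_->sendLocalReply(Http::Code::BadGateway, "unacceptable upstream response",
                                         nullptr, absl::nullopt, "example_encoder_reject");
      return Http::FilterHeadersStatus::StopIteration;
    }
    return Http::FilterHeadersStatus::Continue;
  }
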
diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index 7af9c11ecfd5..507e3f4a8684 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -31,8 +31,7 @@ struct ActiveStreamFilterBase : public virtual StreamFilterCallbacks, // corresponding data. Those functions handle state updates and data storage (if needed) // according to the status returned by filter's callback functions. bool commonHandleAfter100ContinueHeadersCallback(FilterHeadersStatus status); - bool commonHandleAfterHeadersCallback(FilterHeadersStatus status, bool& end_stream, - bool& headers_only); + bool commonHandleAfterHeadersCallback(FilterHeadersStatus status, bool& end_stream); bool commonHandleAfterDataCallback(FilterDataStatus status, Buffer::Instance& provided_data, bool& buffer_was_streaming); bool commonHandleAfterTrailersCallback(FilterTrailersStatus status); @@ -173,7 +172,8 @@ struct ActiveStreamDecoderFilter : public ActiveStreamFilterBase, const absl::optional grpc_status, absl::string_view details) override; void encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override; - void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override; + void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, + absl::string_view details) override; void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(ResponseTrailerMapPtr&& trailers) override; void encodeMetadata(MetadataMapPtr&& metadata_map_ptr) override; @@ -254,6 +254,10 @@ struct ActiveStreamEncoderFilter : public ActiveStreamFilterBase, void continueEncoding() override; const Buffer::Instance* encodingBuffer() override; void modifyEncodingBuffer(std::function callback) override; + void sendLocalReply(Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, + absl::string_view details) override; Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override; void responseDataTooLarge(); @@ -497,9 +501,7 @@ class FilterManager : public ScopeTrackedObject, // ScopeTrackedObject void dumpState(std::ostream& os, int indent_level = 0) const override { const char* spaces = spacesForLevel(indent_level); - os << spaces << "FilterManager " << this << DUMP_MEMBER(state_.has_continue_headers_) - << DUMP_MEMBER(state_.decoding_headers_only_) << DUMP_MEMBER(state_.encoding_headers_only_) - << "\n"; + os << spaces << "FilterManager " << this << DUMP_MEMBER(state_.has_continue_headers_) << "\n"; DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.requestHeaders()); DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.requestTrailers()); @@ -540,6 +542,19 @@ class FilterManager : public ScopeTrackedObject, } } + void onStreamComplete() { + for (auto& filter : decoder_filters_) { + filter->handle_->onStreamComplete(); + } + + for (auto& filter : encoder_filters_) { + // Do not call onStreamComplete twice for dual registered filters. 
+ if (!filter->dual_filter_) { + filter->handle_->onStreamComplete(); + } + } + } + void destroyFilters() { state_.destroyed_ = true; @@ -643,6 +658,8 @@ class FilterManager : public ScopeTrackedObject, filter_manager_callbacks_.requestHeaders()->get().getMethodValue()) { state_.is_head_request_ = true; } + state_.is_grpc_request_ = + Grpc::Common::isGrpcRequestHeaders(filter_manager_callbacks_.requestHeaders()->get()); } /** @@ -796,7 +813,7 @@ class FilterManager : public ScopeTrackedObject, struct State { State() : remote_complete_(false), local_complete_(false), has_continue_headers_(false), - created_filter_chain_(false), is_head_request_(false), + created_filter_chain_(false), is_head_request_(false), is_grpc_request_(false), non_100_response_headers_encoded_(false) {} uint32_t filter_call_state_{0}; @@ -809,7 +826,10 @@ class FilterManager : public ScopeTrackedObject, // is ever called, this is set to true so commonContinue resumes processing the 100-Continue. bool has_continue_headers_ : 1; bool created_filter_chain_ : 1; + // These two are latched on initial header read, to determine if the original headers + // constituted a HEAD or gRPC request, respectively. bool is_head_request_ : 1; + bool is_grpc_request_ : 1; // Tracks if headers other than 100-Continue have been encoded to the codec. bool non_100_response_headers_encoded_ : 1; @@ -819,12 +839,6 @@ class FilterManager : public ScopeTrackedObject, bool encoder_filters_streaming_{true}; bool decoder_filters_streaming_{true}; bool destroyed_{false}; - // Whether a filter has indicated that the response should be treated as a headers only - // response. - bool encoding_headers_only_{false}; - // Whether a filter has indicated that the request should be treated as a headers only - // request. - bool decoding_headers_only_{false}; // Used to track which filter is the latest filter that has received data. ActiveStreamEncoderFilter* latest_data_encoding_filter_{}; diff --git a/source/common/http/hash_policy.cc b/source/common/http/hash_policy.cc index d00dbb99fed7..9fa0f6377fb0 100644 --- a/source/common/http/hash_policy.cc +++ b/source/common/http/hash_policy.cc @@ -39,13 +39,14 @@ class HeaderHashMethod : public HashMethodImplBase { const StreamInfo::FilterStateSharedPtr) const override { absl::optional hash; - const HeaderEntry* header = headers.get(header_name_); - if (header) { + // TODO(mattklein123): Potentially hash on all headers. 
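The loop above invokes the new onStreamComplete() hook once per filter (dual-registered filters are only visited on the decoder pass), after the request completes but before access logging and filter destruction. A hedged sketch of a filter using it; the PassThroughFilter base class is just a convenient example.

  class ExampleFilter : public Http::PassThroughFilter {
  public:
    void onStreamComplete() override {
      // Last chance to publish per-stream state (filter state, dynamic metadata, stats) that
      // access loggers should observe.
    }
  };
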
+ const auto header = headers.get(header_name_); + if (!header.empty()) { if (regex_rewrite_ != nullptr) { - hash = HashUtil::xxHash64(regex_rewrite_->replaceAll(header->value().getStringView(), + hash = HashUtil::xxHash64(regex_rewrite_->replaceAll(header[0]->value().getStringView(), regex_rewrite_substitution_)); } else { - hash = HashUtil::xxHash64(header->value().getStringView()); + hash = HashUtil::xxHash64(header[0]->value().getStringView()); } } return hash; diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index ce63493486b7..6a4c00e5663e 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -10,6 +10,7 @@ #include "common/common/assert.h" #include "common/common/dump_state_utils.h" #include "common/common/empty_string.h" +#include "common/runtime/runtime_features.h" #include "common/singleton/const_singleton.h" #include "absl/strings/match.h" @@ -178,6 +179,48 @@ template <> bool HeaderMapImpl::HeaderList::isPseudoHeader(const LowerCaseString return key.get().c_str()[0] == ':'; } +bool HeaderMapImpl::HeaderList::maybeMakeMap() { + if (lazy_map_.empty()) { + if (headers_.size() < lazy_map_min_size_) { + return false; + } + // Add all entries from the list into the map. + for (auto node = headers_.begin(); node != headers_.end(); ++node) { + HeaderNodeVector& v = lazy_map_[node->key().getStringView()]; + v.push_back(node); + } + } + return true; +} + +size_t HeaderMapImpl::HeaderList::remove(absl::string_view key) { + size_t removed_bytes = 0; + if (maybeMakeMap()) { + auto iter = lazy_map_.find(key); + if (iter != lazy_map_.end()) { + // Erase from the map, and all same key entries from the list. + HeaderNodeVector header_nodes = std::move(iter->second); + lazy_map_.erase(iter); + for (const HeaderNode& node : header_nodes) { + ASSERT(node->key() == key); + removed_bytes += node->key().size() + node->value().size(); + erase(node, false /* remove_from_map */); + } + } + } else { + // Erase all same key entries from the list. + for (auto i = headers_.begin(); i != headers_.end();) { + if (i->key() == key) { + removed_bytes += i->key().size() + i->value().size(); + i = erase(i, false /* remove_from_map */); + } else { + ++i; + } + } + } + return removed_bytes; +} + HeaderMapImpl::HeaderEntryImpl::HeaderEntryImpl(const LowerCaseString& key) : key_(key) {} HeaderMapImpl::HeaderEntryImpl::HeaderEntryImpl(const LowerCaseString& key, HeaderString&& value) @@ -326,7 +369,7 @@ void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { } } else { addSize(key.size() + value.size()); - std::list::iterator i = headers_.insert(std::move(key), std::move(value)); + HeaderNode i = headers_.insert(std::move(key), std::move(value)); i->entry_ = i; } } @@ -383,9 +426,9 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, absl::string_view value) void HeaderMapImpl::appendCopy(const LowerCaseString& key, absl::string_view value) { // TODO(#9221): converge on and document a policy for coalescing multiple headers. 
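The hash-policy change above reflects the new HeaderMap::get() contract: instead of a single HeaderEntry pointer it returns an indexable result covering every header with the given key. A hedged usage sketch; the key and values are examples.

  const auto result = headers.get(Http::LowerCaseString("x-example"));
  if (!result.empty()) {
    // result[i] is a const HeaderEntry*; the hash policy above only hashes result[0], hence the
    // TODO about potentially hashing all values.
    const absl::string_view first_value = result[0]->value().getStringView();
    for (size_t i = 1; i < result.size(); ++i) {
      // Additional values for repeated headers, in insertion order.
    }
  }
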
- auto* entry = getExisting(key); - if (entry) { - const uint64_t added_size = appendToHeader(entry->value(), value); + auto entry = getExisting(key); + if (!entry.empty()) { + const uint64_t added_size = appendToHeader(entry[0]->value(), value); addSize(added_size); } else { addCopy(key, value); @@ -393,29 +436,27 @@ void HeaderMapImpl::appendCopy(const LowerCaseString& key, absl::string_view val } void HeaderMapImpl::setReference(const LowerCaseString& key, absl::string_view value) { - HeaderString ref_key(key); - HeaderString ref_value(value); remove(key); - insertByKey(std::move(ref_key), std::move(ref_value)); + addReference(key, value); } void HeaderMapImpl::setReferenceKey(const LowerCaseString& key, absl::string_view value) { - HeaderString ref_key(key); - HeaderString new_value; - new_value.setCopy(value); remove(key); - insertByKey(std::move(ref_key), std::move(new_value)); - ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move) + addReferenceKey(key, value); } void HeaderMapImpl::setCopy(const LowerCaseString& key, absl::string_view value) { - // Replaces the first occurrence of a header if it exists, otherwise adds by copy. - // TODO(#9221): converge on and document a policy for coalescing multiple headers. - auto* entry = getExisting(key); - if (entry) { - updateSize(entry->value().size(), value.size()); - entry->value(value); + if (!Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http_set_copy_replace_all_headers")) { + auto entry = getExisting(key); + if (!entry.empty()) { + updateSize(entry[0]->value().size(), value.size()); + entry[0]->value(value); + } else { + addCopy(key, value); + } } else { + remove(key); addCopy(key, value); } } @@ -432,33 +473,49 @@ void HeaderMapImpl::verifyByteSizeInternalForTest() const { ASSERT(cached_byte_size_ == byte_size); } -const HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) const { - return const_cast(this)->getExisting(key); +HeaderMap::GetResult HeaderMapImpl::get(const LowerCaseString& key) const { + return HeaderMap::GetResult(const_cast(this)->getExisting(key)); } -HeaderEntry* HeaderMapImpl::getExisting(const LowerCaseString& key) { +HeaderMap::NonConstGetResult HeaderMapImpl::getExisting(const LowerCaseString& key) { // Attempt a trie lookup first to see if the user is requesting an O(1) header. This may be // relatively common in certain header matching / routing patterns. // TODO(mattklein123): Add inline handle support directly to the header matcher code to support // this use case more directly. + HeaderMap::NonConstGetResult ret; auto lookup = staticLookup(key.get()); if (lookup.has_value()) { - return *lookup.value().entry_; + if (*lookup.value().entry_ != nullptr) { + ret.push_back(*lookup.value().entry_); + } + return ret; + } + + // If the requested header is not an O(1) header try using the lazy map to + // search for it instead of iterating the headers list. + if (headers_.maybeMakeMap()) { + HeaderList::HeaderLazyMap::iterator iter = headers_.mapFind(key.get()); + if (iter != headers_.mapEnd()) { + const HeaderList::HeaderNodeVector& v = iter->second; + ASSERT(!v.empty()); // It's impossible to have a map entry with an empty vector as its value. + for (const auto& values_it : v) { + // Convert the iterated value to a HeaderEntry*. + ret.push_back(&(*values_it)); + } + } + return ret; } - // If the requested header is not an O(1) header we do a full scan. 
Doing the trie lookup is - // wasteful in the miss case, but is present for code consistency with other functions that do - // similar things. - // TODO(mattklein123): The full scan here and in remove() are the biggest issues with this - // implementation for certain use cases. We can either replace this with a totally different - // implementation or potentially create a lazy map if the size of the map is above a threshold. + // If the requested header is not an O(1) header and the lazy map is not in use, we do a full + // scan. Doing the trie lookup is wasteful in the miss case, but is present for code consistency + // with other functions that do similar things. for (HeaderEntryImpl& header : headers_) { if (header.key() == key.get().c_str()) { - return &header; + ret.push_back(&header); } } - return nullptr; + return ret; } void HeaderMapImpl::iterate(HeaderMap::ConstIterateCb cb) const { @@ -485,7 +542,7 @@ void HeaderMapImpl::clear() { size_t HeaderMapImpl::removeIf(const HeaderMap::HeaderMatchPredicate& predicate) { const size_t old_size = headers_.size(); - headers_.remove_if([&predicate, this](const HeaderEntryImpl& entry) { + headers_.removeIf([&predicate, this](const HeaderEntryImpl& entry) { const bool to_remove = predicate(entry); if (to_remove) { // If this header should be removed, make sure any references in the @@ -508,17 +565,14 @@ size_t HeaderMapImpl::removeIf(const HeaderMap::HeaderMatchPredicate& predicate) } size_t HeaderMapImpl::remove(const LowerCaseString& key) { + const size_t old_size = headers_.size(); auto lookup = staticLookup(key.get()); if (lookup.has_value()) { - const size_t old_size = headers_.size(); removeInline(lookup.value().entry_); - return old_size - headers_.size(); } else { - // TODO(mattklein123): When the lazy map is implemented we can stop using removeIf() here. 
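A hedged sketch of the lazy-map behavior described above; the runtime value and header count are illustrative. With envoy.http.headermap.lazy_map_min_size set to, say, 10, smaller maps keep the existing linear scan, while larger maps build a key-to-entries map on first use and keep using it even if the header count later drops below the threshold.

  auto headers = Http::RequestHeaderMapImpl::create();
  for (int i = 0; i < 20; ++i) {
    headers->addCopy(Http::LowerCaseString(absl::StrCat("x-key-", i)), "value");
  }
  // With 20 (>= 10) headers, the first non-O(1) lookup builds the lazy map, so this is a hash
  // lookup rather than a walk over the list; remove() follows the same path.
  ASSERT(!headers->get(Http::LowerCaseString("x-key-7")).empty());
  headers->remove(Http::LowerCaseString("x-key-7"));
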
- return HeaderMapImpl::removeIf([&key](const HeaderEntry& entry) -> bool { - return key.get() == entry.key().getStringView(); - }); + subtractSize(headers_.remove(key.get())); } + return old_size - headers_.size(); } size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { @@ -543,7 +597,7 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl } addSize(key.get().size()); - std::list::iterator i = headers_.insert(key); + HeaderNode i = headers_.insert(key); i->entry_ = i; *entry = &(*i); return **entry; @@ -558,7 +612,7 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl } addSize(key.get().size() + value.size()); - std::list::iterator i = headers_.insert(key, std::move(value)); + HeaderNode i = headers_.insert(key, std::move(value)); i->entry_ = i; *entry = &(*i); return **entry; @@ -573,7 +627,7 @@ size_t HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) { const uint64_t size_to_subtract = entry->entry_->key().size() + entry->entry_->value().size(); subtractSize(size_to_subtract); *ptr_to_entry = nullptr; - headers_.erase(entry->entry_); + headers_.erase(entry->entry_, true); return 1; } diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index d4cb88bdcacb..375abeca3658 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -12,6 +12,7 @@ #include "common/common/non_copyable.h" #include "common/common/utility.h" #include "common/http/headers.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Http { @@ -85,7 +86,7 @@ class HeaderMapImpl : NonCopyable { void setReferenceKey(const LowerCaseString& key, absl::string_view value); void setCopy(const LowerCaseString& key, absl::string_view value); uint64_t byteSize() const; - const HeaderEntry* get(const LowerCaseString& key) const; + HeaderMap::GetResult get(const LowerCaseString& key) const; void iterate(HeaderMap::ConstIterateCb cb) const; void iterateReverse(HeaderMap::ConstIterateCb cb) const; void clear(); @@ -114,6 +115,7 @@ class HeaderMapImpl : NonCopyable { HeaderString value_; std::list::iterator entry_; }; + using HeaderNode = std::list::iterator; /** * This is the static lookup table that is used to determine whether a header is one of the O(1) @@ -170,6 +172,10 @@ class HeaderMapImpl : NonCopyable { /** * List of HeaderEntryImpl that keeps the pseudo headers (key starting with ':') in the front * of the list (as required by nghttp2) and otherwise maintains insertion order. + * When the list size is greater or equal to the envoy.http.headermap.lazy_map_min_size runtime + * feature value (or uint32_t max value if not set), all headers are added to a map, to allow + * fast access given a header key. Once the map is initialized, it will be used even if the number + * of headers decreases below the threshold. 
* * Note: the internal iterators held in fields make this unsafe to copy and move, since the * reference to end() is not preserved across a move (see Notes in @@ -180,59 +186,125 @@ class HeaderMapImpl : NonCopyable { */ class HeaderList : NonCopyable { public: - HeaderList() : pseudo_headers_end_(headers_.end()) {} + using HeaderNodeVector = absl::InlinedVector; + using HeaderLazyMap = absl::flat_hash_map; + + HeaderList() + : pseudo_headers_end_(headers_.end()), + lazy_map_min_size_(static_cast(Runtime::getInteger( + "envoy.http.headermap.lazy_map_min_size", std::numeric_limits::max()))) {} template bool isPseudoHeader(const Key& key) { return !key.getStringView().empty() && key.getStringView()[0] == ':'; } - template - std::list::iterator insert(Key&& key, Value&&... value) { + template HeaderNode insert(Key&& key, Value&&... value) { const bool is_pseudo_header = isPseudoHeader(key); - std::list::iterator i = - headers_.emplace(is_pseudo_header ? pseudo_headers_end_ : headers_.end(), - std::forward(key), std::forward(value)...); + HeaderNode i = headers_.emplace(is_pseudo_header ? pseudo_headers_end_ : headers_.end(), + std::forward(key), std::forward(value)...); + if (!lazy_map_.empty()) { + lazy_map_[i->key().getStringView()].push_back(i); + } if (!is_pseudo_header && pseudo_headers_end_ == headers_.end()) { pseudo_headers_end_ = i; } return i; } - std::list::iterator erase(std::list::iterator i) { + HeaderNode erase(HeaderNode i, bool remove_from_map) { if (pseudo_headers_end_ == i) { pseudo_headers_end_++; } + if (remove_from_map) { + lazy_map_.erase(i->key().getStringView()); + } return headers_.erase(i); } - template void remove_if(UnaryPredicate p) { - headers_.remove_if([&](const HeaderEntryImpl& entry) { - const bool to_remove = p(entry); - if (to_remove) { - if (pseudo_headers_end_ == entry.entry_) { - pseudo_headers_end_++; + template void removeIf(UnaryPredicate p) { + if (!lazy_map_.empty()) { + // Lazy map is used, iterate over its elements and remove those that satisfy the predicate + // from the map and from the list. + for (auto map_it = lazy_map_.begin(); map_it != lazy_map_.end();) { + auto& values_vec = map_it->second; + ASSERT(!values_vec.empty()); + // The following call to std::remove_if removes the elements that satisfy the + // UnaryPredicate and shifts the vector elements, but does not resize the vector. + // The call to erase that follows erases the unneeded cells (from remove_pos to the + // end) and modifies the vector's size. + const auto remove_pos = + std::remove_if(values_vec.begin(), values_vec.end(), [&](HeaderNode it) { + if (p(*(it->entry_))) { + // Remove the element from the list. + if (pseudo_headers_end_ == it->entry_) { + pseudo_headers_end_++; + } + headers_.erase(it); + return true; + } + return false; + }); + values_vec.erase(remove_pos, values_vec.end()); + + // If all elements were removed from the map entry, erase it. + if (values_vec.empty()) { + lazy_map_.erase(map_it++); + } else { + map_it++; } } - return to_remove; - }); + } else { + // The lazy map isn't used, iterate over the list elements and remove elements that satisfy + // the predicate. + headers_.remove_if([&](const HeaderEntryImpl& entry) { + const bool to_remove = p(entry); + if (to_remove) { + if (pseudo_headers_end_ == entry.entry_) { + pseudo_headers_end_++; + } + } + return to_remove; + }); + } } + /* + * Creates and populates a map if the number of headers is at least the + * envoy.http.headermap.lazy_map_min_size runtime feature value. 
+ * + * @return if a map was created. + */ + bool maybeMakeMap(); + + /* + * Removes a given key and its values from the HeaderList. + * + * @return the number of bytes that were removed. + */ + size_t remove(absl::string_view key); + std::list::iterator begin() { return headers_.begin(); } std::list::iterator end() { return headers_.end(); } std::list::const_iterator begin() const { return headers_.begin(); } std::list::const_iterator end() const { return headers_.end(); } std::list::const_reverse_iterator rbegin() const { return headers_.rbegin(); } std::list::const_reverse_iterator rend() const { return headers_.rend(); } + HeaderLazyMap::iterator mapFind(absl::string_view key) { return lazy_map_.find(key); } + HeaderLazyMap::iterator mapEnd() { return lazy_map_.end(); } size_t size() const { return headers_.size(); } bool empty() const { return headers_.empty(); } void clear() { headers_.clear(); pseudo_headers_end_ = headers_.end(); + lazy_map_.clear(); } private: std::list headers_; - std::list::iterator pseudo_headers_end_; + HeaderNode pseudo_headers_end_; + // The number of headers threshold for lazy map usage. + const uint32_t lazy_map_min_size_; + HeaderLazyMap lazy_map_; }; void insertByKey(HeaderString&& key, HeaderString&& value); @@ -241,7 +313,7 @@ class HeaderMapImpl : NonCopyable { HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key); HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key, HeaderString&& value); - HeaderEntry* getExisting(const LowerCaseString& key); + HeaderMap::NonConstGetResult getExisting(const LowerCaseString& key); size_t removeInline(HeaderEntryImpl** entry); void updateSize(uint64_t from_size, uint64_t to_size); void addSize(uint64_t size); @@ -296,7 +368,7 @@ template class TypedHeaderMapImpl : public HeaderMapImpl, publ HeaderMapImpl::setCopy(key, value); } uint64_t byteSize() const override { return HeaderMapImpl::byteSize(); } - const HeaderEntry* get(const LowerCaseString& key) const override { + HeaderMap::GetResult get(const LowerCaseString& key) const override { return HeaderMapImpl::get(key); } void iterate(HeaderMap::ConstIterateCb cb) const override { HeaderMapImpl::iterate(cb); } diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 3b1726d0304e..3e030010de4f 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -80,17 +80,6 @@ HeaderUtility::HeaderData::HeaderData(const envoy::config::route::v3::HeaderMatc } } -void HeaderUtility::getAllOfHeader(const HeaderMap& headers, absl::string_view key, - std::vector& out) { - headers.iterate([key = LowerCaseString(std::string(key)), - &out](const HeaderEntry& header) -> HeaderMap::Iterate { - if (header.key() == key.get().c_str()) { - out.emplace_back(header.value().getStringView()); - } - return HeaderMap::Iterate::Continue; - }); -} - bool HeaderUtility::matchHeaders(const HeaderMap& request_headers, const std::vector& config_headers) { // No headers to match is considered a match. @@ -105,39 +94,69 @@ bool HeaderUtility::matchHeaders(const HeaderMap& request_headers, return true; } +HeaderUtility::GetAllOfHeaderAsStringResult +HeaderUtility::getAllOfHeaderAsString(const HeaderMap& headers, const Http::LowerCaseString& key, + absl::string_view separator) { + GetAllOfHeaderAsStringResult result; + const auto header_value = headers.get(key); + + if (header_value.empty()) { + // Empty for clarity. 
Avoid handling the empty case in the block below if the runtime feature + // is disabled. + } else if (header_value.size() == 1 || + !Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http_match_on_all_headers")) { + result.result_ = header_value[0]->value().getStringView(); + } else { + // In this case we concatenate all found headers using a delimiter before performing the + // final match. We use an InlinedVector of absl::string_view to invoke the optimized join + // algorithm. This requires a copying phase before we invoke join. The 3 used as the inline + // size has been arbitrarily chosen. + // TODO(mattklein123): Do we need to normalize any whitespace here? + absl::InlinedVector string_view_vector; + string_view_vector.reserve(header_value.size()); + for (size_t i = 0; i < header_value.size(); i++) { + string_view_vector.push_back(header_value[i]->value().getStringView()); + } + result.result_backing_string_ = absl::StrJoin(string_view_vector, separator); + } + + return result; +} + bool HeaderUtility::matchHeaders(const HeaderMap& request_headers, const HeaderData& header_data) { - const HeaderEntry* header = request_headers.get(header_data.name_); + const auto header_value = getAllOfHeaderAsString(request_headers, header_data.name_); - if (header == nullptr) { + if (!header_value.result().has_value()) { return header_data.invert_match_ && header_data.header_match_type_ == HeaderMatchType::Present; } bool match; - const absl::string_view header_view = header->value().getStringView(); switch (header_data.header_match_type_) { case HeaderMatchType::Value: - match = header_data.value_.empty() || header_view == header_data.value_; + match = header_data.value_.empty() || header_value.result().value() == header_data.value_; break; case HeaderMatchType::Regex: - match = header_data.regex_->match(header_view); + match = header_data.regex_->match(header_value.result().value()); break; case HeaderMatchType::Range: { - int64_t header_value = 0; - match = absl::SimpleAtoi(header_view, &header_value) && - header_value >= header_data.range_.start() && header_value < header_data.range_.end(); + int64_t header_int_value = 0; + match = absl::SimpleAtoi(header_value.result().value(), &header_int_value) && + header_int_value >= header_data.range_.start() && + header_int_value < header_data.range_.end(); break; } case HeaderMatchType::Present: match = true; break; case HeaderMatchType::Prefix: - match = absl::StartsWith(header_view, header_data.value_); + match = absl::StartsWith(header_value.result().value(), header_data.value_); break; case HeaderMatchType::Suffix: - match = absl::EndsWith(header_view, header_data.value_); + match = absl::EndsWith(header_value.result().value(), header_data.value_); break; case HeaderMatchType::Contains: - match = absl::StrContains(header_view, header_data.value_); + match = absl::StrContains(header_value.result().value(), header_data.value_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index 27d2b9907361..0a1e23c46717 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -6,7 +6,6 @@ #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/http/header_map.h" #include "envoy/http/protocol.h" -#include "envoy/json/json_object.h" #include "envoy/type/v3/range.pb.h" #include "common/protobuf/protobuf.h" @@ -22,16 +21,34 @@ class HeaderUtility { enum class HeaderMatchType { Value, Regex, Range, Present, Prefix, Suffix, 
Contains }; /** - * Get all instances of the header key specified, and return the values in the vector provided. - * - * This should not be used for inline headers, as it turns a constant time lookup into O(n). - * - * @param headers the headers to return keys from - * @param key the header key to return values for - * @param out the vector to return values in + * Get all header values as a single string. Multiple headers are concatenated with ','. */ - static void getAllOfHeader(const HeaderMap& headers, absl::string_view key, - std::vector& out); + class GetAllOfHeaderAsStringResult { + public: + // The ultimate result of the concatenation. If absl::nullopt, no header values were found. + // If the final string required a string allocation, the memory is held in + // backingString(). This allows zero allocation in the common case of a single header + // value. + absl::optional result() const { + // This is safe for move/copy of this class as the backing string will be moved or copied. + // Otherwise result_ is valid. The assert verifies that both are empty or only 1 is set. + ASSERT((!result_.has_value() && result_backing_string_.empty()) || + (result_.has_value() ^ !result_backing_string_.empty())); + return !result_backing_string_.empty() ? result_backing_string_ : result_; + } + + const std::string& backingString() const { return result_backing_string_; } + + private: + absl::optional result_; + // Valid only if result_ relies on memory allocation that must live beyond the call. See above. + std::string result_backing_string_; + + friend class HeaderUtility; + }; + static GetAllOfHeaderAsStringResult getAllOfHeaderAsString(const HeaderMap& headers, + const Http::LowerCaseString& key, + absl::string_view separator = ","); // A HeaderData specifies one of exact value or regex or range element // to match in a request's header, specified in the header_match_type_ member. 
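A hedged illustration of getAllOfHeaderAsString() above: a single value is handed back as a string_view with no allocation, while (with the http_match_on_all_headers runtime feature enabled) multiple values are joined with the separator into the backing string before matching. The header name and values are examples.

  auto headers = Http::RequestHeaderMapImpl::create();
  headers->addCopy(Http::LowerCaseString("x-tag"), "a");
  headers->addCopy(Http::LowerCaseString("x-tag"), "b");
  const auto all = Http::HeaderUtility::getAllOfHeaderAsString(*headers,
                                                               Http::LowerCaseString("x-tag"));
  ASSERT(all.result().has_value());
  ASSERT(all.result().value() == "a,b");  // the joined copy lives in all.backingString()
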
diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 34f33c3aac00..13dac43ad29b 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -61,6 +61,7 @@ class CustomHeaderValues { const LowerCaseString AccessControlAllowCredentials{"access-control-allow-credentials"}; const LowerCaseString Authorization{"authorization"}; const LowerCaseString CacheControl{"cache-control"}; + const LowerCaseString CdnLoop{"cdn-loop"}; const LowerCaseString ContentEncoding{"content-encoding"}; const LowerCaseString Etag{"etag"}; const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; @@ -132,7 +133,6 @@ class HeaderValues { const LowerCaseString Cookie{"cookie"}; const LowerCaseString Date{"date"}; const LowerCaseString EnvoyAttemptCount{absl::StrCat(prefix(), "-attempt-count")}; - const LowerCaseString EnvoyAuthPartialBody{absl::StrCat(prefix(), "-auth-partial-body")}; const LowerCaseString EnvoyCluster{absl::StrCat(prefix(), "-cluster")}; const LowerCaseString EnvoyDegraded{absl::StrCat(prefix(), "-degraded")}; const LowerCaseString EnvoyDownstreamServiceCluster{ diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 2fb4325d9810..325fa94883a8 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -44,7 +44,6 @@ CODEC_LIB_DEPS = [ "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:status_lib", - "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 0ef6c9bef322..42b7f4dfce6a 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -18,7 +18,6 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/header_formatter.h" -#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/runtime/runtime_features.h" @@ -30,6 +29,8 @@ namespace Http { namespace Http1 { namespace { +// Changes or additions to details should be reflected in +// docs/root/configuration/http/http_conn_man/response_code_details_details.rst struct Http1ResponseCodeDetailValues { const absl::string_view TooManyHeaders = "http1.too_many_headers"; const absl::string_view HeadersTooLarge = "http1.headers_too_large"; @@ -263,9 +264,6 @@ void StreamEncoderImpl::endEncode() { } void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) { - if (!flood_protection_) { - return; - } // It's messy and complicated to try to tag the final write of an HTTP response for response // tracking for flood protection. Instead, write an empty buffer fragment after the response, // to allow for tracking. @@ -280,9 +278,6 @@ void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffe Status ServerConnectionImpl::doFloodProtectionChecks() const { ASSERT(dispatching_); - if (!flood_protection_) { - return okStatus(); - } // Before processing another request, make sure that we are below the response flood protection // threshold. if (outbound_responses_ >= max_outbound_responses_) { @@ -858,8 +853,6 @@ ServerConnectionImpl::ServerConnectionImpl( // maintainer team as it will otherwise be removed entirely soon. 
max_outbound_responses_( Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), - flood_protection_( - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), headers_with_underscores_action_(headers_with_underscores_action) {} uint32_t ServerConnectionImpl::getHeadersSize() { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 0b66ea129171..2ae00fb03400 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -545,7 +545,6 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // of Envoy wish to enable pipelining (which is dangerous and ill supported) // we could make this configurable. uint32_t max_outbound_responses_{}; - bool flood_protection_{}; // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated // thought as some of the reset and no header code paths make this difficult. Headers are // populated on message begin. Trailers are populated on the first parsed trailer field (if diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc index b0cc69da3220..332f1241eb7d 100644 --- a/source/common/http/http1/codec_impl_legacy.cc +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -16,7 +16,6 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/header_formatter.h" -#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/runtime/runtime_features.h" @@ -266,9 +265,6 @@ void StreamEncoderImpl::endEncode() { } void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) { - if (!flood_protection_) { - return; - } // It's messy and complicated to try to tag the final write of an HTTP response for response // tracking for flood protection. Instead, write an empty buffer fragment after the response, // to allow for tracking. @@ -282,9 +278,6 @@ void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffe } void ServerConnectionImpl::doFloodProtectionChecks() const { - if (!flood_protection_) { - return; - } // Before processing another request, make sure that we are below the response flood protection // threshold. if (outbound_responses_ >= max_outbound_responses_) { @@ -801,8 +794,6 @@ ServerConnectionImpl::ServerConnectionImpl( // maintainer team as it will otherwise be removed entirely soon. max_outbound_responses_( Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), - flood_protection_( - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), headers_with_underscores_action_(headers_with_underscores_action) {} uint32_t ServerConnectionImpl::getHeadersSize() { diff --git a/source/common/http/http1/codec_impl_legacy.h b/source/common/http/http1/codec_impl_legacy.h index 01c8a51aea25..8a1b68b0fad4 100644 --- a/source/common/http/http1/codec_impl_legacy.h +++ b/source/common/http/http1/codec_impl_legacy.h @@ -519,7 +519,6 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // of Envoy wish to enable pipelining (which is dangerous and ill supported) // we could make this configurable. 
uint32_t max_outbound_responses_{}; - bool flood_protection_{}; // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated // thought as some of the reset and no header code paths make this difficult. Headers are // populated on message begin. Trailers are populated on the first parsed trailer field (if diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index a4215c0cf49a..3115e743e88f 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -23,8 +23,8 @@ namespace Envoy { namespace Http { namespace Http1 { -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, +ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, @@ -32,7 +32,8 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { upstream_ready_enabled_ = false; onUpstreamReady(); - })) {} + })), + random_generator_(random_generator) {} ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } @@ -69,7 +70,6 @@ void ConnPoolImpl::onResponseComplete(ActiveClient& client) { ConnPoolImpl::StreamWrapper::StreamWrapper(ResponseDecoder& response_decoder, ActiveClient& parent) : RequestEncoderWrapper(parent.codec_client_->newStream(*this)), ResponseDecoderWrapper(response_decoder), parent_(parent) { - RequestEncoderWrapper::inner_.getStream().addCallbacks(*this); } @@ -132,17 +132,17 @@ RequestEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(ResponseDecoder& re CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_), - data.host_description_, dispatcher_)}; + data.host_description_, dispatcher_, random_generator_)}; return codec; } ConnectionPool::InstancePtr -allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, +allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); + return std::make_unique( + dispatcher, random_generator, host, priority, options, transport_socket_options); } } // namespace Http1 diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 75783d7fae9d..895e6fba6673 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -19,8 +19,8 @@ namespace Http1 { */ class ConnPoolImpl : public Http::HttpConnPoolImplBase { public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, + ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority 
priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); @@ -82,6 +82,7 @@ class ConnPoolImpl : public Http::HttpConnPoolImplBase { Event::SchedulableCallbackPtr upstream_ready_cb_; bool upstream_ready_enabled_{false}; + Random::RandomGenerator& random_generator_; }; /** @@ -89,19 +90,15 @@ class ConnPoolImpl : public Http::HttpConnPoolImplBase { */ class ProdConnPoolImpl : public ConnPoolImpl { public: - ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options) {} + using ConnPoolImpl::ConnPoolImpl; // ConnPoolImpl CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; }; ConnectionPool::InstancePtr -allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, +allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 5ccf63147d5f..b1dda984d053 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -22,6 +22,7 @@ CODEC_LIB_DEPS = [ ":codec_stats_lib", ":metadata_decoder_lib", ":metadata_encoder_lib", + ":protocol_constraints_lib", "//include/envoy/event:deferred_deletable", "//include/envoy/event:dispatcher_interface", "//include/envoy/http:codec_interface", @@ -131,3 +132,17 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", ], ) + +envoy_cc_library( + name = "protocol_constraints_lib", + srcs = ["protocol_constraints.cc"], + hdrs = ["protocol_constraints.h"], + deps = [ + ":codec_stats_lib", + "//bazel/foreign_cc:nghttp2", + "//include/envoy/network:connection_interface", + "//source/common/common:assert_lib", + "//source/common/http:status_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index e75c9775e708..4af93895de4a 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -28,6 +28,8 @@ namespace Envoy { namespace Http { namespace Http2 { +// Changes or additions to details should be reflected in +// docs/root/configuration/http/http_conn_man/response_code_details_details.rst class Http2ResponseCodeDetailValues { public: // Invalid HTTP header field was received and stream is going to be @@ -45,6 +47,10 @@ class Http2ResponseCodeDetailValues { const absl::string_view inbound_empty_frame_flood = "http2.inbound_empty_frames_flood"; // Envoy was configured to drop requests with header keys beginning with underscores. const absl::string_view invalid_underscore = "http2.unexpected_underscore"; + // The peer refused the stream. + const absl::string_view remote_refused = "http2.remote_refuse"; + // The peer reset the stream. 
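In the conn_pool.h hunk above, ProdConnPoolImpl's hand-written forwarding constructor is replaced with `using ConnPoolImpl::ConnPoolImpl;`. A minimal sketch of that inherited-constructor pattern, with placeholder class names rather than the real pool types:

#include <string>

class ConnPoolBase {
public:
  ConnPoolBase(std::string host, unsigned priority)
      : host_(std::move(host)), priority_(priority) {}
  virtual ~ConnPoolBase() = default;

protected:
  std::string host_;
  unsigned priority_;
};

class ProdConnPool : public ConnPoolBase {
public:
  using ConnPoolBase::ConnPoolBase; // inherit the base constructor instead of forwarding by hand
};

int main() {
  ProdConnPool pool("example-host", 1); // calls the inherited ConnPoolBase constructor
}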
+ const absl::string_view remote_reset = "http2.remote_reset"; const absl::string_view errorDetails(int error_code) const { switch (error_code) { @@ -58,6 +64,17 @@ class Http2ResponseCodeDetailValues { } }; +int reasonToReset(StreamResetReason reason) { + switch (reason) { + case StreamResetReason::LocalRefusedStreamReset: + return NGHTTP2_REFUSED_STREAM; + case StreamResetReason::ConnectError: + return NGHTTP2_CONNECT_ERROR; + default: + return NGHTTP2_NO_ERROR; + } +} + using Http2ResponseCodeDetails = ConstSingleton; bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, @@ -160,16 +177,10 @@ void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector local_end_stream_ = end_stream; submitHeaders(final_headers, end_stream ? nullptr : &provider); - auto status = parent_.sendPendingFrames(); - // The RELEASE_ASSERT below does not change the existing behavior of `sendPendingFrames()`. - // The `sendPendingFrames()` used to throw on errors and the only method that was catching - // these exceptions was the `dispatch()`. The `dispatch()` method still checks and handles - // errors returned by the `sendPendingFrames()`. - // Other callers of `sendPendingFrames()` do not catch exceptions from this method and - // would cause abnormal process termination in error cases. This change replaces abnormal - // process termination from unhandled exception with the RELEASE_ASSERT. - // Further work will replace this RELEASE_ASSERT with proper error handling. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, @@ -237,9 +248,10 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { } } else { submitTrailers(trailers); - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } } @@ -252,9 +264,10 @@ void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadat for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { submitMetadata(flags); } - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::StreamImpl::readDisable(bool disable) { @@ -269,9 +282,10 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) { if (!buffersOverrun()) { nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); unconsumed_bytes_ = 0; - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
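The new reasonToReset() helper above maps a local stream reset reason to the HTTP/2 error code carried on the outgoing RST_STREAM frame, adding CONNECT_ERROR alongside REFUSED_STREAM. A self-contained sketch of that mapping; the enum values stand in for the NGHTTP2_* constants (the numeric values follow RFC 7540), and StreamResetReason is trimmed to the cases used here:

#include <cstdint>

enum Http2ErrorCode : uint32_t {
  kNoError = 0x0,       // NGHTTP2_NO_ERROR
  kRefusedStream = 0x7, // NGHTTP2_REFUSED_STREAM
  kConnectError = 0xa,  // NGHTTP2_CONNECT_ERROR
};

enum class StreamResetReason { LocalReset, LocalRefusedStreamReset, ConnectError, RemoteReset };

// Local reset reasons are translated to the HTTP/2 error code sent on RST_STREAM.
uint32_t reasonToReset(StreamResetReason reason) {
  switch (reason) {
  case StreamResetReason::LocalRefusedStreamReset:
    return kRefusedStream;
  case StreamResetReason::ConnectError:
    return kConnectError;
  default:
    return kNoError;
  }
}

int main() { return reasonToReset(StreamResetReason::ConnectError) == kConnectError ? 0 : 1; }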
- RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } } } @@ -397,27 +411,25 @@ ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* } } -Status ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { +void ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we // "just know" that the frame header is 9 bytes. // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback static const uint64_t FRAME_HEADER_SIZE = 9; - parent_.outbound_data_frames_++; + parent_.protocol_constraints_.incrementOutboundDataFrameCount(); Buffer::OwnedImpl output; - auto status = parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE); - if (!status.ok()) { + parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE); + if (!parent_.protocol_constraints_.checkOutboundFrameLimits().ok()) { ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", parent_.connection_); setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood); - return status; } parent_.stats_.pending_send_bytes_.sub(length); output.move(pending_send_data_, length); parent_.connection_.write(output, false); - return status; } void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, @@ -453,9 +465,10 @@ void ConnectionImpl::StreamImpl::onPendingFlushTimer() { // This will emit a reset frame for this stream and close the stream locally. No reset callbacks // will be run because higher layers think the stream is already finished. resetStreamWorker(StreamResetReason::LocalReset); - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { @@ -479,9 +492,10 @@ void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool e data_deferred_ = false; } - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } if (local_end_stream_ && pending_send_data_.length() > 0) { createPendingFlushTimer(); } @@ -505,16 +519,15 @@ void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces // the cleanup logic to run which will reset the stream in all cases if all data frames could not // be sent. - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
- RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_, - reason == StreamResetReason::LocalRefusedStreamReset - ? NGHTTP2_REFUSED_STREAM - : NGHTTP2_NO_ERROR); + reasonToReset(reason)); ASSERT(rc == 0); } @@ -540,6 +553,7 @@ void ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map } ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + Random::RandomGenerator& random_generator, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_headers_kb, const uint32_t max_headers_count) : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb), @@ -547,19 +561,27 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), stream_error_on_invalid_http_messaging_( http2_options.override_stream_error_on_invalid_http_message().value()), - max_outbound_frames_(http2_options.max_outbound_frames().value()), - frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), - max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), - control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), - max_consecutive_inbound_frames_with_empty_payload_( - http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), - max_inbound_priority_frames_per_stream_( - http2_options.max_inbound_priority_frames_per_stream().value()), - max_inbound_window_update_frames_per_data_frame_sent_( - http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + protocol_constraints_(stats, http2_options), skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), - dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} + dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false), + random_(random_generator) { + if (http2_options.has_connection_keepalive()) { + keepalive_interval_ = std::chrono::milliseconds( + PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), interval)); + keepalive_timeout_ = std::chrono::milliseconds( + PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), timeout)); + keepalive_interval_jitter_percent_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( + http2_options.connection_keepalive(), interval_jitter, 15.0); + + keepalive_send_timer_ = connection.dispatcher().createTimer([this]() { sendKeepalive(); }); + keepalive_timeout_timer_ = + connection.dispatcher().createTimer([this]() { onKeepaliveResponseTimeout(); }); + + // This call schedules the initial interval, with jitter. + onKeepaliveResponse(); + } +} ConnectionImpl::~ConnectionImpl() { for (const auto& stream : active_streams_) { @@ -568,6 +590,44 @@ ConnectionImpl::~ConnectionImpl() { nghttp2_session_del(session_); } +void ConnectionImpl::sendKeepalive() { + // Include the current time as the payload to help with debugging. 
+ SystemTime now = connection_.dispatcher().timeSource().systemTime(); + uint64_t ms_since_epoch = + std::chrono::duration_cast(now.time_since_epoch()).count(); + ENVOY_CONN_LOG(trace, "Sending keepalive PING {}", connection_, ms_since_epoch); + + // The last parameter is an opaque 8-byte buffer, so this cast is safe. + int rc = nghttp2_submit_ping(session_, 0 /*flags*/, reinterpret_cast(&ms_since_epoch)); + ASSERT(rc == 0); + + if (sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } + keepalive_timeout_timer_->enableTimer(keepalive_timeout_); +} +void ConnectionImpl::onKeepaliveResponse() { + // Check the timers for nullptr in case the peer sent an unsolicited PING ACK. + if (keepalive_timeout_timer_ != nullptr) { + keepalive_timeout_timer_->disableTimer(); + } + if (keepalive_send_timer_ != nullptr) { + uint64_t interval_ms = keepalive_interval_.count(); + const uint64_t jitter_percent_mod = keepalive_interval_jitter_percent_ * interval_ms / 100; + if (jitter_percent_mod > 0) { + interval_ms += random_.random() % jitter_percent_mod; + } + keepalive_send_timer_->enableTimer(std::chrono::milliseconds(interval_ms)); + } +} + +void ConnectionImpl::onKeepaliveResponseTimeout() { + ENVOY_CONN_LOG(debug, "Closing connection due to keepalive timeout", connection_); + stats_.keepalive_timeout_.inc(); + connection_.close(Network::ConnectionCloseType::NoFlush); +} + Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either // throw an exception or return an error status. The utility wrapper catches exceptions and @@ -636,18 +696,20 @@ void ConnectionImpl::goAway() { NGHTTP2_NO_ERROR, nullptr, 0); ASSERT(rc == 0); - auto status = sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::shutdownNotice() { int rc = nghttp2_submit_shutdown_notice(session_); ASSERT(rc == 0); - auto status = sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); + if (sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } Status ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { @@ -683,6 +745,19 @@ Status ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { // and CONTINUATION frames in onBeforeFrameReceived(). ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); + if ((frame->hd.type == NGHTTP2_PING) && (frame->ping.hd.flags & NGHTTP2_FLAG_ACK)) { + // The ``opaque_data`` should be exactly what was sent in the ping, which is + // was the current time when the ping was sent. This can be useful while debugging + // to match the ping and ack. 
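onKeepaliveResponse() above both cancels the timeout timer and schedules the next PING at the configured interval plus a random jitter of up to interval_jitter percent. A rough sketch of just that interval computation, assuming a 30s interval (an illustrative config value) and the 15% default from the diff, with std::mt19937_64 standing in for Envoy's Random::RandomGenerator:

#include <chrono>
#include <cstdint>
#include <iostream>
#include <random>

int main() {
  const std::chrono::milliseconds keepalive_interval{30000}; // assumed configured interval
  const double jitter_percent = 15.0;                        // proto default in the diff

  std::mt19937_64 random{std::random_device{}()};

  // Next PING fires at interval + (random % (jitter_percent * interval / 100)) milliseconds.
  uint64_t interval_ms = keepalive_interval.count();
  const uint64_t jitter_mod = static_cast<uint64_t>(jitter_percent * interval_ms / 100);
  if (jitter_mod > 0) {
    interval_ms += random() % jitter_mod;
  }
  std::cout << "next keepalive PING in " << interval_ms << " ms\n";
}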
+ uint64_t data; + static_assert(sizeof(data) == sizeof(frame->ping.opaque_data), "Sizes are equal"); + memcpy(&data, frame->ping.opaque_data, sizeof(data)); + ENVOY_CONN_LOG(trace, "recv PING ACK {}", connection_, data); + + onKeepaliveResponse(); + return okStatus(); + } + if (frame->hd.type == NGHTTP2_DATA) { RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->data.padlen)); } @@ -852,48 +927,21 @@ int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { return 0; } -Status ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { - ++outbound_frames_; - if (is_outbound_flood_monitored_control_frame) { - ++outbound_control_frames_; - } - return checkOutboundQueueLimits(); -} - -Status ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, - size_t length) { +void ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, + size_t length) { // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the // onBeforeFrameSend callback is not called for DATA frames. bool is_outbound_flood_monitored_control_frame = false; std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); - RETURN_IF_ERROR(incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame)); - + auto releasor = trackOutboundFrames(is_outbound_flood_monitored_control_frame); output.add(data, length); - output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ - : frame_buffer_releasor_); - return okStatus(); + output.addDrainTracker(releasor); } -void ConnectionImpl::releaseOutboundFrame() { - ASSERT(outbound_frames_ >= 1); - --outbound_frames_; -} - -void ConnectionImpl::releaseOutboundControlFrame() { - ASSERT(outbound_control_frames_ >= 1); - --outbound_control_frames_; - releaseOutboundFrame(); -} - -StatusOr ConnectionImpl::onSend(const uint8_t* data, size_t length) { +ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); Buffer::OwnedImpl buffer; - auto status = addOutboundFrameFragment(buffer, data, length); - if (!status.ok()) { - ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", - connection_); - return status; - } + addOutboundFrameFragment(buffer, data, length); // While the buffer is transient the fragment it contains will be moved into the // write_buffer_ of the underlying connection_ by the write method below. @@ -922,8 +970,17 @@ int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { // the connection. reason = StreamResetReason::LocalReset; } else { - reason = error_code == NGHTTP2_REFUSED_STREAM ? 
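The PING ACK handling above relies on the 8-byte opaque PING payload carrying the send timestamp, copied in and out with memcpy. A small sketch of just that payload round-trip (frame submission and the nghttp2 callbacks are omitted):

#include <chrono>
#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  const uint64_t ms_since_epoch =
      std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::system_clock::now().time_since_epoch())
          .count();

  // Sender side: exactly 8 bytes of opaque data are placed in the PING frame.
  uint8_t opaque_data[8];
  static_assert(sizeof(opaque_data) == sizeof(ms_since_epoch), "Sizes are equal");
  std::memcpy(opaque_data, &ms_since_epoch, sizeof(opaque_data));

  // Receiver of the PING ACK: copy the bytes back into an integer for logging/debugging.
  uint64_t echoed;
  std::memcpy(&echoed, opaque_data, sizeof(echoed));
  std::cout << "recv PING ACK " << echoed << "\n";
}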
StreamResetReason::RemoteRefusedStreamReset - : StreamResetReason::RemoteReset; + if (error_code == NGHTTP2_REFUSED_STREAM) { + reason = StreamResetReason::RemoteRefusedStreamReset; + stream->setDetails(Http2ResponseCodeDetails::get().remote_refused); + } else { + if (error_code == NGHTTP2_CONNECT_ERROR) { + reason = StreamResetReason::ConnectError; + } else { + reason = StreamResetReason::RemoteReset; + } + stream->setDetails(Http2ResponseCodeDetails::get().remote_reset); + } } stream->runResetCallbacks(reason); @@ -1022,16 +1079,6 @@ Status ConnectionImpl::sendPendingFrames() { const int rc = nghttp2_session_send(session_); if (rc != 0) { ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); - - if (!nghttp2_callback_status_.ok()) { - return nghttp2_callback_status_; - } - - // The frame flood error should set the nghttp2_callback_status_ error, and return at the - // statement above. - ASSERT(outbound_frames_ <= max_outbound_frames_ && - outbound_control_frames_ <= max_outbound_control_frames_); - return codecProtocolError(nghttp2_strerror(rc)); } @@ -1056,7 +1103,23 @@ Status ConnectionImpl::sendPendingFrames() { } RETURN_IF_ERROR(sendPendingFrames()); } - return okStatus(); + + // After all pending frames have been written into the outbound buffer check if any of + // protocol constraints had been violated. + Status status = protocol_constraints_.checkOutboundFrameLimits(); + if (!status.ok()) { + ENVOY_CONN_LOG(debug, "error sending frames: Too many frames in the outbound queue.", + connection_); + } + return status; +} + +bool ConnectionImpl::sendPendingFramesAndHandleError() { + if (!sendPendingFrames().ok()) { + scheduleProtocolConstraintViolationCallback(); + return true; + } + return false; } void ConnectionImpl::sendSettings( @@ -1129,18 +1192,26 @@ int ConnectionImpl::setAndCheckNghttp2CallbackStatus(Status&& status) { return nghttp2_callback_status_.ok() ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; } +void ConnectionImpl::scheduleProtocolConstraintViolationCallback() { + if (!protocol_constraint_violation_callback_) { + protocol_constraint_violation_callback_ = connection_.dispatcher().createSchedulableCallback( + [this]() { onProtocolConstraintViolation(); }); + protocol_constraint_violation_callback_->scheduleCallbackCurrentIteration(); + } +} + +void ConnectionImpl::onProtocolConstraintViolation() { + // Flooded outbound queue implies that peer is not reading and it does not + // make sense to try to flush pending bytes. 
+ connection_.close(Envoy::Network::ConnectionCloseType::NoFlush); +} + ConnectionImpl::Http2Callbacks::Http2Callbacks() { nghttp2_session_callbacks_new(&callbacks_); nghttp2_session_callbacks_set_send_callback( callbacks_, [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - auto status_or_len = static_cast(user_data)->onSend(data, length); - if (status_or_len.ok()) { - return status_or_len.value(); - } - auto status = status_or_len.status(); - return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( - std::move(status)); + return static_cast(user_data)->onSend(data, length); }); nghttp2_session_callbacks_set_send_data_callback( @@ -1148,9 +1219,8 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length, nghttp2_data_source* source, void*) -> int { ASSERT(frame->data.padlen == 0); - auto status = static_cast(source->ptr)->onDataSourceSend(framehd, length); - return static_cast(source->ptr) - ->parent_.setAndCheckNghttp2CallbackStatus(std::move(status)); + static_cast(source->ptr)->onDataSourceSend(framehd, length); + return 0; }); nghttp2_session_callbacks_set_on_begin_headers_callback( @@ -1305,10 +1375,11 @@ ConnectionImpl::ClientHttp2Options::ClientHttp2Options( ClientConnectionImpl::ClientConnectionImpl( Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats, + Random::RandomGenerator& random_generator, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count, Nghttp2SessionFactory& http2_session_factory) - : ConnectionImpl(connection, stats, http2_options, max_response_headers_kb, + : ConnectionImpl(connection, stats, random_generator, http2_options, max_response_headers_kb, max_response_headers_count), callbacks_(callbacks) { ClientHttp2Options client_http2_options(http2_options); @@ -1355,11 +1426,12 @@ int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na ServerConnectionImpl::ServerConnectionImpl( Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats, + Random::RandomGenerator& random_generator, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : ConnectionImpl(connection, stats, http2_options, max_request_headers_kb, + : ConnectionImpl(connection, stats, random_generator, http2_options, max_request_headers_kb, max_request_headers_count), callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { Http2Options h2_options(http2_options); @@ -1410,86 +1482,25 @@ Status ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", connection_, static_cast(hd->type), static_cast(hd->flags), static_cast(hd->length), padding_length); - switch (hd->type) { - case NGHTTP2_HEADERS: - case NGHTTP2_CONTINUATION: - // Track new streams. - if (hd->flags & NGHTTP2_FLAG_END_HEADERS) { - inbound_streams_++; - } - FALLTHRU; - case NGHTTP2_DATA: - // Track frames with an empty payload and no end stream flag. 
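sendPendingFramesAndHandleError() and scheduleProtocolConstraintViolationCallback() above replace the RELEASE_ASSERTs: when frames cannot be flushed from a non-dispatching context, the codec schedules a one-shot callback that closes the connection without flushing instead of aborting the process. A minimal sketch of that deferred-close idiom; FakeDispatcher and Codec are stand-ins, not Envoy classes:

#include <functional>
#include <iostream>
#include <vector>

// Stand-in for a dispatcher that runs callbacks at the end of the current event-loop iteration.
class FakeDispatcher {
public:
  void post(std::function<void()> cb) { queue_.push_back(std::move(cb)); }
  void drain() {
    for (auto& cb : queue_) {
      cb();
    }
    queue_.clear();
  }

private:
  std::vector<std::function<void()>> queue_;
};

class Codec {
public:
  explicit Codec(FakeDispatcher& d) : dispatcher_(d) {}

  // Called from non-dispatching contexts (encodeHeaders, goAway, ...). On failure,
  // remember the violation and close the connection later, outside the codec callback stack.
  bool sendPendingFramesAndHandleError(bool frames_ok) {
    if (frames_ok) {
      return false;
    }
    scheduleProtocolConstraintViolationCallback();
    return true;
  }

private:
  void scheduleProtocolConstraintViolationCallback() {
    if (scheduled_) {
      return; // Schedule at most once per violation.
    }
    scheduled_ = true;
    dispatcher_.post([this]() { onProtocolConstraintViolation(); });
  }
  void onProtocolConstraintViolation() {
    // A flooded outbound queue implies the peer is not reading; close without flushing.
    std::cout << "closing connection (no flush)\n";
  }

  FakeDispatcher& dispatcher_;
  bool scheduled_{false};
};

int main() {
  FakeDispatcher dispatcher;
  Codec codec(dispatcher);
  codec.sendPendingFramesAndHandleError(/*frames_ok=*/false);
  codec.sendPendingFramesAndHandleError(/*frames_ok=*/false); // deduplicated
  dispatcher.drain();                                         // prints once
}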
- if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) { - ENVOY_CONN_LOG(trace, "frame with an empty payload and no end stream flag.", connection_); - consecutive_inbound_frames_with_empty_payload_++; - } else { - consecutive_inbound_frames_with_empty_payload_ = 0; - } - break; - case NGHTTP2_PRIORITY: - inbound_priority_frames_++; - break; - case NGHTTP2_WINDOW_UPDATE: - inbound_window_update_frames_++; - break; - default: - break; - } - return checkInboundFrameLimits(hd->stream_id); -} - -Status ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { - ASSERT(dispatching_downstream_data_); - ConnectionImpl::StreamImpl* stream = getStream(stream_id); - - if (consecutive_inbound_frames_with_empty_payload_ > - max_consecutive_inbound_frames_with_empty_payload_) { - ENVOY_CONN_LOG(trace, - "error reading frame: Too many consecutive frames with an empty payload " - "received in this HTTP/2 session.", - connection_); - if (stream) { - stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); + auto result = protocol_constraints_.trackInboundFrames(hd, padding_length); + if (!result.ok()) { + ENVOY_CONN_LOG(trace, "error reading frame: {} received in this HTTP/2 session.", connection_, + result.message()); + if (isInboundFramesWithEmptyPayloadError(result)) { + ConnectionImpl::StreamImpl* stream = getStream(hd->stream_id); + if (stream) { + stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); + } } - stats_.inbound_empty_frames_flood_.inc(); - return bufferFloodError("Too many consecutive frames with an empty payload"); - } - - if (inbound_priority_frames_ > - static_cast(max_inbound_priority_frames_per_stream_) * (1 + inbound_streams_)) { - ENVOY_CONN_LOG(trace, - "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", - connection_); - stats_.inbound_priority_frames_flood_.inc(); - return bufferFloodError("Too many PRIORITY frames"); } - - if (inbound_window_update_frames_ > - 1 + 2 * (inbound_streams_ + - max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) { - ENVOY_CONN_LOG( - trace, - "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", - connection_); - stats_.inbound_window_update_frames_flood_.inc(); - return bufferFloodError("Too many WINDOW_UPDATE frames"); - } - - return okStatus(); + return result; } -Status ServerConnectionImpl::checkOutboundQueueLimits() { - if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { - stats_.outbound_flood_.inc(); - return bufferFloodError("Too many frames in the outbound queue."); - } - if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { - stats_.outbound_control_flood_.inc(); - return bufferFloodError("Too many control frames in the outbound queue."); - } - return okStatus(); +ProtocolConstraints::ReleasorProc +ServerConnectionImpl::trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) { + return protocol_constraints_.incrementOutboundFrameCount( + is_outbound_flood_monitored_control_frame); } Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { @@ -1501,14 +1512,8 @@ Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { } Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { - ASSERT(!dispatching_downstream_data_); - dispatching_downstream_data_ = true; - - // Make sure the dispatching_downstream_data_ is set to false when innerDispatch ends. 
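The inbound flood checks removed above (and re-homed in ProtocolConstraints) enforce per-connection limits derived from the stream and DATA frame counts. A self-contained sketch of the two formulas, using the proto defaults quoted in the removed comments; the struct and field names are illustrative, not the real ProtocolConstraints members:

#include <cassert>
#include <cstdint>

struct FloodCounters {
  uint64_t inbound_priority_frames{0};
  uint64_t inbound_window_update_frames{0};
  uint64_t inbound_streams{0};
  uint64_t outbound_data_frames{0};
};

struct FloodLimits {
  uint32_t max_inbound_priority_frames_per_stream{100};               // proto default
  uint32_t max_inbound_window_update_frames_per_data_frame_sent{10};  // proto default
};

// Returns true when either limit is exceeded and the connection should be torn down.
bool floodDetected(const FloodCounters& c, const FloodLimits& l) {
  const bool too_many_priority =
      c.inbound_priority_frames >
      static_cast<uint64_t>(l.max_inbound_priority_frames_per_stream) * (1 + c.inbound_streams);
  const bool too_many_window_update =
      c.inbound_window_update_frames >
      1 + 2 * (c.inbound_streams +
               l.max_inbound_window_update_frames_per_data_frame_sent * c.outbound_data_frames);
  return too_many_priority || too_many_window_update;
}

int main() {
  FloodCounters c;
  c.inbound_priority_frames = 101; // exceeds 100 * (1 + 0 streams)
  assert(floodDetected(c, FloodLimits{}));
}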
- Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); - // Make sure downstream outbound queue was not flooded by the upstream frames. - RETURN_IF_ERROR(checkOutboundQueueLimits()); + RETURN_IF_ERROR(protocol_constraints_.checkOutboundFrameLimits()); return ConnectionImpl::innerDispatch(data); } diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index dbc13068346a..c3c883f7dfd6 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -7,6 +7,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/protocol.pb.h" #include "envoy/event/deferred_deletable.h" #include "envoy/http/codec.h" @@ -23,6 +24,7 @@ #include "common/http/http2/codec_stats.h" #include "common/http/http2/metadata_decoder.h" #include "common/http/http2/metadata_encoder.h" +#include "common/http/http2/protocol_constraints.h" #include "common/http/status.h" #include "common/http/utility.h" @@ -91,6 +93,7 @@ class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { class ConnectionImpl : public virtual Connection, protected Logger::Loggable { public: ConnectionImpl(Network::Connection& connection, CodecStats& stats, + Random::RandomGenerator& random_generator, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_headers_kb, const uint32_t max_headers_count); @@ -186,7 +189,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, const HeaderMap& headers); void saveHeader(HeaderString&& name, HeaderString&& value); @@ -274,6 +277,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable void { this->pendingRecvBufferLowWatermark(); }, [this]() -> void { this->pendingRecvBufferHighWatermark(); }, @@ -405,7 +414,28 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_streams_; @@ -451,64 +488,14 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable frame_buffer_releasor_; - // This counter keeps track of the number of outbound frames of types PING, SETTINGS and - // RST_STREAM (these that were buffered in the underlying connection but not yet written into the - // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is - // terminated. - uint32_t outbound_control_frames_ = 0; - // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from - // corresponding http2_protocol_options. Default value is 1000. - const uint32_t max_outbound_control_frames_; - const std::function control_frame_buffer_releasor_; - // This counter keeps track of the number of consecutive inbound frames of types HEADERS, - // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds - // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. - uint32_t consecutive_inbound_frames_with_empty_payload_ = 0; - // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without - // a payload. Initialized from corresponding http2_protocol_options. Default value is 1. - const uint32_t max_consecutive_inbound_frames_with_empty_payload_; - - // This counter keeps track of the number of inbound streams. - uint32_t inbound_streams_ = 0; - // This counter keeps track of the number of inbound PRIORITY frames. 
If this counter exceeds - // the value calculated using this formula: - // - // max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_) - // - // the connection is terminated. - uint64_t inbound_priority_frames_ = 0; - // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding - // http2_protocol_options. Default value is 100. - const uint32_t max_inbound_priority_frames_per_stream_; - - // This counter keeps track of the number of inbound WINDOW_UPDATE frames. If this counter exceeds - // the value calculated using this formula: - // - // 1 + 2 * (inbound_streams_ + - // max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_) - // - // the connection is terminated. - uint64_t inbound_window_update_frames_ = 0; - // This counter keeps track of the number of outbound DATA frames. - uint64_t outbound_data_frames_ = 0; - // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized - // from corresponding http2_protocol_options. Default value is 10. - const uint32_t max_inbound_window_update_frames_per_data_frame_sent_; + ProtocolConstraints protocol_constraints_; // For the flood mitigation to work the onSend callback must be called once for each outbound // frame. This is what the nghttp2 library is doing, however this is not documented. The // Http2FloodMitigationTest.* tests in test/integration/http2_integration_test.cc will break if // this changes in the future. Also it is important that onSend does not do partial writes, as the // nghttp2 library will keep calling this callback to write the rest of the frame. - StatusOr onSend(const uint8_t* data, size_t length); + ssize_t onSend(const uint8_t* data, size_t length); // Some browsers (e.g. WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of @@ -532,19 +519,26 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view header_name) override; // Http::Connection // The reason for overriding the dispatch method is to do flood mitigation only when // processing data from downstream client. Doing flood mitigation when processing upstream // responses makes clean-up tricky, which needs to be improved (see comments for the - // ClientConnectionImpl::checkOutboundQueueLimits method). The dispatch method on the + // ClientConnectionImpl::checkProtocolConstraintsStatus method). The dispatch method on the // ServerConnectionImpl objects is called only when processing data from the downstream client in // the ConnectionManagerImpl::onData method. Http::Status dispatch(Buffer::Instance& data) override; @@ -618,10 +613,6 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { ServerConnectionCallbacks& callbacks_; - // This flag indicates that downstream data is being dispatched and turns on flood mitigation - // in the checkMaxOutbound*Framed methods. - bool dispatching_downstream_data_{false}; - // The action to take when a request header name contains underscore characters. 
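The removed frame_buffer_releasor_ / control_frame_buffer_releasor_ members show how outbound frame accounting works: each buffered frame bumps a counter and registers a drain tracker that decrements it once the bytes are actually written. A rough sketch of that counter-plus-releasor pattern (an assumed shape; the real ProtocolConstraints additionally tracks control frames separately and returns its own ReleasorProc type):

#include <cassert>
#include <cstdint>
#include <functional>

class OutboundFrameCounter {
public:
  using Releasor = std::function<void()>;

  // Bump the counter and return a callable that undoes the bump when the frame drains.
  Releasor increment() {
    ++outbound_frames_;
    return [this]() {
      assert(outbound_frames_ >= 1);
      --outbound_frames_;
    };
  }
  uint64_t outstanding() const { return outbound_frames_; }

private:
  uint64_t outbound_frames_{0};
};

int main() {
  OutboundFrameCounter counter;
  auto releasor = counter.increment(); // frame buffered for write
  assert(counter.outstanding() == 1);
  releasor();                          // drain tracker fires once the bytes leave the buffer
  assert(counter.outstanding() == 0);
}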
envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; diff --git a/source/common/http/http2/codec_impl_legacy.cc b/source/common/http/http2/codec_impl_legacy.cc index f02e4848339a..88e596458fe9 100644 --- a/source/common/http/http2/codec_impl_legacy.cc +++ b/source/common/http/http2/codec_impl_legacy.cc @@ -46,6 +46,10 @@ class Http2ResponseCodeDetailValues { const absl::string_view inbound_empty_frame_flood = "http2.inbound_empty_frames_flood"; // Envoy was configured to drop requests with header keys beginning with underscores. const absl::string_view invalid_underscore = "http2.unexpected_underscore"; + // The upstream refused the stream. + const absl::string_view remote_refused = "http2.remote_refuse"; + // The upstream reset the stream. + const absl::string_view remote_reset = "http2.remote_reset"; const absl::string_view errorDetails(int error_code) const { switch (error_code) { @@ -59,6 +63,17 @@ class Http2ResponseCodeDetailValues { } }; +int reasonToReset(StreamResetReason reason) { + switch (reason) { + case StreamResetReason::LocalRefusedStreamReset: + return NGHTTP2_REFUSED_STREAM; + case StreamResetReason::ConnectError: + return NGHTTP2_CONNECT_ERROR; + default: + return NGHTTP2_NO_ERROR; + } +} + using Http2ResponseCodeDetails = ConstSingleton; using Http::Http2::CodecStats; using Http::Http2::MetadataDecoder; @@ -165,6 +180,7 @@ void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector local_end_stream_ = end_stream; submitHeaders(final_headers, end_stream ? nullptr : &provider); parent_.sendPendingFrames(); + parent_.checkProtocolConstraintViolation(); } void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, @@ -233,6 +249,7 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { } else { submitTrailers(trailers); parent_.sendPendingFrames(); + parent_.checkProtocolConstraintViolation(); } } @@ -246,6 +263,7 @@ void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadat submitMetadata(flags); } parent_.sendPendingFrames(); + parent_.checkProtocolConstraintViolation(); } void ConnectionImpl::StreamImpl::readDisable(bool disable) { @@ -261,6 +279,7 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) { nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); unconsumed_bytes_ = 0; parent_.sendPendingFrames(); + parent_.checkProtocolConstraintViolation(); } } } @@ -392,7 +411,7 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback static const uint64_t FRAME_HEADER_SIZE = 9; - parent_.outbound_data_frames_++; + parent_.protocol_constraints_.incrementOutboundDataFrameCount(); Buffer::OwnedImpl output; if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { @@ -442,6 +461,7 @@ void ConnectionImpl::StreamImpl::onPendingFlushTimer() { // will be run because higher layers think the stream is already finished. 
resetStreamWorker(StreamResetReason::LocalReset); parent_.sendPendingFrames(); + parent_.checkProtocolConstraintViolation(); } void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { @@ -466,6 +486,8 @@ void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool e } parent_.sendPendingFrames(); + parent_.checkProtocolConstraintViolation(); + if (local_end_stream_ && pending_send_data_.length() > 0) { createPendingFlushTimer(); } @@ -490,13 +512,12 @@ void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { // the cleanup logic to run which will reset the stream in all cases if all data frames could not // be sent. parent_.sendPendingFrames(); + parent_.checkProtocolConstraintViolation(); } void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_, - reason == StreamResetReason::LocalRefusedStreamReset - ? NGHTTP2_REFUSED_STREAM - : NGHTTP2_NO_ERROR); + reasonToReset(reason)); ASSERT(rc == 0); } @@ -522,6 +543,7 @@ void ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map } ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + Random::RandomGenerator& random, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_headers_kb, const uint32_t max_headers_count) : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb), @@ -529,19 +551,26 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), stream_error_on_invalid_http_messaging_( http2_options.override_stream_error_on_invalid_http_message().value()), - flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), - frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), - max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), - control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), - max_consecutive_inbound_frames_with_empty_payload_( - http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), - max_inbound_priority_frames_per_stream_( - http2_options.max_inbound_priority_frames_per_stream().value()), - max_inbound_window_update_frames_per_data_frame_sent_( - http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + flood_detected_(false), protocol_constraints_(stats, http2_options), skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), - dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} + dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false), random_(random) { + if (http2_options.has_connection_keepalive()) { + keepalive_interval_ = std::chrono::milliseconds( + PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), interval)); + keepalive_timeout_ = std::chrono::milliseconds( + PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), timeout)); + keepalive_interval_jitter_percent_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( + http2_options.connection_keepalive(), interval_jitter, 15.0); + + keepalive_send_timer_ = connection.dispatcher().createTimer([this]() { sendKeepalive(); }); + keepalive_timeout_timer_ = + connection.dispatcher().createTimer([this]() { onKeepaliveResponseTimeout(); }); + + // This call schedules 
the initial interval, with jitter. + onKeepaliveResponse(); + } +} ConnectionImpl::~ConnectionImpl() { for (const auto& stream : active_streams_) { @@ -550,6 +579,43 @@ ConnectionImpl::~ConnectionImpl() { nghttp2_session_del(session_); } +void ConnectionImpl::sendKeepalive() { + // Include the current time as the payload to help with debugging. + SystemTime now = connection_.dispatcher().timeSource().systemTime(); + uint64_t ms_since_epoch = + std::chrono::duration_cast(now.time_since_epoch()).count(); + ENVOY_CONN_LOG(trace, "Sending keepalive PING {}", connection_, ms_since_epoch); + + // The last parameter is an opaque 8-byte buffer, so this cast is safe. + int rc = nghttp2_submit_ping(session_, 0 /*flags*/, reinterpret_cast(&ms_since_epoch)); + ASSERT(rc == 0); + sendPendingFrames(); + checkProtocolConstraintViolation(); + + keepalive_timeout_timer_->enableTimer(keepalive_timeout_); +} + +void ConnectionImpl::onKeepaliveResponse() { + // Check the timers for nullptr in case the peer sent an unsolicited PING ACK. + if (keepalive_timeout_timer_ != nullptr) { + keepalive_timeout_timer_->disableTimer(); + } + if (keepalive_send_timer_ != nullptr) { + uint64_t interval_ms = keepalive_interval_.count(); + const uint64_t jitter_percent_mod = keepalive_interval_jitter_percent_ * interval_ms / 100; + if (jitter_percent_mod > 0) { + interval_ms += random_.random() % jitter_percent_mod; + } + keepalive_send_timer_->enableTimer(std::chrono::milliseconds(interval_ms)); + } +} + +void ConnectionImpl::onKeepaliveResponseTimeout() { + ENVOY_CONN_LOG(debug, "Closing connection due to keepalive timeout", connection_); + stats_.keepalive_timeout_.inc(); + connection_.close(Network::ConnectionCloseType::NoFlush); +} + Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either // throw an exception or return an error status. The utility wrapper catches exceptions and @@ -613,6 +679,7 @@ void ConnectionImpl::goAway() { ASSERT(rc == 0); sendPendingFrames(); + checkProtocolConstraintViolation(); } void ConnectionImpl::shutdownNotice() { @@ -620,6 +687,7 @@ void ConnectionImpl::shutdownNotice() { ASSERT(rc == 0); sendPendingFrames(); + checkProtocolConstraintViolation(); } int ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { @@ -656,6 +724,19 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { // and CONTINUATION frames in onBeforeFrameReceived(). ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); + if ((frame->hd.type == NGHTTP2_PING) && (frame->ping.hd.flags & NGHTTP2_FLAG_ACK)) { + // The ``opaque_data`` should be exactly what was sent in the ping, which is + // was the current time when the ping was sent. This can be useful while debugging + // to match the ping and ack. 
+ uint64_t data; + static_assert(sizeof(data) == sizeof(frame->ping.opaque_data), "Sizes are equal"); + memcpy(&data, frame->ping.opaque_data, sizeof(data)); + ENVOY_CONN_LOG(trace, "recv PING ACK {}", connection_, data); + + onKeepaliveResponse(); + return 0; + } + if (frame->hd.type == NGHTTP2_DATA) { if (!trackInboundFrames(&frame->hd, frame->data.padlen)) { return NGHTTP2_ERR_FLOODED; @@ -827,14 +908,6 @@ int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { return 0; } -void ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { - ++outbound_frames_; - if (is_outbound_flood_monitored_control_frame) { - ++outbound_control_frames_; - } - checkOutboundQueueLimits(); -} - bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length) { // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the @@ -842,28 +915,15 @@ bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const u bool is_outbound_flood_monitored_control_frame = false; std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); try { - incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); + auto releasor = trackOutboundFrames(is_outbound_flood_monitored_control_frame); + output.add(data, length); + output.addDrainTracker(releasor); } catch (const FrameFloodException&) { return false; } - - output.add(data, length); - output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ - : frame_buffer_releasor_); return true; } -void ConnectionImpl::releaseOutboundFrame() { - ASSERT(outbound_frames_ >= 1); - --outbound_frames_; -} - -void ConnectionImpl::releaseOutboundControlFrame() { - ASSERT(outbound_control_frames_ >= 1); - --outbound_control_frames_; - releaseOutboundFrame(); -} - ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); Buffer::OwnedImpl buffer; @@ -900,8 +960,16 @@ int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { // the connection. reason = StreamResetReason::LocalReset; } else { - reason = error_code == NGHTTP2_REFUSED_STREAM ? StreamResetReason::RemoteRefusedStreamReset - : StreamResetReason::RemoteReset; + if (error_code == NGHTTP2_REFUSED_STREAM) { + reason = StreamResetReason::RemoteRefusedStreamReset; + stream->setDetails(Http2ResponseCodeDetails::get().remote_refused); + } else if (error_code == NGHTTP2_CONNECT_ERROR) { + reason = StreamResetReason::ConnectError; + stream->setDetails(Http2ResponseCodeDetails::get().remote_reset); + } else { + reason = StreamResetReason::RemoteReset; + stream->setDetails(Http2ResponseCodeDetails::get().remote_reset); + } } stream->runResetCallbacks(reason); @@ -1004,8 +1072,7 @@ void ConnectionImpl::sendPendingFrames() { // to be thrown. However the nghttp2 library returns only the generic error code for // all failure types. Check queue limits and throw FrameFloodException if they were // exceeded. 
- if (outbound_frames_ > max_outbound_frames_ || - outbound_control_frames_ > max_outbound_control_frames_) { + if (!protocol_constraints_.status().ok()) { throw FrameFloodException("Too many frames in the outbound queue."); } @@ -1098,6 +1165,20 @@ void ConnectionImpl::sendSettings( } } +void ConnectionImpl::scheduleProtocolConstraintViolationCallback() { + if (!protocol_constraint_violation_callback_) { + protocol_constraint_violation_callback_ = connection_.dispatcher().createSchedulableCallback( + [this]() { onProtocolConstraintViolation(); }); + protocol_constraint_violation_callback_->scheduleCallbackCurrentIteration(); + } +} + +void ConnectionImpl::onProtocolConstraintViolation() { + // Flooded outbound queue implies that peer is not reading and it does not + // make sense to try to flush pending bytes. + connection_.close(Envoy::Network::ConnectionCloseType::NoFlush); +} + ConnectionImpl::Http2Callbacks::Http2Callbacks() { nghttp2_session_callbacks_new(&callbacks_); nghttp2_session_callbacks_set_send_callback( @@ -1260,10 +1341,11 @@ ConnectionImpl::ClientHttp2Options::ClientHttp2Options( ClientConnectionImpl::ClientConnectionImpl( Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats, + Random::RandomGenerator& random, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count, Nghttp2SessionFactory& http2_session_factory) - : ConnectionImpl(connection, stats, http2_options, max_response_headers_kb, + : ConnectionImpl(connection, stats, random, http2_options, max_response_headers_kb, max_response_headers_count), callbacks_(callbacks) { ClientHttp2Options client_http2_options(http2_options); @@ -1310,11 +1392,12 @@ int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na ServerConnectionImpl::ServerConnectionImpl( Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats, + Random::RandomGenerator& random, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : ConnectionImpl(connection, stats, http2_options, max_request_headers_kb, + : ConnectionImpl(connection, stats, random, http2_options, max_request_headers_kb, max_request_headers_count), callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { Http2Options h2_options(http2_options); @@ -1366,34 +1449,16 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", connection_, static_cast(hd->type), static_cast(hd->flags), static_cast(hd->length), padding_length); - switch (hd->type) { - case NGHTTP2_HEADERS: - case NGHTTP2_CONTINUATION: - // Track new streams. - if (hd->flags & NGHTTP2_FLAG_END_HEADERS) { - inbound_streams_++; - } - FALLTHRU; - case NGHTTP2_DATA: - // Track frames with an empty payload and no end stream flag. 
- if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) { - ENVOY_CONN_LOG(trace, "frame with an empty payload and no end stream flag.", connection_); - consecutive_inbound_frames_with_empty_payload_++; - } else { - consecutive_inbound_frames_with_empty_payload_ = 0; + auto result = protocol_constraints_.trackInboundFrames(hd, padding_length); + if (!result.ok()) { + ENVOY_CONN_LOG(trace, "error reading frame: {} received in this HTTP/2 session.", connection_, + result.message()); + if (isInboundFramesWithEmptyPayloadError(result)) { + ConnectionImpl::StreamImpl* stream = getStream(hd->stream_id); + if (stream) { + stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); + } } - break; - case NGHTTP2_PRIORITY: - inbound_priority_frames_++; - break; - case NGHTTP2_WINDOW_UPDATE: - inbound_window_update_frames_++; - break; - default: - break; - } - - if (!checkInboundFrameLimits(hd->stream_id)) { // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate // all the way to nghttp2_session_mem_recv() where we need it. flood_detected_ = true; @@ -1403,53 +1468,19 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 return true; } -bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { - ASSERT(dispatching_downstream_data_); - ConnectionImpl::StreamImpl* stream = getStream(stream_id); - - if (consecutive_inbound_frames_with_empty_payload_ > - max_consecutive_inbound_frames_with_empty_payload_) { - ENVOY_CONN_LOG(trace, - "error reading frame: Too many consecutive frames with an empty payload " - "received in this HTTP/2 session.", - connection_); - if (stream) { - stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); - } - stats_.inbound_empty_frames_flood_.inc(); - return false; - } - - if (inbound_priority_frames_ > max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)) { - ENVOY_CONN_LOG(trace, - "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", - connection_); - stats_.inbound_priority_frames_flood_.inc(); - return false; - } - - if (inbound_window_update_frames_ > - 1 + 2 * (inbound_streams_ + - max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) { - ENVOY_CONN_LOG( - trace, - "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", - connection_); - stats_.inbound_window_update_frames_flood_.inc(); - return false; +Envoy::Http::Http2::ProtocolConstraints::ReleasorProc +ServerConnectionImpl::trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) { + auto releasor = + protocol_constraints_.incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); + if (dispatching_downstream_data_ && !protocol_constraints_.checkOutboundFrameLimits().ok()) { + throw FrameFloodException(std::string(protocol_constraints_.status().message())); } - - return true; + return releasor; } -void ServerConnectionImpl::checkOutboundQueueLimits() { - if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { - stats_.outbound_flood_.inc(); - throw FrameFloodException("Too many frames in the outbound queue."); - } - if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { - stats_.outbound_control_flood_.inc(); - throw FrameFloodException("Too many control frames in the outbound queue."); +void ServerConnectionImpl::checkProtocolConstraintViolation() { + if 
(!protocol_constraints_.checkOutboundFrameLimits().ok()) { + scheduleProtocolConstraintViolationCallback(); } } @@ -1470,7 +1501,9 @@ Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); // Make sure downstream outbound queue was not flooded by the upstream frames. - checkOutboundQueueLimits(); + if (!protocol_constraints_.checkOutboundFrameLimits().ok()) { + throw FrameFloodException(std::string(protocol_constraints_.status().message())); + } return ConnectionImpl::innerDispatch(data); } diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h index 2b6db85cf731..238762c45df3 100644 --- a/source/common/http/http2/codec_impl_legacy.h +++ b/source/common/http/http2/codec_impl_legacy.h @@ -7,6 +7,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/protocol.pb.h" #include "envoy/event/deferred_deletable.h" #include "envoy/http/codec.h" @@ -22,6 +23,7 @@ #include "common/http/http2/codec_stats.h" #include "common/http/http2/metadata_decoder.h" #include "common/http/http2/metadata_encoder.h" +#include "common/http/http2/protocol_constraints.h" #include "common/http/status.h" #include "common/http/utility.h" @@ -91,6 +93,7 @@ class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { class ConnectionImpl : public virtual Connection, protected Logger::Loggable { public: ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats, + Random::RandomGenerator& random, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_headers_kb, const uint32_t max_headers_count); @@ -274,6 +277,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable void { this->pendingRecvBufferLowWatermark(); }, [this]() -> void { this->pendingRecvBufferHighWatermark(); }, @@ -427,6 +436,30 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_streams_; @@ -443,57 +476,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable frame_buffer_releasor_; - // This counter keeps track of the number of outbound frames of types PING, SETTINGS and - // RST_STREAM (these that were buffered in the underlying connection but not yet written into the - // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is - // terminated. - uint32_t outbound_control_frames_ = 0; - // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from - // corresponding http2_protocol_options. Default value is 1000. - const uint32_t max_outbound_control_frames_; - const std::function control_frame_buffer_releasor_; - // This counter keeps track of the number of consecutive inbound frames of types HEADERS, - // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds - // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. - uint32_t consecutive_inbound_frames_with_empty_payload_ = 0; - // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without - // a payload. Initialized from corresponding http2_protocol_options. Default value is 1. - const uint32_t max_consecutive_inbound_frames_with_empty_payload_; - - // This counter keeps track of the number of inbound streams. 
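// Illustrative sketch (not from this patch): deferring the connection close out of the codec's
// dispatch context, as scheduleProtocolConstraintViolationCallback()/onProtocolConstraintViolation()
// do above. The violation is only recorded while codec callbacks are on the stack and the actual
// close runs later from the event loop. The toy dispatcher below is an assumption for illustration.
#include <functional>
#include <iostream>
#include <queue>

class ToyDispatcher {
public:
  void post(std::function<void()> cb) { queue_.push(std::move(cb)); }
  void runPending() {
    while (!queue_.empty()) {
      auto cb = std::move(queue_.front());
      queue_.pop();
      cb();
    }
  }

private:
  std::queue<std::function<void()>> queue_;
};

class ToyConnection {
public:
  explicit ToyConnection(ToyDispatcher& d) : dispatcher_(d) {}

  // Called from inside a codec callback; must not tear the connection down right here.
  void checkProtocolConstraintViolation(bool violated) {
    if (violated && !close_scheduled_) {
      close_scheduled_ = true;
      dispatcher_.post([this]() { closeNoFlush(); });
    }
  }

private:
  // The peer is not reading, so there is no point flushing pending bytes.
  void closeNoFlush() { std::cout << "closing connection without flushing\n"; }

  ToyDispatcher& dispatcher_;
  bool close_scheduled_ = false;
};

int main() {
  ToyDispatcher dispatcher;
  ToyConnection conn(dispatcher);
  conn.checkProtocolConstraintViolation(true); // inside the "dispatch" context
  dispatcher.runPending();                     // close happens on the next loop iteration
  return 0;
}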
- uint32_t inbound_streams_ = 0; - // This counter keeps track of the number of inbound PRIORITY frames. If this counter exceeds - // the value calculated using this formula: - // - // max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_) - // - // the connection is terminated. - uint64_t inbound_priority_frames_ = 0; - // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding - // http2_protocol_options. Default value is 100. - const uint32_t max_inbound_priority_frames_per_stream_; - - // This counter keeps track of the number of inbound WINDOW_UPDATE frames. If this counter exceeds - // the value calculated using this formula: - // - // 1 + 2 * (inbound_streams_ + - // max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_) - // - // the connection is terminated. - uint64_t inbound_window_update_frames_ = 0; - // This counter keeps track of the number of outbound DATA frames. - uint64_t outbound_data_frames_ = 0; - // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized - // from corresponding http2_protocol_options. Default value is 10. - const uint32_t max_inbound_window_update_frames_per_data_frame_sent_; + ::Envoy::Http::Http2::ProtocolConstraints protocol_constraints_; // For the flood mitigation to work the onSend callback must be called once for each outbound // frame. This is what the nghttp2 library is doing, however this is not documented. The @@ -527,16 +510,23 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view header_name) override; + /** + * Check protocol constraint violations outside of the dispatching context. + * This method ASSERTs if it is called in the dispatching context. + */ + void checkProtocolConstraintViolation() override; + // Http::Connection // The reason for overriding the dispatch method is to do flood mitigation only when // processing data from downstream client. Doing flood mitigation when processing upstream // responses makes clean-up tricky, which needs to be improved (see comments for the - // ClientConnectionImpl::checkOutboundQueueLimits method). The dispatch method on the + // ClientConnectionImpl::checkProtocolConstraintsStatus method). The dispatch method on the // ServerConnectionImpl objects is called only when processing data from the downstream client in // the ConnectionManagerImpl::onData method. 
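// Illustrative sketch (not from this patch): the inbound flood limits whose counters were just
// moved out of the codec and into ProtocolConstraints. The thresholds mirror the formulas in the
// removed comments (defaults 1, 100 and 10 respectively); the struct and names are stand-ins.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

struct InboundCounters {
  uint64_t consecutive_empty_frames = 0;
  uint64_t priority_frames = 0;
  uint64_t window_update_frames = 0;
  uint64_t inbound_streams = 0;
  uint64_t outbound_data_frames = 0;
};

std::optional<std::string> checkInboundFrameLimits(const InboundCounters& c,
                                                   uint64_t max_empty = 1,
                                                   uint64_t max_priority_per_stream = 100,
                                                   uint64_t max_wu_per_data_frame = 10) {
  if (c.consecutive_empty_frames > max_empty) {
    return "Too many consecutive frames with an empty payload";
  }
  if (c.priority_frames > max_priority_per_stream * (1 + c.inbound_streams)) {
    return "Too many PRIORITY frames";
  }
  if (c.window_update_frames >
      1 + 2 * (c.inbound_streams + max_wu_per_data_frame * c.outbound_data_frames)) {
    return "Too many WINDOW_UPDATE frames";
  }
  return std::nullopt; // no violation
}

int main() {
  InboundCounters counters;
  counters.inbound_streams = 4;
  counters.priority_frames = 501; // exceeds 100 * (1 + 4)
  if (auto violation = checkInboundFrameLimits(counters)) {
    std::cout << *violation << "\n";
  }
  return 0;
}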
Http::Status dispatch(Buffer::Instance& data) override; diff --git a/source/common/http/http2/codec_stats.h b/source/common/http/http2/codec_stats.h index 05ea11bbe764..ba9d1592095b 100644 --- a/source/common/http/http2/codec_stats.h +++ b/source/common/http/http2/codec_stats.h @@ -27,6 +27,7 @@ namespace Http2 { COUNTER(trailers) \ COUNTER(tx_flush_timeout) \ COUNTER(tx_reset) \ + COUNTER(keepalive_timeout) \ GAUGE(streams_active, Accumulate) \ GAUGE(pending_send_bytes, Accumulate) diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 87ba8f8a218c..d83def2ec18e 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -12,12 +12,13 @@ namespace Envoy { namespace Http { namespace Http2 { -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, +ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, - transport_socket_options, Protocol::Http2) {} + transport_socket_options, Protocol::Http2), + random_generator_(random_generator) {} ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } @@ -84,17 +85,17 @@ RequestEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(ResponseDecoder& re CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP2, std::move(data.connection_), - data.host_description_, dispatcher_)}; + data.host_description_, dispatcher_, random_generator_)}; return codec; } ConnectionPool::InstancePtr -allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, +allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); + return std::make_unique( + dispatcher, random_generator, host, priority, options, transport_socket_options); } } // namespace Http2 diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 0e8f4db554df..63aabe08e2a5 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -18,8 +18,8 @@ namespace Http2 { */ class ConnPoolImpl : public Envoy::Http::HttpConnPoolImplBase { public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, + ConnPoolImpl(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); @@ -68,6 +68,8 @@ class ConnPoolImpl : public Envoy::Http::HttpConnPoolImplBase { // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe // side we do 2^29. 
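// Illustrative sketch (not from this patch): the Random::RandomGenerator now threaded through the
// conn-pool and codec constructors is presumably used to jitter the HTTP/2 connection keepalive
// PING interval (the new keepalive_timeout counter points the same way). That purpose, and every
// name below, is an assumption for illustration only; this is not Envoy's actual API.
#include <chrono>
#include <cstdint>
#include <iostream>
#include <random>

// Abstract generator injected by the caller, mirroring the constructor plumbing above.
class RandomGenerator {
public:
  virtual ~RandomGenerator() = default;
  virtual uint64_t random() = 0;
};

class StdRandomGenerator : public RandomGenerator {
public:
  uint64_t random() override { return dist_(engine_); }

private:
  std::mt19937_64 engine_{std::random_device{}()};
  std::uniform_int_distribution<uint64_t> dist_;
};

// Spread keepalive PINGs by adding up to jitter_percent of the base interval.
std::chrono::milliseconds jitteredInterval(std::chrono::milliseconds base, uint32_t jitter_percent,
                                           RandomGenerator& random) {
  const uint64_t max_jitter_ms = static_cast<uint64_t>(base.count()) * jitter_percent / 100;
  const uint64_t jitter_ms = max_jitter_ms == 0 ? 0 : random.random() % max_jitter_ms;
  return base + std::chrono::milliseconds(static_cast<int64_t>(jitter_ms));
}

int main() {
  StdRandomGenerator rng;
  const auto next_ping = jitteredInterval(std::chrono::milliseconds(15000), 15, rng);
  std::cout << "next PING in " << next_ping.count() << " ms\n";
  return 0;
}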
static const uint64_t DEFAULT_MAX_STREAMS = (1 << 29); + + Random::RandomGenerator& random_generator_; }; /** @@ -82,8 +84,8 @@ class ProdConnPoolImpl : public ConnPoolImpl { }; ConnectionPool::InstancePtr -allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, +allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator, + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); diff --git a/source/common/http/http2/protocol_constraints.cc b/source/common/http/http2/protocol_constraints.cc new file mode 100644 index 000000000000..9b37d36d5465 --- /dev/null +++ b/source/common/http/http2/protocol_constraints.cc @@ -0,0 +1,122 @@ +#include "common/http/http2/protocol_constraints.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +ProtocolConstraints::ProtocolConstraints( + CodecStats& stats, const envoy::config::core::v3::Http2ProtocolOptions& http2_options) + : stats_(stats), max_outbound_frames_(http2_options.max_outbound_frames().value()), + frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), + max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), + control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), + max_consecutive_inbound_frames_with_empty_payload_( + http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), + max_inbound_priority_frames_per_stream_( + http2_options.max_inbound_priority_frames_per_stream().value()), + max_inbound_window_update_frames_per_data_frame_sent_( + http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()) {} + +ProtocolConstraints::ReleasorProc +ProtocolConstraints::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { + ++outbound_frames_; + if (is_outbound_flood_monitored_control_frame) { + ++outbound_control_frames_; + } + return is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ + : frame_buffer_releasor_; +} + +void ProtocolConstraints::releaseOutboundFrame() { + ASSERT(outbound_frames_ >= 1); + --outbound_frames_; +} + +void ProtocolConstraints::releaseOutboundControlFrame() { + ASSERT(outbound_control_frames_ >= 1); + --outbound_control_frames_; + releaseOutboundFrame(); +} + +Status ProtocolConstraints::checkOutboundFrameLimits() { + // Stop checking for further violations after the first failure. + if (!status_.ok()) { + return status_; + } + + if (outbound_frames_ > max_outbound_frames_) { + stats_.outbound_flood_.inc(); + return status_ = bufferFloodError("Too many frames in the outbound queue."); + } + if (outbound_control_frames_ > max_outbound_control_frames_) { + stats_.outbound_control_flood_.inc(); + return status_ = bufferFloodError("Too many control frames in the outbound queue."); + } + return okStatus(); +} + +Status ProtocolConstraints::trackInboundFrames(const nghttp2_frame_hd* hd, + uint32_t padding_length) { + switch (hd->type) { + case NGHTTP2_HEADERS: + case NGHTTP2_CONTINUATION: + // Track new streams. + if (hd->flags & NGHTTP2_FLAG_END_HEADERS) { + inbound_streams_++; + } + FALLTHRU; + case NGHTTP2_DATA: + // Track frames with an empty payload and no end stream flag. 
+ if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) { + consecutive_inbound_frames_with_empty_payload_++; + } else { + consecutive_inbound_frames_with_empty_payload_ = 0; + } + break; + case NGHTTP2_PRIORITY: + inbound_priority_frames_++; + break; + case NGHTTP2_WINDOW_UPDATE: + inbound_window_update_frames_++; + break; + default: + break; + } + + status_.Update(checkInboundFrameLimits()); + return status_; +} + +Status ProtocolConstraints::checkInboundFrameLimits() { + // Stop checking for further violations after the first failure. + if (!status_.ok()) { + return status_; + } + + if (consecutive_inbound_frames_with_empty_payload_ > + max_consecutive_inbound_frames_with_empty_payload_) { + stats_.inbound_empty_frames_flood_.inc(); + return inboundFramesWithEmptyPayloadError(); + } + + if (inbound_priority_frames_ > + static_cast(max_inbound_priority_frames_per_stream_) * (1 + inbound_streams_)) { + stats_.inbound_priority_frames_flood_.inc(); + return bufferFloodError("Too many PRIORITY frames"); + } + + if (inbound_window_update_frames_ > + 1 + 2 * (inbound_streams_ + + max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) { + stats_.inbound_window_update_frames_flood_.inc(); + return bufferFloodError("Too many WINDOW_UPDATE frames"); + } + + return okStatus(); +} + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/protocol_constraints.h b/source/common/http/http2/protocol_constraints.h new file mode 100644 index 000000000000..d9219830a5e2 --- /dev/null +++ b/source/common/http/http2/protocol_constraints.h @@ -0,0 +1,120 @@ +#pragma once + +#include +#include + +#include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/network/connection.h" + +#include "common/http/http2/codec_stats.h" +#include "common/http/status.h" + +#include "nghttp2/nghttp2.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +// Class for detecting abusive peers and validating additional constraints imposed by Envoy. +// This class does not check protocol compliance with the H/2 standard, as this is checked by +// protocol framer/codec. Currently implemented constraints: +// 1. detection of control frame (i.e. PING) initiated floods. +// 2. detection of outbound DATA or HEADER frame floods. +// 4. zero length, PRIORITY and WINDOW_UPDATE floods. + +class ProtocolConstraints { +public: + using ReleasorProc = std::function; + + explicit ProtocolConstraints(CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options); + + // Return ok status if no protocol constraints were violated. + // Return error status of the first detected violation. Subsequent violations of constraints + // do not reset the error status or increment stat counters. + const Status& status() const { return status_; } + + // Increment counters of pending (buffered for sending to the peer) outbound frames. + // If the `is_outbound_flood_monitored_control_frame` is false only the counter for all frame + // types is incremented. If the `is_outbound_flood_monitored_control_frame` is true, both the + // control frame and all frame types counters are incremented. + // Returns callable for decrementing frame counters when frames was successfully written to + // the underlying transport socket object. + // To check if outbound frame constraints were violated call the `status()` method. + // TODO(yanavlasov): return StatusOr when flood checks are implemented for both + // directions. 
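// Illustrative sketch (not from this patch): the "latch the first violation" behavior documented
// in checkOutboundFrameLimits() above. Once an error is recorded, later checks return it unchanged,
// so the flood stat is only incremented once per connection. Plain strings stand in for Status.
#include <cstdint>
#include <iostream>
#include <string>

class OutboundLimits {
public:
  OutboundLimits(uint32_t max_frames, uint32_t max_control_frames)
      : max_frames_(max_frames), max_control_frames_(max_control_frames) {}

  void onFrameQueued(bool is_control) {
    ++frames_;
    if (is_control) {
      ++control_frames_;
    }
  }

  // Empty string means OK; a non-empty string is the sticky error message.
  const std::string& check() {
    if (!error_.empty()) {
      return error_; // already violated: do not re-count stats
    }
    if (frames_ > max_frames_) {
      ++flood_stat_;
      error_ = "Too many frames in the outbound queue.";
    } else if (control_frames_ > max_control_frames_) {
      ++control_flood_stat_;
      error_ = "Too many control frames in the outbound queue.";
    }
    return error_;
  }

  uint64_t floodStat() const { return flood_stat_; }

private:
  const uint32_t max_frames_;
  const uint32_t max_control_frames_;
  uint32_t frames_ = 0;
  uint32_t control_frames_ = 0;
  uint64_t flood_stat_ = 0;
  uint64_t control_flood_stat_ = 0;
  std::string error_;
};

int main() {
  OutboundLimits limits(/*max_frames=*/2, /*max_control_frames=*/1);
  for (int i = 0; i < 5; ++i) {
    limits.onFrameQueued(/*is_control=*/false);
  }
  limits.check();
  limits.check(); // second check reuses the latched error
  std::cout << "flood stat incremented " << limits.floodStat() << " time(s)\n"; // prints 1
  return 0;
}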
+ ReleasorProc incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame); + + // Track received frames of various types. + // Return an error status if inbound frame constraints were violated. + Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length); + // Increment the number of DATA frames sent to the peer. + void incrementOutboundDataFrameCount() { ++outbound_data_frames_; } + + Status checkOutboundFrameLimits(); + +private: + void releaseOutboundFrame(); + void releaseOutboundControlFrame(); + Status checkInboundFrameLimits(); + + Status status_; + CodecStats& stats_; + // This counter keeps track of the number of outbound frames of all types (these that were + // buffered in the underlying connection but not yet written into the socket). If this counter + // exceeds the `max_outbound_frames_' value the connection is terminated. + uint32_t outbound_frames_ = 0; + // Maximum number of outbound frames. Initialized from corresponding http2_protocol_options. + // Default value is 10000. + const uint32_t max_outbound_frames_; + ReleasorProc frame_buffer_releasor_; + + // This counter keeps track of the number of outbound frames of types PING, SETTINGS and + // RST_STREAM (these that were buffered in the underlying connection but not yet written into the + // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is + // terminated. + uint32_t outbound_control_frames_ = 0; + // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from + // corresponding http2_protocol_options. Default value is 1000. + const uint32_t max_outbound_control_frames_; + ReleasorProc control_frame_buffer_releasor_; + + // This counter keeps track of the number of consecutive inbound frames of types HEADERS, + // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds + // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. + uint32_t consecutive_inbound_frames_with_empty_payload_ = 0; + // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without + // a payload. Initialized from corresponding http2_protocol_options. Default value is 1. + const uint32_t max_consecutive_inbound_frames_with_empty_payload_; + + // This counter keeps track of the number of inbound streams. + uint32_t inbound_streams_ = 0; + // This counter keeps track of the number of inbound PRIORITY frames. If this counter exceeds + // the value calculated using this formula: + // + // max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_) + // + // the connection is terminated. + uint64_t inbound_priority_frames_ = 0; + // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding + // http2_protocol_options. Default value is 100. + const uint32_t max_inbound_priority_frames_per_stream_; + + // This counter keeps track of the number of inbound WINDOW_UPDATE frames. If this counter exceeds + // the value calculated using this formula: + // + // 1 + 2 * (inbound_streams_ + + // max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_) + // + // the connection is terminated. + uint64_t inbound_window_update_frames_ = 0; + // This counter keeps track of the number of outbound DATA frames. + uint64_t outbound_data_frames_ = 0; + // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized + // from corresponding http2_protocol_options. 
Default value is 10. + const uint32_t max_inbound_window_update_frames_per_data_frame_sent_; +}; + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/message_impl.h b/source/common/http/message_impl.h index 698c51824a06..d746227912fc 100644 --- a/source/common/http/message_impl.h +++ b/source/common/http/message_impl.h @@ -24,22 +24,16 @@ class MessageImpl : public Message // Http::Message HeadersInterfaceType& headers() override { return *headers_; } - Buffer::InstancePtr& body() override { return body_; } + Buffer::Instance& body() override { return body_; } TrailersInterfaceType* trailers() override { return trailers_.get(); } void trailers(std::unique_ptr&& trailers) override { trailers_ = std::move(trailers); } - std::string bodyAsString() const override { - if (body_) { - return body_->toString(); - } else { - return ""; - } - } + std::string bodyAsString() const override { return body_.toString(); } private: std::unique_ptr headers_; - Buffer::InstancePtr body_; + Buffer::OwnedImpl body_; std::unique_ptr trailers_; }; diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index 5194a395e79b..f12790b41103 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -1,12 +1,12 @@ #include "common/http/path_utility.h" +#include "common/chromium_url/url_canon.h" +#include "common/chromium_url/url_canon_stdstring.h" #include "common/common/logger.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/types/optional.h" -#include "url/url_canon.h" -#include "url/url_canon_stdstring.h" namespace Envoy { namespace Http { @@ -14,10 +14,11 @@ namespace Http { namespace { absl::optional canonicalizePath(absl::string_view original_path) { std::string canonical_path; - url::Component in_component(0, original_path.size()); - url::Component out_component; - url::StdStringCanonOutput output(&canonical_path); - if (!CanonicalizePath(original_path.data(), in_component, &output, &out_component)) { + chromium_url::Component in_component(0, original_path.size()); + chromium_url::Component out_component; + chromium_url::StdStringCanonOutput output(&canonical_path); + if (!chromium_url::CanonicalizePath(original_path.data(), in_component, &output, + &out_component)) { return absl::nullopt; } else { output.Complete(); diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h index 62be43e2e03f..a6a99aaef78d 100644 --- a/source/common/http/path_utility.h +++ b/source/common/http/path_utility.h @@ -12,15 +12,15 @@ namespace Http { */ class PathUtil { public: - // Returns if the normalization succeeds. - // If it is successful, the path header in header path will be updated with the normalized path. + // Returns true if the normalization succeeds. + // If it is successful, the path header will be updated with the normalized path. // Requires the Path header be present. static bool canonicalPath(RequestHeaderMap& headers); // Merges two or more adjacent slashes in path part of URI into one. // Requires the Path header be present. static void mergeSlashes(RequestHeaderMap& headers); // Removes the query and/or fragment string (if present) from the input path. - // For example, this function returns "/data" for the input path "/data#fragment?param=value". + // For example, this function returns "/data" for the input path "/data?param=value#fragment". 
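// Illustrative sketch (not from this patch): one way to implement the behavior the corrected
// PathUtil comment above describes, i.e. dropping everything from the first '?' or '#'.
// std::string_view stands in for absl::string_view.
#include <cassert>
#include <string_view>

std::string_view removeQueryAndFragment(std::string_view path) {
  const auto pos = path.find_first_of("?#");
  return pos == std::string_view::npos ? path : path.substr(0, pos);
}

int main() {
  assert(removeQueryAndFragment("/data?param=value#fragment") == "/data");
  assert(removeQueryAndFragment("/data#fragment") == "/data");
  assert(removeQueryAndFragment("/data") == "/data");
  return 0;
}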
static absl::string_view removeQueryAndFragment(const absl::string_view path); }; diff --git a/source/common/http/status.cc b/source/common/http/status.cc index 78ef5c562f2d..1204a9ac9e4f 100644 --- a/source/common/http/status.cc +++ b/source/common/http/status.cc @@ -23,6 +23,8 @@ absl::string_view statusCodeToString(StatusCode code) { return "PrematureResponseError"; case StatusCode::CodecClientError: return "CodecClientError"; + case StatusCode::InboundFramesWithEmptyPayload: + return "InboundFramesWithEmptyPayloadError"; } NOT_REACHED_GCOVR_EXCL_LINE; } @@ -104,6 +106,13 @@ Status codecClientError(absl::string_view message) { return status; } +Status inboundFramesWithEmptyPayloadError() { + absl::Status status(absl::StatusCode::kInternal, + "Too many consecutive frames with an empty payload"); + storePayload(status, EnvoyStatusPayload(StatusCode::InboundFramesWithEmptyPayload)); + return status; +} + // Methods for checking and extracting error information StatusCode getStatusCode(const Status& status) { return status.ok() ? StatusCode::Ok : getPayload(status).status_code_; @@ -132,5 +141,9 @@ bool isCodecClientError(const Status& status) { return getStatusCode(status) == StatusCode::CodecClientError; } +bool isInboundFramesWithEmptyPayloadError(const Status& status) { + return getStatusCode(status) == StatusCode::InboundFramesWithEmptyPayload; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/status.h b/source/common/http/status.h index 2b47a5d0a48b..97c7dac96ca1 100644 --- a/source/common/http/status.h +++ b/source/common/http/status.h @@ -68,7 +68,12 @@ enum class StatusCode : int { /** * Indicates a client (local) side error which should not happen. */ - CodecClientError = 4 + CodecClientError = 4, + + /** + * Indicates that peer sent too many consecutive DATA frames with empty payload. + */ + InboundFramesWithEmptyPayload = 5, }; using Status = absl::Status; @@ -88,6 +93,7 @@ Status codecProtocolError(absl::string_view message); Status bufferFloodError(absl::string_view message); Status prematureResponseError(absl::string_view message, Http::Code http_code); Status codecClientError(absl::string_view message); +Status inboundFramesWithEmptyPayloadError(); /** * Returns Envoy::StatusCode of the given status object. @@ -102,6 +108,7 @@ ABSL_MUST_USE_RESULT bool isCodecProtocolError(const Status& status); ABSL_MUST_USE_RESULT bool isBufferFloodError(const Status& status); ABSL_MUST_USE_RESULT bool isPrematureResponseError(const Status& status); ABSL_MUST_USE_RESULT bool isCodecClientError(const Status& status); +ABSL_MUST_USE_RESULT bool isInboundFramesWithEmptyPayloadError(const Status& status); /** * Returns Http::Code value of the PrematureResponseError status. diff --git a/source/common/http/url_utility.cc b/source/common/http/url_utility.cc deleted file mode 100644 index d2fd43015280..000000000000 --- a/source/common/http/url_utility.cc +++ /dev/null @@ -1,95 +0,0 @@ -#include "common/http/url_utility.h" - -#include - -#include -#include - -#include "common/common/assert.h" -#include "common/common/empty_string.h" -#include "common/common/utility.h" - -#include "absl/strings/numbers.h" -#include "absl/strings/str_cat.h" - -namespace Envoy { -namespace Http { -namespace Utility { - -bool Url::initialize(absl::string_view absolute_url, bool is_connect) { - // TODO(dio): When we have access to base::StringPiece, probably we can convert absolute_url to - // that instead. 
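// Illustrative sketch (not from this patch): a simplified version of the status plumbing added
// above. A factory builds the error carrying a machine-readable code, and a predicate lets callers
// branch on it, the way ServerConnectionImpl::trackInboundFrames() attaches response-code details.
// The Status type here is a stand-in, not absl::Status with Envoy's payload mechanism.
#include <cassert>
#include <string>

enum class StatusCode { Ok, BufferFlood, InboundFramesWithEmptyPayload };

struct Status {
  StatusCode code = StatusCode::Ok;
  std::string message;
  bool ok() const { return code == StatusCode::Ok; }
};

Status inboundFramesWithEmptyPayloadError() {
  return {StatusCode::InboundFramesWithEmptyPayload,
          "Too many consecutive frames with an empty payload"};
}

bool isInboundFramesWithEmptyPayloadError(const Status& status) {
  return status.code == StatusCode::InboundFramesWithEmptyPayload;
}

int main() {
  const Status status = inboundFramesWithEmptyPayloadError();
  assert(!status.ok());
  assert(isInboundFramesWithEmptyPayloadError(status));
  return 0;
}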
- GURL parsed(std::string{absolute_url}); - if (is_connect) { - return initializeForConnect(std::move(parsed)); - } - - // TODO(dio): Check if we need to accommodate to strictly validate only http(s) AND ws(s) schemes. - // Currently, we only accept http(s). - if (!parsed.is_valid() || !parsed.SchemeIsHTTPOrHTTPS()) { - return false; - } - - scheme_ = parsed.scheme(); - - // Only non-default ports will be rendered as part of host_and_port_. For example, - // http://www.host.com:80 has port component (i.e. 80). However, since 80 is a default port for - // http scheme, host_and_port_ will be rendered as www.host.com (without port). The same case with - // https scheme (with port 443) as well. - host_and_port_ = - absl::StrCat(parsed.host(), parsed.has_port() ? ":" : EMPTY_STRING, parsed.port()); - - const int port = parsed.EffectiveIntPort(); - if (port <= 0 || port > std::numeric_limits::max()) { - return false; - } - port_ = static_cast(port); - - // RFC allows the absolute URI to not end in "/", but the absolute path form must start with "/". - path_and_query_params_ = parsed.PathForRequest(); - if (parsed.has_ref()) { - absl::StrAppend(&path_and_query_params_, "#", parsed.ref()); - } - - return true; -} - -bool Url::initializeForConnect(GURL&& url) { - // CONNECT requests can only contain "hostname:port" - // https://github.com/nodejs/http-parser/blob/d9275da4650fd1133ddc96480df32a9efe4b059b/http_parser.c#L2503-L2506. - if (!url.is_valid() || url.IsStandard()) { - return false; - } - - const auto& parsed = url.parsed_for_possibly_invalid_spec(); - // The parsed.scheme contains the URL's hostname (stored by GURL). While host and port have -1 - // as its length. - if (parsed.scheme.len <= 0 || parsed.host.len > 0 || parsed.port.len > 0) { - return false; - } - - host_and_port_ = url.possibly_invalid_spec(); - const auto& parts = StringUtil::splitToken(host_and_port_, ":", /*keep_empty_string=*/true, - /*trim_whitespace=*/false); - if (parts.size() != 2 || static_cast(parsed.scheme.len) != parts.at(0).size() || - !validPortForConnect(parts.at(1))) { - return false; - } - - return true; -} - -bool Url::validPortForConnect(absl::string_view port_string) { - int port; - const bool valid = absl::SimpleAtoi(port_string, &port); - // Only a port value in valid range (1-65535) is allowed. - if (!valid || port <= 0 || port > std::numeric_limits::max()) { - return false; - } - port_ = static_cast(port); - return true; -} - -} // namespace Utility -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/url_utility.h b/source/common/http/url_utility.h deleted file mode 100644 index fa140c6d5f12..000000000000 --- a/source/common/http/url_utility.h +++ /dev/null @@ -1,58 +0,0 @@ -#pragma once - -#include - -#include "absl/strings/string_view.h" -#include "url/gurl.h" - -namespace Envoy { -namespace Http { -namespace Utility { - -/** - * Given a fully qualified URL, splits the string_view provided into scheme, host and path with - * query parameters components. - */ -class Url { -public: - /** - * Initializes a URL object from a URL string. - * @param absolute_url URL string to be parsed. - * @param is_connect whether to parse the absolute_url as CONNECT request URL or not. - * @return bool if the initialization is successful. - */ - bool initialize(absl::string_view absolute_url, bool is_connect); - - /** - * @return absl::string_view the scheme of a URL. 
- */ - absl::string_view scheme() const { return scheme_; } - - /** - * @return absl::string_view the host and port part of a URL. - */ - absl::string_view hostAndPort() const { return host_and_port_; } - - /** - * @return absl::string_view the path and query params part of a URL. - */ - absl::string_view pathAndQueryParams() const { return path_and_query_params_; } - - /** - * @return uint64_t the effective port of a URL. - */ - uint64_t port() const { return port_; } - -private: - bool initializeForConnect(GURL&& url); - bool validPortForConnect(absl::string_view port_string); - - std::string scheme_; - std::string host_and_port_; - std::string path_and_query_params_; - uint16_t port_{0}; -}; - -} // namespace Utility -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index d612a55898c3..cb018c0c18f2 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -1,5 +1,7 @@ #include "common/http/utility.h" +#include + #include #include #include @@ -224,6 +226,43 @@ initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions namespace Http { +static const char kDefaultPath[] = "/"; + +bool Utility::Url::initialize(absl::string_view absolute_url, bool is_connect) { + struct http_parser_url u; + http_parser_url_init(&u); + const int result = + http_parser_parse_url(absolute_url.data(), absolute_url.length(), is_connect, &u); + + if (result != 0) { + return false; + } + if ((u.field_set & (1 << UF_HOST)) != (1 << UF_HOST) && + (u.field_set & (1 << UF_SCHEMA)) != (1 << UF_SCHEMA)) { + return false; + } + scheme_ = absl::string_view(absolute_url.data() + u.field_data[UF_SCHEMA].off, + u.field_data[UF_SCHEMA].len); + + uint16_t authority_len = u.field_data[UF_HOST].len; + if ((u.field_set & (1 << UF_PORT)) == (1 << UF_PORT)) { + authority_len = authority_len + u.field_data[UF_PORT].len + 1; + } + host_and_port_ = + absl::string_view(absolute_url.data() + u.field_data[UF_HOST].off, authority_len); + + // RFC allows the absolute-uri to not end in /, but the absolute path form + // must start with + uint64_t path_len = absolute_url.length() - (u.field_data[UF_HOST].off + hostAndPort().length()); + if (path_len > 0) { + uint64_t path_beginning = u.field_data[UF_HOST].off + hostAndPort().length(); + path_and_query_params_ = absl::string_view(absolute_url.data() + path_beginning, path_len); + } else if (!is_connect) { + path_and_query_params_ = absl::string_view(kDefaultPath, 1); + } + return true; +} + void Utility::appendXff(RequestHeaderMap& headers, const Network::Address::Instance& remote_address) { if (remote_address.type() != Network::Address::Type::Ip) { @@ -318,7 +357,7 @@ std::string Utility::parseCookieValue(const HeaderMap& headers, const std::strin if (header.key() == Http::Headers::get().Cookie.get()) { // Split the cookie header into individual cookies. - for (const auto s : StringUtil::splitToken(header.value().getStringView(), ";")) { + for (const auto& s : StringUtil::splitToken(header.value().getStringView(), ";")) { // Find the key part of the cookie (i.e. the name of the cookie). 
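// Illustrative sketch (not from this patch): the cookie parsing loop above in standalone form.
// The Cookie header value is split on ';', leading spaces are skipped, and the value is returned
// when the name before '=' matches. Quote stripping and multi-header handling are omitted here.
#include <iostream>
#include <string>
#include <string_view>

std::string parseCookieValue(std::string_view cookie_header, std::string_view key) {
  size_t start = 0;
  while (start <= cookie_header.size()) {
    size_t end = cookie_header.find(';', start);
    if (end == std::string_view::npos) {
      end = cookie_header.size();
    }
    const std::string_view token = cookie_header.substr(start, end - start);
    // Find the key part of the cookie (i.e. the name of the cookie).
    const size_t first_non_space = token.find_first_not_of(' ');
    const size_t equals_index = token.find('=');
    if (first_non_space != std::string_view::npos && equals_index != std::string_view::npos &&
        equals_index > first_non_space) {
      const std::string_view name = token.substr(first_non_space, equals_index - first_non_space);
      if (name == key) {
        return std::string(token.substr(equals_index + 1));
      }
    }
    start = end + 1;
  }
  return "";
}

int main() {
  std::cout << parseCookieValue("session=abc123; theme=dark", "theme") << "\n"; // prints: dark
  return 0;
}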
size_t first_non_space = s.find_first_not_of(" "); size_t equals_index = s.find('='); @@ -435,11 +474,16 @@ Utility::parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& void Utility::sendLocalReply(const bool& is_reset, StreamDecoderFilterCallbacks& callbacks, const LocalReplyData& local_reply_data) { + absl::string_view details; + if (callbacks.streamInfo().responseCodeDetails().has_value()) { + details = callbacks.streamInfo().responseCodeDetails().value(); + }; + sendLocalReply( is_reset, Utility::EncodeFunctions{nullptr, nullptr, [&](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - callbacks.encodeHeaders(std::move(headers), end_stream); + callbacks.encodeHeaders(std::move(headers), end_stream, details); }, [&](Buffer::Instance& data, bool end_stream) -> void { callbacks.encodeData(data, end_stream); @@ -601,7 +645,7 @@ bool Utility::sanitizeConnectionHeader(Http::RequestHeaderMap& headers) { bool keep_header = false; // Determine whether the nominated header contains invalid values - const HeaderEntry* nominated_header = nullptr; + HeaderMap::GetResult nominated_header; if (lcs_header_to_remove == Http::Headers::get().Connection) { // Remove the connection header from the nominated tokens if it's self nominated @@ -628,8 +672,10 @@ bool Utility::sanitizeConnectionHeader(Http::RequestHeaderMap& headers) { nominated_header = headers.get(lcs_header_to_remove); } - if (nominated_header) { - auto nominated_header_value_sv = nominated_header->value().getStringView(); + if (!nominated_header.empty()) { + // NOTE: The TE header is an inline header, so by definition if we operate on it there can + // only be a single value. In all other cases we remove the nominated header. + auto nominated_header_value_sv = nominated_header[0]->value().getStringView(); const bool is_te_header = (lcs_header_to_remove == Http::Headers::get().TE); @@ -640,6 +686,7 @@ bool Utility::sanitizeConnectionHeader(Http::RequestHeaderMap& headers) { } if (is_te_header) { + ASSERT(nominated_header.size() == 1); for (const auto& header_value : StringUtil::splitToken(nominated_header_value_sv, ",", false)) { @@ -725,6 +772,14 @@ void Utility::extractHostPathFromUri(const absl::string_view& uri, absl::string_ } } +std::string Utility::localPathFromFilePath(const absl::string_view& file_path) { + if (file_path.size() >= 3 && file_path[1] == ':' && file_path[2] == '/' && + std::isalpha(file_path[0])) { + return std::string(file_path); + } + return absl::StrCat("/", file_path); +} + RequestMessagePtr Utility::prepareHeaders(const envoy::config::core::v3::HttpUri& http_uri) { absl::string_view host, path; extractHostPathFromUri(http_uri.uri(), host, path); @@ -764,6 +819,8 @@ const std::string Utility::resetReasonToString(const Http::StreamResetReason res return "remote reset"; case Http::StreamResetReason::RemoteRefusedStreamReset: return "remote refused stream reset"; + case Http::StreamResetReason::ConnectError: + return "remote error with CONNECT request"; } NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 691960b4f437..d677b097d86c 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -15,7 +15,6 @@ #include "common/http/exception.h" #include "common/http/status.h" -#include "common/json/json_loader.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -126,6 +125,23 @@ initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions namespace Http { namespace Utility { 
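// Illustrative sketch (not from this patch): the new localPathFromFilePath() helper shown above,
// in standalone form. A Windows-style "c:/..." path taken from a file:/// URI is returned
// unchanged; anything else is made absolute by prefixing "/".
#include <cassert>
#include <cctype>
#include <string>
#include <string_view>

std::string localPathFromFilePath(std::string_view file_path) {
  if (file_path.size() >= 3 && file_path[1] == ':' && file_path[2] == '/' &&
      std::isalpha(static_cast<unsigned char>(file_path[0]))) {
    return std::string(file_path);
  }
  return "/" + std::string(file_path);
}

int main() {
  assert(localPathFromFilePath("foo/bar") == "/foo/bar");     // file:///foo/bar
  assert(localPathFromFilePath("c:/foo/bar") == "c:/foo/bar"); // file:///c:/foo/bar
  return 0;
}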
+/** + * Given a fully qualified URL, splits the string_view provided into scheme, + * host and path with query parameters components. + */ +class Url { +public: + bool initialize(absl::string_view absolute_url, bool is_connect_request); + absl::string_view scheme() { return scheme_; } + absl::string_view hostAndPort() { return host_and_port_; } + absl::string_view pathAndQueryParams() { return path_and_query_params_; } + +private: + absl::string_view scheme_; + absl::string_view host_and_port_; + absl::string_view path_and_query_params_; +}; + class PercentEncoding { public: /** @@ -366,6 +382,14 @@ const std::string& getProtocolString(const Protocol p); void extractHostPathFromUri(const absl::string_view& uri, absl::string_view& host, absl::string_view& path); +/** + * Takes a the path component from a file:/// URI and returns a local path for file access. + * @param file_path if we have file:///foo/bar, the file_path is foo/bar. For file:///c:/foo/bar + * it is c:/foo/bar. This is not prefixed with /. + * @return std::string with absolute path for local access, e.g. /foo/bar, c:/foo/bar. + */ +std::string localPathFromFilePath(const absl::string_view& file_path); + /** * Prepare headers for a HttpUri. */ diff --git a/source/common/json/json_loader.cc b/source/common/json/json_loader.cc index 083f7b64f409..bb4ccf808662 100644 --- a/source/common/json/json_loader.cc +++ b/source/common/json/json_loader.cc @@ -25,7 +25,6 @@ #include "rapidjson/writer.h" #include "absl/strings/match.h" -#include "yaml-cpp/yaml.h" namespace Envoy { namespace Json { diff --git a/source/common/local_reply/local_reply.cc b/source/common/local_reply/local_reply.cc index d4549dc1a135..a960ffa4e952 100644 --- a/source/common/local_reply/local_reply.cc +++ b/source/common/local_reply/local_reply.cc @@ -23,10 +23,12 @@ class BodyFormatter { BodyFormatter(const envoy::config::core::v3::SubstitutionFormatString& config) : formatter_(Formatter::SubstitutionFormatStringUtils::fromProtoConfig(config)), content_type_( - config.format_case() == - envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat - ? Http::Headers::get().ContentTypeValues.Json - : Http::Headers::get().ContentTypeValues.Text) {} + !config.content_type().empty() + ? config.content_type() + : config.format_case() == + envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat + ? 
Http::Headers::get().ContentTypeValues.Json + : Http::Headers::get().ContentTypeValues.Text) {} void format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, @@ -40,7 +42,7 @@ class BodyFormatter { private: const Formatter::FormatterPtr formatter_; - const absl::string_view content_type_; + const std::string content_type_; }; using BodyFormatterPtr = std::unique_ptr; @@ -53,7 +55,7 @@ class ResponseMapper { config, Server::Configuration::FactoryContext& context) : filter_(AccessLog::FilterFactory::fromProto(config.filter(), context.runtime(), - context.random(), + context.api().randomGenerator(), context.messageValidationVisitor())) { if (config.has_status_code()) { status_code_ = static_cast(config.status_code().value()); diff --git a/source/common/memory/stats.cc b/source/common/memory/stats.cc index 657f9b6b9b08..5b4e97261a4c 100644 --- a/source/common/memory/stats.cc +++ b/source/common/memory/stats.cc @@ -4,7 +4,52 @@ #include "common/common/logger.h" -#ifdef TCMALLOC +#if defined(TCMALLOC) + +#include "tcmalloc/malloc_extension.h" + +namespace Envoy { +namespace Memory { + +uint64_t Stats::totalCurrentlyAllocated() { + return tcmalloc::MallocExtension::GetNumericProperty("generic.current_allocated_bytes") + .value_or(0); +} + +uint64_t Stats::totalCurrentlyReserved() { + // In Google's tcmalloc the semantics of generic.heap_size has + // changed: it doesn't include unmapped bytes. + return tcmalloc::MallocExtension::GetNumericProperty("generic.heap_size").value_or(0) + + tcmalloc::MallocExtension::GetNumericProperty("tcmalloc.pageheap_unmapped_bytes") + .value_or(0); +} + +uint64_t Stats::totalThreadCacheBytes() { + return tcmalloc::MallocExtension::GetNumericProperty("tcmalloc.current_total_thread_cache_bytes") + .value_or(0); +} + +uint64_t Stats::totalPageHeapFree() { + return tcmalloc::MallocExtension::GetNumericProperty("tcmalloc.pageheap_free_bytes").value_or(0); +} + +uint64_t Stats::totalPageHeapUnmapped() { + return tcmalloc::MallocExtension::GetNumericProperty("tcmalloc.pageheap_unmapped_bytes") + .value_or(0); +} + +uint64_t Stats::totalPhysicalBytes() { + return tcmalloc::MallocExtension::GetProperties()["generic.physical_memory_used"].value; +} + +void Stats::dumpStatsToLog() { + ENVOY_LOG_MISC(debug, "TCMalloc stats:\n{}", tcmalloc::MallocExtension::GetStats()); +} + +} // namespace Memory +} // namespace Envoy + +#elif defined(GPERFTOOLS_TCMALLOC) #include "gperftools/malloc_extension.h" @@ -74,4 +119,4 @@ void Stats::dumpStatsToLog() {} } // namespace Memory } // namespace Envoy -#endif // #ifdef TCMALLOC +#endif // #if defined(TCMALLOC) diff --git a/source/common/memory/utils.cc b/source/common/memory/utils.cc index 2fa957572217..c6ac4f6c5fe8 100644 --- a/source/common/memory/utils.cc +++ b/source/common/memory/utils.cc @@ -3,15 +3,26 @@ #include "common/common/assert.h" #include "common/memory/stats.h" -#ifdef TCMALLOC +#if defined(TCMALLOC) +#include "tcmalloc/malloc_extension.h" +#elif defined(GPERFTOOLS_TCMALLOC) #include "gperftools/malloc_extension.h" #endif namespace Envoy { namespace Memory { +namespace { +#if defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC) +// TODO(zyfjeff): Make max unfreed memory byte configurable +constexpr uint64_t MAX_UNFREED_MEMORY_BYTE = 100 * 1024 * 1024; +#endif +} // namespace + void Utils::releaseFreeMemory() { -#ifdef TCMALLOC +#if defined(TCMALLOC) + tcmalloc::MallocExtension::ReleaseMemoryToSystem(MAX_UNFREED_MEMORY_BYTE); +#elif defined(GPERFTOOLS_TCMALLOC) 
MallocExtension::instance()->ReleaseFreeMemory(); #endif } @@ -23,9 +34,7 @@ void Utils::releaseFreeMemory() { Ref: https://github.com/envoyproxy/envoy/pull/9471#discussion_r363825985 */ void Utils::tryShrinkHeap() { -#ifdef TCMALLOC - // TODO(zyfjeff): Make max unfreed memory byte configurable - static const uint64_t MAX_UNFREED_MEMORY_BYTE = 100 * 1024 * 1024; +#if defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC) auto total_physical_bytes = Stats::totalPhysicalBytes(); auto allocated_size_by_app = Stats::totalCurrentlyAllocated(); diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 88a6151774d2..a622f082b85b 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -91,6 +91,27 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "apple_dns_lib", + srcs = select({ + "//bazel:apple": ["apple_dns_impl.cc"], + "//conditions:default": [], + }), + hdrs = select({ + "//bazel:apple": ["apple_dns_impl.h"], + "//conditions:default": [], + }), + deps = [ + ":address_lib", + ":utility_lib", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:file_event_interface", + "//include/envoy/network:dns_interface", + "//source/common/common:assert_lib", + "//source/common/common:linked_object", + ], +) + envoy_cc_library( name = "dns_lib", srcs = ["dns_impl.cc"], @@ -194,6 +215,7 @@ envoy_cc_library( "//include/envoy/event:dispatcher_interface", "//include/envoy/network:io_handle_interface", "//source/common/api:os_sys_calls_lib", + "//source/common/event:dispatcher_includes", "@envoy_api//envoy/extensions/network/socket_interface/v3:pkg_cc_proto", ], ) @@ -239,6 +261,7 @@ envoy_cc_library( ], deps = [ ":address_lib", + ":default_socket_interface_lib", ":listen_socket_lib", ":udp_default_writer_config", "//include/envoy/event:dispatcher_interface", diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 2fb068d94674..38308a491f5b 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -300,6 +300,17 @@ PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode, bool PipeInstance::operator==(const Instance& rhs) const { return asString() == rhs.asString(); } +EnvoyInternalInstance::EnvoyInternalInstance(const std::string& address_id, + const SocketInterface* sock_interface) + : InstanceBase(Type::EnvoyInternal, sockInterfaceOrDefault(sock_interface)), + internal_address_(address_id) { + friendly_name_ = absl::StrCat("envoy://", address_id); +} + +bool EnvoyInternalInstance::operator==(const Instance& rhs) const { + return rhs.type() == Type::EnvoyInternal && asString() == rhs.asString(); +} + } // namespace Address } // namespace Network } // namespace Envoy diff --git a/source/common/network/address_impl.h b/source/common/network/address_impl.h index 3b3ffd52783f..11aed2301952 100644 --- a/source/common/network/address_impl.h +++ b/source/common/network/address_impl.h @@ -10,6 +10,8 @@ #include "envoy/network/address.h" #include "envoy/network/socket.h" +#include "common/common/assert.h" + namespace Envoy { namespace Network { namespace Address { @@ -84,6 +86,7 @@ class Ipv4Instance : public InstanceBase { bool operator==(const Instance& rhs) const override; const Ip* ip() const override { return &ip_; } const Pipe* pipe() const override { return nullptr; } + const EnvoyInternalAddress* envoyInternalAddress() const override { return nullptr; } const sockaddr* sockAddr() const override { return reinterpret_cast(&ip_.ipv4_.address_); } @@ 
-157,6 +160,7 @@ class Ipv6Instance : public InstanceBase { bool operator==(const Instance& rhs) const override; const Ip* ip() const override { return &ip_; } const Pipe* pipe() const override { return nullptr; } + const EnvoyInternalAddress* envoyInternalAddress() const override { return nullptr; } const sockaddr* sockAddr() const override { return reinterpret_cast(&ip_.ipv6_.address_); } @@ -219,6 +223,7 @@ class PipeInstance : public InstanceBase { bool operator==(const Instance& rhs) const override; const Ip* ip() const override { return nullptr; } const Pipe* pipe() const override { return &pipe_; } + const EnvoyInternalAddress* envoyInternalAddress() const override { return nullptr; } const sockaddr* sockAddr() const override { return reinterpret_cast(&pipe_.address_); } @@ -245,6 +250,33 @@ class PipeInstance : public InstanceBase { PipeHelper pipe_; }; +class EnvoyInternalInstance : public InstanceBase { +public: + /** + * Construct from a string name. + */ + explicit EnvoyInternalInstance(const std::string& address_id, + const SocketInterface* sock_interface = nullptr); + + // Network::Address::Instance + bool operator==(const Instance& rhs) const override; + const Ip* ip() const override { return nullptr; } + const Pipe* pipe() const override { return nullptr; } + const EnvoyInternalAddress* envoyInternalAddress() const override { return &internal_address_; } + // TODO(lambdai): Verify all callers accepts nullptr. + const sockaddr* sockAddr() const override { return nullptr; } + socklen_t sockAddrLen() const override { return 0; } + +private: + struct EnvoyInternalAddressImpl : public EnvoyInternalAddress { + explicit EnvoyInternalAddressImpl(const std::string& address_id) : address_id_(address_id) {} + ~EnvoyInternalAddressImpl() override = default; + const std::string& addressId() const override { return address_id_; } + const std::string address_id_; + }; + EnvoyInternalAddressImpl internal_address_; +}; + } // namespace Address } // namespace Network } // namespace Envoy diff --git a/source/common/network/apple_dns_impl.cc b/source/common/network/apple_dns_impl.cc new file mode 100644 index 000000000000..05e430076056 --- /dev/null +++ b/source/common/network/apple_dns_impl.cc @@ -0,0 +1,325 @@ +#include "common/network/apple_dns_impl.h" + +#include + +#include +#include +#include +#include +#include + +#include "envoy/common/platform.h" + +#include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/network/address_impl.h" +#include "common/network/utility.h" + +#include "absl/strings/str_join.h" + +namespace Envoy { +namespace Network { + +AppleDnsResolverImpl::AppleDnsResolverImpl(Event::Dispatcher& dispatcher) + : dispatcher_(dispatcher) { + ENVOY_LOG(debug, "Constructing DNS resolver"); + initializeMainSdRef(); +} + +AppleDnsResolverImpl::~AppleDnsResolverImpl() { + ENVOY_LOG(debug, "Destructing DNS resolver"); + deallocateMainSdRef(); +} + +void AppleDnsResolverImpl::deallocateMainSdRef() { + ENVOY_LOG(debug, "DNSServiceRefDeallocate main sd ref"); + // dns_sd.h says: + // If the reference's underlying socket is used in a run loop or select() call, it should + // be removed BEFORE DNSServiceRefDeallocate() is called, as this function closes the + // reference's socket. + sd_ref_event_.reset(); + DNSServiceRefDeallocate(main_sd_ref_); +} + +void AppleDnsResolverImpl::initializeMainSdRef() { + // This implementation uses a shared connection for three main reasons: + // 1. 
Efficiency of concurrent resolutions by sharing the same underlying UDS to the DNS + // server. + // 2. An error on a connection to the DNS server is good indication that other connections, + // even if not shared, would not succeed. So it is better to share one connection and + // promptly cancel all outstanding queries, rather than individually wait for all + // connections to error out. + // 3. It follows the precedent set in dns_impl with the c-ares library, for consistency of + // style, performance, and expectations between the two implementations. + // However, using a shared connection brings some complexities detailed in the inline comments + // for kDNSServiceFlagsShareConnection in dns_sd.h, and copied (and edited) in this implementation + // where relevant. + auto error = DNSServiceCreateConnection(&main_sd_ref_); + RELEASE_ASSERT(!error, "error in DNSServiceCreateConnection"); + + auto fd = DNSServiceRefSockFD(main_sd_ref_); + RELEASE_ASSERT(fd != -1, "error in DNSServiceRefSockFD"); + ENVOY_LOG(debug, "DNS resolver has fd={}", fd); + + sd_ref_event_ = dispatcher_.createFileEvent( + fd, + // note: Event::FileTriggerType::Level is used here to closely resemble the c-ares + // implementation in dns_impl.cc. + [this](uint32_t events) { onEventCallback(events); }, Event::FileTriggerType::Level, + Event::FileReadyType::Read); + sd_ref_event_->setEnabled(Event::FileReadyType::Read); +} + +void AppleDnsResolverImpl::onEventCallback(uint32_t events) { + ENVOY_LOG(debug, "DNS resolver file event"); + ASSERT(events & Event::FileReadyType::Read); + DNSServiceProcessResult(main_sd_ref_); +} + +ActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name, + DnsLookupFamily dns_lookup_family, + ResolveCb callback) { + ENVOY_LOG(debug, "DNS resolver resolve={}", dns_name); + std::unique_ptr pending_resolution( + new PendingResolution(*this, callback, dispatcher_, main_sd_ref_, dns_name)); + + DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); + if (error != kDNSServiceErr_NoError) { + ENVOY_LOG(warn, "DNS resolver error in dnsServiceGetAddrInfo for {}", dns_name); + return nullptr; + } + + // If the query was synchronously resolved, there is no need to return the query. + if (pending_resolution->synchronously_completed_) { + return nullptr; + } + + pending_resolution->owned_ = true; + return pending_resolution.release(); +} + +void AppleDnsResolverImpl::addPendingQuery(PendingResolution* query) { + ASSERT(queries_with_pending_cb_.count(query) == 0); + queries_with_pending_cb_.insert(query); +} + +void AppleDnsResolverImpl::removePendingQuery(PendingResolution* query) { + auto erased = queries_with_pending_cb_.erase(query); + ASSERT(erased == 1); +} + +void AppleDnsResolverImpl::flushPendingQueries(const bool with_error) { + ENVOY_LOG(debug, "DNS Resolver flushing {} queries", queries_with_pending_cb_.size()); + for (std::set::iterator it = queries_with_pending_cb_.begin(); + it != queries_with_pending_cb_.end(); ++it) { + auto query = *it; + try { + ASSERT(query->pending_cb_); + query->callback_(query->pending_cb_->status_, std::move(query->pending_cb_->responses_)); + } catch (const std::exception& e) { + ENVOY_LOG(warn, "std::exception in DNSService callback: {}", e.what()); + throw EnvoyException(e.what()); + } catch (...) 
{ + ENVOY_LOG(warn, "Unknown exception in DNSService callback"); + throw EnvoyException("unknown"); + } + + if (query->owned_) { + ENVOY_LOG(debug, "Resolution for {} completed (async)", query->dns_name_); + delete *it; + } else { + ENVOY_LOG(debug, "Resolution for {} completed (synchronously)", query->dns_name_); + query->synchronously_completed_ = true; + } + } + + // Purge the contents so no one tries to delete them again. + queries_with_pending_cb_.clear(); + + if (with_error) { + // The main sd ref is destroyed here because a callback with an error is good indication that + // the connection to the DNS server is faulty and needs to be torn down. + // + // Deallocation of the MainSdRef __has__ to happen __after__ flushing queries. Flushing queries + // de-allocates individual refs, so deallocating the main ref ahead would cause deallocation of + // invalid individual refs per dns_sd.h + deallocateMainSdRef(); + initializeMainSdRef(); + } +} + +AppleDnsResolverImpl::PendingResolution::~PendingResolution() { + ENVOY_LOG(debug, "Destroying PendingResolution for {}", dns_name_); + DNSServiceRefDeallocate(individual_sd_ref_); +} + +void AppleDnsResolverImpl::PendingResolution::cancel() { + ENVOY_LOG(debug, "Cancelling PendingResolution for {}", dns_name_); + ASSERT(owned_); + if (pending_cb_) { + /* (taken and edited from dns_sd.h) + * Canceling operations and kDNSServiceFlagsMoreComing + * Whenever you cancel any operation for which you had deferred [resolution] + * because of a kDNSServiceFlagsMoreComing flag, you should [flush]. This is because, after + * cancelling the operation, you can no longer wait for a callback *without* MoreComing set, to + * tell you [to flush] (the operation has been canceled, so there will be no more callbacks). + * + * [FURTHER] An implication of the collective + * kDNSServiceFlagsMoreComing flag for shared connections is that this + * guideline applies more broadly -- any time you cancel an operation on + * a shared connection, you should perform all deferred updates for all + * operations sharing that connection. This is because the MoreComing flag + * might have been referring to events coming for the operation you canceled, + * which will now not be coming because the operation has been canceled. + */ + // First, get rid of the current query, because if it is canceled, its callback should not be + // executed during the subsequent flush. + parent_.removePendingQuery(this); + // Then, flush all other queries. + parent_.flushPendingQueries(false /* with_error */); + } + // Because the query is self-owned, delete now. + delete this; +} + +void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( + DNSServiceFlags flags, uint32_t interface_index, DNSServiceErrorType error_code, + const char* hostname, const struct sockaddr* address, uint32_t ttl) { + ENVOY_LOG(debug, + "DNS for {} resolved with: flags={}[MoreComing={}, Add={}], interface_index={}, " + "error_code={}, hostname={}", + dns_name_, flags, flags & kDNSServiceFlagsMoreComing ? "yes" : "no", + flags & kDNSServiceFlagsAdd ? "yes" : "no", interface_index, error_code, hostname); + ASSERT(interface_index == 0); + + // Generic error handling. + if (error_code != kDNSServiceErr_NoError) { + // TODO(junr03): consider creating stats for known error types (timeout, refused connection, + // etc.). Currently a bit challenging because there is no scope access wired through. 
Current + // query gets a failure status + if (!pending_cb_) { + ENVOY_LOG(warn, "[Error path] Adding to queries pending callback"); + pending_cb_ = {ResolutionStatus::Failure, {}}; + parent_.addPendingQuery(this); + } else { + ENVOY_LOG(warn, "[Error path] Changing status for query already pending flush"); + pending_cb_->status_ = ResolutionStatus::Failure; + } + + ENVOY_LOG(warn, "[Error path] DNS Resolver flushing queries pending callback"); + parent_.flushPendingQueries(true /* with_error */); + // Note: Nothing can follow this call to flushPendingQueries due to deletion of this + // object upon resolution. + return; + } + + // Only add this address to the list if kDNSServiceFlagsAdd is set. Callback targets are purely + // additive. + if (flags & kDNSServiceFlagsAdd) { + auto dns_response = buildDnsResponse(address, ttl); + ENVOY_LOG(debug, "Address to add address={}, ttl={}", + dns_response.address_->ip()->addressAsString(), ttl); + + if (!pending_cb_) { + ENVOY_LOG(debug, "Adding to queries pending callback"); + pending_cb_ = {ResolutionStatus::Success, {dns_response}}; + parent_.addPendingQuery(this); + } else { + ENVOY_LOG(debug, "New address for query already pending flush"); + pending_cb_->responses_.push_back(dns_response); + } + } + + if (!(flags & kDNSServiceFlagsMoreComing)) { + /* (taken and edited from dns_sd.h) + * Collective kDNSServiceFlagsMoreComing flag: + * When [DNSServiceGetAddrInfoReply] are invoked using a shared DNSServiceRef, the + * kDNSServiceFlagsMoreComing flag applies collectively to *all* active + * operations sharing the same [main_sd_ref]. If the MoreComing flag is + * set it means that there are more results queued on this parent DNSServiceRef, + * but not necessarily more results for this particular callback function. + * The implication of this for client programmers is that when a callback + * is invoked with the MoreComing flag set, the code should update its + * internal data structures with the new result (as is done above when calling + * parent_.addPendingQuery(this))...Then, later when a callback is eventually invoked with the + * MoreComing flag not set, the code should update *all* [pending queries] related to that + * shared parent DNSServiceRef that need updating (i.e that have had DNSServiceGetAddrInfoReply + * called on them since the last flush), not just the [queries] related to the particular + * callback that happened to be the last one to be invoked. + */ + ENVOY_LOG(debug, "DNS Resolver flushing queries pending callback"); + parent_.flushPendingQueries(false /* with_error */); + // Note: Nothing can follow this call to flushPendingQueries due to deletion of this + // object upon resolution. + return; + } +} + +DNSServiceErrorType +AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo(DnsLookupFamily dns_lookup_family) { + DNSServiceProtocol protocol; + switch (dns_lookup_family) { + case DnsLookupFamily::V4Only: + protocol = kDNSServiceProtocol_IPv4; + break; + case DnsLookupFamily::V6Only: + protocol = kDNSServiceProtocol_IPv6; + break; + case DnsLookupFamily::Auto: + protocol = kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6; + break; + } + + // TODO: explore caching: there are caching flags in the dns_sd.h flags, allow expired answers + // from the cache? + // TODO: explore validation via DNSSEC? 
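The shared-connection behavior quoted from dns_sd.h is easier to follow in isolation than inside the resolver. The following is a minimal standalone sketch of the pattern the call just below relies on, using only dns_sd.h and select() with error handling elided; it is illustrative and not Envoy code:

#include <dns_sd.h>
#include <sys/select.h>
#include <cstdio>

static void onReply(DNSServiceRef, DNSServiceFlags flags, uint32_t, DNSServiceErrorType error,
                    const char* hostname, const struct sockaddr*, uint32_t ttl, void*) {
  if (error == kDNSServiceErr_NoError) {
    std::printf("resolved %s ttl=%u more_coming=%d\n", hostname, ttl,
                (flags & kDNSServiceFlagsMoreComing) != 0);
  }
}

int main() {
  DNSServiceRef main_ref;
  if (DNSServiceCreateConnection(&main_ref) != kDNSServiceErr_NoError) {
    return 1;
  }
  // Copy the shared ref; DNSServiceGetAddrInfo turns the copy into the per-query ref.
  DNSServiceRef query_ref = main_ref;
  if (DNSServiceGetAddrInfo(&query_ref, kDNSServiceFlagsShareConnection, 0,
                            kDNSServiceProtocol_IPv4, "example.com", onReply,
                            nullptr) != kDNSServiceErr_NoError) {
    return 1;
  }
  // Every query sharing main_ref is serviced through this one file descriptor.
  const int fd = DNSServiceRefSockFD(main_ref);
  fd_set read_fds;
  FD_ZERO(&read_fds);
  FD_SET(fd, &read_fds);
  if (select(fd + 1, &read_fds, nullptr, nullptr, nullptr) > 0) {
    DNSServiceProcessResult(main_ref); // drives onReply for whichever queries are ready
  }
  DNSServiceRefDeallocate(query_ref); // per-query refs go first
  DNSServiceRefDeallocate(main_ref);  // then the shared connection
  return 0;
}

The key points are that the copied ref passed to DNSServiceGetAddrInfo becomes the per-query ref, while all I/O is still driven through the single fd of the shared connection.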
+ return DNSServiceGetAddrInfo( + &individual_sd_ref_, kDNSServiceFlagsShareConnection | kDNSServiceFlagsTimeout, 0, protocol, + dns_name_.c_str(), + /* + * About Thread Safety (taken from inline documentation there): + * The dns_sd.h API does not presuppose any particular threading model, and consequently + * does no locking internally (which would require linking with a specific threading library). + * If the client concurrently, from multiple threads (or contexts), calls API routines using + * the same DNSServiceRef, it is the client's responsibility to provide mutual exclusion for + * that DNSServiceRef. + */ + + // Therefore, much like the c-ares implementation All calls and callbacks to the API need to + // happen on the thread that owns the creating dispatcher. This is the case as callbacks are + // driven by processing bytes in onEventCallback which run on the passed in dispatcher's event + // loop. + [](DNSServiceRef, DNSServiceFlags flags, uint32_t interface_index, + DNSServiceErrorType error_code, const char* hostname, const struct sockaddr* address, + uint32_t ttl, void* context) { + static_cast(context)->onDNSServiceGetAddrInfoReply( + flags, interface_index, error_code, hostname, address, ttl); + }, + this); +} + +DnsResponse +AppleDnsResolverImpl::PendingResolution::buildDnsResponse(const struct sockaddr* address, + uint32_t ttl) { + switch (address->sa_family) { + case AF_INET: + sockaddr_in address_in; + memset(&address_in, 0, sizeof(address_in)); + address_in.sin_family = AF_INET; + address_in.sin_port = 0; + address_in.sin_addr = reinterpret_cast(address)->sin_addr; + return {std::make_shared(&address_in), std::chrono::seconds(ttl)}; + case AF_INET6: + sockaddr_in6 address_in6; + memset(&address_in6, 0, sizeof(address_in6)); + address_in6.sin6_family = AF_INET6; + address_in6.sin6_port = 0; + address_in6.sin6_addr = reinterpret_cast(address)->sin6_addr; + return {std::make_shared(address_in6), std::chrono::seconds(ttl)}; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/apple_dns_impl.h b/source/common/network/apple_dns_impl.h new file mode 100644 index 000000000000..ff805ce796c4 --- /dev/null +++ b/source/common/network/apple_dns_impl.h @@ -0,0 +1,108 @@ +#pragma once + +#include + +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/event/dispatcher.h" +#include "envoy/event/file_event.h" +#include "envoy/network/dns.h" + +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/common/utility.h" + +#include "absl/container/node_hash_map.h" + +namespace Envoy { +namespace Network { + +/** + * Implementation of DnsResolver that uses Apple dns_sd.h APIs. All calls and callbacks are assumed + * to happen on the thread that owns the creating dispatcher. 
+ */ +class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable { +public: + AppleDnsResolverImpl(Event::Dispatcher& dispatcher); + ~AppleDnsResolverImpl() override; + + // Network::DnsResolver + ActiveDnsQuery* resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family, + ResolveCb callback) override; + +private: + struct PendingResolution : public ActiveDnsQuery { + PendingResolution(AppleDnsResolverImpl& parent, ResolveCb callback, + Event::Dispatcher& dispatcher, DNSServiceRef sd_ref, + const std::string& dns_name) + : parent_(parent), callback_(callback), dispatcher_(dispatcher), + /* (taken and edited from dns_sd.h): + * For efficiency, clients that perform many concurrent operations may want to use a + * single Unix Domain Socket connection with the background daemon, instead of having a + * separate connection for each independent operation. To use this mode, clients first + * call DNSServiceCreateConnection(&SharedRef) to initialize the main DNSServiceRef. + * For each subsequent operation that is to share that same connection, the client copies + * the SharedRef, and then passes the address of that copy, setting the ShareConnection + * flag to tell the library that this DNSServiceRef is not a typical uninitialized + * DNSServiceRef; it's a copy of an existing DNSServiceRef whose connection information + * should be reused. + */ + individual_sd_ref_(sd_ref), dns_name_(dns_name) {} + ~PendingResolution(); + + // Network::ActiveDnsQuery + void cancel() override; + + static DnsResponse buildDnsResponse(const struct sockaddr* address, uint32_t ttl); + // Wrapper for the API call. + DNSServiceErrorType dnsServiceGetAddrInfo(DnsLookupFamily dns_lookup_family); + // Wrapper for the API callback. + void onDNSServiceGetAddrInfoReply(DNSServiceFlags flags, uint32_t interface_index, + DNSServiceErrorType error_code, const char* hostname, + const struct sockaddr* address, uint32_t ttl); + + // Small wrapping struct to accumulate addresses from firings of the + // onDNSServiceGetAddrInfoReply callback. + struct FinalResponse { + ResolutionStatus status_; + std::list responses_; + }; + + AppleDnsResolverImpl& parent_; + // Caller supplied callback to invoke on query completion or error. + const ResolveCb callback_; + // Dispatcher to post any callback_ exceptions to. + Event::Dispatcher& dispatcher_; + DNSServiceRef individual_sd_ref_; + const std::string dns_name_; + bool synchronously_completed_{}; + bool owned_{}; + // DNSServiceGetAddrInfo fires one callback DNSServiceGetAddrInfoReply callback per IP address, + // and informs via flags if more IP addresses are incoming. Therefore, these addresses need to + // be accumulated before firing callback_. + absl::optional pending_cb_{}; + }; + + void initializeMainSdRef(); + void deallocateMainSdRef(); + void onEventCallback(uint32_t events); + void addPendingQuery(PendingResolution* query); + void removePendingQuery(PendingResolution* query); + void flushPendingQueries(const bool with_error); + + Event::Dispatcher& dispatcher_; + DNSServiceRef main_sd_ref_; + Event::FileEventPtr sd_ref_event_; + // When using a shared sd ref via DNSServiceCreateConnection, the DNSServiceGetAddrInfoReply + // callback with the kDNSServiceFlagsMoreComing flag might refer to addresses for various + // PendingResolutions. 
Therefore, the resolver needs to have a container of queries pending + // calling their own callback_s until a DNSServiceGetAddrInfoReply is called with + // kDNSServiceFlagsMoreComing not set or an error status is received in + // DNSServiceGetAddrInfoReply. + std::set queries_with_pending_cb_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 13dd6ee80179..2804bad755ba 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -9,6 +9,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/event/timer.h" #include "envoy/network/filter.h" +#include "envoy/network/socket.h" #include "common/common/assert.h" #include "common/common/empty_string.h" @@ -257,7 +258,8 @@ void ConnectionImpl::noDelay(bool enable) { } #endif - RELEASE_ASSERT(result.rc_ == 0, ""); + RELEASE_ASSERT(result.rc_ == 0, fmt::format("Failed to set TCP_NODELAY with error {}, {}", + result.errno_, errorDetails(result.errno_))); } void ConnectionImpl::onRead(uint64_t read_buffer_size) { @@ -694,6 +696,10 @@ absl::string_view ConnectionImpl::transportFailureReason() const { return transport_socket_->failureReason(); } +absl::optional ConnectionImpl::lastRoundTripTime() const { + return socket_->lastRoundTripTime(); +}; + void ConnectionImpl::flushWriteBuffer() { if (state() == State::Open && write_buffer_->length() > 0) { onWriteReady(); diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 17ebe609a263..e28e05e9d182 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -90,6 +90,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback StreamInfo::StreamInfo& streamInfo() override { return stream_info_; } const StreamInfo::StreamInfo& streamInfo() const override { return stream_info_; } absl::string_view transportFailureReason() const override; + absl::optional lastRoundTripTime() const override; // Network::FilterManagerConnection void rawWrite(Buffer::Instance& data, bool end_stream) override; diff --git a/source/common/network/filter_impl.h b/source/common/network/filter_impl.h index fb5d35024c85..1d09d7a9ee93 100644 --- a/source/common/network/filter_impl.h +++ b/source/common/network/filter_impl.h @@ -14,14 +14,5 @@ class ReadFilterBaseImpl : public ReadFilter { Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; } }; -/** - * Implementation of Network::Filter that discards read callbacks. 
- */ -class FilterBaseImpl : public Filter { -public: - void initializeReadFilterCallbacks(ReadFilterCallbacks&) override {} - Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; } -}; - } // namespace Network } // namespace Envoy diff --git a/source/common/network/filter_matcher.cc b/source/common/network/filter_matcher.cc index 7b2831b8a55e..3865d3e05d99 100644 --- a/source/common/network/filter_matcher.cc +++ b/source/common/network/filter_matcher.cc @@ -14,19 +14,15 @@ ListenerFilterMatcherPtr ListenerFilterMatcherBuilder::buildListenerFilterMatche switch (match_config.rule_case()) { case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kAnyMatch: return std::make_unique(); - case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kNotMatch: { + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kNotMatch: return std::make_unique(match_config.not_match()); - } - case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kAndMatch: { + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kAndMatch: return std::make_unique(match_config.and_match().rules()); - } - case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kOrMatch: { + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kOrMatch: return std::make_unique(match_config.or_match().rules()); - } case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase:: - kDestinationPortRange: { + kDestinationPortRange: return std::make_unique(match_config.destination_port_range()); - } default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/network/io_socket_error_impl.cc b/source/common/network/io_socket_error_impl.cc index c1d3c13d78a0..fcf1f1efcf42 100644 --- a/source/common/network/io_socket_error_impl.cc +++ b/source/common/network/io_socket_error_impl.cc @@ -46,5 +46,15 @@ void IoSocketError::deleteIoError(Api::IoError* err) { } } +inline IoSocketError* getIoSocketInvalidAddressInstance() { + static auto* instance = new IoSocketError(SOCKET_ERROR_NOT_SUP); + return instance; +} + +Api::IoCallUint64Result IoSocketError::ioResultSocketInvalidAddress() { + return Api::IoCallUint64Result( + 0, Api::IoErrorPtr(getIoSocketInvalidAddressInstance(), [](IoError*) {})); +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/io_socket_error_impl.h b/source/common/network/io_socket_error_impl.h index aa8f362dc8ca..50d08b55f26a 100644 --- a/source/common/network/io_socket_error_impl.h +++ b/source/common/network/io_socket_error_impl.h @@ -21,6 +21,11 @@ class IoSocketError : public Api::IoError { // deleter deleteIoError() below to avoid deallocating memory for this error. static IoSocketError* getIoSocketEagainInstance(); + // This error is introduced when Envoy create socket for unsupported address. It is either a bug, + // or this Envoy instance received config which is not yet supported. This should not be fatal + // error. + static Api::IoCallUint64Result ioResultSocketInvalidAddress(); + // Deallocate memory only if the error is not Again. 
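The ioResultSocketInvalidAddress() helper above leans on a small idiom worth spelling out: a process-lifetime error object handed out through a smart pointer whose deleter intentionally does nothing. A stripped-down sketch of that idiom follows; the types and names here are illustrative stand-ins, not Envoy's:

#include <functional>
#include <memory>

struct Error {
  int code_;
};
using ErrorPtr = std::unique_ptr<Error, std::function<void(Error*)>>;

ErrorPtr notSupportedError() {
  // One heap allocation for the lifetime of the process; the no-op deleter means callers
  // can treat the result like any other owned error without ever freeing the singleton.
  static Error* instance = new Error{/*code_=*/95};
  return ErrorPtr(instance, [](Error*) {});
}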
static void deleteIoError(Api::IoError* err); diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index f18c73f1af89..52a6ae6e5ddb 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -4,6 +4,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" +#include "common/event/file_event_impl.h" #include "common/network/address_impl.h" #include "absl/container/fixed_array.h" @@ -13,6 +14,42 @@ using Envoy::Api::SysCallIntResult; using Envoy::Api::SysCallSizeResult; namespace Envoy { + +namespace { +/** + * On different platforms the sockaddr struct for unix domain + * sockets is different. We use this function to get the + * length of the platform specific struct. + */ +constexpr socklen_t udsAddressLength() { +#if defined(__APPLE__) + return sizeof(sockaddr); +#elif defined(WIN32) + return sizeof(sockaddr_un); +#else + return sizeof(sa_family_t); +#endif +} + +constexpr int messageTypeContainsIP() { +#ifdef IP_RECVDSTADDR + return IP_RECVDSTADDR; +#else + return IP_PKTINFO; +#endif +} + +in_addr addressFromMessage(const cmsghdr& cmsg) { +#ifdef IP_RECVDSTADDR + return *reinterpret_cast(CMSG_DATA(&cmsg)); +#else + auto info = reinterpret_cast(CMSG_DATA(&cmsg)); + return info->ipi_addr; +#endif +} + +} // namespace + namespace Network { IoSocketHandleImpl::~IoSocketHandleImpl() { @@ -47,6 +84,24 @@ Api::IoCallUint64Result IoSocketHandleImpl::readv(uint64_t max_length, Buffer::R fd_, iov.begin(), static_cast(num_slices_to_read))); } +Api::IoCallUint64Result IoSocketHandleImpl::read(Buffer::Instance& buffer, uint64_t max_length) { + if (max_length == 0) { + return Api::ioCallUint64ResultNoError(); + } + constexpr uint64_t MaxSlices = 2; + Buffer::RawSlice slices[MaxSlices]; + const uint64_t num_slices = buffer.reserve(max_length, slices, MaxSlices); + Api::IoCallUint64Result result = readv(max_length, slices, num_slices); + uint64_t bytes_to_commit = result.ok() ? result.rc_ : 0; + ASSERT(bytes_to_commit <= max_length); + for (uint64_t i = 0; i < num_slices; i++) { + slices[i].len_ = std::min(slices[i].len_, static_cast(bytes_to_commit)); + bytes_to_commit -= slices[i].len_; + } + buffer.commit(slices, num_slices); + return result; +} + Api::IoCallUint64Result IoSocketHandleImpl::writev(const Buffer::RawSlice* slices, uint64_t num_slice) { absl::FixedArray iov(num_slice); @@ -65,13 +120,26 @@ Api::IoCallUint64Result IoSocketHandleImpl::writev(const Buffer::RawSlice* slice Api::OsSysCallsSingleton::get().writev(fd_, iov.begin(), num_slices_to_write)); } +Api::IoCallUint64Result IoSocketHandleImpl::write(Buffer::Instance& buffer) { + constexpr uint64_t MaxSlices = 16; + Buffer::RawSliceVector slices = buffer.getRawSlices(MaxSlices); + Api::IoCallUint64Result result = writev(slices.begin(), slices.size()); + if (result.ok() && result.rc_ > 0) { + buffer.drain(static_cast(result.rc_)); + } + return result; +} + Api::IoCallUint64Result IoSocketHandleImpl::sendmsg(const Buffer::RawSlice* slices, uint64_t num_slice, int flags, const Address::Ip* self_ip, const Address::Instance& peer_address) { const auto* address_base = dynamic_cast(&peer_address); sockaddr* sock_addr = const_cast(address_base->sockAddr()); - + if (sock_addr == nullptr) { + // Unlikely to happen unless the wrong peer address is passed. 
+ return IoSocketError::ioResultSocketInvalidAddress(); + } absl::FixedArray iov(num_slice); uint64_t num_slices_to_write = 0; for (uint64_t i = 0; i < num_slice; i++) { @@ -171,37 +239,25 @@ Address::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg ipv6_addr->sin6_port = htons(self_port); return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in6), fd); } -#ifndef IP_RECVDSTADDR - if (cmsg.cmsg_type == IP_PKTINFO) { - auto info = reinterpret_cast(CMSG_DATA(&cmsg)); -#else - if (cmsg.cmsg_type == IP_RECVDSTADDR) { - auto addr = reinterpret_cast(CMSG_DATA(&cmsg)); -#endif + + if (cmsg.cmsg_type == messageTypeContainsIP()) { sockaddr_storage ss; auto ipv4_addr = reinterpret_cast(&ss); memset(ipv4_addr, 0, sizeof(sockaddr_in)); ipv4_addr->sin_family = AF_INET; - ipv4_addr->sin_addr = -#ifndef IP_RECVDSTADDR - info->ipi_addr; -#else - *addr; -#endif + ipv4_addr->sin_addr = addressFromMessage(cmsg); ipv4_addr->sin_port = htons(self_port); return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in), fd); } + return nullptr; } -absl::optional maybeGetPacketsDroppedFromHeader( +absl::optional maybeGetPacketsDroppedFromHeader([[maybe_unused]] const cmsghdr& cmsg) { #ifdef SO_RXQ_OVFL - const cmsghdr& cmsg) { if (cmsg.cmsg_type == SO_RXQ_OVFL) { return *reinterpret_cast(CMSG_DATA(&cmsg)); } -#else - const cmsghdr&) { #endif return absl::nullopt; } @@ -400,7 +456,7 @@ IoHandlePtr IoSocketHandleImpl::accept(struct sockaddr* addr, socklen_t* addrlen return nullptr; } - return std::make_unique(result.rc_, socket_v6only_); + return std::make_unique(result.rc_, socket_v6only_, domain_); } Api::SysCallIntResult IoSocketHandleImpl::connect(Address::InstanceConstSharedPtr address) { @@ -421,20 +477,7 @@ Api::SysCallIntResult IoSocketHandleImpl::setBlocking(bool blocking) { return Api::OsSysCallsSingleton::get().setsocketblocking(fd_, blocking); } -absl::optional IoSocketHandleImpl::domain() { - sockaddr_storage addr; - socklen_t len = sizeof(addr); - Api::SysCallIntResult result; - - result = Api::OsSysCallsSingleton::get().getsockname( - fd_, reinterpret_cast(&addr), &len); - - if (result.rc_ == 0) { - return {addr.ss_family}; - } - - return absl::nullopt; -} +absl::optional IoSocketHandleImpl::domain() { return domain_; } Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { sockaddr_storage ss; @@ -459,12 +502,8 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { throw EnvoyException( fmt::format("getpeername failed for '{}': {}", fd_, errorDetails(result.errno_))); } -#ifdef __APPLE__ - if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) -#else - if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) -#endif - { + + if (ss_len == udsAddressLength() && ss.ss_family == AF_UNIX) { // For Unix domain sockets, can't find out the peer name, but it should match our own // name for the socket (i.e. the path should match, barring any namespace or other // mechanisms to hide things, of which there are many). 
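For readers unfamiliar with how the destination address handled by maybeGetDstAddressFromHeader() reaches user space at all: it arrives as ancillary data on recvmsg(). A rough Linux-only sketch is below; it assumes IP_PKTINFO was already enabled on the socket with setsockopt() and is not Envoy code:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <cstdio>
#include <cstring>

void printPacketDestination(msghdr& msg) {
  for (cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
    if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO) {
      in_pktinfo info;
      std::memcpy(&info, CMSG_DATA(cmsg), sizeof(info)); // copy out; the buffer may be unaligned
      char text[INET_ADDRSTRLEN];
      std::printf("datagram was addressed to %s\n",
                  inet_ntop(AF_INET, &info.ipi_addr, text, sizeof(text)));
    }
  }
}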
@@ -489,5 +528,18 @@ Api::SysCallIntResult IoSocketHandleImpl::shutdown(int how) { return Api::OsSysCallsSingleton::get().shutdown(fd_, how); } +absl::optional IoSocketHandleImpl::lastRoundTripTime() { +#ifdef TCP_INFO + struct tcp_info ti; + socklen_t len = sizeof(ti); + if (!SOCKET_FAILURE( + Api::OsSysCallsSingleton::get().getsockopt(fd_, IPPROTO_TCP, TCP_INFO, &ti, &len).rc_)) { + return std::chrono::milliseconds(ti.tcpi_rtt); + } +#endif + + return {}; +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index e1cacec47d8b..6b811bfe505f 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -13,12 +13,13 @@ namespace Envoy { namespace Network { /** - * IoHandle derivative for sockets + * IoHandle derivative for sockets. */ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable { public: - explicit IoSocketHandleImpl(os_fd_t fd = INVALID_SOCKET, bool socket_v6only = false) - : fd_(fd), socket_v6only_(socket_v6only) {} + explicit IoSocketHandleImpl(os_fd_t fd = INVALID_SOCKET, bool socket_v6only = false, + absl::optional domain = absl::nullopt) + : fd_(fd), socket_v6only_(socket_v6only), domain_(domain) {} // Close underlying socket if close() hasn't been call yet. ~IoSocketHandleImpl() override; @@ -32,9 +33,12 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable lastRoundTripTime() override; protected: // Converts a SysCallSizeResult to IoCallUint64Result. @@ -85,6 +90,7 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable domain_; // The minimum cmsg buffer size to filled in destination address, packets dropped and gso // size when receiving a packet. It is possible for a received packet to contain both IPv4 @@ -92,6 +98,5 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable lastRoundTripTime() override { + return ioHandle().lastRoundTripTime(); + } + protected: Address::InstanceConstSharedPtr remote_address_; const Address::InstanceConstSharedPtr direct_remote_address_; diff --git a/source/common/network/raw_buffer_socket.cc b/source/common/network/raw_buffer_socket.cc index 0aeaa5bc0c1f..c539add2f71a 100644 --- a/source/common/network/raw_buffer_socket.cc +++ b/source/common/network/raw_buffer_socket.cc @@ -19,7 +19,7 @@ IoResult RawBufferSocket::doRead(Buffer::Instance& buffer) { bool end_stream = false; do { // 16K read is arbitrary. TODO(mattklein123) PERF: Tune the read size. 
- Api::IoCallUint64Result result = buffer.read(callbacks_->ioHandle(), 16384); + Api::IoCallUint64Result result = callbacks_->ioHandle().read(buffer, 16384); if (result.ok()) { ENVOY_CONN_LOG(trace, "read returns: {}", callbacks_->connection(), result.rc_); @@ -62,7 +62,7 @@ IoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { action = PostIoAction::KeepOpen; break; } - Api::IoCallUint64Result result = buffer.write(callbacks_->ioHandle()); + Api::IoCallUint64Result result = callbacks_->ioHandle().write(buffer); if (result.ok()) { ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); diff --git a/source/common/network/resolver_impl.cc b/source/common/network/resolver_impl.cc index a037f2dc69de..66554c0d2367 100644 --- a/source/common/network/resolver_impl.cc +++ b/source/common/network/resolver_impl.cc @@ -48,9 +48,18 @@ InstanceConstSharedPtr resolveProtoAddress(const envoy::config::core::v3::Addres case envoy::config::core::v3::Address::AddressCase::kSocketAddress: return resolveProtoSocketAddress(address.socket_address()); case envoy::config::core::v3::Address::AddressCase::kPipe: - return InstanceConstSharedPtr{new PipeInstance(address.pipe().path())}; + return std::make_shared(address.pipe().path(), address.pipe().mode()); + case envoy::config::core::v3::Address::AddressCase::kEnvoyInternalAddress: + switch (address.envoy_internal_address().address_name_specifier_case()) { + case envoy::config::core::v3::EnvoyInternalAddress::AddressNameSpecifierCase:: + kServerListenerName: + return std::make_shared( + address.envoy_internal_address().server_listener_name()); + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } default: - throw EnvoyException("Address must be a socket or pipe: " + address.DebugString()); + throw EnvoyException("Address must be set: " + address.DebugString()); } } diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index aa35e0250516..05191ef4dd9a 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -28,7 +28,6 @@ SocketImpl::SocketImpl(IoHandlePtr&& io_handle, } auto domain = io_handle_->domain(); - // This should never happen in practice but too many tests inject fake fds ... 
if (!domain.has_value()) { return; diff --git a/source/common/network/socket_interface.h b/source/common/network/socket_interface.h index 0bc4f37b0c71..070dec2b3c69 100644 --- a/source/common/network/socket_interface.h +++ b/source/common/network/socket_interface.h @@ -17,9 +17,8 @@ namespace Network { class SocketInterfaceExtension : public Server::BootstrapExtension { public: SocketInterfaceExtension(SocketInterface& sock_interface) : sock_interface_(sock_interface) {} - SocketInterface& socketInterface() { return sock_interface_; } -private: +protected: SocketInterface& sock_interface_; }; @@ -53,4 +52,4 @@ using SocketInterfaceSingleton = InjectableSingleton; using SocketInterfaceLoader = ScopedInjectableLoader; } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index 9afbe9d331e7..8a3802e36769 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -4,14 +4,16 @@ #include "envoy/extensions/network/socket_interface/v3/default_socket_interface.pb.h" #include "common/api/os_sys_calls_impl.h" +#include "common/common/assert.h" #include "common/common/utility.h" #include "common/network/io_socket_handle_impl.h" namespace Envoy { namespace Network { -IoHandlePtr SocketInterfaceImpl::makeSocket(int socket_fd, bool socket_v6only) const { - return std::make_unique(socket_fd, socket_v6only); +IoHandlePtr SocketInterfaceImpl::makeSocket(int socket_fd, bool socket_v6only, + absl::optional domain) const { + return std::make_unique(socket_fd, socket_v6only, domain); } IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type addr_type, @@ -36,15 +38,18 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type ASSERT(version == Address::IpVersion::v4); domain = AF_INET; } - } else { - ASSERT(addr_type == Address::Type::Pipe); + } else if (addr_type == Address::Type::Pipe) { domain = AF_UNIX; + } else { + ASSERT(addr_type == Address::Type::EnvoyInternal); + // TODO(lambdai): Add InternalIoSocketHandleImpl to support internal address. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); RELEASE_ASSERT(SOCKET_VALID(result.rc_), fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); - IoHandlePtr io_handle = makeSocket(result.rc_, socket_v6only); + IoHandlePtr io_handle = makeSocket(result.rc_, socket_v6only, domain); #if defined(__APPLE__) || defined(WIN32) // Cannot set SOCK_NONBLOCK as a ::socket flag. 
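The comment above is the portability wrinkle in a nutshell: Linux can request a non-blocking socket atomically at creation time, while macOS (and Windows) have to flip the flag after the fact. A small sketch of the POSIX side of that split, illustrative only:

#include <fcntl.h>
#include <sys/socket.h>

int makeNonBlockingTcpSocket() {
#if defined(SOCK_NONBLOCK)
  // Linux: the flag is applied atomically as part of socket(2).
  return ::socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
#else
  // macOS: create first, then switch the descriptor to non-blocking mode.
  const int fd = ::socket(AF_INET, SOCK_STREAM, 0);
  if (fd >= 0) {
    ::fcntl(fd, F_SETFL, ::fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
  }
  return fd;
#endif
}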
@@ -74,10 +79,6 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, return io_handle; } -IoHandlePtr SocketInterfaceImpl::socket(os_fd_t fd) { - return std::make_unique(fd); -} - bool SocketInterfaceImpl::ipFamilySupported(int domain) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h index 08185150c013..d87ad6a95913 100644 --- a/source/common/network/socket_interface_impl.h +++ b/source/common/network/socket_interface_impl.h @@ -14,7 +14,6 @@ class SocketInterfaceImpl : public SocketInterfaceBase { bool socket_v6only) const override; IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) const override; - IoHandlePtr socket(os_fd_t fd) override; bool ipFamilySupported(int domain) override; // Server::Configuration::BootstrapExtensionFactory @@ -27,10 +26,11 @@ class SocketInterfaceImpl : public SocketInterfaceBase { }; protected: - virtual IoHandlePtr makeSocket(int socket_fd, bool socket_v6only) const; + virtual IoHandlePtr makeSocket(int socket_fd, bool socket_v6only, + absl::optional domain) const; }; DECLARE_FACTORY(SocketInterfaceImpl); } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/network/tcp_listener_impl.cc b/source/common/network/tcp_listener_impl.cc index 5c39ec692f89..91975d6ca5d1 100644 --- a/source/common/network/tcp_listener_impl.cc +++ b/source/common/network/tcp_listener_impl.cc @@ -62,7 +62,11 @@ void TcpListenerImpl::onSocketEvent(short flags) { if (rejectCxOverGlobalLimit()) { // The global connection limit has been reached. 
io_handle->close(); - cb_.onReject(); + cb_.onReject(TcpListenerCallbacks::RejectCause::GlobalCxLimit); + continue; + } else if (random_.bernoulli(reject_fraction_)) { + io_handle->close(); + cb_.onReject(TcpListenerCallbacks::RejectCause::OverloadAction); continue; } @@ -106,9 +110,11 @@ void TcpListenerImpl::setupServerSocket(Event::DispatcherImpl& dispatcher, Socke } } -TcpListenerImpl::TcpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket, - TcpListenerCallbacks& cb, bool bind_to_port, uint32_t backlog_size) - : BaseListenerImpl(dispatcher, std::move(socket)), cb_(cb), backlog_size_(backlog_size) { +TcpListenerImpl::TcpListenerImpl(Event::DispatcherImpl& dispatcher, Random::RandomGenerator& random, + SocketSharedPtr socket, TcpListenerCallbacks& cb, + bool bind_to_port, uint32_t backlog_size) + : BaseListenerImpl(dispatcher, std::move(socket)), cb_(cb), backlog_size_(backlog_size), + random_(random), reject_fraction_(0.0) { if (bind_to_port) { setupServerSocket(dispatcher, *socket_); } @@ -118,5 +124,10 @@ void TcpListenerImpl::enable() { file_event_->setEnabled(Event::FileReadyType::R void TcpListenerImpl::disable() { file_event_->setEnabled(0); } +void TcpListenerImpl::setRejectFraction(const float reject_fraction) { + ASSERT(0 <= reject_fraction && reject_fraction <= 1); + reject_fraction_ = reject_fraction; +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/tcp_listener_impl.h b/source/common/network/tcp_listener_impl.h index 56b6725b36c9..5ecec192abf2 100644 --- a/source/common/network/tcp_listener_impl.h +++ b/source/common/network/tcp_listener_impl.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/runtime/runtime.h" #include "absl/strings/string_view.h" @@ -13,10 +14,12 @@ namespace Network { */ class TcpListenerImpl : public BaseListenerImpl { public: - TcpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket, - TcpListenerCallbacks& cb, bool bind_to_port, uint32_t backlog_size); + TcpListenerImpl(Event::DispatcherImpl& dispatcher, Random::RandomGenerator& random, + SocketSharedPtr socket, TcpListenerCallbacks& cb, bool bind_to_port, + uint32_t backlog_size); void disable() override; void enable() override; + void setRejectFraction(float reject_fraction) override; static const absl::string_view GlobalMaxCxRuntimeKey; @@ -33,7 +36,9 @@ class TcpListenerImpl : public BaseListenerImpl { // rejected/closed. If the accepted socket is to be admitted, false is returned. 
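setRejectFraction() and the random_.bernoulli() check above implement probabilistic load shedding: each accepted socket is dropped with probability reject_fraction_. A self-contained sketch of the same idea, using the standard library instead of Envoy's RandomGenerator:

#include <random>

class AcceptGate {
public:
  // reject_fraction is expected to be in [0, 1]; 0 admits everything, 1 rejects everything.
  void setRejectFraction(float reject_fraction) { reject_fraction_ = reject_fraction; }

  bool shouldReject() {
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
    return dist(rng_) < reject_fraction_;
  }

private:
  std::mt19937 rng_{std::random_device{}()};
  float reject_fraction_{0.0f};
};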
static bool rejectCxOverGlobalLimit(); + Random::RandomGenerator& random_; Event::FileEventPtr file_event_; + float reject_fraction_; }; } // namespace Network diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index d2cbd0feb733..e4f647196b2d 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -93,7 +93,7 @@ void UdpListenerImpl::processPacket(Address::InstanceConstSharedPtr local_addres ASSERT(local_address != nullptr); UdpRecvData recvData{ {std::move(local_address), std::move(peer_address)}, std::move(buffer), receive_time}; - cb_.onData(recvData); + cb_.onData(std::move(recvData)); } void UdpListenerImpl::handleWriteCallback() { @@ -125,5 +125,39 @@ Api::IoCallUint64Result UdpListenerImpl::flush() { return cb_.udpPacketWriter().flush(); } +void UdpListenerImpl::activateRead() { file_event_->activate(Event::FileReadyType::Read); } + +UdpListenerWorkerRouterImpl::UdpListenerWorkerRouterImpl(uint32_t concurrency) + : workers_(concurrency) {} + +void UdpListenerWorkerRouterImpl::registerWorkerForListener(UdpListenerCallbacks& listener) { + absl::WriterMutexLock lock(&mutex_); + + ASSERT(listener.workerIndex() < workers_.size()); + ASSERT(workers_.at(listener.workerIndex()) == nullptr); + workers_.at(listener.workerIndex()) = &listener; +} + +void UdpListenerWorkerRouterImpl::unregisterWorkerForListener(UdpListenerCallbacks& listener) { + absl::WriterMutexLock lock(&mutex_); + + ASSERT(workers_.at(listener.workerIndex()) == &listener); + workers_.at(listener.workerIndex()) = nullptr; +} + +void UdpListenerWorkerRouterImpl::deliver(uint32_t dest_worker_index, UdpRecvData&& data) { + absl::ReaderMutexLock lock(&mutex_); + + ASSERT(dest_worker_index < workers_.size(), + "UdpListenerCallbacks::destination returned out-of-range value"); + auto* worker = workers_[dest_worker_index]; + + // When a listener is being removed, packets could be processed on some workers after the + // listener is removed from other workers, which could result in a nullptr for that worker. 
+ if (worker != nullptr) { + worker->post(std::move(data)); + } +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/udp_listener_impl.h b/source/common/network/udp_listener_impl.h index 67168fb1c7ee..d555649833bc 100644 --- a/source/common/network/udp_listener_impl.h +++ b/source/common/network/udp_listener_impl.h @@ -30,12 +30,14 @@ class UdpListenerImpl : public BaseListenerImpl, // Network::Listener Interface void disable() override; void enable() override; + void setRejectFraction(float) override {} // Network::UdpListener Interface Event::Dispatcher& dispatcher() override; const Address::InstanceConstSharedPtr& localAddress() const override; Api::IoCallUint64Result send(const UdpSendData& data) override; Api::IoCallUint64Result flush() override; + void activateRead() override; void processPacket(Address::InstanceConstSharedPtr local_address, Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer, @@ -61,5 +63,19 @@ class UdpListenerImpl : public BaseListenerImpl, Event::FileEventPtr file_event_; }; +class UdpListenerWorkerRouterImpl : public UdpListenerWorkerRouter { +public: + UdpListenerWorkerRouterImpl(uint32_t concurrency); + + // UdpListenerWorkerRouter + void registerWorkerForListener(UdpListenerCallbacks& listener) override; + void unregisterWorkerForListener(UdpListenerCallbacks& listener) override; + void deliver(uint32_t dest_worker_index, UdpRecvData&& data) override; + +private: + absl::Mutex mutex_; + std::vector workers_ ABSL_GUARDED_BY(mutex_); +}; + } // namespace Network } // namespace Envoy diff --git a/source/common/profiler/profiler.h b/source/common/profiler/profiler.h index fdf4b20ee8f9..057ffda6f271 100644 --- a/source/common/profiler/profiler.h +++ b/source/common/profiler/profiler.h @@ -2,10 +2,10 @@ #include -// Profiling support is provided in the release tcmalloc, but not in the library +// Profiling support is provided in the release tcmalloc of `gperftools`, but not in the library // that supplies the debug tcmalloc. So all the profiling code must be ifdef'd // on PROFILER_AVAILABLE which is dependent on those two settings. 
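As a reminder of what the PROFILER_AVAILABLE guard ultimately gates, the gperftools CPU profiler is driven by a start/stop pair. A rough sketch of typical usage under that macro; the output path and function name are illustrative:

#if defined(GPERFTOOLS_TCMALLOC) && !defined(ENVOY_MEMORY_DEBUG_ENABLED)
#define PROFILER_AVAILABLE
#endif

#ifdef PROFILER_AVAILABLE
#include <gperftools/profiler.h>
#endif

void runProfiledWork() {
#ifdef PROFILER_AVAILABLE
  ProfilerStart("/tmp/cpu.prof"); // begin writing pprof-compatible samples to this file
#endif
  // ... the code being profiled ...
#ifdef PROFILER_AVAILABLE
  ProfilerStop(); // flush samples and close the profile
#endif
}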
-#if defined(TCMALLOC) && !defined(ENVOY_MEMORY_DEBUG_ENABLED) +#if defined(GPERFTOOLS_TCMALLOC) && !defined(ENVOY_MEMORY_DEBUG_ENABLED) #define PROFILER_AVAILABLE #endif diff --git a/source/common/protobuf/BUILD b/source/common/protobuf/BUILD index f505161b810f..015177dcc186 100644 --- a/source/common/protobuf/BUILD +++ b/source/common/protobuf/BUILD @@ -61,6 +61,7 @@ envoy_cc_library( deps = [ ":message_validator_lib", ":protobuf", + ":type_util_lib", ":well_known_lib", "//include/envoy/api:api_interface", "//include/envoy/protobuf:message_validator_interface", @@ -78,6 +79,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "type_util_lib", + srcs = ["type_util.cc"], + hdrs = ["type_util.h"], + deps = [ + "//source/common/protobuf", + "@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto", + ], +) + envoy_cc_library( name = "visitor_lib", srcs = ["visitor.cc"], diff --git a/source/common/protobuf/type_util.cc b/source/common/protobuf/type_util.cc new file mode 100644 index 000000000000..03b5c1f01b34 --- /dev/null +++ b/source/common/protobuf/type_util.cc @@ -0,0 +1,17 @@ +#include "common/protobuf/type_util.h" + +namespace Envoy { + +absl::string_view TypeUtil::typeUrlToDescriptorFullName(absl::string_view type_url) { + const size_t pos = type_url.rfind('/'); + if (pos != absl::string_view::npos) { + type_url = type_url.substr(pos + 1); + } + return type_url; +} + +std::string TypeUtil::descriptorFullNameToTypeUrl(absl::string_view type) { + return "type.googleapis.com/" + std::string(type); +} + +} // namespace Envoy diff --git a/source/common/protobuf/type_util.h b/source/common/protobuf/type_util.h new file mode 100644 index 000000000000..9bcfa0f8c2f6 --- /dev/null +++ b/source/common/protobuf/type_util.h @@ -0,0 +1,17 @@ +#pragma once + +#include "common/protobuf/protobuf.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { + +class TypeUtil { +public: + static absl::string_view typeUrlToDescriptorFullName(absl::string_view type_url); + + static std::string descriptorFullNameToTypeUrl(absl::string_view type); +}; + +} // namespace Envoy diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index a986c6732e2f..f945a458d5de 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -21,6 +21,8 @@ #include "udpa/annotations/sensitive.pb.h" #include "yaml-cpp/yaml.h" +using namespace std::chrono_literals; + namespace Envoy { namespace { @@ -118,9 +120,12 @@ void jsonConvertInternal(const Protobuf::Message& source, enum class MessageVersion { // This is an earlier version of a message, a later one exists. - EARLIER_VERSION, + EarlierVersion, // This is the latest version of a message. - LATEST_VERSION, + LatestVersion, + // Validating to see if the latest version will also be accepted; only apply message validators + // without side effects, validations should be strict. + LatestVersionValidate, }; using MessageXformFn = std::function; @@ -142,7 +147,7 @@ void tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) { Config::ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name()); // If there is no earlier version of a message, just apply f directly. 
if (earlier_version_desc == nullptr) { - f(message, MessageVersion::LATEST_VERSION); + f(message, MessageVersion::LatestVersion); return; } @@ -152,12 +157,21 @@ void tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) { try { // Try apply f with an earlier version of the message, then upgrade the // result. - f(*earlier_message, MessageVersion::EARLIER_VERSION); + f(*earlier_message, MessageVersion::EarlierVersion); + // If we succeed at the earlier version, we ask the counterfactual, would this have worked at a + // later version? If not, this is v2 only and we need to warn. This is a waste of CPU cycles but + // we expect that JSON/YAML fragments will not be in use by any CPU limited use cases. + try { + f(message, MessageVersion::LatestVersionValidate); + } catch (EnvoyException& e) { + MessageUtil::onVersionUpgradeWarn(e.what()); + } + // Now we do the real work of upgrading. Config::VersionConverter::upgrade(*earlier_message, message); } catch (ApiBoostRetryException&) { // If we fail at the earlier version, try f at the current version of the // message. - f(message, MessageVersion::LATEST_VERSION); + f(message, MessageVersion::LatestVersion); } } @@ -261,6 +275,33 @@ void ProtoExceptionUtil::throwProtoValidationException(const std::string& valida throw ProtoValidationException(validation_error, message); } +// TODO(htuch): this is where we will also reject v2 configs by default. +void MessageUtil::onVersionUpgradeWarn(absl::string_view desc) { + const std::string& warning_str = + fmt::format("Configuration does not parse cleanly as v3. v2 configuration is " + "deprecated and will be removed from Envoy at the start of Q1 2021: {}", + desc); + // Always log at trace level. This is useful for tests that don't want to rely on possible + // elision. + ENVOY_LOG_MISC(trace, warning_str); + // Log each distinct message at warn level once every 5s. We use a static map here, which is fine + // as we are always on the main thread. + static auto* last_warned = new absl::flat_hash_map(); + const auto now = t_logclock::now().time_since_epoch().count(); + const auto it = last_warned->find(warning_str); + if (it == last_warned->end() || + (now - it->second) > std::chrono::duration_cast(5s).count()) { + ENVOY_LOG_MISC(warn, warning_str); + (*last_warned)[warning_str] = now; + } + Runtime::Loader* loader = Runtime::LoaderSingleton::getExisting(); + // We only log, and don't bump stats, if we're sufficiently early in server initialization (i.e. + // bootstrap). + if (loader != nullptr) { + loader->countDeprecatedFeatureUse(); + } +} + size_t MessageUtil::hash(const Protobuf::Message& message) { std::string text_format; @@ -307,9 +348,11 @@ void MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& messa // We know it's an unknown field at this point. If we're at the latest // version, then it's definitely an unknown field, otherwise we try to // load again at a later version. 
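The control flow of tryWithApiBoosting() is easy to lose in diff form: parse against the deprecated (earlier) schema first, mechanically upgrade on success, and only fall back to the latest schema when the earlier parse hits an unknown field. An illustrative-only reduction of that flow, with stand-in type names and the newly added counterfactual validation step elided:

#include <functional>
#include <stdexcept>

// Thrown by the loader when a field is unknown at the earlier version and a retry at the
// latest version is worth attempting (stand-in for ApiBoostRetryException).
struct RetryAtLatestVersion : std::runtime_error {
  using std::runtime_error::runtime_error;
};

template <class Message>
void loadWithApiBoosting(Message& latest, Message* earlier,
                         const std::function<void(Message&, bool /*is_latest*/)>& load,
                         const std::function<void(const Message&, Message&)>& upgrade) {
  if (earlier == nullptr) {
    load(latest, true); // no earlier schema exists, parse directly at the latest version
    return;
  }
  try {
    load(*earlier, false);     // try the deprecated schema first
    upgrade(*earlier, latest); // then mechanically upgrade into the latest schema
  } catch (const RetryAtLatestVersion&) {
    load(latest, true); // unknown field at the old version, so retry at the latest one
  }
}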
- if (message_version == MessageVersion::LATEST_VERSION) { + if (message_version == MessageVersion::LatestVersion) { validation_visitor.onUnknownField("type " + message.GetTypeName() + " reason " + strict_status.ToString()); + } else if (message_version == MessageVersion::LatestVersionValidate) { + throw ProtobufMessage::UnknownProtoFieldException(absl::StrCat("Unknown field in: ", json)); } else { throw ApiBoostRetryException("Unknown field, possibly a rename, try again."); } @@ -318,7 +361,7 @@ void MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& messa if (do_boosting) { tryWithApiBoosting(load_json, message); } else { - load_json(message, MessageVersion::LATEST_VERSION); + load_json(message, MessageVersion::LatestVersion); } } @@ -357,11 +400,15 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa MessageVersion message_version) { try { if (message.ParseFromString(contents)) { - MessageUtil::checkForUnexpectedFields(message, validation_visitor); + MessageUtil::checkForUnexpectedFields( + message, message_version == MessageVersion::LatestVersionValidate + ? ProtobufMessage::getStrictValidationVisitor() + : validation_visitor); } return; } catch (EnvoyException& ex) { - if (message_version == MessageVersion::LATEST_VERSION) { + if (message_version == MessageVersion::LatestVersion || + message_version == MessageVersion::LatestVersionValidate) { // Failed reading the latest version - pass the same error upwards throw ex; } @@ -375,7 +422,7 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa // attempts to read as latest version. tryWithApiBoosting(read_proto_binary, message); } else { - read_proto_binary(message, MessageVersion::LATEST_VERSION); + read_proto_binary(message, MessageVersion::LatestVersion); } return; } @@ -387,7 +434,8 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa if (Protobuf::TextFormat::ParseFromString(contents, &message)) { return; } - if (message_version == MessageVersion::LATEST_VERSION) { + if (message_version == MessageVersion::LatestVersion || + message_version == MessageVersion::LatestVersionValidate) { throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + message.GetTypeName() + ")"); } else { @@ -399,7 +447,7 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa if (do_boosting) { tryWithApiBoosting(read_proto_text, message); } else { - read_proto_text(message, MessageVersion::LATEST_VERSION); + read_proto_text(message, MessageVersion::LatestVersion); } return; } @@ -599,6 +647,7 @@ void MessageUtil::unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Messag any_message_with_fixup.DebugString())); } Config::VersionConverter::annotateWithOriginalType(*earlier_version_desc, message); + MessageUtil::onVersionUpgradeWarn(any_full_name); return; } } @@ -947,12 +996,4 @@ void TimestampUtil::systemClockToTimestamp(const SystemTime system_clock_time, .count())); } -absl::string_view TypeUtil::typeUrlToDescriptorFullName(absl::string_view type_url) { - const size_t pos = type_url.rfind('/'); - if (pos != absl::string_view::npos) { - type_url = type_url.substr(pos + 1); - } - return type_url; -} - } // namespace Envoy diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index 3ba16b3bb910..24c3e639e45e 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -136,11 +136,6 @@ class MissingFieldException : public EnvoyException 
{ MissingFieldException(const std::string& field_name, const Protobuf::Message& message); }; -class TypeUtil { -public: - static absl::string_view typeUrlToDescriptorFullName(absl::string_view type_url); -}; - class RepeatedPtrUtil { public: static std::string join(const Protobuf::RepeatedPtrField& source, @@ -368,6 +363,13 @@ class MessageUtil { return typed_message; }; + /** + * Invoke when a version upgrade (e.g. v2 -> v3) is detected. This may warn or throw + * depending on where we are in the major version deprecation cycle. + * @param desc description of upgrade to include in warning or exception. + */ + static void onVersionUpgradeWarn(absl::string_view desc); + /** * Obtain a string field from a protobuf message dynamically. * diff --git a/source/common/router/BUILD b/source/common/router/BUILD index a2825b775dda..d0d4294d4467 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -305,7 +305,6 @@ envoy_cc_library( "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/common/http:message_lib", - "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/network:application_protocol_lib", "//source/common/network:transport_socket_options_lib", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 1affb8d52d42..1e2c7680a262 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -316,6 +316,12 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, route.route().cluster_not_found_response_code())), timeout_(PROTOBUF_GET_MS_OR_DEFAULT(route.route(), timeout, DEFAULT_ROUTE_TIMEOUT_MS)), idle_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), idle_timeout)), + max_stream_duration_( + PROTOBUF_GET_OPTIONAL_MS(route.route().max_stream_duration(), max_stream_duration)), + grpc_timeout_header_max_( + PROTOBUF_GET_OPTIONAL_MS(route.route().max_stream_duration(), grpc_timeout_header_max)), + grpc_timeout_header_offset_(PROTOBUF_GET_OPTIONAL_MS(route.route().max_stream_duration(), + grpc_timeout_header_offset)), max_grpc_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), max_grpc_timeout)), grpc_timeout_offset_(PROTOBUF_GET_OPTIONAL_MS(route.route(), grpc_timeout_offset)), loader_(factory_context.runtime()), runtime_(loadRuntimeData(route.match())), @@ -420,11 +426,11 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, std::make_unique(route.match().tls_context()); } - // Only set include_vh_rate_limits_ to true if the rate limit policy for the route is empty - // or the route set `include_vh_rate_limits` to true. 
+ // Returns true if include_vh_rate_limits is explicitly set to true otherwise it defaults to false + // which is similar to VhRateLimitOptions::Override and will only use virtual host rate limits if + // the route is empty include_vh_rate_limits_ = - (rate_limit_policy_.empty() || - PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route(), include_vh_rate_limits, false)); + PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route(), include_vh_rate_limits, false); if (route.route().has_cors()) { cors_policy_ = @@ -544,9 +550,11 @@ void RouteEntryImplBase::finalizeRequestHeaders(Http::RequestHeaderMap& headers, if (!host_rewrite_.empty()) { headers.setHost(host_rewrite_); } else if (auto_host_rewrite_header_) { - const Http::HeaderEntry* header = headers.get(*auto_host_rewrite_header_); - if (header != nullptr) { - absl::string_view header_value = header->value().getStringView(); + const auto header = headers.get(*auto_host_rewrite_header_); + if (!header.empty()) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + const absl::string_view header_value = header[0]->value().getStringView(); if (!header_value.empty()) { headers.setHost(header_value); } @@ -875,10 +883,12 @@ RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& head return shared_from_this(); } else { ASSERT(!cluster_header_name_.get().empty()); - const Http::HeaderEntry* entry = headers.get(cluster_header_name_); + const auto entry = headers.get(cluster_header_name_); std::string final_cluster_name; - if (entry) { - final_cluster_name = std::string(entry->value().getStringView()); + if (!entry.empty()) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + final_cluster_name = std::string(entry[0]->value().getStringView()); } // NOTE: Though we return a shared_ptr here, the current ownership model assumes that @@ -1056,9 +1066,10 @@ void ConnectRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers, } RouteConstSharedPtr ConnectRouteEntryImpl::matches(const Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo&, + const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { - if (Http::HeaderUtility::isConnect(headers)) { + if (Http::HeaderUtility::isConnect(headers) && + RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) { return clusterEntry(headers, random_value); } return nullptr; diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 192da070ce53..752be6546415 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -498,6 +498,15 @@ class RouteEntryImplBase : public RouteEntry, } std::chrono::milliseconds timeout() const override { return timeout_; } absl::optional idleTimeout() const override { return idle_timeout_; } + absl::optional maxStreamDuration() const override { + return max_stream_duration_; + } + absl::optional grpcTimeoutHeaderMax() const override { + return grpc_timeout_header_max_; + } + absl::optional grpcTimeoutHeaderOffset() const override { + return grpc_timeout_header_offset_; + } absl::optional maxGrpcTimeout() const override { return max_grpc_timeout_; } @@ -604,6 +613,15 @@ class RouteEntryImplBase : public RouteEntry, absl::optional idleTimeout() const override { return parent_->idleTimeout(); } + absl::optional maxStreamDuration() const override { + return parent_->max_stream_duration_; + } + absl::optional grpcTimeoutHeaderMax() const override { + return 
parent_->grpc_timeout_header_max_; + } + absl::optional grpcTimeoutHeaderOffset() const override { + return parent_->grpc_timeout_header_offset_; + } absl::optional maxGrpcTimeout() const override { return parent_->maxGrpcTimeout(); } @@ -758,6 +776,9 @@ class RouteEntryImplBase : public RouteEntry, const Http::Code cluster_not_found_response_code_; const std::chrono::milliseconds timeout_; const absl::optional idle_timeout_; + const absl::optional max_stream_duration_; + const absl::optional grpc_timeout_header_max_; + const absl::optional grpc_timeout_header_offset_; const absl::optional max_grpc_timeout_; const absl::optional grpc_timeout_offset_; Runtime::Loader& loader_; diff --git a/source/common/router/config_utility.h b/source/common/router/config_utility.h index 3734040c9582..c7a8dc455c90 100644 --- a/source/common/router/config_utility.h +++ b/source/common/router/config_utility.h @@ -7,7 +7,6 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/http/codes.h" -#include "envoy/json/json_object.h" #include "envoy/upstream/resource_manager.h" #include "common/common/empty_string.h" diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 5cc898b1e841..a15e07ec374a 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -186,8 +186,11 @@ parseRequestHeader(absl::string_view param) { Http::LowerCaseString header_name{std::string(param)}; return [header_name](const Envoy::StreamInfo::StreamInfo& stream_info) -> std::string { if (const auto* request_headers = stream_info.getRequestHeaders()) { - if (const auto* entry = request_headers->get(header_name)) { - return std::string(entry->value().getStringView()); + const auto entry = request_headers->get(header_name); + if (!entry.empty()) { + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially use all header + // values. 
+ return std::string(entry[0]->value().getStringView()); } } return std::string(); diff --git a/source/common/router/header_parser.cc b/source/common/router/header_parser.cc index 758f89c86bc6..1e4a9f4e3098 100644 --- a/source/common/router/header_parser.cc +++ b/source/common/router/header_parser.cc @@ -226,9 +226,9 @@ HeaderParserPtr HeaderParser::configure( for (const auto& header_value_option : headers_to_add) { const bool append = PROTOBUF_GET_WRAPPED_OR_DEFAULT(header_value_option, append, true); HeaderFormatterPtr header_formatter = parseInternal(header_value_option.header(), append); - header_parser->headers_to_add_.emplace_back( - Http::LowerCaseString(header_value_option.header().key()), std::move(header_formatter)); + Http::LowerCaseString(header_value_option.header().key()), + HeadersToAddEntry{std::move(header_formatter), header_value_option.header().value()}); } return header_parser; @@ -241,9 +241,9 @@ HeaderParserPtr HeaderParser::configure( for (const auto& header_value : headers_to_add) { HeaderFormatterPtr header_formatter = parseInternal(header_value, append); - - header_parser->headers_to_add_.emplace_back(Http::LowerCaseString(header_value.key()), - std::move(header_formatter)); + header_parser->headers_to_add_.emplace_back( + Http::LowerCaseString(header_value.key()), + HeadersToAddEntry{std::move(header_formatter), header_value.value()}); } return header_parser; @@ -269,19 +269,25 @@ HeaderParserPtr HeaderParser::configure( void HeaderParser::evaluateHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info) const { + evaluateHeaders(headers, &stream_info); +} + +void HeaderParser::evaluateHeaders(Http::HeaderMap& headers, + const StreamInfo::StreamInfo* stream_info) const { // Removing headers in the headers_to_remove_ list first makes // remove-before-add the default behavior as expected by users. for (const auto& header : headers_to_remove_) { headers.remove(header); } - for (const auto& formatter : headers_to_add_) { - const std::string value = formatter.second->format(stream_info); + for (const auto& [key, entry] : headers_to_add_) { + const std::string value = + stream_info != nullptr ? 
entry.formatter_->format(*stream_info) : entry.original_value_;
     if (!value.empty()) {
-      if (formatter.second->append()) {
-        headers.addReferenceKey(formatter.first, value);
+      if (entry.formatter_->append()) {
+        headers.addReferenceKey(key, value);
       } else {
-        headers.setReferenceKey(formatter.first, value);
+        headers.setReferenceKey(key, value);
       }
     }
   }
diff --git a/source/common/router/header_parser.h b/source/common/router/header_parser.h
index d32832f414b4..3b947a3d6e11 100644
--- a/source/common/router/header_parser.h
+++ b/source/common/router/header_parser.h
@@ -49,12 +49,18 @@ class HeaderParser {
                  const Protobuf::RepeatedPtrField<std::string>& headers_to_remove);
 
   void evaluateHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info) const;
+  void evaluateHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo* stream_info) const;
 
 protected:
   HeaderParser() = default;
 
 private:
-  std::vector<std::pair<Http::LowerCaseString, HeaderFormatterPtr>> headers_to_add_;
+  struct HeadersToAddEntry {
+    HeaderFormatterPtr formatter_;
+    const std::string original_value_;
+  };
+
+  std::vector<std::pair<Http::LowerCaseString, HeadersToAddEntry>> headers_to_add_;
   std::vector<Http::LowerCaseString> headers_to_remove_;
 };
 
diff --git a/source/common/router/reset_header_parser.cc b/source/common/router/reset_header_parser.cc
index f5d61e9ca714..6089dedd5b0c 100644
--- a/source/common/router/reset_header_parser.cc
+++ b/source/common/router/reset_header_parser.cc
@@ -27,13 +27,14 @@ ResetHeaderParserImpl::ResetHeaderParserImpl(
 absl::optional<std::chrono::milliseconds>
 ResetHeaderParserImpl::parseInterval(TimeSource& time_source, const Http::HeaderMap& headers) const {
-  const Http::HeaderEntry* header = headers.get(name_);
+  const auto header = headers.get(name_);
 
-  if (header == nullptr) {
+  if (header.empty()) {
     return absl::nullopt;
   }
 
-  const auto& header_value = header->value().getStringView();
+  // This is effectively a trusted header, so per the API only the first value is used.
+  const auto& header_value = header[0]->value().getStringView();
 
   uint64_t num_seconds{};
   switch (format_) {
diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc
index 84f575692a21..f4303a55f461 100644
--- a/source/common/router/retry_state_impl.cc
+++ b/source/common/router/retry_state_impl.cc
@@ -127,7 +127,7 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy,
   }
 
   if (request_headers.EnvoyRetriableStatusCodes()) {
-    for (const auto code :
+    for (const auto& code :
          StringUtil::splitToken(request_headers.getEnvoyRetriableStatusCodesValue(), ",")) {
       unsigned int out;
       if (absl::SimpleAtoi(code, &out)) {
@@ -142,7 +142,7 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy,
     // to provide HeaderMatcher serialized into a string. To avoid this extra
     // complexity we only support name-only header matchers via request
    // header. Anything more sophisticated needs to be provided via config.
- for (const auto header_name : StringUtil::splitToken( + for (const auto& header_name : StringUtil::splitToken( request_headers.EnvoyRetriableHeaderNames()->value().getStringView(), ",")) { envoy::config::route::v3::HeaderMatcher header_matcher; header_matcher.set_name(std::string(absl::StripAsciiWhitespace(header_name))); @@ -181,7 +181,7 @@ void RetryStateImpl::enableBackoffTimer() { std::pair RetryStateImpl::parseRetryOn(absl::string_view config) { uint32_t ret = 0; bool all_fields_valid = true; - for (const auto retry_on : StringUtil::splitToken(config, ",", false, true)) { + for (const auto& retry_on : StringUtil::splitToken(config, ",", false, true)) { if (retry_on == Http::Headers::get().EnvoyRetryOnValues._5xx) { ret |= RetryPolicy::RETRY_ON_5XX; } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.GatewayError) { @@ -211,7 +211,7 @@ std::pair RetryStateImpl::parseRetryOn(absl::string_view config) std::pair RetryStateImpl::parseRetryGrpcOn(absl::string_view retry_grpc_on_header) { uint32_t ret = 0; bool all_fields_valid = true; - for (const auto retry_on : StringUtil::splitToken(retry_grpc_on_header, ",", false, true)) { + for (const auto& retry_on : StringUtil::splitToken(retry_grpc_on_header, ",", false, true)) { if (retry_on == Http::Headers::get().EnvoyRetryOnGrpcValues.Cancelled) { ret |= RetryPolicy::RETRY_ON_GRPC_CANCELLED; } else if (retry_on == Http::Headers::get().EnvoyRetryOnGrpcValues.DeadlineExceeded) { @@ -233,7 +233,7 @@ std::pair RetryStateImpl::parseRetryGrpcOn(absl::string_view ret absl::optional RetryStateImpl::parseResetInterval(const Http::ResponseHeaderMap& response_headers) const { for (const auto& reset_header : reset_headers_) { - const auto interval = reset_header->parseInterval(time_source_, response_headers); + const auto& interval = reset_header->parseInterval(time_source_, response_headers); if (interval.has_value() && interval.value() <= reset_max_interval_) { return interval; } diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 9e2e00128a4a..739050410f74 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -26,7 +26,6 @@ #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/message_impl.h" -#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/network/application_protocol.h" #include "common/network/transport_socket_options_impl.h" @@ -118,7 +117,9 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& req TimeoutData timeout; if (grpc_request && route.maxGrpcTimeout()) { const std::chrono::milliseconds max_grpc_timeout = route.maxGrpcTimeout().value(); - std::chrono::milliseconds grpc_timeout = Grpc::Common::getGrpcTimeout(request_headers); + auto header_timeout = Grpc::Common::getGrpcTimeout(request_headers); + std::chrono::milliseconds grpc_timeout = + header_timeout ? header_timeout.value() : std::chrono::milliseconds(0); if (route.grpcTimeoutOffset()) { // We only apply the offset if it won't result in grpc_timeout hitting 0 or below, as // setting it to 0 means infinity and a negative timeout makes no sense. 
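The finalTimeout() change above makes the absence of a grpc-timeout header explicit: Grpc::Common::getGrpcTimeout() now returns an optional, a missing header collapses to 0ms (treated as "no timeout"), and the configured grpc_timeout_offset is only subtracted when it cannot drive the value to zero or below. A minimal sketch of that resolution logic follows; resolveGrpcTimeout() is a stand-in helper for illustration only and is not part of this patch or the Envoy API.

// Sketch only, assuming absl is available; resolveGrpcTimeout() is hypothetical.
#include <chrono>

#include "absl/types/optional.h"

std::chrono::milliseconds
resolveGrpcTimeout(absl::optional<std::chrono::milliseconds> header_timeout,
                   absl::optional<std::chrono::milliseconds> offset) {
  // A missing grpc-timeout header now maps to 0ms, which downstream logic treats as "no timeout".
  std::chrono::milliseconds timeout =
      header_timeout.has_value() ? header_timeout.value() : std::chrono::milliseconds(0);
  // Apply the offset only if it keeps the timeout strictly positive; 0 means infinity and a
  // negative timeout makes no sense.
  if (offset.has_value() && timeout > offset.value()) {
    timeout -= offset.value();
  }
  return timeout;
}

Called with nullopt for the header value, the sketch returns 0ms, matching the "missing header means no timeout" behavior the hunk above introduces.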
@@ -740,7 +741,7 @@ void Filter::maybeDoShadowing() { Http::RequestMessagePtr request(new Http::RequestMessageImpl( Http::createHeaderMap(*downstream_headers_))); if (callbacks_->decodingBuffer()) { - request->body() = std::make_unique(*callbacks_->decodingBuffer()); + request->body().add(*callbacks_->decodingBuffer()); } if (downstream_trailers_) { request->trailers(Http::createHeaderMap(*downstream_trailers_)); @@ -967,6 +968,7 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ absl::string_view body, bool dropped, absl::string_view details) { // If we have not yet sent anything downstream, send a response with an appropriate status code. // Otherwise just reset the ongoing response. + callbacks_->streamInfo().setResponseFlag(response_flags); if (downstream_response_started_ && !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_500_after_100")) { // This will destroy any created retry timers. @@ -976,9 +978,6 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ } else { // This will destroy any created retry timers. cleanup(); - - callbacks_->streamInfo().setResponseFlag(response_flags); - // sendLocalReply may instead reset the stream if downstream_response_started_ is true. callbacks_->sendLocalReply( code, body, @@ -1091,6 +1090,7 @@ Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { return StreamInfo::ResponseFlag::UpstreamOverflow; case Http::StreamResetReason::RemoteReset: case Http::StreamResetReason::RemoteRefusedStreamReset: + case Http::StreamResetReason::ConnectError: return StreamInfo::ResponseFlag::UpstreamRemoteReset; } @@ -1300,9 +1300,8 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt onUpstreamComplete(upstream_request); } - callbacks_->streamInfo().setResponseCodeDetails( - StreamInfo::ResponseCodeDetails::get().ViaUpstream); - callbacks_->encodeHeaders(std::move(headers), end_stream); + callbacks_->encodeHeaders(std::move(headers), end_stream, + StreamInfo::ResponseCodeDetails::get().ViaUpstream); } void Filter::onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, diff --git a/source/common/router/router.h b/source/common/router/router.h index e1be0571e2b6..f5512919d7da 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -207,7 +207,7 @@ class FilterConfig { ShadowWriterPtr&& shadow_writer, const envoy::extensions::filters::http::router::v3::Router& config) : FilterConfig(stat_prefix, context.localInfo(), context.scope(), context.clusterManager(), - context.runtime(), context.random(), std::move(shadow_writer), + context.runtime(), context.api().randomGenerator(), std::move(shadow_writer), PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, dynamic_stats, true), config.start_child_span(), config.suppress_envoy_headers(), config.respect_expected_rq_timeout(), config.strict_check_headers(), diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index 615b34412e85..0774f8340be5 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -67,16 +67,17 @@ bool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&, const Http::HeaderMap& headers, const Network::Address::Instance&, const envoy::config::core::v3::Metadata*) const { - const Http::HeaderEntry* header_value = headers.get(header_name_); + const auto header_value = headers.get(header_name_); // If header is not present in the request and if 
skip_if_absent is true skip this descriptor, // while calling rate limiting service. If skip_if_absent is false, do not call rate limiting // service. - if (!header_value) { + if (header_value.empty()) { return skip_if_absent_; } + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially populate all header values. descriptor.entries_.push_back( - {descriptor_key_, std::string(header_value->value().getStringView())}); + {descriptor_key_, std::string(header_value[0]->value().getStringView())}); return true; } @@ -101,22 +102,40 @@ bool GenericKeyAction::populateDescriptor(const Router::RouteEntry&, return true; } -DynamicMetaDataAction::DynamicMetaDataAction( +MetaDataAction::MetaDataAction(const envoy::config::route::v3::RateLimit::Action::MetaData& action) + : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()), + default_value_(action.default_value()), source_(action.source()) {} + +MetaDataAction::MetaDataAction( const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action) : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()), - default_value_(action.default_value()) {} + default_value_(action.default_value()), + source_(envoy::config::route::v3::RateLimit::Action::MetaData::DYNAMIC) {} -bool DynamicMetaDataAction::populateDescriptor( - const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, +bool MetaDataAction::populateDescriptor( + const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap&, const Network::Address::Instance&, const envoy::config::core::v3::Metadata* dynamic_metadata) const { - const ProtobufWkt::Value& metadata_value = - Envoy::Config::Metadata::metadataValue(dynamic_metadata, metadata_key_); + const envoy::config::core::v3::Metadata* metadata_source; + + switch (source_) { + case envoy::config::route::v3::RateLimit::Action::MetaData::DYNAMIC: + metadata_source = dynamic_metadata; + break; + case envoy::config::route::v3::RateLimit::Action::MetaData::ROUTE_ENTRY: + metadata_source = &route.metadata(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + const std::string metadata_string_value = + Envoy::Config::Metadata::metadataValue(metadata_source, metadata_key_).string_value(); - if (!metadata_value.string_value().empty()) { - descriptor.entries_.push_back({descriptor_key_, metadata_value.string_value()}); + if (!metadata_string_value.empty()) { + descriptor.entries_.push_back({descriptor_key_, metadata_string_value}); return true; - } else if (metadata_value.string_value().empty() && !default_value_.empty()) { + } else if (metadata_string_value.empty() && !default_value_.empty()) { descriptor.entries_.push_back({descriptor_key_, default_value_}); return true; } @@ -165,7 +184,10 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( actions_.emplace_back(new GenericKeyAction(action.generic_key())); break; case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kDynamicMetadata: - actions_.emplace_back(new DynamicMetaDataAction(action.dynamic_metadata())); + actions_.emplace_back(new MetaDataAction(action.dynamic_metadata())); + break; + case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kMetadata: + actions_.emplace_back(new MetaDataAction(action.metadata())); break; case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kHeaderValueMatch: actions_.emplace_back(new HeaderValueMatchAction(action.header_value_match())); diff --git 
a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h index 9ea90a5d46b0..912606fc0da8 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -114,11 +114,13 @@ class GenericKeyAction : public RateLimitAction { }; /** - * Action for dynamic metadata rate limiting. + * Action for metadata rate limiting. */ -class DynamicMetaDataAction : public RateLimitAction { +class MetaDataAction : public RateLimitAction { public: - DynamicMetaDataAction(const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action); + MetaDataAction(const envoy::config::route::v3::RateLimit::Action::MetaData& action); + // for maintaining backward compatibility with the deprecated DynamicMetaData action + MetaDataAction(const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action); // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, @@ -129,6 +131,7 @@ class DynamicMetaDataAction : public RateLimitAction { const Envoy::Config::MetadataKey metadata_key_; const std::string descriptor_key_; const std::string default_value_; + const envoy::config::route::v3::RateLimit::Action::MetaData::Source source_; }; /** diff --git a/source/common/router/scoped_config_impl.cc b/source/common/router/scoped_config_impl.cc index ef8c7e612471..4c379e1b7a33 100644 --- a/source/common/router/scoped_config_impl.cc +++ b/source/common/router/scoped_config_impl.cc @@ -40,15 +40,17 @@ HeaderValueExtractorImpl::HeaderValueExtractorImpl( std::unique_ptr HeaderValueExtractorImpl::computeFragment(const Http::HeaderMap& headers) const { - const Envoy::Http::HeaderEntry* header_entry = + const auto header_entry = headers.get(Envoy::Http::LowerCaseString(header_value_extractor_config_.name())); - if (header_entry == nullptr) { + if (header_entry.empty()) { return nullptr; } - std::vector elements{header_entry->value().getStringView()}; + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. 
+ std::vector elements{header_entry[0]->value().getStringView()}; if (header_value_extractor_config_.element_separator().length() > 0) { - elements = absl::StrSplit(header_entry->value().getStringView(), + elements = absl::StrSplit(header_entry[0]->value().getStringView(), header_value_extractor_config_.element_separator()); } switch (header_value_extractor_config_.extract_type_case()) { diff --git a/source/common/router/string_accessor_impl.h b/source/common/router/string_accessor_impl.h index 251a714f4b14..d4851e9b1a75 100644 --- a/source/common/router/string_accessor_impl.h +++ b/source/common/router/string_accessor_impl.h @@ -19,6 +19,8 @@ class StringAccessorImpl : public StringAccessor { return message; } + absl::optional serializeAsString() const override { return value_; } + private: std::string value_; }; diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 5722deb78372..b906df34fdc9 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -440,10 +440,6 @@ void UpstreamRequest::encodeBodyAndTrailers() { downstream_metadata_map_vector_); upstream_->encodeMetadata(downstream_metadata_map_vector_); downstream_metadata_map_vector_.clear(); - if (shouldSendEndStream()) { - Buffer::OwnedImpl empty_data(""); - upstream_->encodeData(empty_data, true); - } } if (buffered_request_body_) { diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 8b4ab5679e42..27ae0679af0b 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -53,36 +53,43 @@ uint64_t getInteger(absl::string_view feature, uint64_t default_value) { // problem of the bugs being found after the old code path has been removed. constexpr const char* runtime_features[] = { // Enabled - "envoy.reloadable_features.http1_flood_protection", "envoy.reloadable_features.test_feature_true", // Begin alphabetically sorted section. 
"envoy.deprecated_features.allow_deprecated_extension_names", + "envoy.reloadable_features.always_apply_route_header_rules", "envoy.reloadable_features.activate_fds_next_event_loop", "envoy.reloadable_features.activate_timers_next_event_loop", "envoy.reloadable_features.allow_500_after_100", "envoy.reloadable_features.allow_prefetch", "envoy.reloadable_features.allow_response_for_timeout", "envoy.reloadable_features.consume_all_retry_headers", + "envoy.reloadable_features.check_ocsp_policy", "envoy.reloadable_features.disallow_unbounded_access_logs", "envoy.reloadable_features.early_errors_via_hcm", "envoy.reloadable_features.enable_deprecated_v2_api_warning", "envoy.reloadable_features.enable_dns_cache_circuit_breakers", - "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", + "envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "envoy.reloadable_features.fix_upgrade_response", "envoy.reloadable_features.fix_wildcard_matching", "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.hcm_stream_error_on_invalid_message", "envoy.reloadable_features.http_default_alpn", + "envoy.reloadable_features.http_match_on_all_headers", + "envoy.reloadable_features.http_set_copy_replace_all_headers", "envoy.reloadable_features.http_transport_failure_reason_in_body", "envoy.reloadable_features.http2_skip_encoding_empty_trailers", "envoy.reloadable_features.listener_in_place_filterchain_update", - "envoy.reloadable_features.new_tcp_connection_pool", + "envoy.reloadable_features.new_codec_behavior", "envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2", + "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", + "envoy.reloadable_features.require_ocsp_response_for_must_staple_certs", "envoy.reloadable_features.stop_faking_paths", "envoy.reloadable_features.strict_1xx_and_204_response_headers", "envoy.reloadable_features.tls_use_io_handle_bio", + "envoy.reloadable_features.unify_grpc_handling", + "envoy.restart_features.use_apple_api_for_dns_lookups", }; // This is a section for officially sanctioned runtime features which are too @@ -94,8 +101,11 @@ constexpr const char* runtime_features[] = { // When features are added here, there should be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { - // TODO(asraa) flip this feature after codec errors are handled - "envoy.reloadable_features.new_codec_behavior", + // Allow Envoy to upgrade or downgrade version of type url, should be removed when support for + // v2 url is removed from codebase. + "envoy.reloadable_features.enable_type_url_downgrade_and_upgrade", + // TODO(alyssawilk) flip true after the release. + "envoy.reloadable_features.new_tcp_connection_pool", // Sentinel and test flag. 
"envoy.reloadable_features.test_feature_false", }; diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index befad79c2b4c..3f469c4921d0 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -30,12 +30,16 @@ namespace Envoy { namespace Runtime { -void SnapshotImpl::countDeprecatedFeatureUse() const { - stats_.deprecated_feature_use_.inc(); +namespace { + +void countDeprecatedFeatureUseInternal(const RuntimeStats& stats) { + stats.deprecated_feature_use_.inc(); // Similar to the above, but a gauge that isn't imported during a hot restart. - stats_.deprecated_feature_seen_since_process_start_.inc(); + stats.deprecated_feature_seen_since_process_start_.inc(); } +} // namespace + bool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_value) const { // If the value is not explicitly set as a runtime boolean, trust the proto annotations passed as // default_value. @@ -46,7 +50,7 @@ bool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_ // The feature is allowed. It is assumed this check is called when the feature // is about to be used, so increment the feature use stat. - countDeprecatedFeatureUse(); + countDeprecatedFeatureUseInternal(stats_); #ifdef ENVOY_DISABLE_DEPRECATED_FEATURES return false; @@ -298,7 +302,7 @@ void DiskLayer::walkDirectory(const std::string& path, const std::string& prefix // Comments are useful for placeholder files with no value. const std::string text_file{api.fileSystem().fileReadToEnd(full_path)}; const auto lines = StringUtil::splitToken(text_file, "\n"); - for (const auto line : lines) { + for (const auto& line : lines) { if (!line.empty() && line.front() == '#') { continue; } @@ -486,7 +490,8 @@ void LoaderImpl::loadNewSnapshot() { } const Snapshot& LoaderImpl::snapshot() { - ASSERT(tls_->currentThreadRegistered(), "snapshot can only be called from a worker thread"); + ASSERT(tls_->currentThreadRegistered(), + "snapshot can only be called from a worker thread or after the main thread is registered"); return tls_->getTyped(); } @@ -511,6 +516,8 @@ void LoaderImpl::mergeValues(const absl::node_hash_map Stats::Scope& LoaderImpl::getRootScope() { return store_; } +void LoaderImpl::countDeprecatedFeatureUse() const { countDeprecatedFeatureUseInternal(stats_); } + RuntimeStats LoaderImpl::generateStats(Stats::Store& store) { std::string prefix = "runtime."; RuntimeStats stats{ diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index ee4c0cb3841c..7f2cda13bf2e 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -67,7 +67,6 @@ class SnapshotImpl : public Snapshot, Logger::Loggable { std::vector&& layers); // Runtime::Snapshot - void countDeprecatedFeatureUse() const override; bool deprecatedFeatureEnabled(absl::string_view key, bool default_value) const override; bool runtimeFeatureEnabled(absl::string_view key) const override; bool featureEnabled(absl::string_view key, uint64_t default_value, uint64_t random_value, @@ -241,6 +240,7 @@ class LoaderImpl : public Loader, Logger::Loggable { void mergeValues(const absl::node_hash_map& values) override; void startRtdsSubscriptions(ReadyCallback on_done) override; Stats::Scope& getRootScope() override; + void countDeprecatedFeatureUse() const override; private: friend RtdsSubscription; diff --git a/source/common/runtime/runtime_protos.h b/source/common/runtime/runtime_protos.h index 855b145121db..aa5c7f219438 
100644 --- a/source/common/runtime/runtime_protos.h +++ b/source/common/runtime/runtime_protos.h @@ -34,12 +34,17 @@ class Double { Double(const envoy::config::core::v3::RuntimeDouble& double_proto, Runtime::Loader& runtime) : runtime_key_(double_proto.runtime_key()), default_value_(double_proto.default_value()), runtime_(runtime) {} + Double(std::string runtime_key, double default_value, Runtime::Loader& runtime) + : runtime_key_(std::move(runtime_key)), default_value_(default_value), runtime_(runtime) {} + virtual ~Double() = default; const std::string& runtimeKey() const { return runtime_key_; } - double value() const { return runtime_.snapshot().getDouble(runtime_key_, default_value_); } + virtual double value() const { + return runtime_.snapshot().getDouble(runtime_key_, default_value_); + } -private: +protected: const std::string runtime_key_; const double default_value_; Runtime::Loader& runtime_; @@ -62,5 +67,20 @@ class FractionalPercent { Runtime::Loader& runtime_; }; +// Helper class for runtime-derived percentages. +class Percentage : public Double { +public: + Percentage(const envoy::config::core::v3::RuntimePercent& percent_proto, Runtime::Loader& runtime) + : Double(percent_proto.runtime_key(), percent_proto.default_value().value(), runtime) {} + + double value() const override { + const auto val = Double::value(); + if (val <= 100.0 && val >= 0.0) { + return val; + } + return default_value_; + } +}; + } // namespace Runtime } // namespace Envoy diff --git a/source/common/ssl/tls_certificate_config_impl.cc b/source/common/ssl/tls_certificate_config_impl.cc index 684680310655..7a4d4f64ed2a 100644 --- a/source/common/ssl/tls_certificate_config_impl.cc +++ b/source/common/ssl/tls_certificate_config_impl.cc @@ -11,6 +11,19 @@ namespace Envoy { namespace Ssl { +namespace { +std::vector readOcspStaple(const envoy::config::core::v3::DataSource& source, + Api::Api& api) { + std::string staple = Config::DataSource::read(source, true, api); + if (source.specifier_case() == + envoy::config::core::v3::DataSource::SpecifierCase::kInlineString) { + throw EnvoyException("OCSP staple cannot be provided via inline_string"); + } + + return {staple.begin(), staple.end()}; +} +} // namespace + static const std::string INLINE_STRING = ""; TlsCertificateConfigImpl::TlsCertificateConfigImpl( @@ -26,6 +39,9 @@ TlsCertificateConfigImpl::TlsCertificateConfigImpl( password_(Config::DataSource::read(config.password(), true, api)), password_path_(Config::DataSource::getPath(config.password()) .value_or(password_.empty() ? EMPTY_STRING : INLINE_STRING)), + ocsp_staple_(readOcspStaple(config.ocsp_staple(), api)), + ocsp_staple_path_(Config::DataSource::getPath(config.ocsp_staple()) + .value_or(ocsp_staple_.empty() ? EMPTY_STRING : INLINE_STRING)), private_key_method_( factory_context != nullptr && config.has_private_key_provider() ? 
factory_context->sslContextManager() diff --git a/source/common/ssl/tls_certificate_config_impl.h b/source/common/ssl/tls_certificate_config_impl.h index 21fbc51f0d31..088b60e393aa 100644 --- a/source/common/ssl/tls_certificate_config_impl.h +++ b/source/common/ssl/tls_certificate_config_impl.h @@ -22,6 +22,8 @@ class TlsCertificateConfigImpl : public TlsCertificateConfig { const std::string& privateKeyPath() const override { return private_key_path_; } const std::string& password() const override { return password_; } const std::string& passwordPath() const override { return password_path_; } + const std::vector& ocspStaple() const override { return ocsp_staple_; } + const std::string& ocspStaplePath() const override { return ocsp_staple_path_; } Envoy::Ssl::PrivateKeyMethodProviderSharedPtr privateKeyMethod() const override { return private_key_method_; } @@ -33,6 +35,8 @@ class TlsCertificateConfigImpl : public TlsCertificateConfig { const std::string private_key_path_; const std::string password_; const std::string password_path_; + const std::vector ocsp_staple_; + const std::string ocsp_staple_path_; Envoy::Ssl::PrivateKeyMethodProviderSharedPtr private_key_method_{}; }; diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index bc5c41f6e9e2..fb8528063934 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -46,7 +46,6 @@ envoy_cc_library( srcs = ["isolated_store_impl.cc"], hdrs = ["isolated_store_impl.h"], deps = [ - ":fake_symbol_table_lib", ":histogram_lib", ":null_counter_lib", ":null_gauge_lib", @@ -54,7 +53,6 @@ envoy_cc_library( ":scope_prefixer_lib", ":stats_lib", ":store_impl_lib", - ":symbol_table_creator_lib", ":tag_utility_lib", "//include/envoy/stats:stats_macros", "//source/common/stats:allocator_lib", @@ -191,23 +189,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "symbol_table_creator_lib", - srcs = ["symbol_table_creator.cc"], - hdrs = ["symbol_table_creator.h"], - external_deps = ["abseil_base"], - deps = [ - ":fake_symbol_table_lib", - ":symbol_table_lib", - ], -) - -envoy_cc_library( - name = "fake_symbol_table_lib", - hdrs = ["fake_symbol_table_impl.h"], - deps = [":symbol_table_lib"], -) - envoy_cc_library( name = "tag_extractor_lib", srcs = ["tag_extractor_impl.cc"], diff --git a/source/common/stats/fake_symbol_table_impl.h b/source/common/stats/fake_symbol_table_impl.h deleted file mode 100644 index 19bfa00daa79..000000000000 --- a/source/common/stats/fake_symbol_table_impl.h +++ /dev/null @@ -1,144 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "envoy/common/exception.h" -#include "envoy/stats/symbol_table.h" - -#include "common/common/assert.h" -#include "common/common/hash.h" -#include "common/common/lock_guard.h" -#include "common/common/non_copyable.h" -#include "common/common/thread.h" -#include "common/common/utility.h" -#include "common/stats/symbol_table_impl.h" - -#include "absl/strings/str_join.h" -#include "absl/strings/str_split.h" - -namespace Envoy { -namespace Stats { - -/** - * Implements the SymbolTable interface without taking locks or saving memory. - * This implementation is intended as a transient state for the Envoy codebase - * to allow incremental conversion of Envoy stats call-sites to use the - * SymbolTable interface, pre-allocating symbols during construction time for - * all stats tokens. 
- * - * Once all stat tokens are symbolized at construction time, this - * FakeSymbolTable implementation can be deleted, and real-symbol tables can be - * used, thereby reducing memory and improving stat construction time. - * - * Note that it is not necessary to pre-allocate all elaborated stat names - * because multiple StatNames can be joined together without taking locks, - * even in SymbolTableImpl. - * - * This implementation simply stores the characters directly in the uint8_t[] - * that backs each StatName, so there is no sharing or memory savings, but also - * no state associated with the SymbolTable, and thus no locks needed. - * - * TODO(#6307): delete this class once SymbolTable is fully deployed in the - * Envoy codebase. - */ -class FakeSymbolTableImpl : public SymbolTable { -public: - // SymbolTable - void populateList(const StatName* names, uint32_t num_names, StatNameList& list) override { - // This implementation of populateList is similar to - // SymbolTableImpl::populateList. This variant is more efficient for - // FakeSymbolTableImpl, because it avoid "encoding" each name in names. The - // strings are laid out abutting each other with 2-byte length prefixes, so - // encoding isn't needed, and doing a dummy encoding step would cost one - // memory allocation per element, adding significant overhead as measured by - // thread_local_store_speed_test. - - // We encode the number of names in a single byte, thus there must be less - // than 256 of them. - RELEASE_ASSERT(num_names < 256, "Maximum number elements in a StatNameList exceeded"); - - size_t total_size_bytes = 1; /* one byte for holding the number of names */ - for (uint32_t i = 0; i < num_names; ++i) { - total_size_bytes += names[i].size(); - } - - // Now allocate the exact number of bytes required and move the encodings - // into storage. - MemBlockBuilder mem_block(total_size_bytes); - mem_block.appendOne(num_names); - for (uint32_t i = 0; i < num_names; ++i) { - SymbolTableImpl::Encoding::appendToMemBlock(names[i], mem_block); - } - - // This assertion double-checks the arithmetic where we computed - // total_size_bytes. After appending all the encoded data into the - // allocated byte array, we should have exhausted all the memory - // we though we needed. 
- ASSERT(mem_block.capacityRemaining() == 0); - list.moveStorageIntoList(mem_block.release()); - } - - std::string toString(const StatName& stat_name) const override { - return std::string(toStringView(stat_name)); - } - uint64_t numSymbols() const override { return 0; } - bool lessThan(const StatName& a, const StatName& b) const override { - return toStringView(a) < toStringView(b); - } - void free(const StatName&) override {} - void incRefCount(const StatName&) override {} - StoragePtr encode(absl::string_view name) override { return encodeHelper(name); } - StoragePtr makeDynamicStorage(absl::string_view name) override { return encodeHelper(name); } - SymbolTable::StoragePtr join(const StatNameVec& names) const override { - std::vector strings; - for (StatName name : names) { - if (!name.empty()) { - strings.push_back(toStringView(name)); - } - } - return encodeHelper(absl::StrJoin(strings, ".")); - } - -#ifndef ENVOY_CONFIG_COVERAGE - void debugPrint() const override {} -#endif - - void callWithStringView(StatName stat_name, - const std::function& fn) const override { - fn(toStringView(stat_name)); - } - - StatNameSetPtr makeSet(absl::string_view name) override { - // make_unique does not work with private ctor, even though FakeSymbolTableImpl is a friend. - return StatNameSetPtr(new StatNameSet(*this, name)); - } - uint64_t getRecentLookups(const RecentLookupsFn&) const override { return 0; } - void clearRecentLookups() override {} - void setRecentLookupCapacity(uint64_t) override {} - uint64_t recentLookupCapacity() const override { return 0; } - DynamicSpans getDynamicSpans(StatName) const override { return DynamicSpans(); } - -private: - absl::string_view toStringView(const StatName& stat_name) const { - return {reinterpret_cast(stat_name.data()), - static_cast(stat_name.dataSize())}; - } - - StoragePtr encodeHelper(absl::string_view name) const { - name = StringUtil::removeTrailingCharacters(name, '.'); - MemBlockBuilder mem_block(SymbolTableImpl::Encoding::totalSizeBytes(name.size())); - SymbolTableImpl::Encoding::appendEncoding(name.size(), mem_block); - mem_block.appendData( - absl::MakeSpan(reinterpret_cast(name.data()), name.size())); - ASSERT(mem_block.capacityRemaining() == 0); - return mem_block.release(); - } -}; - -} // namespace Stats -} // namespace Envoy diff --git a/source/common/stats/isolated_store_impl.cc b/source/common/stats/isolated_store_impl.cc index d9511916e844..bbc2a267c918 100644 --- a/source/common/stats/isolated_store_impl.cc +++ b/source/common/stats/isolated_store_impl.cc @@ -5,16 +5,14 @@ #include #include "common/common/utility.h" -#include "common/stats/fake_symbol_table_impl.h" #include "common/stats/histogram_impl.h" #include "common/stats/scope_prefixer.h" -#include "common/stats/symbol_table_creator.h" #include "common/stats/utility.h" namespace Envoy { namespace Stats { -IsolatedStoreImpl::IsolatedStoreImpl() : IsolatedStoreImpl(SymbolTableCreator::makeSymbolTable()) {} +IsolatedStoreImpl::IsolatedStoreImpl() : IsolatedStoreImpl(std::make_unique()) {} IsolatedStoreImpl::IsolatedStoreImpl(std::unique_ptr&& symbol_table) : IsolatedStoreImpl(*symbol_table) { diff --git a/source/common/stats/symbol_table_creator.cc b/source/common/stats/symbol_table_creator.cc deleted file mode 100644 index 755c8fcce2e4..000000000000 --- a/source/common/stats/symbol_table_creator.cc +++ /dev/null @@ -1,24 +0,0 @@ -#include "common/stats/symbol_table_creator.h" - -namespace Envoy { -namespace Stats { - -bool SymbolTableCreator::initialized_ = false; -bool 
SymbolTableCreator::use_fake_symbol_tables_ = false; - -SymbolTablePtr SymbolTableCreator::initAndMakeSymbolTable(bool use_fake) { - ASSERT(!initialized_ || (use_fake_symbol_tables_ == use_fake)); - use_fake_symbol_tables_ = use_fake; - return makeSymbolTable(); -} - -SymbolTablePtr SymbolTableCreator::makeSymbolTable() { - initialized_ = true; - if (use_fake_symbol_tables_) { - return std::make_unique(); - } - return std::make_unique(); -} - -} // namespace Stats -} // namespace Envoy diff --git a/source/common/stats/symbol_table_creator.h b/source/common/stats/symbol_table_creator.h deleted file mode 100644 index 4b51468890ce..000000000000 --- a/source/common/stats/symbol_table_creator.h +++ /dev/null @@ -1,57 +0,0 @@ -#pragma once - -#include "common/stats/fake_symbol_table_impl.h" -#include "common/stats/symbol_table_impl.h" - -namespace Envoy { -namespace Stats { - -namespace TestUtil { -class SymbolTableCreatorTestPeer; -} - -class SymbolTableCreator { -public: - /** - * Initializes the symbol-table creation system. Once this is called, it is a - * runtime assertion to call this again in production code, changing the - * use_fakes setting. However, tests can change the setting via - * TestUtil::SymbolTableCreatorTestPeer::setUseFakeSymbolTables(use_fakes). - * - * @param use_fakes Whether to use fake symbol tables; typically from a command-line option. - * @return a SymbolTable. - */ - static SymbolTablePtr initAndMakeSymbolTable(bool use_fakes); - - /** - * Factory method to create SymbolTables. This is needed to help make it - * possible to flag-flip use of real symbol tables, and ultimately should be - * removed. - * - * @return a SymbolTable. - */ - static SymbolTablePtr makeSymbolTable(); - - /** - * @return whether the system is initialized to use fake symbol tables. - */ - static bool useFakeSymbolTables() { return use_fake_symbol_tables_; } - -private: - friend class TestUtil::SymbolTableCreatorTestPeer; - - /** - * Sets whether fake or real symbol tables should be used. Tests that alter - * this should restore previous value at the end of the test. This must be - * called via TestUtil::SymbolTableCreatorTestPeer. - * - * *param use_fakes whether to use fake symbol tables. - */ - static void setUseFakeSymbolTables(bool use_fakes) { use_fake_symbol_tables_ = use_fakes; } - - static bool initialized_; - static bool use_fake_symbol_tables_; -}; - -} // namespace Stats -} // namespace Envoy diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 54d0c78eba9b..4bd4ec6a9d6a 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -206,11 +206,13 @@ void ThreadLocalStoreImpl::mergeHistograms(PostMergeCb merge_complete_cb) { ASSERT(!merge_in_progress_); merge_in_progress_ = true; tls_->runOnAllThreads( - [this]() -> void { - for (const auto& id_hist : tls_->getTyped().tls_histogram_cache_) { + [](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + for (const auto& id_hist : object->asType().tls_histogram_cache_) { const TlsHistogramSharedPtr& tls_hist = id_hist.second; tls_hist->beginMerge(); } + return object; }, [this, merge_complete_cb]() -> void { mergeInternal(merge_complete_cb); }); } else { @@ -304,7 +306,11 @@ void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id, if (!shutting_down_) { // Perform a cache flush on all threads. 
tls_->runOnAllThreads( - [this, scope_id]() { tls_->getTyped().eraseScope(scope_id); }, + [scope_id](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().eraseScope(scope_id); + return object; + }, [central_cache]() { /* Holds onto central_cache until all tls caches are clear */ }); } } @@ -320,8 +326,11 @@ void ThreadLocalStoreImpl::clearHistogramFromCaches(uint64_t histogram_id) { // https://gist.github.com/jmarantz/838cb6de7e74c0970ea6b63eded0139a // contains a patch that will implement batching together to clear multiple // histograms. - tls_->runOnAllThreads( - [this, histogram_id]() { tls_->getTyped().eraseHistogram(histogram_id); }); + tls_->runOnAllThreads([histogram_id](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().eraseHistogram(histogram_id); + return object; + }); } } diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 22d72bfaa9e0..8ef60df207ba 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -498,6 +498,15 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo NullHistogramImpl null_histogram_; NullTextReadoutImpl null_text_readout_; + Thread::ThreadSynchronizer sync_; + std::atomic next_scope_id_{}; + uint64_t next_histogram_id_ ABSL_GUARDED_BY(hist_mutex_) = 0; + + StatNameSetPtr well_known_tags_; + + mutable Thread::MutexBasicLockable hist_mutex_; + StatSet histogram_set_ ABSL_GUARDED_BY(hist_mutex_); + // Retain storage for deleted stats; these are no longer in maps because the // matcher-pattern was established after they were created. Since the stats // are held by reference in code that expects them to be there, we can't @@ -510,15 +519,6 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo std::vector deleted_gauges_ ABSL_GUARDED_BY(lock_); std::vector deleted_histograms_ ABSL_GUARDED_BY(lock_); std::vector deleted_text_readouts_ ABSL_GUARDED_BY(lock_); - - Thread::ThreadSynchronizer sync_; - std::atomic next_scope_id_{}; - uint64_t next_histogram_id_ ABSL_GUARDED_BY(hist_mutex_) = 0; - - StatNameSetPtr well_known_tags_; - - mutable Thread::MutexBasicLockable hist_mutex_; - StatSet histogram_set_ ABSL_GUARDED_BY(hist_mutex_); }; using ThreadLocalStoreImplPtr = std::unique_ptr; diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index a384cd401cf3..1c0614d93cbf 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -14,9 +14,23 @@ #include "common/http/request_id_extension_impl.h" #include "common/stream_info/filter_state_impl.h" +#include "absl/strings/str_replace.h" + namespace Envoy { namespace StreamInfo { +namespace { + +using ReplacementMap = absl::flat_hash_map; + +const ReplacementMap& emptySpaceReplacement() { + CONSTRUCT_ON_FIRST_USE( + ReplacementMap, + ReplacementMap{{" ", "_"}, {"\t", "_"}, {"\f", "_"}, {"\v", "_"}, {"\n", "_"}, {"\r", "_"}}); +} + +} // namespace + struct StreamInfoImpl : public StreamInfo { StreamInfoImpl(TimeSource& time_source, FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain) @@ -118,7 +132,15 @@ struct StreamInfoImpl : public StreamInfo { } void setResponseCodeDetails(absl::string_view rc_details) override { - response_code_details_.emplace(rc_details); + response_code_details_.emplace(absl::StrReplaceAll(rc_details, emptySpaceReplacement())); + } + + 
const absl::optional& connectionTerminationDetails() const override { + return connection_termination_details_; + } + + void setConnectionTerminationDetails(absl::string_view connection_termination_details) override { + connection_termination_details_.emplace(connection_termination_details); } void addBytesSent(uint64_t bytes_sent) override { bytes_sent_ += bytes_sent; } @@ -268,6 +290,10 @@ struct StreamInfoImpl : public StreamInfo { return upstream_cluster_info_; } + void setConnectionID(uint64_t id) override { connection_id_ = id; } + + absl::optional connectionID() const override { return connection_id_; } + TimeSource& time_source_; const SystemTime start_time_; const MonotonicTime start_time_monotonic_; @@ -280,6 +306,7 @@ struct StreamInfoImpl : public StreamInfo { absl::optional protocol_; absl::optional response_code_; absl::optional response_code_details_; + absl::optional connection_termination_details_; uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; bool health_check_request_{}; @@ -311,6 +338,7 @@ struct StreamInfoImpl : public StreamInfo { UpstreamTiming upstream_timing_; std::string upstream_transport_failure_reason_; absl::optional upstream_cluster_info_; + absl::optional connection_id_; }; } // namespace StreamInfo diff --git a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index 2f7049545bd3..ea339fe02983 100644 --- a/source/common/stream_info/utility.cc +++ b/source/common/stream_info/utility.cc @@ -28,6 +28,7 @@ const std::string ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR = "DPE"; const std::string ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED = "UMSDR"; const std::string ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER = "RFCF"; const std::string ResponseFlagUtils::NO_FILTER_CONFIG_FOUND = "NFCF"; +const std::string ResponseFlagUtils::DURATION_TIMEOUT = "DT"; void ResponseFlagUtils::appendString(std::string& result, const std::string& append) { if (result.empty()) { @@ -40,7 +41,7 @@ void ResponseFlagUtils::appendString(std::string& result, const std::string& app const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info) { std::string result; - static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x400000, "A flag has been added. Fix this code."); if (stream_info.hasResponseFlag(ResponseFlag::FailedLocalHealthCheck)) { appendString(result, FAILED_LOCAL_HEALTH_CHECK); @@ -129,6 +130,10 @@ const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info appendString(result, NO_FILTER_CONFIG_FOUND); } + if (stream_info.hasResponseFlag(ResponseFlag::DurationTimeout)) { + appendString(result, DURATION_TIMEOUT); + } + return result.empty() ? 
NONE : result; } @@ -159,6 +164,7 @@ absl::optional ResponseFlagUtils::toResponseFlag(const std::string ResponseFlag::UpstreamMaxStreamDurationReached}, {ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER, ResponseFlag::ResponseFromCacheFilter}, {ResponseFlagUtils::NO_FILTER_CONFIG_FOUND, ResponseFlag::NoFilterConfigFound}, + {ResponseFlagUtils::DURATION_TIMEOUT, ResponseFlag::DurationTimeout}, }; const auto& it = map.find(flag); if (it != map.end()) { diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index 9b4ac08e413c..4612ecaaad04 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -43,6 +43,7 @@ class ResponseFlagUtils { const static std::string UPSTREAM_MAX_STREAM_DURATION_REACHED; const static std::string RESPONSE_FROM_CACHE_FILTER; const static std::string NO_FILTER_CONFIG_FOUND; + const static std::string DURATION_TIMEOUT; }; /** diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index cf5ab3cea651..c75b28f59156 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -158,6 +158,9 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, TcpAttachContext context(&callbacks); return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); } + bool maybePrefetch(float prefetch_ratio) override { + return Envoy::ConnectionPool::ConnPoolImplBase::maybePrefetch(prefetch_ratio); + } ConnectionPool::Cancellable* newPendingStream(Envoy::ConnectionPool::AttachContext& context) override { diff --git a/source/common/tcp/original_conn_pool.h b/source/common/tcp/original_conn_pool.h index 2c0af2d50680..e17a4bb2ac38 100644 --- a/source/common/tcp/original_conn_pool.h +++ b/source/common/tcp/original_conn_pool.h @@ -33,6 +33,8 @@ class OriginalConnPoolImpl : Logger::Loggable, public Connecti void drainConnections() override; void closeConnections() override; ConnectionPool::Cancellable* newConnection(ConnectionPool::Callbacks& callbacks) override; + // The old pool does not implement prefetching. 
+ bool maybePrefetch(float) override { return false; } Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } protected: diff --git a/source/common/tcp_proxy/BUILD b/source/common/tcp_proxy/BUILD index 328aca0a23e9..f75138d14134 100644 --- a/source/common/tcp_proxy/BUILD +++ b/source/common/tcp_proxy/BUILD @@ -44,6 +44,7 @@ envoy_cc_library( "//source/common/network:cidr_range_lib", "//source/common/network:filter_lib", "//source/common/network:hash_policy_lib", + "//source/common/network:proxy_protocol_filter_state_lib", "//source/common/network:transport_socket_options_lib", "//source/common/network:upstream_server_name_lib", "//source/common/network:utility_lib", diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 91abf2f82e5a..3688634f5305 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -22,6 +22,7 @@ #include "common/common/utility.h" #include "common/config/well_known_names.h" #include "common/network/application_protocol.h" +#include "common/network/proxy_protocol_filter_state.h" #include "common/network/transport_socket_options_impl.h" #include "common/network/upstream_server_name.h" #include "common/router/metadatamatchcriteria_impl.h" @@ -110,6 +111,11 @@ Config::SharedConfig::SharedConfig( if (config.has_tunneling_config()) { tunneling_config_ = config.tunneling_config(); } + if (config.has_max_downstream_connection_duration()) { + const uint64_t connection_duration = + DurationUtil::durationToMilliseconds(config.max_downstream_connection_duration()); + max_downstream_connection_duration_ = std::chrono::milliseconds(connection_duration); + } } Config::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config, @@ -117,7 +123,7 @@ Config::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProx : max_connect_attempts_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_connect_attempts, 1)), upstream_drain_manager_slot_(context.threadLocal().allocateSlot()), shared_config_(std::make_shared(config, context)), - random_generator_(context.random()) { + random_generator_(context.api().randomGenerator()) { upstream_drain_manager_slot_->set([](Event::Dispatcher&) { ThreadLocal::ThreadLocalObjectSharedPtr drain_manager = @@ -219,7 +225,7 @@ Filter::~Filter() { access_log->log(nullptr, nullptr, nullptr, getStreamInfo()); } - ASSERT(upstream_handle_ == nullptr); + ASSERT(generic_conn_pool_ == nullptr); ASSERT(upstream_ == nullptr); } @@ -409,6 +415,18 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { } if (downstreamConnection()) { + if (!read_callbacks_->connection() + .streamInfo() + .filterState() + ->hasData( + Network::ProxyProtocolFilterState::key())) { + read_callbacks_->connection().streamInfo().filterState()->setData( + Network::ProxyProtocolFilterState::key(), + std::make_unique(Network::ProxyProtocolData{ + downstreamConnection()->remoteAddress(), downstreamConnection()->localAddress()}), + StreamInfo::FilterState::StateType::ReadOnly, + StreamInfo::FilterState::LifeSpan::Connection); + } transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( downstreamConnection()->streamInfo().filterState()); } @@ -424,24 +442,17 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { bool Filter::maybeTunnel(const std::string& cluster_name) { if (!config_->tunnelingConfig()) { - Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster( - cluster_name, 
Upstream::ResourcePriority::Default, this); - if (conn_pool) { + generic_conn_pool_ = + std::make_unique(cluster_name, cluster_manager_, this, *upstream_callbacks_); + if (generic_conn_pool_->valid()) { connecting_ = true; connect_attempts_++; - - // Given this function is reentrant, make sure we only reset the upstream_handle_ if given a - // valid connection handle. If newConnection fails inline it may result in attempting to - // select a new host, and a recursive call to initializeUpstreamConnection. In this case the - // first call to newConnection will return null and the inner call will persist. - Tcp::ConnectionPool::Cancellable* handle = conn_pool->newConnection(*this); - if (handle) { - ASSERT(upstream_handle_.get() == nullptr); - upstream_handle_ = std::make_shared(handle); - } + generic_conn_pool_->newStream(this); // Because we never return open connections to the pool, this either has a handle waiting on // connection completion, or onPoolFailure has been invoked. Either way, stop iteration. return true; + } else { + generic_conn_pool_.reset(); } } else { auto* cluster = cluster_manager_.get(cluster_name); @@ -456,28 +467,23 @@ bool Filter::maybeTunnel(const std::string& cluster_name) { "http2_protocol_options on the cluster."); return false; } - Http::ConnectionPool::Instance* conn_pool = cluster_manager_.httpConnPoolForCluster( - cluster_name, Upstream::ResourcePriority::Default, absl::nullopt, this); - if (conn_pool) { - upstream_ = std::make_unique(*upstream_callbacks_, - config_->tunnelingConfig()->hostname()); - HttpUpstream* http_upstream = static_cast(upstream_.get()); - Http::ConnectionPool::Cancellable* cancellable = - conn_pool->newStream(http_upstream->responseDecoder(), *this); - if (cancellable) { - ASSERT(upstream_handle_.get() == nullptr); - upstream_handle_ = std::make_shared(cancellable); - } + + generic_conn_pool_ = std::make_unique(cluster_name, cluster_manager_, this, + config_->tunnelingConfig()->hostname(), + *upstream_callbacks_); + if (generic_conn_pool_->valid()) { + generic_conn_pool_->newStream(this); return true; + } else { + generic_conn_pool_.reset(); } } return false; } -void Filter::onPoolFailure(ConnectionPool::PoolFailureReason reason, - Upstream::HostDescriptionConstSharedPtr host) { - upstream_handle_.reset(); - +void Filter::onGenericPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) { + generic_conn_pool_.reset(); read_callbacks_->upstreamHost(host); getStreamInfo().onUpstreamHostSelected(host); @@ -500,44 +506,22 @@ void Filter::onPoolFailure(ConnectionPool::PoolFailureReason reason, } } -void Filter::onPoolReadyBase(Upstream::HostDescriptionConstSharedPtr& host, - const Network::Address::InstanceConstSharedPtr& local_address, - Ssl::ConnectionInfoConstSharedPtr ssl_info) { - upstream_handle_.reset(); +void Filter::onGenericPoolReady(StreamInfo::StreamInfo* info, + std::unique_ptr&& upstream, + Upstream::HostDescriptionConstSharedPtr& host, + const Network::Address::InstanceConstSharedPtr& local_address, + Ssl::ConnectionInfoConstSharedPtr ssl_info) { + upstream_ = std::move(upstream); + generic_conn_pool_.reset(); read_callbacks_->upstreamHost(host); getStreamInfo().onUpstreamHostSelected(host); getStreamInfo().setUpstreamLocalAddress(local_address); getStreamInfo().setUpstreamSslConnection(ssl_info); onUpstreamConnection(); read_callbacks_->continueReading(); -} - -void Filter::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, - Upstream::HostDescriptionConstSharedPtr 
host) { - Tcp::ConnectionPool::ConnectionData* latched_data = conn_data.get(); - - upstream_ = std::make_unique(std::move(conn_data), *upstream_callbacks_); - onPoolReadyBase(host, latched_data->connection().localAddress(), - latched_data->connection().streamInfo().downstreamSslConnection()); - read_callbacks_->connection().streamInfo().setUpstreamFilterState( - latched_data->connection().streamInfo().filterState()); -} - -void Filter::onPoolFailure(ConnectionPool::PoolFailureReason failure, absl::string_view, - Upstream::HostDescriptionConstSharedPtr host) { - onPoolFailure(failure, host); -} - -void Filter::onPoolReady(Http::RequestEncoder& request_encoder, - Upstream::HostDescriptionConstSharedPtr host, - const StreamInfo::StreamInfo& info) { - Http::RequestEncoder* latched_encoder = &request_encoder; - HttpUpstream* http_upstream = static_cast(upstream_.get()); - http_upstream->setRequestEncoder(request_encoder, - host->transportSocketFactory().implementsSecureTransport()); - - onPoolReadyBase(host, latched_encoder->getStream().connectionLocalAddress(), - info.downstreamSslConnection()); + if (info) { + read_callbacks_->connection().streamInfo().setUpstreamFilterState(info->filterState()); + } } const Router::MetadataMatchCriteria* Filter::metadataMatchCriteria() { @@ -583,6 +567,15 @@ Network::FilterStatus Filter::onData(Buffer::Instance& data, bool end_stream) { return Network::FilterStatus::StopIteration; } +Network::FilterStatus Filter::onNewConnection() { + if (config_->maxDownstreamConnectionDuration()) { + connection_duration_timer_ = read_callbacks_->connection().dispatcher().createTimer( + [this]() -> void { onMaxDownstreamConnectionDuration(); }); + connection_duration_timer_->enableTimer(config_->maxDownstreamConnectionDuration().value()); + } + return initializeUpstreamConnection(); +} + void Filter::onDownstreamEvent(Network::ConnectionEvent event) { if (upstream_) { Tcp::ConnectionPool::ConnectionDataPtr conn_data(upstream_->onDownstreamEvent(event)); @@ -597,12 +590,11 @@ void Filter::onDownstreamEvent(Network::ConnectionEvent event) { disableIdleTimer(); } } - if (upstream_handle_) { + if (generic_conn_pool_) { if (event == Network::ConnectionEvent::LocalClose || event == Network::ConnectionEvent::RemoteClose) { // Cancel the conn pool request and close any excess pending requests. 
- upstream_handle_->cancel(); - upstream_handle_.reset(); + generic_conn_pool_.reset(); } } } @@ -679,6 +671,13 @@ void Filter::onIdleTimeout() { read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); } +void Filter::onMaxDownstreamConnectionDuration() { + ENVOY_CONN_LOG(debug, "max connection duration reached", read_callbacks_->connection()); + getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::DurationTimeout); + config_->stats().max_downstream_connection_duration_.inc(); + read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); +} + void Filter::resetIdleTimer() { if (idle_timer_ != nullptr) { ASSERT(config_->idleTimeout()); diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index b1abf95cc247..12b482e01690 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -45,6 +45,7 @@ namespace TcpProxy { COUNTER(downstream_flow_control_paused_reading_total) \ COUNTER(downstream_flow_control_resumed_reading_total) \ COUNTER(idle_timeout) \ + COUNTER(max_downstream_connection_duration) \ COUNTER(upstream_flush_total) \ GAUGE(downstream_cx_rx_bytes_buffered, Accumulate) \ GAUGE(downstream_cx_tx_bytes_buffered, Accumulate) \ @@ -107,6 +108,9 @@ class Config { const TcpProxyStats& stats() { return stats_; } const absl::optional& idleTimeout() { return idle_timeout_; } const absl::optional tunnelingConfig() { return tunneling_config_; } + const absl::optional& maxDownstreamConnectionDuration() const { + return max_downstream_connection_duration_; + } private: static TcpProxyStats generateStats(Stats::Scope& scope); @@ -118,6 +122,7 @@ class Config { const TcpProxyStats stats_; absl::optional idle_timeout_; absl::optional tunneling_config_; + absl::optional max_downstream_connection_duration_; }; using SharedConfigSharedPtr = std::shared_ptr; @@ -142,6 +147,9 @@ class Config { const absl::optional& idleTimeout() { return shared_config_->idleTimeout(); } + const absl::optional& maxDownstreamConnectionDuration() const { + return shared_config_->maxDownstreamConnectionDuration(); + } const absl::optional tunnelingConfig() { return shared_config_->tunnelingConfig(); } @@ -234,35 +242,24 @@ class PerConnectionCluster : public StreamInfo::FilterState::Object { */ class Filter : public Network::ReadFilter, public Upstream::LoadBalancerContextBase, - Tcp::ConnectionPool::Callbacks, - public Http::ConnectionPool::Callbacks, - protected Logger::Loggable { + protected Logger::Loggable, + public GenericConnectionPoolCallbacks { public: Filter(ConfigSharedPtr config, Upstream::ClusterManager& cluster_manager); ~Filter() override; // Network::ReadFilter Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; - Network::FilterStatus onNewConnection() override { return initializeUpstreamConnection(); } + Network::FilterStatus onNewConnection() override; void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; - // Tcp::ConnectionPool::Callbacks - void onPoolFailure(ConnectionPool::PoolFailureReason reason, - Upstream::HostDescriptionConstSharedPtr host) override; - void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, - Upstream::HostDescriptionConstSharedPtr host) override; - - // Http::ConnectionPool::Callbacks, - void onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) override; - void onPoolReady(Http::RequestEncoder&
request_encoder, - Upstream::HostDescriptionConstSharedPtr host, - const StreamInfo::StreamInfo& info) override; - - void onPoolReadyBase(Upstream::HostDescriptionConstSharedPtr& host, - const Network::Address::InstanceConstSharedPtr& local_address, - Ssl::ConnectionInfoConstSharedPtr ssl_info); + // GenericConnectionPoolCallbacks + void onGenericPoolReady(StreamInfo::StreamInfo* info, std::unique_ptr&& upstream, + Upstream::HostDescriptionConstSharedPtr& host, + const Network::Address::InstanceConstSharedPtr& local_address, + Ssl::ConnectionInfoConstSharedPtr ssl_info) override; + void onGenericPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) override; // Upstream::LoadBalancerContext const Router::MetadataMatchCriteria* metadataMatchCriteria() override; @@ -357,6 +354,7 @@ class Filter : public Network::ReadFilter, void onIdleTimeout(); void resetIdleTimer(); void disableIdleTimer(); + void onMaxDownstreamConnectionDuration(); const ConfigSharedPtr config_; Upstream::ClusterManager& cluster_manager_; @@ -364,11 +362,17 @@ class Filter : public Network::ReadFilter, DownstreamCallbacks downstream_callbacks_; Event::TimerPtr idle_timer_; + Event::TimerPtr connection_duration_timer_; - std::shared_ptr upstream_handle_; std::shared_ptr upstream_callbacks_; // shared_ptr required for passing as a // read filter. + // The upstream handle (either TCP or HTTP). This is set in onGenericPoolReady and should persist + // until either the upstream or downstream connection is terminated. std::unique_ptr upstream_; + // The connection pool used to set up |upstream_|. + // This will be non-null from when an upstream connection is attempted until + // it either succeeds or fails. + std::unique_ptr generic_conn_pool_; RouteConstSharedPtr route_; Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; Network::TransportSocketOptionsSharedPtr transport_socket_options_; diff --git a/source/common/tcp_proxy/upstream.cc b/source/common/tcp_proxy/upstream.cc index 451a277e0865..1da6eb915797 100644 --- a/source/common/tcp_proxy/upstream.cc +++ b/source/common/tcp_proxy/upstream.cc @@ -1,5 +1,7 @@ #include "common/tcp_proxy/upstream.h" +#include "envoy/upstream/cluster_manager.h" + #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/utility.h" @@ -152,5 +154,99 @@ void HttpUpstream::doneWriting() { } } +TcpConnPool::TcpConnPool(const std::string& cluster_name, Upstream::ClusterManager& cluster_manager, + Upstream::LoadBalancerContext* context, + Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks) + : upstream_callbacks_(upstream_callbacks) { + conn_pool_ = cluster_manager.tcpConnPoolForCluster(cluster_name, + Upstream::ResourcePriority::Default, context); +} + +TcpConnPool::~TcpConnPool() { + if (upstream_handle_ != nullptr) { + upstream_handle_->cancel(ConnectionPool::CancelPolicy::CloseExcess); + } +} + +bool TcpConnPool::valid() const { return conn_pool_ != nullptr; } + +void TcpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { + callbacks_ = callbacks; + // Given this function is reentrant, make sure we only reset the upstream_handle_ if given a + // valid connection handle. If newConnection fails inline it may result in attempting to + // select a new host, and a recursive call to initializeUpstreamConnection. In this case the + // first call to newConnection will return null and the inner call will persist. 
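The reentrancy comment above is the key invariant for the call that follows: if the connection request fails inline, the failure callback may recurse and latch its own handle, so the outer call must only store a handle when one is actually returned. A minimal standalone sketch of that guard, with illustrative names and a toy pool (not Envoy's API):

#include <cassert>
#include <functional>
#include <iostream>

struct Handle {};

// Sketch only: a pool whose newConnection() may invoke the failure callback
// inline (re-entering the caller) and return nullptr.
struct ToyPool {
  bool fail_inline{false};
  Handle* newConnection(const std::function<void()>& on_inline_failure) {
    if (fail_inline) {
      on_inline_failure(); // may re-enter the caller before we return
      return nullptr;
    }
    static Handle handle;
    return &handle;
  }
};

struct Caller {
  ToyPool* pool{};
  Handle* latched_handle{};
  int attempts{0};

  void initialize() {
    attempts++;
    Handle* handle = pool->newConnection([this] {
      // Inline failure: pick another pool and retry. This is the re-entrant
      // call the comment above warns about; it may latch its own handle.
      if (attempts < 2) {
        static ToyPool healthy_pool;
        pool = &healthy_pool;
        initialize();
      }
    });
    // Only latch when this outer attempt is actually pending; a null return
    // means the inner, re-entrant attempt already latched a handle.
    if (handle != nullptr) {
      assert(latched_handle == nullptr);
      latched_handle = handle;
    }
  }
};

int main() {
  ToyPool failing_pool{true};
  Caller caller{&failing_pool};
  caller.initialize();
  std::cout << "handle latched: " << (caller.latched_handle != nullptr) << "\n";
}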
+ Tcp::ConnectionPool::Cancellable* handle = conn_pool_->newConnection(*this); + if (handle) { + ASSERT(upstream_handle_ == nullptr); + upstream_handle_ = handle; + } +} + +void TcpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) { + upstream_handle_ = nullptr; + callbacks_->onGenericPoolFailure(reason, host); +} + +void TcpConnPool::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) { + upstream_handle_ = nullptr; + Tcp::ConnectionPool::ConnectionData* latched_data = conn_data.get(); + Network::Connection& connection = conn_data->connection(); + + auto upstream = std::make_unique(std::move(conn_data), upstream_callbacks_); + callbacks_->onGenericPoolReady(&connection.streamInfo(), std::move(upstream), host, + latched_data->connection().localAddress(), + latched_data->connection().streamInfo().downstreamSslConnection()); +} + +HttpConnPool::HttpConnPool(const std::string& cluster_name, + Upstream::ClusterManager& cluster_manager, + Upstream::LoadBalancerContext* context, std::string hostname, + Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks) + : hostname_(hostname), upstream_callbacks_(upstream_callbacks) { + conn_pool_ = cluster_manager.httpConnPoolForCluster( + cluster_name, Upstream::ResourcePriority::Default, absl::nullopt, context); +} + +HttpConnPool::~HttpConnPool() { + if (upstream_handle_ != nullptr) { + // Because HTTP connections are generally shorter lived and have a higher probability of use + // before going idle, they are closed with Default rather than CloseExcess. + upstream_handle_->cancel(ConnectionPool::CancelPolicy::Default); + } +} + +bool HttpConnPool::valid() const { return conn_pool_ != nullptr; } + +void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { + callbacks_ = callbacks; + upstream_ = std::make_unique(upstream_callbacks_, hostname_); + Tcp::ConnectionPool::Cancellable* handle = + conn_pool_->newStream(upstream_->responseDecoder(), *this); + if (handle != nullptr) { + upstream_handle_ = handle; + } +} + +void HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view, + Upstream::HostDescriptionConstSharedPtr host) { + upstream_handle_ = nullptr; + callbacks_->onGenericPoolFailure(reason, host); +} + +void HttpConnPool::onPoolReady(Http::RequestEncoder& request_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) { + upstream_handle_ = nullptr; + Http::RequestEncoder* latched_encoder = &request_encoder; + upstream_->setRequestEncoder(request_encoder, + host->transportSocketFactory().implementsSecureTransport()); + callbacks_->onGenericPoolReady(nullptr, std::move(upstream_), host, + latched_encoder->getStream().connectionLocalAddress(), + info.downstreamSslConnection()); +} + } // namespace TcpProxy } // namespace Envoy diff --git a/source/common/tcp_proxy/upstream.h b/source/common/tcp_proxy/upstream.h index 8d2a301d7137..33943e70b982 100644 --- a/source/common/tcp_proxy/upstream.h +++ b/source/common/tcp_proxy/upstream.h @@ -3,42 +3,95 @@ #include "envoy/http/conn_pool.h" #include "envoy/network/connection.h" #include "envoy/tcp/conn_pool.h" +#include "envoy/upstream/load_balancer.h" #include "envoy/upstream/upstream.h" namespace Envoy { namespace TcpProxy { -// Interface for a generic ConnectionHandle, which can wrap a TcpConnectionHandle -// or an HttpConnectionHandle -class ConnectionHandle { +class 
GenericConnectionPoolCallbacks; +class GenericUpstream; + +// An API for wrapping either an HTTP or a TCP connection pool. +class GenericConnPool : public Logger::Loggable { public: - virtual ~ConnectionHandle() = default; - // Cancel the conn pool request and close any excess pending requests. - virtual void cancel() PURE; + virtual ~GenericConnPool() = default; + + // Called to create a new HTTP stream or TCP connection. The implementation + // is then responsible for calling either onPoolReady or onPoolFailure on the + // supplied GenericConnectionPoolCallbacks. + virtual void newStream(GenericConnectionPoolCallbacks* callbacks) PURE; + // Returns true if there was a valid connection pool, false otherwise. + virtual bool valid() const PURE; }; -// An implementation of ConnectionHandle which works with the Tcp::ConnectionPool. -class TcpConnectionHandle : public ConnectionHandle { +class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callbacks { public: - TcpConnectionHandle(Tcp::ConnectionPool::Cancellable* handle) : upstream_handle_(handle) {} + TcpConnPool(const std::string& cluster_name, Upstream::ClusterManager& cluster_manager, + Upstream::LoadBalancerContext* context, + Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks); + ~TcpConnPool() override; + + // GenericConnPool + bool valid() const override; + void newStream(GenericConnectionPoolCallbacks* callbacks) override; - void cancel() override { - upstream_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::CloseExcess); - } + // Tcp::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) override; private: + Tcp::ConnectionPool::Instance* conn_pool_{}; Tcp::ConnectionPool::Cancellable* upstream_handle_{}; + GenericConnectionPoolCallbacks* callbacks_{}; + Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks_; }; -class HttpConnectionHandle : public ConnectionHandle { +class HttpUpstream; + +class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { public: - HttpConnectionHandle(Http::ConnectionPool::Cancellable* handle) : upstream_http_handle_(handle) {} - void cancel() override { - upstream_http_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); - } + HttpConnPool(const std::string& cluster_name, Upstream::ClusterManager& cluster_manager, + Upstream::LoadBalancerContext* context, std::string hostname, + Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks); + ~HttpConnPool() override; + + // GenericConnPool + bool valid() const override; + void newStream(GenericConnectionPoolCallbacks* callbacks) override; + + // Http::ConnectionPool::Callbacks, + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Http::RequestEncoder& request_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) override; private: - Http::ConnectionPool::Cancellable* upstream_http_handle_{}; + const std::string hostname_; + Http::ConnectionPool::Instance* conn_pool_{}; + Http::ConnectionPool::Cancellable* upstream_handle_{}; + GenericConnectionPoolCallbacks* callbacks_{}; + Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks_; + std::unique_ptr upstream_; +}; + +// An API for the UpstreamRequest to get 
callbacks from either an HTTP or TCP +// connection pool. +class GenericConnectionPoolCallbacks { +public: + virtual ~GenericConnectionPoolCallbacks() = default; + + virtual void onGenericPoolReady(StreamInfo::StreamInfo* info, + std::unique_ptr&& upstream, + Upstream::HostDescriptionConstSharedPtr& host, + const Network::Address::InstanceConstSharedPtr& local_address, + Ssl::ConnectionInfoConstSharedPtr ssl_info) PURE; + virtual void onGenericPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) PURE; }; // Interface for a generic Upstream, which can communicate with a TCP or HTTP diff --git a/source/common/thread/BUILD b/source/common/thread/BUILD new file mode 100644 index 000000000000..6f79301e8676 --- /dev/null +++ b/source/common/thread/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "terminate_thread_lib", + srcs = ["terminate_thread.cc"], + hdrs = ["terminate_thread.h"], + deps = [ + "//include/envoy/thread:thread_interface", + "//source/common/common:minimal_logger_lib", + ], +) diff --git a/source/common/thread/terminate_thread.cc b/source/common/thread/terminate_thread.cc new file mode 100644 index 000000000000..435d704e6d24 --- /dev/null +++ b/source/common/thread/terminate_thread.cc @@ -0,0 +1,31 @@ +#include "common/thread/terminate_thread.h" + +#include + +#include + +#include "common/common/logger.h" + +namespace Envoy { +namespace Thread { +namespace { +#ifdef __linux__ +pid_t toPlatformTid(int64_t tid) { return static_cast(tid); } +#elif defined(__APPLE__) +uint64_t toPlatformTid(int64_t tid) { return static_cast(tid); } +#endif +} // namespace + +bool terminateThread(const ThreadId& tid) { +#ifndef WIN32 + // Assume POSIX-compatible system and signal to the thread. + return kill(toPlatformTid(tid.getId()), SIGABRT) == 0; +#else + // Windows, currently unsupported termination of thread. + ENVOY_LOG_MISC(error, "Windows is currently unsupported for terminateThread."); + return false; +#endif +} + +} // namespace Thread +} // namespace Envoy diff --git a/source/common/thread/terminate_thread.h b/source/common/thread/terminate_thread.h new file mode 100644 index 000000000000..a9a20b1903cf --- /dev/null +++ b/source/common/thread/terminate_thread.h @@ -0,0 +1,19 @@ +#pragma once + +#include "envoy/thread/thread.h" + +namespace Envoy { +namespace Thread { +/** + * Tries to terminates the process by killing the thread specified by + * the ThreadId. The implementation is platform dependent and currently + * only works on platforms that support SIGABRT. + * + * Returns true if the platform specific function to terminate the thread + * succeeded (i.e. kill() == 0). If the platform is currently unsupported, this + * will return false. 
+ */ +bool terminateThread(const ThreadId& tid); + +} // namespace Thread +} // namespace Envoy diff --git a/source/common/thread_local/thread_local_impl.cc b/source/common/thread_local/thread_local_impl.cc index d4d02f8b2f5f..7ed9eeca7942 100644 --- a/source/common/thread_local/thread_local_impl.cc +++ b/source/common/thread_local/thread_local_impl.cc @@ -26,79 +26,71 @@ SlotPtr InstanceImpl::allocateSlot() { ASSERT(!shutdown_); if (free_slot_indexes_.empty()) { - SlotImplPtr slot(new SlotImpl(*this, slots_.size())); - auto wrapper = std::make_unique(*this, std::move(slot)); - slots_.push_back(wrapper->slot_.get()); - return wrapper; + SlotPtr slot = std::make_unique(*this, slots_.size()); + slots_.push_back(slot.get()); + return slot; } const uint32_t idx = free_slot_indexes_.front(); free_slot_indexes_.pop_front(); ASSERT(idx < slots_.size()); - SlotImplPtr slot(new SlotImpl(*this, idx)); + SlotPtr slot = std::make_unique(*this, idx); slots_[idx] = slot.get(); - return std::make_unique(*this, std::move(slot)); + return slot; } -bool InstanceImpl::SlotImpl::currentThreadRegistered() { - return thread_local_data_.data_.size() > index_; -} +InstanceImpl::SlotImpl::SlotImpl(InstanceImpl& parent, uint32_t index) + : parent_(parent), index_(index), still_alive_guard_(std::make_shared(true)) {} -void InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb) { - parent_.runOnAllThreads([this, cb]() { setThreadLocal(index_, cb(get())); }); +Event::PostCb InstanceImpl::SlotImpl::wrapCallback(Event::PostCb&& cb) { + // See the header file comments for still_alive_guard_ for the purpose of this capture and the + // expired check below. + return [still_alive_guard = std::weak_ptr(still_alive_guard_), cb] { + if (!still_alive_guard.expired()) { + cb(); + } + }; } -void InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb, Event::PostCb complete_cb) { - parent_.runOnAllThreads([this, cb]() { setThreadLocal(index_, cb(get())); }, complete_cb); +bool InstanceImpl::SlotImpl::currentThreadRegisteredWorker(uint32_t index) { + return thread_local_data_.data_.size() > index; } -ThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::get() { - ASSERT(currentThreadRegistered()); - return thread_local_data_.data_[index_]; +bool InstanceImpl::SlotImpl::currentThreadRegistered() { + return currentThreadRegisteredWorker(index_); } -InstanceImpl::Bookkeeper::Bookkeeper(InstanceImpl& parent, SlotImplPtr&& slot) - : parent_(parent), slot_(std::move(slot)), - ref_count_(/*not used.*/ nullptr, - [slot = slot_.get(), &parent = this->parent_](uint32_t* /* not used */) { - // On destruction, post a cleanup callback on main thread, this could happen on - // any thread. 
- parent.scheduleCleanup(slot); - }) {} - -ThreadLocalObjectSharedPtr InstanceImpl::Bookkeeper::get() { return slot_->get(); } - -void InstanceImpl::Bookkeeper::runOnAllThreads(const UpdateCb& cb, Event::PostCb complete_cb) { - slot_->runOnAllThreads( - [cb, ref_count = this->ref_count_](ThreadLocalObjectSharedPtr previous) { - return cb(std::move(previous)); - }, - complete_cb); +ThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::getWorker(uint32_t index) { + ASSERT(currentThreadRegisteredWorker(index)); + return thread_local_data_.data_[index]; } -void InstanceImpl::Bookkeeper::runOnAllThreads(const UpdateCb& cb) { - slot_->runOnAllThreads([cb, ref_count = this->ref_count_](ThreadLocalObjectSharedPtr previous) { - return cb(std::move(previous)); - }); -} +ThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::get() { return getWorker(index_); } -bool InstanceImpl::Bookkeeper::currentThreadRegistered() { - return slot_->currentThreadRegistered(); +void InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb, Event::PostCb complete_cb) { + // See the header file comments for still_alive_guard_ for why we capture index_. + parent_.runOnAllThreads( + wrapCallback([cb, index = index_]() { setThreadLocal(index, cb(getWorker(index))); }), + complete_cb); } -void InstanceImpl::Bookkeeper::runOnAllThreads(Event::PostCb cb) { - // Use ref_count_ to bookkeep how many on-the-fly callback are out there. - slot_->runOnAllThreads([cb, ref_count = this->ref_count_]() { cb(); }); +void InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb) { + // See the header file comments for still_alive_guard_ for why we capture index_. + parent_.runOnAllThreads( + wrapCallback([cb, index = index_]() { setThreadLocal(index, cb(getWorker(index))); })); } -void InstanceImpl::Bookkeeper::runOnAllThreads(Event::PostCb cb, Event::PostCb main_callback) { - // Use ref_count_ to bookkeep how many on-the-fly callback are out there. - slot_->runOnAllThreads([cb, main_callback, ref_count = this->ref_count_]() { cb(); }, - main_callback); -} +void InstanceImpl::SlotImpl::set(InitializeCb cb) { + ASSERT(std::this_thread::get_id() == parent_.main_thread_id_); + ASSERT(!parent_.shutdown_); -void InstanceImpl::Bookkeeper::set(InitializeCb cb) { - slot_->set([cb, ref_count = this->ref_count_](Event::Dispatcher& dispatcher) - -> ThreadLocalObjectSharedPtr { return cb(dispatcher); }); + for (Event::Dispatcher& dispatcher : parent_.registered_threads_) { + // See the header file comments for still_alive_guard_ for why we capture index_. + dispatcher.post(wrapCallback( + [index = index_, cb, &dispatcher]() -> void { setThreadLocal(index, cb(dispatcher)); })); + } + + // Handle main thread. + setThreadLocal(index_, cb(*parent_.main_thread_dispatcher_)); } void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_thread) { @@ -115,39 +107,7 @@ void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_threa } } -// Puts the slot into a deferred delete container, the slot will be destructed when its out-going -// callback reference count goes to 0. -void InstanceImpl::recycle(SlotImplPtr&& slot) { - ASSERT(std::this_thread::get_id() == main_thread_id_); - ASSERT(slot != nullptr); - auto* slot_addr = slot.get(); - deferred_deletes_.insert({slot_addr, std::move(slot)}); -} - -// Called by the Bookkeeper ref_count destructor, the SlotImpl in the deferred deletes map can be -// destructed now. 
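The removed Bookkeeper/ref-count machinery is replaced by the still_alive_guard_ captured into wrapCallback() above: callbacks posted to other threads hold only a weak_ptr, so they silently become no-ops once the slot has been destroyed. A minimal standalone sketch of that pattern (the queue stands in for cross-thread posts; names are illustrative):

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

class Slot {
public:
  Slot() : still_alive_guard_(std::make_shared<bool>(true)) {}

  std::function<void()> wrapCallback(std::function<void()> cb) {
    // Capture a weak_ptr rather than `this`: the slot may already be gone by
    // the time the callback runs on a worker thread.
    return [guard = std::weak_ptr<bool>(still_alive_guard_), cb = std::move(cb)] {
      if (!guard.expired()) {
        cb();
      }
    };
  }

private:
  std::shared_ptr<bool> still_alive_guard_;
};

int main() {
  std::vector<std::function<void()>> queued; // stands in for posts to worker dispatchers
  auto slot = std::make_unique<Slot>();
  queued.push_back(slot->wrapCallback([] { std::cout << "ran\n"; }));
  slot.reset(); // slot destroyed before the worker drains its queue
  for (auto& cb : queued) {
    cb(); // prints nothing: the guard has expired
  }
}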
-void InstanceImpl::scheduleCleanup(SlotImpl* slot) { - if (shutdown_) { - // If server is shutting down, do nothing here. - // The destruction of Bookkeeper has already transferred the SlotImpl to the deferred_deletes_ - // queue. No matter if this method is called from a Worker thread, the SlotImpl will be - // destructed on main thread when InstanceImpl destructs. - return; - } - if (std::this_thread::get_id() == main_thread_id_) { - // If called from main thread, save a callback. - ASSERT(deferred_deletes_.contains(slot)); - deferred_deletes_.erase(slot); - return; - } - main_thread_dispatcher_->post([slot, this]() { - ASSERT(deferred_deletes_.contains(slot)); - // The slot is guaranteed to be put into the deferred_deletes_ map by Bookkeeper destructor. - deferred_deletes_.erase(slot); - }); -} - -void InstanceImpl::removeSlot(SlotImpl& slot) { +void InstanceImpl::removeSlot(uint32_t slot) { ASSERT(std::this_thread::get_id() == main_thread_id_); // When shutting down, we do not post slot removals to other threads. This is because the other @@ -158,18 +118,18 @@ void InstanceImpl::removeSlot(SlotImpl& slot) { return; } - const uint64_t index = slot.index_; - slots_[index] = nullptr; - ASSERT(std::find(free_slot_indexes_.begin(), free_slot_indexes_.end(), index) == + slots_[slot] = nullptr; + ASSERT(std::find(free_slot_indexes_.begin(), free_slot_indexes_.end(), slot) == free_slot_indexes_.end(), - fmt::format("slot index {} already in free slot set!", index)); - free_slot_indexes_.push_back(index); - runOnAllThreads([index]() -> void { + fmt::format("slot index {} already in free slot set!", slot)); + free_slot_indexes_.push_back(slot); + runOnAllThreads([slot]() -> void { // This runs on each thread and clears the slot, making it available for a new allocations. // This is safe even if a new allocation comes in, because everything happens with post() and - // will be sequenced after this removal. - if (index < thread_local_data_.data_.size()) { - thread_local_data_.data_[index] = nullptr; + // will be sequenced after this removal. It is also safe if there are callbacks pending on + // other threads because they will run first. + if (slot < thread_local_data_.data_.size()) { + thread_local_data_.data_[slot] = nullptr; } }); } @@ -205,19 +165,6 @@ void InstanceImpl::runOnAllThreads(Event::PostCb cb, Event::PostCb all_threads_c } } -void InstanceImpl::SlotImpl::set(InitializeCb cb) { - ASSERT(std::this_thread::get_id() == parent_.main_thread_id_); - ASSERT(!parent_.shutdown_); - - for (Event::Dispatcher& dispatcher : parent_.registered_threads_) { - const uint32_t index = index_; - dispatcher.post([index, cb, &dispatcher]() -> void { setThreadLocal(index, cb(dispatcher)); }); - } - - // Handle main thread. 
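With the deferred-delete path gone, removeSlot() simply returns the numeric index to free_slot_indexes_ and clears the per-thread entry, and allocateSlot() reuses a freed index before growing the vector. A small standalone sketch of that index free-list, with illustrative names:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <list>
#include <vector>

class SlotIndexAllocator {
public:
  uint32_t allocate() {
    if (free_indexes_.empty()) {
      slots_.push_back(true); // grow when no index can be recycled
      return static_cast<uint32_t>(slots_.size() - 1);
    }
    const uint32_t idx = free_indexes_.front();
    free_indexes_.pop_front();
    slots_[idx] = true;
    return idx;
  }

  void remove(uint32_t idx) {
    slots_[idx] = false; // stands in for clearing the per-thread entry
    // The same index must not be freed twice.
    assert(std::find(free_indexes_.begin(), free_indexes_.end(), idx) == free_indexes_.end());
    free_indexes_.push_back(idx);
  }

private:
  std::vector<bool> slots_;
  std::list<uint32_t> free_indexes_;
};

int main() {
  SlotIndexAllocator alloc;
  const uint32_t a = alloc.allocate(); // 0
  const uint32_t b = alloc.allocate(); // 1
  alloc.remove(a);
  std::cout << "reused index: " << alloc.allocate() << " (b was " << b << ")\n"; // reuses 0
}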
- setThreadLocal(index_, cb(*parent_.main_thread_dispatcher_)); -} - void InstanceImpl::setThreadLocal(uint32_t index, ThreadLocalObjectSharedPtr object) { if (thread_local_data_.data_.size() <= index) { thread_local_data_.data_.resize(index + 1); diff --git a/source/common/thread_local/thread_local_impl.h b/source/common/thread_local/thread_local_impl.h index 71153107fb3d..2b83a2aebf47 100644 --- a/source/common/thread_local/thread_local_impl.h +++ b/source/common/thread_local/thread_local_impl.h @@ -11,8 +11,6 @@ #include "common/common/logger.h" #include "common/common/non_copyable.h" -#include "absl/container/flat_hash_map.h" - namespace Envoy { namespace ThreadLocal { @@ -32,45 +30,38 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub Event::Dispatcher& dispatcher() override; private: + // On destruction returns the slot index to the deferred delete queue (detaches it). This allows + // a slot to be destructed on the main thread while controlling the lifetime of the underlying + // slot as callbacks drain from workers. struct SlotImpl : public Slot { - SlotImpl(InstanceImpl& parent, uint64_t index) : parent_(parent), index_(index) {} - ~SlotImpl() override { parent_.removeSlot(*this); } - - // ThreadLocal::Slot - ThreadLocalObjectSharedPtr get() override; - bool currentThreadRegistered() override; - void runOnAllThreads(const UpdateCb& cb) override; - void runOnAllThreads(const UpdateCb& cb, Event::PostCb complete_cb) override; - void runOnAllThreads(Event::PostCb cb) override { parent_.runOnAllThreads(cb); } - void runOnAllThreads(Event::PostCb cb, Event::PostCb main_callback) override { - parent_.runOnAllThreads(cb, main_callback); - } - void set(InitializeCb cb) override; - - InstanceImpl& parent_; - const uint64_t index_; - }; - - using SlotImplPtr = std::unique_ptr; - - // A Wrapper of SlotImpl which on destruction returns the SlotImpl to the deferred delete queue - // (detaches it). - struct Bookkeeper : public Slot { - Bookkeeper(InstanceImpl& parent, SlotImplPtr&& slot); - ~Bookkeeper() override { parent_.recycle(std::move(slot_)); } + SlotImpl(InstanceImpl& parent, uint32_t index); + ~SlotImpl() override { parent_.removeSlot(index_); } + Event::PostCb wrapCallback(Event::PostCb&& cb); + static bool currentThreadRegisteredWorker(uint32_t index); + static ThreadLocalObjectSharedPtr getWorker(uint32_t index); // ThreadLocal::Slot ThreadLocalObjectSharedPtr get() override; void runOnAllThreads(const UpdateCb& cb) override; void runOnAllThreads(const UpdateCb& cb, Event::PostCb complete_cb) override; bool currentThreadRegistered() override; - void runOnAllThreads(Event::PostCb cb) override; - void runOnAllThreads(Event::PostCb cb, Event::PostCb main_callback) override; void set(InitializeCb cb) override; InstanceImpl& parent_; - SlotImplPtr slot_; - std::shared_ptr ref_count_; + const uint32_t index_; + // The following is used to safely verify via weak_ptr that this slot is still alive. This + // does not prevent all races if a callback does not capture appropriately, but it does fix + // the common case of a slot destroyed immediately before anything is posted to a worker. + // NOTE: The general safety model of a slot is that it is destroyed immediately on the main + // thread. This means that *all* captures must not reference the slot object directly. + // this is why index_ is captured manually in callbacks that require it. + // NOTE: When the slot is destroyed, the index is immediately recycled. 
This is safe because + // any new posts for a recycled index must come after any previous callbacks for the + // previous owner of the index. + // TODO(mattklein123): Add clang-tidy analysis rule to check that "this" is not captured by + // a TLS function call. This check will not prevent all bad captures, but it will at least + // make the programmer more aware of potential issues. + std::shared_ptr still_alive_guard_; }; struct ThreadLocalData { @@ -78,26 +69,16 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub std::vector data_; }; - void recycle(SlotImplPtr&& slot); - // Cleanup the deferred deletes queue. - void scheduleCleanup(SlotImpl* slot); - - void removeSlot(SlotImpl& slot); + void removeSlot(uint32_t slot); void runOnAllThreads(Event::PostCb cb); void runOnAllThreads(Event::PostCb cb, Event::PostCb main_callback); static void setThreadLocal(uint32_t index, ThreadLocalObjectSharedPtr object); static thread_local ThreadLocalData thread_local_data_; - // A indexed container for Slots that has to be deferred to delete due to out-going callbacks - // pointing to the Slot. To let the ref_count_ deleter find the SlotImpl by address, the container - // is defined as a map of SlotImpl address to the unique_ptr. - absl::flat_hash_map deferred_deletes_; - - std::vector slots_; + std::vector slots_; // A list of index of freed slots. std::list free_slot_indexes_; - std::list> registered_threads_; std::thread::id main_thread_id_; Event::Dispatcher* main_thread_dispatcher_{}; diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 4e3cba7023d7..8f46c812af64 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -322,8 +322,9 @@ absl::string_view RequestHeaderCustomTag::value(const CustomTagContext& ctx) con if (!ctx.request_headers) { return default_value_; } - const Http::HeaderEntry* entry = ctx.request_headers->get(name_); - return entry ? entry->value().getStringView() : default_value_; + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially populate all header values. + const auto entry = ctx.request_headers->get(name_); + return !entry.empty() ? 
entry[0]->value().getStringView() : default_value_; } MetadataCustomTag::MetadataCustomTag(const std::string& tag, diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 2d0fb940cf00..9219e59d768d 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -202,12 +202,23 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "locality_endpoint_lib", + hdrs = ["locality_endpoint.h"], + deps = [ + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "health_discovery_service_lib", srcs = ["health_discovery_service.cc"], hdrs = ["health_discovery_service.h"], deps = [ ":health_checker_lib", + ":locality_endpoint_lib", ":upstream_includes", "//include/envoy/api:api_interface", "//include/envoy/event:dispatcher_interface", diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index 2f85ef31188f..cda9c64b51ec 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -27,11 +27,10 @@ std::pair ClusterFactoryImplBase:: const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Random::RandomGenerator& random, Event::Dispatcher& dispatcher, - AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, - Server::Admin& admin, Singleton::Manager& singleton_manager, - Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) { + Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, + const LocalInfo::LocalInfo& local_info, Server::Admin& admin, + Singleton::Manager& singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger, + bool added_via_api, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) { std::string cluster_type; if (!cluster.has_cluster_type()) { @@ -73,7 +72,7 @@ std::pair ClusterFactoryImplBase:: } ClusterFactoryContextImpl context( - cluster_manager, stats, tls, std::move(dns_resolver), ssl_context_manager, runtime, random, + cluster_manager, stats, tls, std::move(dns_resolver), ssl_context_manager, runtime, dispatcher, log_manager, local_info, admin, singleton_manager, std::move(outlier_event_logger), added_via_api, validation_visitor, api); return factory->create(cluster, context); @@ -108,8 +107,8 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste auto stats_scope = generateStatsScope(cluster, context.stats()); Server::Configuration::TransportSocketFactoryContextImpl factory_context( context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), - context.localInfo(), context.dispatcher(), context.random(), context.stats(), - context.singletonManager(), context.tls(), context.messageValidationVisitor(), context.api()); + context.localInfo(), context.dispatcher(), context.stats(), context.singletonManager(), + context.tls(), context.messageValidationVisitor(), context.api()); std::pair new_cluster_pair = createClusterImpl(cluster, context, factory_context, std::move(stats_scope)); @@ -120,7 +119,7 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste throw 
EnvoyException("Multiple health checks not supported"); } else { new_cluster_pair.first->setHealthChecker(HealthCheckerFactory::create( - cluster.health_checks()[0], *new_cluster_pair.first, context.runtime(), context.random(), + cluster.health_checks()[0], *new_cluster_pair.first, context.runtime(), context.dispatcher(), context.logManager(), context.messageValidationVisitor(), context.api())); } diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h index 4e8c6d1a811d..0cc4b76e844e 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -11,7 +11,6 @@ #include #include -#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/typed_metadata.h" #include "envoy/event/timer.h" @@ -56,15 +55,14 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { ThreadLocal::SlotAllocator& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Random::RandomGenerator& random, Event::Dispatcher& dispatcher, - AccessLog::AccessLogManager& log_manager, + Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : cluster_manager_(cluster_manager), stats_(stats), tls_(tls), dns_resolver_(std::move(dns_resolver)), ssl_context_manager_(ssl_context_manager), - runtime_(runtime), random_(random), dispatcher_(dispatcher), log_manager_(log_manager), + runtime_(runtime), dispatcher_(dispatcher), log_manager_(log_manager), local_info_(local_info), admin_(admin), singleton_manager_(singleton_manager), outlier_event_logger_(std::move(outlier_event_logger)), added_via_api_(added_via_api), validation_visitor_(validation_visitor), api_(api) {} @@ -75,7 +73,6 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } Runtime::Loader& runtime() override { return runtime_; } - Random::RandomGenerator& random() override { return random_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } AccessLog::AccessLogManager& logManager() override { return log_manager_; } const LocalInfo::LocalInfo& localInfo() override { return local_info_; } @@ -95,7 +92,6 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { Network::DnsResolverSharedPtr dns_resolver_; Ssl::ContextManager& ssl_context_manager_; Runtime::Loader& runtime_; - Random::RandomGenerator& random_; Event::Dispatcher& dispatcher_; AccessLog::AccessLogManager& log_manager_; const LocalInfo::LocalInfo& local_info_; @@ -121,7 +117,7 @@ class ClusterFactoryImplBase : public ClusterFactory { create(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, - Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, + Runtime::Loader& runtime, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& 
singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 56dda590fd41..80a9ef9ac070 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -234,19 +234,20 @@ void ClusterManagerInitHelper::setPrimaryClustersInitializedCb( ClusterManagerImpl::ClusterManagerImpl( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, - AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, - Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, + const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, + Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, + ProtobufMessage::ValidationContext& validation_context, Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context) : factory_(factory), runtime_(runtime), stats_(stats), tls_(tls.allocateSlot()), - random_(random), bind_config_(bootstrap.cluster_manager().upstream_bind_config()), - local_info_(local_info), cm_stats_(generateStats(stats)), + random_(api.randomGenerator()), + bind_config_(bootstrap.cluster_manager().upstream_bind_config()), local_info_(local_info), + cm_stats_(generateStats(stats)), init_helper_(*this, [this](Cluster& cluster) { onClusterInit(cluster); }), config_tracker_entry_( admin.getConfigTracker().add("clusters", [this] { return dumpClusterConfigs(); })), time_source_(main_thread_dispatcher.timeSource()), dispatcher_(main_thread_dispatcher), http_context_(http_context), - subscription_factory_(local_info, main_thread_dispatcher, *this, random, + subscription_factory_(local_info, main_thread_dispatcher, *this, validation_context.dynamicValidationVisitor(), api, runtime_) { async_client_manager_ = std::make_unique( *this, tls, time_source_, api, grpc_context.statNames()); @@ -416,7 +417,16 @@ void ClusterManagerImpl::onClusterInit(Cluster& cluster) { // been setup for cross-thread updates to avoid needless updates during initialization. The order // of operations here is important. We start by initializing the thread aware load balancer if // needed. This must happen first so cluster updates are heard first by the load balancer. - auto cluster_data = active_clusters_.find(cluster.info()->name()); + // Also, it assures that all of clusters which this function is called should be always active. + auto cluster_data = warming_clusters_.find(cluster.info()->name()); + // We have a situation that clusters will be immediately active, such as static and primary + // cluster. So we must have this prevention logic here. + if (cluster_data != warming_clusters_.end()) { + clusterWarmingToActive(cluster.info()->name()); + updateClusterCounts(); + } + cluster_data = active_clusters_.find(cluster.info()->name()); + if (cluster_data->second->thread_aware_lb_ != nullptr) { cluster_data->second->thread_aware_lb_->initialize(); } @@ -586,17 +596,6 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::config::cluster::v3::Cl // The following init manager remove call is a NOP in the case we are already initialized. // It's just kept here to avoid additional logic. 
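onClusterInit() now promotes a still-warming cluster into the active map (clusterWarmingToActive) before initializing the thread-aware load balancer, so the rest of the function can assume the cluster is active; clusters that were never warming (for example static clusters) are simply not found in the warming map. A toy sketch of that promotion between the two maps, with illustrative names:

#include <iostream>
#include <map>
#include <memory>
#include <string>

struct ClusterData {
  std::string version;
};

using ClusterMap = std::map<std::string, std::unique_ptr<ClusterData>>;

// Sketch only: move a cluster from warming to active once it has initialized.
void promoteWarmingToActive(const std::string& name, ClusterMap& warming, ClusterMap& active) {
  auto it = warming.find(name);
  if (it == warming.end()) {
    return; // already active, e.g. a static cluster that never warmed
  }
  active[name] = std::move(it->second);
  warming.erase(it);
}

int main() {
  ClusterMap warming;
  ClusterMap active;
  warming["backend"] = std::make_unique<ClusterData>(ClusterData{"v1"});
  promoteWarmingToActive("backend", warming, active);
  std::cout << "warming=" << warming.size() << " active=" << active.size() << "\n"; // 0 and 1
}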
init_helper_.removeCluster(*existing_active_cluster->second->cluster_); - } else { - // Validate that warming clusters are not added to the init_helper_. - // NOTE: This loop is compiled out in optimized builds. - for (const std::list& cluster_list : - {std::cref(init_helper_.primary_init_clusters_), - std::cref(init_helper_.secondary_init_clusters_)}) { - ASSERT(!std::any_of(cluster_list.begin(), cluster_list.end(), - [&existing_warming_cluster](Cluster* cluster) { - return existing_warming_cluster->second->cluster_.get() == cluster; - })); - } } cm_stats_.cluster_modified_.inc(); } else { @@ -613,45 +612,46 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::config::cluster::v3::Cl // the future we may decide to undergo a refactor to unify the logic but the effort/risk to // do that right now does not seem worth it given that the logic is generally pretty clean // and easy to understand. - const bool use_active_map = - init_helper_.state() != ClusterManagerInitHelper::State::AllClustersInitialized; - loadCluster(cluster, version_info, true, use_active_map ? active_clusters_ : warming_clusters_); - - if (use_active_map) { + const bool all_clusters_initialized = + init_helper_.state() == ClusterManagerInitHelper::State::AllClustersInitialized; + loadCluster(cluster, version_info, true, warming_clusters_); + auto& cluster_entry = warming_clusters_.at(cluster_name); + if (!all_clusters_initialized) { ENVOY_LOG(debug, "add/update cluster {} during init", cluster_name); - auto& cluster_entry = active_clusters_.at(cluster_name); createOrUpdateThreadLocalCluster(*cluster_entry); init_helper_.addCluster(*cluster_entry->cluster_); } else { - auto& cluster_entry = warming_clusters_.at(cluster_name); ENVOY_LOG(debug, "add/update cluster {} starting warming", cluster_name); cluster_entry->cluster_->initialize([this, cluster_name] { - auto warming_it = warming_clusters_.find(cluster_name); - auto& cluster_entry = *warming_it->second; - - // If the cluster is being updated, we need to cancel any pending merged updates. - // Otherwise, applyUpdates() will fire with a dangling cluster reference. - updates_map_.erase(cluster_name); - - active_clusters_[cluster_name] = std::move(warming_it->second); - warming_clusters_.erase(warming_it); - ENVOY_LOG(debug, "warming cluster {} complete", cluster_name); - createOrUpdateThreadLocalCluster(cluster_entry); - onClusterInit(*cluster_entry.cluster_); - updateClusterCounts(); + auto state_changed_cluster_entry = warming_clusters_.find(cluster_name); + createOrUpdateThreadLocalCluster(*state_changed_cluster_entry->second); + onClusterInit(*state_changed_cluster_entry->second->cluster_); }); } - updateClusterCounts(); return true; } +void ClusterManagerImpl::clusterWarmingToActive(const std::string& cluster_name) { + auto warming_it = warming_clusters_.find(cluster_name); + ASSERT(warming_it != warming_clusters_.end()); + + // If the cluster is being updated, we need to cancel any pending merged updates. + // Otherwise, applyUpdates() will fire with a dangling cluster reference. 
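The comment above is the reason updates_map_ entries are erased before the warming entry replaces the old cluster: a queued merged update holds a reference to the cluster object it was created for, and would otherwise fire against a destroyed object. A toy standalone illustration of dropping queued work before replacing the object it references (names are illustrative):

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Cluster {
  std::string version;
};

int main() {
  std::map<std::string, std::unique_ptr<Cluster>> clusters;
  std::map<std::string, std::function<void()>> pending_updates;

  clusters["backend"] = std::make_unique<Cluster>(Cluster{"v1"});
  pending_updates["backend"] = [cluster = clusters["backend"].get()] {
    std::cout << "applying update to " << cluster->version << "\n";
  };

  // The cluster is being replaced: cancel its pending merged update first,
  // otherwise the callback above would later dereference a deleted Cluster.
  pending_updates.erase("backend");
  clusters["backend"] = std::make_unique<Cluster>(Cluster{"v2"});

  for (auto& [name, cb] : pending_updates) {
    cb(); // nothing left to run
  }
  std::cout << "pending updates left: " << pending_updates.size() << "\n"; // 0
}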
+ updates_map_.erase(cluster_name); + + active_clusters_[cluster_name] = std::move(warming_it->second); + warming_clusters_.erase(warming_it); +} + void ClusterManagerImpl::createOrUpdateThreadLocalCluster(ClusterData& cluster) { - tls_->runOnAllThreads([this, new_cluster = cluster.cluster_->info(), - thread_aware_lb_factory = cluster.loadBalancerFactory()]() -> void { + tls_->runOnAllThreads([new_cluster = cluster.cluster_->info(), + thread_aware_lb_factory = cluster.loadBalancerFactory()]( + ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { ThreadLocalClusterManagerImpl& cluster_manager = - tls_->getTyped(); + object->asType(); if (cluster_manager.thread_local_clusters_.count(new_cluster->name()) > 0) { ENVOY_LOG(debug, "updating TLS cluster {}", new_cluster->name()); @@ -665,6 +665,8 @@ void ClusterManagerImpl::createOrUpdateThreadLocalCluster(ClusterData& cluster) for (auto& cb : cluster_manager.update_callbacks_) { cb->onClusterAddOrUpdate(*thread_local_cluster); } + + return object; }); } @@ -678,9 +680,10 @@ bool ClusterManagerImpl::removeCluster(const std::string& cluster_name) { active_clusters_.erase(existing_active_cluster); ENVOY_LOG(info, "removing cluster {}", cluster_name); - tls_->runOnAllThreads([this, cluster_name]() -> void { + tls_->runOnAllThreads([cluster_name](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { ThreadLocalClusterManagerImpl& cluster_manager = - tls_->getTyped(); + object->asType(); ASSERT(cluster_manager.thread_local_clusters_.count(cluster_name) == 1); ENVOY_LOG(debug, "removing TLS cluster {}", cluster_name); @@ -688,6 +691,7 @@ bool ClusterManagerImpl::removeCluster(const std::string& cluster_name) { cb->onClusterRemoval(cluster_name); } cluster_manager.thread_local_clusters_.erase(cluster_name); + return object; }); } @@ -695,6 +699,7 @@ bool ClusterManagerImpl::removeCluster(const std::string& cluster_name) { if (existing_warming_cluster != warming_clusters_.end() && existing_warming_cluster->second->added_via_api_) { removed = true; + init_helper_.removeCluster(*existing_warming_cluster->second->cluster_); warming_clusters_.erase(existing_warming_cluster); ENVOY_LOG(info, "removing warming cluster {}", cluster_name); } @@ -797,7 +802,9 @@ void ClusterManagerImpl::updateClusterCounts() { // Once cluster is warmed up, CDS is resumed, and ACK is sent to ADS, providing a // signal to ADS to proceed with RDS updates. // If we're in the middle of shutting down (ads_mux_ already gone) then this is irrelevant. - if (ads_mux_) { + const bool all_clusters_initialized = + init_helper_.state() == ClusterManagerInitHelper::State::AllClustersInitialized; + if (all_clusters_initialized && ads_mux_) { const auto type_urls = Config::getAllVersionTypeUrls(); const uint64_t previous_warming = cm_stats_.warming_clusters_.value(); if (previous_warming == 0 && !warming_clusters_.empty()) { @@ -822,6 +829,31 @@ ThreadLocalCluster* ClusterManagerImpl::get(absl::string_view cluster) { } } +void ClusterManagerImpl::maybePrefetch( + ThreadLocalClusterManagerImpl::ClusterEntryPtr& cluster_entry, + std::function pick_prefetch_pool) { + // TODO(alyssawilk) As currently implemented, this will always just prefetch + // one connection ahead of actually needed connections. 
+ // + // Instead we want to track the following metrics across the entire connection + // pool and use the same algorithm we do for per-upstream prefetch: + // ((pending_streams_ + num_active_streams_) * global_prefetch_ratio > + // (connecting_stream_capacity_ + num_active_streams_))) + // and allow multiple prefetches per pick. + // Also cap prefetches such that + // num_unused_prefetch < num hosts + // since if we have more prefetches than hosts, we should consider kicking into + // per-upstream prefetch. + // + // Once we do this, this should loop capped number of times while shouldPrefetch is true. + if (cluster_entry->cluster_info_->peekaheadRatio() > 1.0) { + ConnectionPool::Instance* prefetch_pool = pick_prefetch_pool(); + if (prefetch_pool) { + prefetch_pool->maybePrefetch(cluster_entry->cluster_info_->peekaheadRatio()); + } + } +} + Http::ConnectionPool::Instance* ClusterManagerImpl::httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority, absl::optional protocol, @@ -834,7 +866,19 @@ ClusterManagerImpl::httpConnPoolForCluster(const std::string& cluster, ResourceP } // Select a host and create a connection pool for it if it does not already exist. - return entry->second->connPool(priority, protocol, context); + auto ret = entry->second->connPool(priority, protocol, context, false); + + // Now see if another host should be prefetched. + // httpConnPoolForCluster is called immediately before a call for newStream. newStream doesn't + // have the load balancer context needed to make selection decisions so prefetching must be + // performed here in anticipation of the new stream. + // TODO(alyssawilk) refactor to have one function call and return a pair, so this invariant is + // code-enforced. + maybePrefetch(entry->second, [&entry, &priority, &protocol, &context]() { + return entry->second->connPool(priority, protocol, context, true); + }); + + return ret; } Tcp::ConnectionPool::Instance* @@ -848,14 +892,29 @@ ClusterManagerImpl::tcpConnPoolForCluster(const std::string& cluster, ResourcePr } // Select a host and create a connection pool for it if it does not already exist. - return entry->second->tcpConnPool(priority, context); + auto ret = entry->second->tcpConnPool(priority, context, false); + + // tcpConnPoolForCluster is called immediately before a call for newConnection. newConnection + // doesn't have the load balancer context needed to make selection decisions so prefetching must + // be performed here in anticipation of the new connection. + // TODO(alyssawilk) refactor to have one function call and return a pair, so this invariant is + // code-enforced. + // Now see if another host should be prefetched. 
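maybePrefetch() only asks the pool to prefetch when the cluster's peekahead ratio exceeds 1.0, and the TODO above describes the eventual pool-wide predicate: prefetch while anticipated demand, scaled by the ratio, exceeds what is already connected or connecting. A small sketch of that predicate as described; the struct and field names are illustrative, not Envoy's:

#include <iostream>

struct PoolStats {
  int pending_streams{0};
  int active_streams{0};
  int connecting_stream_capacity{0};
};

// Sketch only: the condition quoted in the TODO above.
bool shouldPrefetch(const PoolStats& stats, double prefetch_ratio) {
  if (prefetch_ratio <= 1.0) {
    return false; // prefetching disabled
  }
  return (stats.pending_streams + stats.active_streams) * prefetch_ratio >
         (stats.connecting_stream_capacity + stats.active_streams);
}

int main() {
  PoolStats stats{/*pending_streams=*/2, /*active_streams=*/10, /*connecting_stream_capacity=*/3};
  std::cout << std::boolalpha << shouldPrefetch(stats, 1.5) << "\n"; // 18 > 13 -> true
  std::cout << shouldPrefetch(stats, 1.0) << "\n";                   // disabled -> false
}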
+ maybePrefetch(entry->second, [&entry, &priority, &context]() { + return entry->second->tcpConnPool(priority, context, true); + }); + + return ret; } void ClusterManagerImpl::postThreadLocalDrainConnections(const Cluster& cluster, const HostVector& hosts_removed) { - tls_->runOnAllThreads([this, name = cluster.info()->name(), hosts_removed]() { - ThreadLocalClusterManagerImpl::removeHosts(name, hosts_removed, *tls_); - }); + tls_->runOnAllThreads( + [name = cluster.info()->name(), hosts_removed](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().removeHosts(name, hosts_removed); + return object; + }); } void ClusterManagerImpl::postThreadLocalClusterUpdate(const Cluster& cluster, uint32_t priority, @@ -863,19 +922,25 @@ void ClusterManagerImpl::postThreadLocalClusterUpdate(const Cluster& cluster, ui const HostVector& hosts_removed) { const auto& host_set = cluster.prioritySet().hostSetsPerPriority()[priority]; - tls_->runOnAllThreads([this, name = cluster.info()->name(), priority, + tls_->runOnAllThreads([name = cluster.info()->name(), priority, update_params = HostSetImpl::updateHostsParams(*host_set), locality_weights = host_set->localityWeights(), hosts_added, hosts_removed, - overprovisioning_factor = host_set->overprovisioningFactor()]() { - ThreadLocalClusterManagerImpl::updateClusterMembership( - name, priority, update_params, locality_weights, hosts_added, hosts_removed, *tls_, + overprovisioning_factor = host_set->overprovisioningFactor()]( + ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().updateClusterMembership( + name, priority, update_params, locality_weights, hosts_added, hosts_removed, overprovisioning_factor); + return object; }); } void ClusterManagerImpl::postThreadLocalHealthFailure(const HostSharedPtr& host) { - tls_->runOnAllThreads( - [this, host] { ThreadLocalClusterManagerImpl::onHostHealthFailure(host, *tls_); }); + tls_->runOnAllThreads([host](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().onHostHealthFailure(host); + return object; + }); } Host::CreateConnectionData ClusterManagerImpl::tcpConnForCluster(const std::string& cluster, @@ -1111,13 +1176,10 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeTcpConn( } } -void ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeHosts(const std::string& name, - const HostVector& hosts_removed, - ThreadLocal::Slot& tls) { - ThreadLocalClusterManagerImpl& config = tls.getTyped(); - - ASSERT(config.thread_local_clusters_.find(name) != config.thread_local_clusters_.end()); - const auto& cluster_entry = config.thread_local_clusters_[name]; +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeHosts( + const std::string& name, const HostVector& hosts_removed) { + ASSERT(thread_local_clusters_.find(name) != thread_local_clusters_.end()); + const auto& cluster_entry = thread_local_clusters_[name]; ENVOY_LOG(debug, "removing hosts for TLS cluster {} removed {}", name, hosts_removed.size()); // We need to go through and purge any connection pools for hosts that got deleted. 
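The thread-local callbacks above now receive the worker's object and return the (possibly replaced) object, instead of looking the state up through the slot inside the lambda; that is what lets them stop capturing `this`. A toy standalone sketch of that callback shape, with a vector standing in for the per-worker dispatchers (names are illustrative):

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

struct ThreadLocalObject {
  virtual ~ThreadLocalObject() = default;
};

struct PerThreadClusterMap : ThreadLocalObject {
  int cluster_count{0};
};

using ObjectSharedPtr = std::shared_ptr<ThreadLocalObject>;
using UpdateCb = std::function<ObjectSharedPtr(ObjectSharedPtr)>;

// Sketch only: hand each worker's object to the callback and store the result.
void runOnAllThreads(std::vector<ObjectSharedPtr>& per_thread_objects, const UpdateCb& cb) {
  for (auto& object : per_thread_objects) {
    object = cb(object); // the real code posts this to each worker dispatcher
  }
}

int main() {
  std::vector<ObjectSharedPtr> workers{std::make_shared<PerThreadClusterMap>(),
                                       std::make_shared<PerThreadClusterMap>()};
  runOnAllThreads(workers, [](ObjectSharedPtr object) -> ObjectSharedPtr {
    auto& map = static_cast<PerThreadClusterMap&>(*object);
    map.cluster_count++; // e.g. a cluster was added on every worker
    return object;
  });
  std::cout << static_cast<PerThreadClusterMap&>(*workers[0]).cluster_count << "\n"; // 1
}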
@@ -1129,11 +1191,9 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeHosts(const std::s void ClusterManagerImpl::ThreadLocalClusterManagerImpl::updateClusterMembership( const std::string& name, uint32_t priority, PrioritySet::UpdateHostsParams update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, - const HostVector& hosts_removed, ThreadLocal::Slot& tls, uint64_t overprovisioning_factor) { - ThreadLocalClusterManagerImpl& config = tls.getTyped(); - - ASSERT(config.thread_local_clusters_.find(name) != config.thread_local_clusters_.end()); - const auto& cluster_entry = config.thread_local_clusters_[name]; + const HostVector& hosts_removed, uint64_t overprovisioning_factor) { + ASSERT(thread_local_clusters_.find(name) != thread_local_clusters_.end()); + const auto& cluster_entry = thread_local_clusters_[name]; ENVOY_LOG(debug, "membership update for TLS cluster {} added {} removed {}", name, hosts_added.size(), hosts_removed.size()); cluster_entry->priority_set_.updateHosts(priority, std::move(update_hosts_params), @@ -1148,7 +1208,7 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::updateClusterMembership( } void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( - const HostSharedPtr& host, ThreadLocal::Slot& tls) { + const HostSharedPtr& host) { // Drain all HTTP connection pool connections in the case of a host health failure. If outlier/ // health is due to ECMP flow hashing issues for example, a new set of connections might do @@ -1156,9 +1216,8 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( // TODO(mattklein123): This function is currently very specific, but in the future when we do // more granular host set changes, we should be able to capture single host changes and make them // more targeted. - ThreadLocalClusterManagerImpl& config = tls.getTyped(); { - const auto container = config.getHttpConnPoolsContainer(host); + const auto container = getHttpConnPoolsContainer(host); if (container != nullptr) { container->pools_->drainConnections(); } @@ -1168,8 +1227,8 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( // connections being closed, it only prevents new connections through the pool. The // CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE can be used to make the pool close any // active connections. - const auto& container = config.host_tcp_conn_pool_map_.find(host); - if (container != config.host_tcp_conn_pool_map_.end()) { + const auto& container = host_tcp_conn_pool_map_.find(host); + if (container != host_tcp_conn_pool_map_.end()) { for (const auto& pair : container->second.pools_) { const Tcp::ConnectionPool::InstancePtr& pool = pair.second; if (host->cluster().features() & @@ -1198,8 +1257,8 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( // in the configuration documentation in cluster setting // "close_connections_on_host_health_failure". Update the docs if this if this changes. 
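The while(true) loop a little further down re-does the host lookup on every pass rather than iterating, because closing a connection removes it from the host's container (and the last close removes the container itself), which would invalidate any held iterator. A toy standalone sketch of that close-until-gone pattern, with illustrative names:

#include <iostream>
#include <map>
#include <string>
#include <vector>

using ConnectionsByHost = std::map<std::string, std::vector<int>>;

// Sketch only: closing a connection mutates the map, so callers must not hold
// iterators across the close.
void closeConnection(ConnectionsByHost& map, const std::string& host) {
  auto it = map.find(host);
  it->second.pop_back(); // the connection removes itself on close
  if (it->second.empty()) {
    map.erase(it); // the last connection removes the container
  }
}

void closeAllForHost(ConnectionsByHost& map, const std::string& host) {
  while (true) {
    auto it = map.find(host);
    if (it == map.end()) {
      break; // nothing (left) for this host
    }
    closeConnection(map, host); // may invalidate `it`, hence the re-find
  }
}

int main() {
  ConnectionsByHost map{{"10.0.0.1:443", {1, 2, 3}}};
  closeAllForHost(map, "10.0.0.1:443");
  std::cout << "hosts with open connections: " << map.size() << "\n"; // 0
}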
while (true) { - const auto& it = config.host_tcp_conn_map_.find(host); - if (it == config.host_tcp_conn_map_.end()) { + const auto& it = host_tcp_conn_map_.find(host); + if (it == host_tcp_conn_map_.end()) { break; } TcpConnectionsMap& container = it->second; @@ -1292,8 +1351,8 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::~ClusterEntry() Http::ConnectionPool::Instance* ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( ResourcePriority priority, absl::optional downstream_protocol, - LoadBalancerContext* context) { - HostConstSharedPtr host = lb_->chooseHost(context); + LoadBalancerContext* context, bool peek) { + HostConstSharedPtr host = (peek ? lb_->peekAnotherHost(context) : lb_->chooseHost(context)); if (!host) { ENVOY_LOG(debug, "no healthy host for HTTP connection pool"); cluster_info_->stats().upstream_cx_none_healthy_.inc(); @@ -1352,8 +1411,8 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( Tcp::ConnectionPool::Instance* ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( - ResourcePriority priority, LoadBalancerContext* context) { - HostConstSharedPtr host = lb_->chooseHost(context); + ResourcePriority priority, LoadBalancerContext* context, bool peek) { + HostConstSharedPtr host = (peek ? lb_->peekAnotherHost(context) : lb_->chooseHost(context)); if (!host) { ENVOY_LOG(debug, "no healthy host for TCP connection pool"); cluster_info_->stats().upstream_cx_none_healthy_.inc(); @@ -1398,8 +1457,8 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( ClusterManagerPtr ProdClusterManagerFactory::clusterManagerFromProto( const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { return ClusterManagerPtr{new ClusterManagerImpl( - bootstrap, *this, stats_, tls_, runtime_, random_, local_info_, log_manager_, - main_thread_dispatcher_, admin_, validation_context_, api_, http_context_, grpc_context_)}; + bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, main_thread_dispatcher_, + admin_, validation_context_, api_, http_context_, grpc_context_)}; } Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( @@ -1408,14 +1467,14 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { if (protocol == Http::Protocol::Http2 && runtime_.snapshot().featureEnabled("upstream.use_http2", 100)) { - return Http::Http2::allocateConnPool(dispatcher, host, priority, options, - transport_socket_options); + return Http::Http2::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, + options, transport_socket_options); } else if (protocol == Http::Protocol::Http3) { // Quic connection pool is not implemented. 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } else { - return Http::Http1::allocateConnPool(dispatcher, host, priority, options, - transport_socket_options); + return Http::Http1::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, + options, transport_socket_options); } } @@ -1436,7 +1495,7 @@ std::pair ProdClusterManagerFactor const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) { return ClusterFactoryImplBase::create( - cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, + cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, main_thread_dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, outlier_event_logger, added_via_api, added_via_api ? validation_context_.dynamicValidationVisitor() diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index c229395c1353..1aa14c4be78c 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -40,17 +40,19 @@ namespace Upstream { */ class ProdClusterManagerFactory : public ClusterManagerFactory { public: - ProdClusterManagerFactory( - Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, - ThreadLocal::Instance& tls, Random::RandomGenerator& random, - Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, - Event::Dispatcher& main_thread_dispatcher, const LocalInfo::LocalInfo& local_info, - Secret::SecretManager& secret_manager, ProtobufMessage::ValidationContext& validation_context, - Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context, - AccessLog::AccessLogManager& log_manager, Singleton::Manager& singleton_manager) + ProdClusterManagerFactory(Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, + ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, + Ssl::ContextManager& ssl_context_manager, + Event::Dispatcher& main_thread_dispatcher, + const LocalInfo::LocalInfo& local_info, + Secret::SecretManager& secret_manager, + ProtobufMessage::ValidationContext& validation_context, Api::Api& api, + Http::Context& http_context, Grpc::Context& grpc_context, + AccessLog::AccessLogManager& log_manager, + Singleton::Manager& singleton_manager) : main_thread_dispatcher_(main_thread_dispatcher), validation_context_(validation_context), api_(api), http_context_(http_context), grpc_context_(grpc_context), admin_(admin), - runtime_(runtime), stats_(stats), tls_(tls), random_(random), dns_resolver_(dns_resolver), + runtime_(runtime), stats_(stats), tls_(tls), dns_resolver_(dns_resolver), ssl_context_manager_(ssl_context_manager), local_info_(local_info), secret_manager_(secret_manager), log_manager_(log_manager), singleton_manager_(singleton_manager) {} @@ -84,7 +86,6 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { Runtime::Loader& runtime_; Stats::Store& stats_; ThreadLocal::Instance& tls_; - Random::RandomGenerator& random_; Network::DnsResolverSharedPtr dns_resolver_; Ssl::ContextManager& ssl_context_manager_; const LocalInfo::LocalInfo& local_info_; @@ -193,7 +194,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable downstream_protocol, - LoadBalancerContext* context); + LoadBalancerContext* context, bool peek); Tcp::ConnectionPool::Instance* tcpConnPool(ResourcePriority priority, - LoadBalancerContext* context); + LoadBalancerContext* context, bool peek); 
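Editor's note: the `bool peek` argument threaded through connPool()/tcpConnPool() above selects between consuming a pick (chooseHost) and previewing the next one (peekAnotherHost), so a caller such as maybePrefetch can warm a pool for the host that will be picked next. A rough sketch of that branch, assuming a toy round-robin balancer rather than Envoy's real interfaces:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical round-robin balancer: peekAnotherHost() previews the next pick
// without consuming it, so a caller can warm a connection pool ahead of time.
struct ToyLb {
  std::vector<std::string> hosts{"a", "b", "c"};
  std::size_t index = 0;

  std::string chooseHost() { return hosts[index++ % hosts.size()]; }
  std::string peekAnotherHost() const { return hosts[index % hosts.size()]; }
};

// Mirrors the peek branch added above (names here are illustrative only).
std::string selectHost(ToyLb& lb, bool peek) {
  return peek ? lb.peekAnotherHost() : lb.chooseHost();
}

int main() {
  ToyLb lb;
  const std::string prefetch_target = selectHost(lb, /*peek=*/true); // "a", not consumed
  const std::string picked = selectHost(lb, /*peek=*/false);         // still "a"
  std::cout << prefetch_target << " == " << picked << "\n";
}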
// Upstream::ThreadLocalCluster const PrioritySet& prioritySet() override { return priority_set_; } @@ -368,15 +369,13 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable prefetch_pool); ClusterManagerFactory& factory_; Runtime::Loader& runtime_; diff --git a/source/common/upstream/edf_scheduler.h b/source/common/upstream/edf_scheduler.h index fc135dfc1490..2f91d0e5cbed 100644 --- a/source/common/upstream/edf_scheduler.h +++ b/source/common/upstream/edf_scheduler.h @@ -1,6 +1,6 @@ #pragma once - #include +#include #include #include "common/common/assert.h" @@ -25,32 +25,47 @@ namespace Upstream { // weights and an O(log n) pick time. template class EdfScheduler { public: + // Each time peekAgain is called, it will return the best-effort subsequent + // pick, popping and reinserting the entry as if it had been picked, and + // inserting it into the pre-picked queue. + // The first time peekAgain is called, it will return the + // first item which will be picked, the second time it is called it will + // return the second item which will be picked. As picks occur, that window + // will shrink. + std::shared_ptr peekAgain(std::function calculate_weight) { + if (hasEntry()) { + prepick_list_.push_back(std::move(queue_.top().entry_)); + std::shared_ptr ret{prepick_list_.back()}; + add(calculate_weight(*ret), ret); + queue_.pop(); + return ret; + } + return nullptr; + } + /** - * Pick queue entry with closest deadline. - * @return std::shared_ptr to the queue entry if a valid entry exists in the queue, nullptr - * otherwise. The entry is removed from the queue. + * Pick queue entry with closest deadline and adds it back using the weight + * from calculate_weight. + * @return std::shared_ptr to next valid the queue entry if or nullptr if none exists. */ - std::shared_ptr pick() { - EDF_TRACE("Queue pick: queue_.size()={}, current_time_={}.", queue_.size(), current_time_); - while (true) { - if (queue_.empty()) { - EDF_TRACE("Queue is empty."); - return nullptr; - } - const EdfEntry& edf_entry = queue_.top(); - // Entry has been removed, let's see if there's another one. - if (edf_entry.entry_.expired()) { - EDF_TRACE("Entry has expired, repick."); - queue_.pop(); + std::shared_ptr pickAndAdd(std::function calculate_weight) { + while (!prepick_list_.empty()) { + // In this case the entry was added back during peekAgain so don't re-add. + if (prepick_list_.front().expired()) { + prepick_list_.pop_front(); continue; } - std::shared_ptr ret{edf_entry.entry_}; - ASSERT(edf_entry.deadline_ >= current_time_); - current_time_ = edf_entry.deadline_; + std::shared_ptr ret{prepick_list_.front()}; + prepick_list_.pop_front(); + return ret; + } + if (hasEntry()) { + std::shared_ptr ret{queue_.top().entry_}; queue_.pop(); - EDF_TRACE("Picked {}, current_time_={}.", static_cast(ret.get()), current_time_); + add(calculate_weight(*ret), ret); return ret; } + return nullptr; } /** @@ -74,6 +89,31 @@ template class EdfScheduler { bool empty() const { return queue_.empty(); } private: + /** + * Clears expired entries, and returns true if there's still entries in the queue. + */ + bool hasEntry() { + EDF_TRACE("Queue pick: queue_.size()={}, current_time_={}.", queue_.size(), current_time_); + while (true) { + if (queue_.empty()) { + EDF_TRACE("Queue is empty."); + return false; + } + const EdfEntry& edf_entry = queue_.top(); + // Entry has been removed, let's see if there's another one. 
+ if (edf_entry.entry_.expired()) { + EDF_TRACE("Entry has expired, repick."); + queue_.pop(); + continue; + } + std::shared_ptr ret{edf_entry.entry_}; + ASSERT(edf_entry.deadline_ >= current_time_); + current_time_ = edf_entry.deadline_; + EDF_TRACE("Picked {}, current_time_={}.", static_cast(ret.get()), current_time_); + return true; + } + } + struct EdfEntry { double deadline_; // Tie breaker for entries with the same deadline. This is used to provide FIFO behavior. @@ -98,6 +138,7 @@ template class EdfScheduler { uint64_t order_offset_{}; // Min priority queue for EDF. std::priority_queue queue_; + std::list> prepick_list_; }; #undef EDF_DEBUG diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index 47a67c3f29a7..f6357559eec8 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -26,6 +26,8 @@ HealthCheckerImplBase::HealthCheckerImplBase(const Cluster& cluster, reuse_connection_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, reuse_connection, true)), event_logger_(std::move(event_logger)), interval_(PROTOBUF_GET_MS_REQUIRED(config, interval)), no_traffic_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, no_traffic_interval, 60000)), + no_traffic_healthy_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, no_traffic_healthy_interval, + no_traffic_interval_.count())), initial_jitter_(PROTOBUF_GET_MS_OR_DEFAULT(config, initial_jitter, 0)), interval_jitter_(PROTOBUF_GET_MS_OR_DEFAULT(config, interval_jitter, 0)), interval_jitter_percent_(config.interval_jitter_percent()), @@ -123,7 +125,10 @@ std::chrono::milliseconds HealthCheckerImplBase::interval(HealthState state, break; } } else { - base_time_ms = no_traffic_interval_.count(); + base_time_ms = + (state == HealthState::Healthy && changed_state != HealthTransition::ChangePending) + ? no_traffic_healthy_interval_.count() + : no_traffic_interval_.count(); } return intervalWithJitter(base_time_ms, interval_jitter_); } diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index ff2f62101f57..c1e4bb7affff 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -37,6 +37,23 @@ struct HealthCheckerStats { ALL_HEALTH_CHECKER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; +/** + * HealthCheckerHash and HealthCheckerEqualTo are used to allow the HealthCheck proto to be used as + * a flat_hash_map key. + */ +struct HealthCheckerHash { + size_t operator()(const envoy::config::core::v3::HealthCheck& health_check) const { + return MessageUtil::hash(health_check); + } +}; + +struct HealthCheckerEqualTo { + bool operator()(const envoy::config::core::v3::HealthCheck& lhs, + const envoy::config::core::v3::HealthCheck& rhs) const { + return Protobuf::util::MessageDifferencer::Equals(lhs, rhs); + } +}; + /** * Base implementation for all health checkers. 
*/ @@ -148,6 +165,7 @@ class HealthCheckerImplBase : public HealthChecker, std::list callbacks_; const std::chrono::milliseconds interval_; const std::chrono::milliseconds no_traffic_interval_; + const std::chrono::milliseconds no_traffic_healthy_interval_; const std::chrono::milliseconds initial_jitter_; const std::chrono::milliseconds interval_jitter_; const uint32_t interval_jitter_percent_; diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index bce69e033bb8..475f2c377e1f 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -59,17 +59,15 @@ const std::string& getHostname(const HostSharedPtr& host, class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthCheckerFactoryContext { public: HealthCheckerFactoryContextImpl(Upstream::Cluster& cluster, Envoy::Runtime::Loader& runtime, - Envoy::Random::RandomGenerator& random, Event::Dispatcher& dispatcher, HealthCheckEventLoggerPtr&& event_logger, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) - : cluster_(cluster), runtime_(runtime), random_(random), dispatcher_(dispatcher), + : cluster_(cluster), runtime_(runtime), dispatcher_(dispatcher), event_logger_(std::move(event_logger)), validation_visitor_(validation_visitor), api_(api) { } Upstream::Cluster& cluster() override { return cluster_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } - Envoy::Random::RandomGenerator& random() override { return random_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } HealthCheckEventLoggerPtr eventLogger() override { return std::move(event_logger_); } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { @@ -80,7 +78,6 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec private: Upstream::Cluster& cluster_; Envoy::Runtime::Loader& runtime_; - Envoy::Random::RandomGenerator& random_; Event::Dispatcher& dispatcher_; HealthCheckEventLoggerPtr event_logger_; ProtobufMessage::ValidationVisitor& validation_visitor_; @@ -89,7 +86,7 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec HealthCheckerSharedPtr HealthCheckerFactory::create( const envoy::config::core::v3::HealthCheck& health_check_config, Upstream::Cluster& cluster, - Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, + Runtime::Loader& runtime, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) { HealthCheckEventLoggerPtr event_logger; @@ -100,24 +97,26 @@ HealthCheckerSharedPtr HealthCheckerFactory::create( switch (health_check_config.health_checker_case()) { case envoy::config::core::v3::HealthCheck::HealthCheckerCase::kHttpHealthCheck: return std::make_shared(cluster, health_check_config, dispatcher, - runtime, random, std::move(event_logger)); + runtime, api.randomGenerator(), + std::move(event_logger)); case envoy::config::core::v3::HealthCheck::HealthCheckerCase::kTcpHealthCheck: return std::make_shared(cluster, health_check_config, dispatcher, runtime, - random, std::move(event_logger)); + api.randomGenerator(), std::move(event_logger)); case envoy::config::core::v3::HealthCheck::HealthCheckerCase::kGrpcHealthCheck: if (!(cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2)) { throw EnvoyException(fmt::format("{} cluster must support HTTP/2 for gRPC healthchecking", 
cluster.info()->name())); } return std::make_shared(cluster, health_check_config, dispatcher, - runtime, random, std::move(event_logger)); + runtime, api.randomGenerator(), + std::move(event_logger)); case envoy::config::core::v3::HealthCheck::HealthCheckerCase::kCustomHealthCheck: { auto& factory = Config::Utility::getAndCheckFactory( health_check_config.custom_health_check()); std::unique_ptr context( - new HealthCheckerFactoryContextImpl(cluster, runtime, random, dispatcher, - std::move(event_logger), validation_visitor, api)); + new HealthCheckerFactoryContextImpl(cluster, runtime, dispatcher, std::move(event_logger), + validation_visitor, api)); return factory.createCustomHealthChecker(health_check_config, *context); } default: @@ -142,7 +141,8 @@ HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, codec_client_type_( codecClientType(config.http_health_check().hidden_envoy_deprecated_use_http2() ? envoy::type::v3::HTTP2 - : config.http_health_check().codec_client_type())) { + : config.http_health_check().codec_client_type())), + random_generator_(random) { // The deprecated service_name field was previously being used to compare with the health checked // cluster name using a StartsWith comparison. Since StartsWith is essentially a prefix // comparison, representing the intent by using a StringMatcher prefix is a more natural way. @@ -404,7 +404,7 @@ HttpHealthCheckerImpl::codecClientType(const envoy::type::v3::CodecClientType& t Http::CodecClient* ProdHttpHealthCheckerImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { return new Http::CodecClientProd(codec_client_type_, std::move(data.connection_), - data.host_description_, dispatcher_); + data.host_description_, dispatcher_, random_generator_); } TcpHealthCheckMatcher::MatchSegments TcpHealthCheckMatcher::loadProtoBytes( @@ -547,6 +547,7 @@ GrpcHealthCheckerImpl::GrpcHealthCheckerImpl(const Cluster& cluster, Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), + random_generator_(random), service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "grpc.health.v1.Health.Check")) { if (!config.grpc_health_check().service_name().empty()) { @@ -849,9 +850,9 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::logHealthCheckStatus( Http::CodecClientPtr ProdGrpcHealthCheckerImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { - return std::make_unique(Http::CodecClient::Type::HTTP2, - std::move(data.connection_), - data.host_description_, dispatcher_); + return std::make_unique( + Http::CodecClient::Type::HTTP2, std::move(data.connection_), data.host_description_, + dispatcher_, random_generator_); } std::ostream& operator<<(std::ostream& out, HealthState state) { diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index dc2fa6c1bbbf..97976d14287a 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -31,7 +31,6 @@ class HealthCheckerFactory : public Logger::Loggable * @param health_check_config supplies the health check proto. * @param cluster supplies the owning cluster. * @param runtime supplies the runtime loader. - * @param random supplies the random generator. * @param dispatcher supplies the dispatcher. * @param log_manager supplies the log_manager. * @param validation_visitor message validation visitor instance. 
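Editor's note: the EdfScheduler changes earlier in this patch let callers preview future picks: peekAgain() pops the current front, re-adds it with a freshly computed weight, and remembers it on a pre-pick list, while pickAndAdd() serves entries from that list before touching the heap so peeks and later picks agree. A self-contained toy with the same shape (not the Envoy template; the deadline ordering is replaced by a FIFO to keep the sketch short):

#include <deque>
#include <iostream>
#include <memory>
#include <string>

// Toy scheduler illustrating the peek/pre-pick interaction described above.
class ToyScheduler {
public:
  void add(std::shared_ptr<std::string> entry) { queue_.push_back(std::move(entry)); }

  // Preview the next pick: move it to the pre-pick list and re-add it,
  // as if it had been picked already.
  std::shared_ptr<std::string> peekAgain() {
    if (queue_.empty()) {
      return nullptr;
    }
    auto entry = queue_.front();
    queue_.pop_front();
    prepick_.push_back(entry);
    add(entry); // re-insert so the rotation continues past the peeked entry
    return entry;
  }

  // Serve pre-picked entries first so earlier peeks stay truthful.
  std::shared_ptr<std::string> pickAndAdd() {
    if (!prepick_.empty()) {
      auto entry = prepick_.front();
      prepick_.pop_front();
      return entry; // already re-added during peekAgain()
    }
    if (queue_.empty()) {
      return nullptr;
    }
    auto entry = queue_.front();
    queue_.pop_front();
    add(entry);
    return entry;
  }

private:
  std::deque<std::shared_ptr<std::string>> queue_;
  std::deque<std::shared_ptr<std::string>> prepick_;
};

int main() {
  ToyScheduler sched;
  sched.add(std::make_shared<std::string>("a"));
  sched.add(std::make_shared<std::string>("b"));

  auto peeked = sched.peekAgain();  // previews "a"
  auto picked = sched.pickAndAdd(); // returns the same "a"
  std::cout << *peeked << " == " << *picked << "\n";
  std::cout << *sched.pickAndAdd() << "\n"; // "b"
}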
@@ -40,8 +39,8 @@ class HealthCheckerFactory : public Logger::Loggable */ static HealthCheckerSharedPtr create(const envoy::config::core::v3::HealthCheck& health_check_config, - Upstream::Cluster& cluster, Runtime::Loader& runtime, Random::RandomGenerator& random, - Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, + Upstream::Cluster& cluster, Runtime::Loader& runtime, Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); }; @@ -151,6 +150,7 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { protected: const Http::CodecClient::Type codec_client_type_; + Random::RandomGenerator& random_generator_; }; /** @@ -375,6 +375,10 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { return envoy::data::core::v3::GRPC; } +protected: + Random::RandomGenerator& random_generator_; + +private: const Protobuf::MethodDescriptor& service_method_; absl::optional service_name_; absl::optional authority_value_; diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index c43de71f93ee..12c92979f418 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -32,7 +32,7 @@ HdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_clie envoy::config::core::v3::ApiVersion transport_api_version, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Envoy::Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, - Random::RandomGenerator& random, ClusterInfoFactory& info_factory, + ClusterInfoFactory& info_factory, AccessLog::AccessLogManager& access_log_manager, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, @@ -44,14 +44,14 @@ HdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_clie .getMethodDescriptorForVersion(transport_api_version)), async_client_(std::move(async_client)), transport_api_version_(transport_api_version), dispatcher_(dispatcher), runtime_(runtime), store_stats_(stats), - ssl_context_manager_(ssl_context_manager), random_(random), info_factory_(info_factory), + ssl_context_manager_(ssl_context_manager), info_factory_(info_factory), access_log_manager_(access_log_manager), cm_(cm), local_info_(local_info), admin_(admin), - singleton_manager_(singleton_manager), tls_(tls), validation_visitor_(validation_visitor), - api_(api) { + singleton_manager_(singleton_manager), tls_(tls), specifier_hash_(0), + validation_visitor_(validation_visitor), api_(api) { health_check_request_.mutable_health_check_request()->mutable_node()->MergeFrom( local_info_.node()); backoff_strategy_ = std::make_unique( - RetryInitialDelayMilliseconds, RetryMaxDelayMilliseconds, random_); + RetryInitialDelayMilliseconds, RetryMaxDelayMilliseconds, api_.randomGenerator()); hds_retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); hds_stream_response_timer_ = dispatcher.createTimer([this]() -> void { sendResponse(); }); @@ -99,7 +99,6 @@ void HdsDelegate::handleFailure() { setHdsRetryTimer(); } -// TODO(lilika): Add support for the same endpoint in different clusters/ports envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse HdsDelegate::sendResponse() { envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse response; @@ -165,69 +164,139 @@ void 
HdsDelegate::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata UNREFERENCED_PARAMETER(metadata); } +envoy::config::cluster::v3::Cluster HdsDelegate::createClusterConfig( + const envoy::service::health::v3::ClusterHealthCheck& cluster_health_check) { + // Create HdsCluster config + envoy::config::cluster::v3::Cluster cluster_config; + + cluster_config.set_name(cluster_health_check.cluster_name()); + cluster_config.mutable_connect_timeout()->set_seconds(ClusterTimeoutSeconds); + cluster_config.mutable_per_connection_buffer_limit_bytes()->set_value( + ClusterConnectionBufferLimitBytes); + + // Add endpoints to cluster + for (const auto& locality_endpoints : cluster_health_check.locality_endpoints()) { + // add endpoint group by locality to config + auto* endpoints = cluster_config.mutable_load_assignment()->add_endpoints(); + // if this group contains locality information, save it. + if (locality_endpoints.has_locality()) { + endpoints->mutable_locality()->MergeFrom(locality_endpoints.locality()); + } + + // add all endpoints for this locality group to the config + for (const auto& endpoint : locality_endpoints.endpoints()) { + endpoints->add_lb_endpoints()->mutable_endpoint()->mutable_address()->MergeFrom( + endpoint.address()); + } + } + + // TODO(lilika): Add support for optional per-endpoint health checks + + // Add healthchecks to cluster + for (auto& health_check : cluster_health_check.health_checks()) { + cluster_config.add_health_checks()->MergeFrom(health_check); + } + + // Add transport_socket_match to cluster for use in host connections. + cluster_config.mutable_transport_socket_matches()->MergeFrom( + cluster_health_check.transport_socket_matches()); + + ENVOY_LOG(debug, "New HdsCluster config {} ", cluster_config.DebugString()); + + return cluster_config; +} + +void HdsDelegate::updateHdsCluster(HdsClusterPtr cluster, + const envoy::config::cluster::v3::Cluster& cluster_config) { + cluster->update(admin_, cluster_config, info_factory_, cm_, local_info_, dispatcher_, + singleton_manager_, tls_, validation_visitor_, api_, access_log_manager_, + runtime_); +} + +HdsClusterPtr +HdsDelegate::createHdsCluster(const envoy::config::cluster::v3::Cluster& cluster_config) { + static const envoy::config::core::v3::BindConfig bind_config; + + // Create HdsCluster. + auto new_cluster = std::make_shared( + admin_, runtime_, std::move(cluster_config), bind_config, store_stats_, ssl_context_manager_, + false, info_factory_, cm_, local_info_, dispatcher_, singleton_manager_, tls_, + validation_visitor_, api_); + + // Begin HCs in the background. + new_cluster->initialize([] {}); + new_cluster->initHealthchecks(access_log_manager_, runtime_, dispatcher_, api_); + + return new_cluster; +} + void HdsDelegate::processMessage( std::unique_ptr&& message) { ENVOY_LOG(debug, "New health check response message {} ", message->DebugString()); ASSERT(message); + std::vector hds_clusters; + // Maps to replace the current member variable versions. 
+ absl::flat_hash_map new_hds_clusters_name_map; for (const auto& cluster_health_check : message->cluster_health_checks()) { - // Create HdsCluster config - static const envoy::config::core::v3::BindConfig bind_config; - envoy::config::cluster::v3::Cluster cluster_config; - - cluster_config.set_name(cluster_health_check.cluster_name()); - cluster_config.mutable_connect_timeout()->set_seconds(ClusterTimeoutSeconds); - cluster_config.mutable_per_connection_buffer_limit_bytes()->set_value( - ClusterConnectionBufferLimitBytes); - - // Add endpoints to cluster - for (const auto& locality_endpoints : cluster_health_check.locality_endpoints()) { - // add endpoint group by locality to config - auto* endpoints = cluster_config.mutable_load_assignment()->add_endpoints(); - // if this group contains locality information, save it. - if (locality_endpoints.has_locality()) { - endpoints->mutable_locality()->MergeFrom(locality_endpoints.locality()); + if (!new_hds_clusters_name_map.contains(cluster_health_check.cluster_name())) { + HdsClusterPtr cluster_ptr; + + // Create a new configuration for a cluster based on our different or new config. + auto cluster_config = createClusterConfig(cluster_health_check); + + // If this particular cluster configuration happens to have a name, then it is possible + // this particular cluster exists in the name map. We check and if we found a match, + // attempt to update this cluster. If no match was found, either the cluster name is empty + // or we have not seen a cluster by this name before. In either case, create a new cluster. + auto cluster_map_pair = hds_clusters_name_map_.find(cluster_health_check.cluster_name()); + if (cluster_map_pair != hds_clusters_name_map_.end()) { + // We have a previous cluster with this name, update. + cluster_ptr = cluster_map_pair->second; + updateHdsCluster(cluster_ptr, cluster_config); + } else { + // There is no cluster with this name previously or its an empty string, so just create a + // new cluster. + cluster_ptr = createHdsCluster(cluster_config); } - // add all endpoints for this locality group to the config - for (const auto& endpoint : locality_endpoints.endpoints()) { - endpoints->add_lb_endpoints()->mutable_endpoint()->mutable_address()->MergeFrom( - endpoint.address()); + // If this cluster does not have a name, do not add it to the name map since cluster_name is + // an optional field, and reconstruct these clusters on every update. + if (!cluster_health_check.cluster_name().empty()) { + // Since this cluster has a name, add it to our by-name map so we can update it later. + new_hds_clusters_name_map.insert({cluster_health_check.cluster_name(), cluster_ptr}); + } else { + ENVOY_LOG(warn, + "HDS Cluster has no cluster_name, it will be recreated instead of updated on " + "every reconfiguration."); } - } - - // TODO(lilika): Add support for optional per-endpoint health checks - // Add healthchecks to cluster - for (auto& health_check : cluster_health_check.health_checks()) { - cluster_config.add_health_checks()->MergeFrom(health_check); + // Add this cluster to the flat list for health checking. + hds_clusters.push_back(cluster_ptr); + } else { + ENVOY_LOG(warn, "An HDS Cluster with this cluster_name has already been added, not using."); } + } - // Add transport_socket_match to cluster for use in host connections. 
- cluster_config.mutable_transport_socket_matches()->MergeFrom( - cluster_health_check.transport_socket_matches()); - - ENVOY_LOG(debug, "New HdsCluster config {} ", cluster_config.DebugString()); - - // Create HdsCluster - hds_clusters_.emplace_back( - new HdsCluster(admin_, runtime_, std::move(cluster_config), bind_config, store_stats_, - ssl_context_manager_, false, info_factory_, cm_, local_info_, dispatcher_, - random_, singleton_manager_, tls_, validation_visitor_, api_)); - hds_clusters_.back()->initialize([] {}); + // Overwrite our map data structures. + hds_clusters_name_map_ = std::move(new_hds_clusters_name_map); + hds_clusters_ = std::move(hds_clusters); - hds_clusters_.back()->startHealthchecks(access_log_manager_, runtime_, random_, dispatcher_, - api_); - } + // TODO: add stats reporting for number of clusters added, removed, and reused. } -// TODO(lilika): Add support for subsequent HealthCheckSpecifier messages that -// might modify the HdsClusters void HdsDelegate::onReceiveMessage( std::unique_ptr&& message) { stats_.requests_.inc(); ENVOY_LOG(debug, "New health check response message {} ", message->DebugString()); + const uint64_t hash = MessageUtil::hash(*message); + + if (hash == specifier_hash_) { + ENVOY_LOG(debug, "New health check specifier is unchanged, no action taken."); + return; + } + // Validate message fields try { MessageUtil::validate(*message, validation_visitor_); @@ -240,15 +309,17 @@ void HdsDelegate::onReceiveMessage( return; } - // Reset - hds_clusters_.clear(); - // Set response auto server_response_ms = PROTOBUF_GET_MS_OR_DEFAULT(*message, interval, 1000); // Process the HealthCheckSpecifier message. processMessage(std::move(message)); + stats_.updates_.inc(); + + // Update the stored hash. + specifier_hash_ = hash; + if (server_response_ms_ != server_response_ms) { server_response_ms_ = server_response_ms; setHdsStreamResponseTimer(); @@ -273,18 +344,20 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Random::RandomGenerator& random, Singleton::Manager& singleton_manager, - ThreadLocal::SlotAllocator& tls, + Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : runtime_(runtime), cluster_(std::move(cluster)), bind_config_(bind_config), stats_(stats), ssl_context_manager_(ssl_context_manager), added_via_api_(added_via_api), - initial_hosts_(new HostVector()), validation_visitor_(validation_visitor) { + hosts_(new HostVector()), validation_visitor_(validation_visitor) { ENVOY_LOG(debug, "Creating an HdsCluster"); priority_set_.getOrCreateHostSet(0); + // Set initial hashes for possible delta updates. + config_hash_ = MessageUtil::hash(cluster_); + socket_match_hash_ = RepeatedPtrUtil::hash(cluster_.transport_socket_matches()); info_ = info_factory.createClusterInfo( {admin, runtime_, cluster_, bind_config_, stats_, ssl_context_manager_, added_via_api_, cm, - local_info, dispatcher, random, singleton_manager, tls, validation_visitor, api}); + local_info, dispatcher, singleton_manager, tls, validation_visitor, api}); // Temporary structure to hold Host pointers grouped by locality, to build // initial_hosts_per_locality_. 
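Editor's note: the HdsDelegate rework above replaces "clear and rebuild everything" with a delta flow: an unchanged HealthCheckSpecifier is detected by hashing the whole message, and named clusters found in the previous name map are updated in place while unknown or unnamed ones are created fresh. A condensed sketch of that control flow (ToyCluster, Specifier, and hashOf are illustrative stand-ins, not the Envoy types):

#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct ToyCluster {
  std::string config;
  void update(const std::string& new_config) { config = new_config; }
};
using ToyClusterPtr = std::shared_ptr<ToyCluster>;

struct Specifier {
  // name -> config; an empty name would mean "rebuild this cluster every time".
  std::vector<std::pair<std::string, std::string>> clusters;
};

std::size_t hashOf(const Specifier& s) {
  std::size_t h = 0;
  for (const auto& c : s.clusters) {
    h ^= std::hash<std::string>{}(c.first) + std::hash<std::string>{}(c.second);
  }
  return h;
}

class ToyHdsDelegate {
public:
  void onMessage(const Specifier& msg) {
    const std::size_t hash = hashOf(msg);
    if (hash == specifier_hash_) {
      return; // unchanged specifier: no work to do
    }
    std::unordered_map<std::string, ToyClusterPtr> new_by_name;
    std::vector<ToyClusterPtr> clusters;
    for (const auto& [name, config] : msg.clusters) {
      ToyClusterPtr cluster;
      auto it = by_name_.find(name);
      if (!name.empty() && it != by_name_.end()) {
        cluster = it->second; // reuse and update the existing cluster
        cluster->update(config);
      } else {
        cluster = std::make_shared<ToyCluster>(ToyCluster{config});
      }
      if (!name.empty()) {
        new_by_name.emplace(name, cluster);
      }
      clusters.push_back(cluster);
    }
    by_name_ = std::move(new_by_name);
    clusters_ = std::move(clusters);
    specifier_hash_ = hash;
  }

  std::size_t size() const { return clusters_.size(); }

private:
  std::size_t specifier_hash_ = 0;
  std::unordered_map<std::string, ToyClusterPtr> by_name_;
  std::vector<ToyClusterPtr> clusters_;
};

int main() {
  ToyHdsDelegate delegate;
  delegate.onMessage({{{"edge", "v1"}}});
  delegate.onMessage({{{"edge", "v2"}}}); // same name: updated in place, not recreated
  std::cout << delegate.size() << "\n";   // 1
}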
@@ -298,6 +371,7 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, hosts_by_locality.back().reserve(locality_endpoints.lb_endpoints_size()); for (const auto& host : locality_endpoints.lb_endpoints()) { + const LocalityEndpointTuple endpoint_key = {locality_endpoints.locality(), host}; // Initialize an endpoint host object. HostSharedPtr endpoint = std::make_shared( info_, "", Network::Address::resolveProtoAddress(host.endpoint().address()), nullptr, 1, @@ -305,17 +379,154 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, envoy::config::core::v3::UNKNOWN); // Add this host/endpoint pointer to our flat list of endpoints for health checking. - initial_hosts_->push_back(endpoint); + hosts_->push_back(endpoint); // Add this host/endpoint pointer to our structured list by locality so results can be // requested by locality. hosts_by_locality.back().push_back(endpoint); + // Add this host/endpoint pointer to our map so we can rebuild this later. + hosts_map_.insert({endpoint_key, endpoint}); } } // Create the HostsPerLocality. - initial_hosts_per_locality_ = + hosts_per_locality_ = std::make_shared(std::move(hosts_by_locality), false); } +void HdsCluster::update(Server::Admin& admin, envoy::config::cluster::v3::Cluster cluster, + ClusterInfoFactory& info_factory, ClusterManager& cm, + const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, + Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime) { + + // check to see if the config changed. If it did, update. + const uint64_t config_hash = MessageUtil::hash(cluster); + if (config_hash_ != config_hash) { + config_hash_ = config_hash; + cluster_ = std::move(cluster); + + // Check to see if our list of socket matches have changed. If they have, create a new matcher + // in info_. + bool update_cluster_info = false; + const uint64_t socket_match_hash = RepeatedPtrUtil::hash(cluster_.transport_socket_matches()); + if (socket_match_hash_ != socket_match_hash) { + socket_match_hash_ = socket_match_hash; + update_cluster_info = true; + info_ = info_factory.createClusterInfo( + {admin, runtime_, cluster_, bind_config_, stats_, ssl_context_manager_, added_via_api_, + cm, local_info, dispatcher, singleton_manager, tls, validation_visitor, api}); + } + + // Check to see if anything in the endpoints list has changed. + updateHosts(cluster_.load_assignment().endpoints(), update_cluster_info); + + // Check to see if any of the health checkers have changed. + updateHealthchecks(cluster_.health_checks(), access_log_manager, runtime, dispatcher, api); + } +} + +void HdsCluster::updateHealthchecks( + const Protobuf::RepeatedPtrField& health_checks, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, + Event::Dispatcher& dispatcher, Api::Api& api) { + std::vector health_checkers; + HealthCheckerMap health_checkers_map; + + for (const auto& health_check : health_checks) { + // Check to see if this exact same health_check config already has a health checker. + auto health_checker = health_checkers_map_.find(health_check); + if (health_checker != health_checkers_map_.end()) { + // If it does, use it. 
+ health_checkers_map.insert({health_check, health_checker->second}); + health_checkers.push_back(health_checker->second); + } else { + // If it does not, create a new one. + auto new_health_checker = Upstream::HealthCheckerFactory::create( + health_check, *this, runtime, dispatcher, access_log_manager, validation_visitor_, api); + health_checkers_map.insert({health_check, new_health_checker}); + health_checkers.push_back(new_health_checker); + + // Start these health checks now because upstream assumes they already have been started. + new_health_checker->start(); + } + } + + // replace our member data structures with our newly created ones. + health_checkers_ = std::move(health_checkers); + health_checkers_map_ = std::move(health_checkers_map); + + // TODO: add stats reporting for number of health checkers added, removed, and reused. +} + +void HdsCluster::updateHosts( + const Protobuf::RepeatedPtrField& + locality_endpoints, + bool update_cluster_info) { + // Create the data structures needed for PrioritySet::update. + HostVectorSharedPtr hosts = std::make_shared>(); + std::vector hosts_added; + std::vector hosts_removed; + std::vector hosts_by_locality; + + // Use for delta update comparison. + HostsMap hosts_map; + + for (auto& endpoints : locality_endpoints) { + hosts_by_locality.emplace_back(); + for (auto& endpoint : endpoints.lb_endpoints()) { + LocalityEndpointTuple endpoint_key = {endpoints.locality(), endpoint}; + + // Check to see if this exact Locality+Endpoint has been seen before. + // Also, if we made changes to our info, re-create all endpoints. + auto host_pair = hosts_map_.find(endpoint_key); + HostSharedPtr host; + if (!update_cluster_info && host_pair != hosts_map_.end()) { + // If we have this exact pair, save the shared pointer. + host = host_pair->second; + } else { + // We do not have this endpoint saved, so create a new one. + host = std::make_shared( + info_, "", Network::Address::resolveProtoAddress(endpoint.endpoint().address()), + nullptr, 1, endpoints.locality(), + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, + envoy::config::core::v3::UNKNOWN); + + // Set the initial health status as in HdsCluster::initialize. + host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + // Add to our hosts added list and save the shared pointer. + hosts_added.push_back(host); + } + + // No matter if it is reused or new, always add to these data structures. + hosts_by_locality.back().push_back(host); + hosts->push_back(host); + hosts_map.insert({endpoint_key, host}); + } + } + + // Compare the old map to the new to find out which endpoints are going to be removed. + for (auto& host_pair : hosts_map_) { + if (!hosts_map.contains(host_pair.first)) { + hosts_removed.push_back(host_pair.second); + } + } + + // Update the member data structures. + hosts_ = std::move(hosts); + hosts_map_ = std::move(hosts_map); + + // TODO: add stats reporting for number of endpoints added, removed, and reused. + ENVOY_LOG(debug, "Hosts Added: {}, Removed: {}, Reused: {}", hosts_added.size(), + hosts_removed.size(), hosts_->size() - hosts_added.size()); + + // Update the priority set. 
+ hosts_per_locality_ = + std::make_shared(std::move(hosts_by_locality), false); + priority_set_.updateHosts(0, HostSetImpl::partitionHosts(hosts_, hosts_per_locality_), {}, + hosts_added, hosts_removed, absl::nullopt); +} + ClusterSharedPtr HdsCluster::create() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } ClusterInfoConstSharedPtr @@ -325,7 +536,7 @@ ProdClusterInfoFactory::createClusterInfo(const CreateClusterInfoParams& params) Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( params.admin_, params.ssl_context_manager_, *scope, params.cm_, params.local_info_, - params.dispatcher_, params.random_, params.stats_, params.singleton_manager_, params.tls_, + params.dispatcher_, params.stats_, params.singleton_manager_, params.tls_, params.validation_visitor_, params.api_); // TODO(JimmyCYJ): Support SDS for HDS cluster. @@ -339,26 +550,34 @@ ProdClusterInfoFactory::createClusterInfo(const CreateClusterInfoParams& params) params.added_via_api_, factory_context); } -void HdsCluster::startHealthchecks(AccessLog::AccessLogManager& access_log_manager, - Runtime::Loader& runtime, Random::RandomGenerator& random, - Event::Dispatcher& dispatcher, Api::Api& api) { +void HdsCluster::initHealthchecks(AccessLog::AccessLogManager& access_log_manager, + Runtime::Loader& runtime, Event::Dispatcher& dispatcher, + Api::Api& api) { for (auto& health_check : cluster_.health_checks()) { - health_checkers_.push_back( - Upstream::HealthCheckerFactory::create(health_check, *this, runtime, random, dispatcher, - access_log_manager, validation_visitor_, api)); - health_checkers_.back()->start(); + auto health_checker = Upstream::HealthCheckerFactory::create( + health_check, *this, runtime, dispatcher, access_log_manager, validation_visitor_, api); + + health_checkers_.push_back(health_checker); + health_checkers_map_.insert({health_check, health_checker}); + health_checker->start(); } } void HdsCluster::initialize(std::function callback) { initialization_complete_callback_ = callback; - for (const auto& host : *initial_hosts_) { - host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + // If this function gets called again we do not want to touch the priority set again with the + // initial hosts, because the hosts may have changed. + if (!initialized_) { + for (const auto& host : *hosts_) { + host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + } + // Use the ungrouped and grouped hosts lists to retain locality structure in the priority set. + priority_set_.updateHosts(0, HostSetImpl::partitionHosts(hosts_, hosts_per_locality_), {}, + *hosts_, {}, absl::nullopt); + + initialized_ = true; } - // Use the ungrouped and grouped hosts lists to retain locality structure in the priority set. 
- priority_set_.updateHosts( - 0, HostSetImpl::partitionHosts(initial_hosts_, initial_hosts_per_locality_), {}, - *initial_hosts_, {}, absl::nullopt); } void HdsCluster::setOutlierDetector(const Outlier::DetectorSharedPtr&) { diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index 186985640e11..a3ecbb7c428e 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -18,15 +18,24 @@ #include "common/grpc/async_client_impl.h" #include "common/network/resolver_impl.h" #include "common/upstream/health_checker_impl.h" +#include "common/upstream/locality_endpoint.h" #include "common/upstream/upstream_impl.h" #include "server/transport_socket_config_impl.h" #include "extensions/transport_sockets/well_known_names.h" +#include "absl/container/flat_hash_map.h" + namespace Envoy { namespace Upstream { +using HostsMap = absl::flat_hash_map; +using HealthCheckerMap = + absl::flat_hash_map; + class ProdClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable { public: ClusterInfoConstSharedPtr createClusterInfo(const CreateClusterInfoParams& params) override; @@ -47,8 +56,7 @@ class HdsCluster : public Cluster, Logger::Loggable { Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Random::RandomGenerator& random, Singleton::Manager& singleton_manager, - ThreadLocal::SlotAllocator& tls, + Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); // Upstream::Cluster @@ -61,13 +69,19 @@ class HdsCluster : public Cluster, Logger::Loggable { Outlier::Detector* outlierDetector() override { return outlier_detector_.get(); } const Outlier::Detector* outlierDetector() const override { return outlier_detector_.get(); } void initialize(std::function callback) override; - - // Creates and starts healthcheckers to its endpoints - void startHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, - Random::RandomGenerator& random, Event::Dispatcher& dispatcher, - Api::Api& api); + // Compare changes in the cluster proto, and update parts of the cluster as needed. + void update(Server::Admin& admin, envoy::config::cluster::v3::Cluster cluster, + ClusterInfoFactory& info_factory, ClusterManager& cm, + const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, + Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime); + // Creates healthcheckers and adds them to the list, then does initial start. 
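Editor's note: the same keyed-reuse idea drives the HdsCluster update path above: health checkers are cached in a map keyed by the HealthCheck proto (via HealthCheckerHash/HealthCheckerEqualTo) and hosts in a map keyed by a locality+endpoint tuple, so unchanged entries are reused and only new ones are created and started. A simplified sketch of the checker-reuse half, assuming a plain string key instead of a hashed proto:

#include <cstddef>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

// Stand-in for a health checker; the real map is keyed by the HealthCheck proto.
struct ToyChecker {
  explicit ToyChecker(std::string cfg) : config(std::move(cfg)) {}
  void start() { started = true; }
  std::string config;
  bool started = false;
};
using ToyCheckerPtr = std::shared_ptr<ToyChecker>;

class ToyCluster {
public:
  void updateHealthChecks(const std::vector<std::string>& configs) {
    std::unordered_map<std::string, ToyCheckerPtr> new_map;
    std::vector<ToyCheckerPtr> new_list;
    for (const auto& config : configs) {
      auto it = checkers_by_config_.find(config);
      ToyCheckerPtr checker;
      if (it != checkers_by_config_.end()) {
        checker = it->second; // identical config: keep the running checker
      } else {
        checker = std::make_shared<ToyChecker>(config);
        checker->start(); // only brand-new checkers are started
      }
      new_map.emplace(config, checker);
      new_list.push_back(checker);
    }
    // Anything absent from new_map is dropped when the old map is replaced.
    checkers_by_config_ = std::move(new_map);
    checkers_ = std::move(new_list);
  }

  std::size_t size() const { return checkers_.size(); }

private:
  std::unordered_map<std::string, ToyCheckerPtr> checkers_by_config_;
  std::vector<ToyCheckerPtr> checkers_;
};

int main() {
  ToyCluster cluster;
  cluster.updateHealthChecks({"http:/healthz", "tcp:9000"});
  cluster.updateHealthChecks({"http:/healthz"}); // reuses the HTTP checker, drops the TCP one
  std::cout << cluster.size() << "\n";           // 1
}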
+ void initHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, + Event::Dispatcher& dispatcher, Api::Api& api); std::vector healthCheckers() { return health_checkers_; }; + std::vector hosts() { return *hosts_; }; protected: PrioritySetImpl priority_set_; @@ -78,17 +92,31 @@ class HdsCluster : public Cluster, Logger::Loggable { std::function initialization_complete_callback_; Runtime::Loader& runtime_; - const envoy::config::cluster::v3::Cluster cluster_; + envoy::config::cluster::v3::Cluster cluster_; const envoy::config::core::v3::BindConfig& bind_config_; Stats::Store& stats_; Ssl::ContextManager& ssl_context_manager_; bool added_via_api_; + bool initialized_ = false; + uint64_t config_hash_; + uint64_t socket_match_hash_; - HostVectorSharedPtr initial_hosts_; - HostsPerLocalitySharedPtr initial_hosts_per_locality_; + HostVectorSharedPtr hosts_; + HostsPerLocalitySharedPtr hosts_per_locality_; + HostsMap hosts_map_; ClusterInfoConstSharedPtr info_; std::vector health_checkers_; + HealthCheckerMap health_checkers_map_; ProtobufMessage::ValidationVisitor& validation_visitor_; + + void updateHealthchecks( + const Protobuf::RepeatedPtrField& health_checks, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, + Event::Dispatcher& dispatcher, Api::Api& api); + void + updateHosts(const Protobuf::RepeatedPtrField& + locality_endpoints, + bool update_socket_matches); }; using HdsClusterPtr = std::shared_ptr; @@ -99,7 +127,8 @@ using HdsClusterPtr = std::shared_ptr; #define ALL_HDS_STATS(COUNTER) \ COUNTER(requests) \ COUNTER(responses) \ - COUNTER(errors) + COUNTER(errors) \ + COUNTER(updates) /** * Struct definition for all hds stats. @see stats_macros.h @@ -121,9 +150,9 @@ class HdsDelegate : Grpc::AsyncStreamCallbacks&& message); - + envoy::config::cluster::v3::Cluster + createClusterConfig(const envoy::service::health::v3::ClusterHealthCheck& cluster_health_check); + void updateHdsCluster(HdsClusterPtr cluster, + const envoy::config::cluster::v3::Cluster& cluster_health_check); + HdsClusterPtr createHdsCluster(const envoy::config::cluster::v3::Cluster& cluster_health_check); HdsDelegateStats stats_; const Protobuf::MethodDescriptor& service_method_; @@ -161,7 +194,6 @@ class HdsDelegate : Grpc::AsyncStreamCallbacks health_check_message_; + uint64_t specifier_hash_; std::vector clusters_; std::vector hds_clusters_; + absl::flat_hash_map hds_clusters_name_map_; Event::TimerPtr hds_stream_response_timer_; Event::TimerPtr hds_retry_timer_; diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index b7b2435ec266..f12085eea88d 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -123,6 +123,7 @@ LoadBalancerBase::LoadBalancerBase( [this](uint32_t priority, const HostVector&, const HostVector&) -> void { UNREFERENCED_PARAMETER(priority); recalculatePerPriorityPanic(); + stashed_random_.clear(); }); } @@ -321,21 +322,18 @@ void LoadBalancerBase::recalculateLoadInTotalPanic() { } std::pair -LoadBalancerBase::chooseHostSet(LoadBalancerContext* context) { +LoadBalancerBase::chooseHostSet(LoadBalancerContext* context, uint64_t hash) const { if (context) { const auto priority_loads = context->determinePriorityLoad( priority_set_, per_priority_load_, Upstream::RetryPriority::defaultPriorityMapping); - - const auto priority_and_source = - choosePriority(random_.random(), priority_loads.healthy_priority_load_, - 
priority_loads.degraded_priority_load_); + const auto priority_and_source = choosePriority(hash, priority_loads.healthy_priority_load_, + priority_loads.degraded_priority_load_); return {*priority_set_.hostSetsPerPriority()[priority_and_source.first], priority_and_source.second}; } - const auto priority_and_source = - choosePriority(random_.random(), per_priority_load_.healthy_priority_load_, - per_priority_load_.degraded_priority_load_); + const auto priority_and_source = choosePriority(hash, per_priority_load_.healthy_priority_load_, + per_priority_load_.degraded_priority_load_); return {*priority_set_.hostSetsPerPriority()[priority_and_source.first], priority_and_source.second}; } @@ -525,7 +523,7 @@ HostConstSharedPtr LoadBalancerBase::chooseHost(LoadBalancerContext* context) { return host; } -bool LoadBalancerBase::isHostSetInPanic(const HostSet& host_set) { +bool LoadBalancerBase::isHostSetInPanic(const HostSet& host_set) const { uint64_t global_panic_threshold = std::min( 100, runtime_.snapshot().getInteger(RuntimePanicThreshold, default_healthy_panic_percent_)); const auto host_count = host_set.hosts().size() - host_set.excludedHosts().size(); @@ -557,7 +555,7 @@ void ZoneAwareLoadBalancerBase::calculateLocalityPercentage( } } -uint32_t ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts(const HostSet& host_set) { +uint32_t ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts(const HostSet& host_set) const { PerPriorityState& state = *per_priority_state_[host_set.priority()]; ASSERT(state.locality_routing_state_ != LocalityRoutingState::NoLocalityRouting); @@ -608,8 +606,8 @@ uint32_t ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts(const HostSet& h } absl::optional -ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context) { - auto host_set_and_source = chooseHostSet(context); +ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context, uint64_t hash) const { + auto host_set_and_source = chooseHostSet(context, hash); // The second argument tells us which availability we should target from the selected host set. const auto host_availability = host_set_and_source.second; @@ -674,7 +672,7 @@ ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context) { return hosts_source; } -const HostVector& ZoneAwareLoadBalancerBase::hostSourceToHosts(HostsSource hosts_source) { +const HostVector& ZoneAwareLoadBalancerBase::hostSourceToHosts(HostsSource hosts_source) const { const HostSet& host_set = *priority_set_.hostSetsPerPriority()[hosts_source.priority_]; switch (hosts_source.source_type_) { case HostsSource::SourceType::AllHosts: @@ -748,8 +746,8 @@ void EdfLoadBalancerBase::refresh(uint32_t priority) { // refreshes for the weighted case. 
if (!hosts.empty()) { for (uint32_t i = 0; i < seed_ % hosts.size(); ++i) { - auto host = scheduler.edf_->pick(); - scheduler.edf_->add(hostWeight(*host), host); + auto host = + scheduler.edf_->pickAndAdd([this](const Host& host) { return hostWeight(host); }); } } }; @@ -775,8 +773,8 @@ void EdfLoadBalancerBase::refresh(uint32_t priority) { } } -HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* context) { - const absl::optional hosts_source = hostSourceToUse(context); +HostConstSharedPtr EdfLoadBalancerBase::peekAnotherHost(LoadBalancerContext* context) { + const absl::optional hosts_source = hostSourceToUse(context, random(true)); if (!hosts_source) { return nullptr; } @@ -791,10 +789,33 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont // whether to use EDF or do unweighted (fast) selection. EDF is non-null iff the original weights // of 2 or more hosts differ. if (scheduler.edf_ != nullptr) { - auto host = scheduler.edf_->pick(); - if (host != nullptr) { - scheduler.edf_->add(hostWeight(*host), host); + return scheduler.edf_->peekAgain([this](const Host& host) { return hostWeight(host); }); + } else { + const HostVector& hosts_to_use = hostSourceToHosts(*hosts_source); + if (hosts_to_use.empty()) { + return nullptr; } + return unweightedHostPeek(hosts_to_use, *hosts_source); + } +} + +HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* context) { + const absl::optional hosts_source = hostSourceToUse(context, random(false)); + if (!hosts_source) { + return nullptr; + } + auto scheduler_it = scheduler_.find(*hosts_source); + // We should always have a scheduler for any return value from + // hostSourceToUse() via the construction in refresh(); + ASSERT(scheduler_it != scheduler_.end()); + auto& scheduler = scheduler_it->second; + + // As has been commented in both EdfLoadBalancerBase::refresh and + // BaseDynamicClusterImpl::updateDynamicHostList, we must do a runtime pivot here to determine + // whether to use EDF or do unweighted (fast) selection. EDF is non-null iff the original weights + // of 2 or more hosts differ. + if (scheduler.edf_ != nullptr) { + auto host = scheduler.edf_->pickAndAdd([this](const Host& host) { return hostWeight(host); }); return host; } else { const HostVector& hosts_to_use = hostSourceToHosts(*hosts_source); @@ -805,6 +826,14 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont } } +HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPeek(const HostVector&, + const HostsSource&) { + // LeastRequestLoadBalancer can not do deterministic prefetching, because + // any other thread might select the least-requested-host between prefetch and + // host-pick, and change the rq_active checks. 
+ return nullptr; +} + HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector& hosts_to_use, const HostsSource&) { HostSharedPtr candidate_host = nullptr; @@ -828,8 +857,17 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector return candidate_host; } +HostConstSharedPtr RandomLoadBalancer::peekAnotherHost(LoadBalancerContext* context) { + return peekOrChoose(context, true); +} + HostConstSharedPtr RandomLoadBalancer::chooseHostOnce(LoadBalancerContext* context) { - const absl::optional hosts_source = hostSourceToUse(context); + return peekOrChoose(context, false); +} + +HostConstSharedPtr RandomLoadBalancer::peekOrChoose(LoadBalancerContext* context, bool peek) { + uint64_t random_hash = random(peek); + const absl::optional hosts_source = hostSourceToUse(context, random_hash); if (!hosts_source) { return nullptr; } @@ -839,7 +877,7 @@ HostConstSharedPtr RandomLoadBalancer::chooseHostOnce(LoadBalancerContext* conte return nullptr; } - return hosts_to_use[random_.random() % hosts_to_use.size()]; + return hosts_to_use[random_hash % hosts_to_use.size()]; } SubsetSelectorImpl::SubsetSelectorImpl( diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 11da8b6282be..fab367067255 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -62,7 +62,7 @@ class LoadBalancerBase : public LoadBalancer { * majority of hosts are unhealthy we'll be likely in a panic mode. In this case we'll route * requests to hosts regardless of whether they are healthy or not. */ - bool isHostSetInPanic(const HostSet& host_set); + bool isHostSetInPanic(const HostSet& host_set) const; /** * Method is called when all host sets are in panic mode. @@ -81,7 +81,8 @@ class LoadBalancerBase : public LoadBalancer { // degraded_per_priority_load_, only degraded hosts should be selected from that host set. // // @return host set to use and which availability to target. - std::pair chooseHostSet(LoadBalancerContext* context); + std::pair chooseHostSet(LoadBalancerContext* context, + uint64_t hash) const; uint32_t percentageLoad(uint32_t priority) const { return per_priority_load_.healthy_priority_load_.get()[priority]; @@ -90,9 +91,24 @@ class LoadBalancerBase : public LoadBalancer { return per_priority_load_.degraded_priority_load_.get()[priority]; } bool isInPanic(uint32_t priority) const { return per_priority_panic_[priority]; } + uint64_t random(bool peeking) { + if (peeking) { + stashed_random_.push_back(random_.random()); + return stashed_random_.back(); + } else { + if (!stashed_random_.empty()) { + auto random = stashed_random_.front(); + stashed_random_.pop_front(); + return random; + } else { + return random_.random(); + } + } + } ClusterStats& stats_; Runtime::Loader& runtime_; + std::deque stashed_random_; Random::RandomGenerator& random_; const uint32_t default_healthy_panic_percent_; // The priority-ordered set of hosts to use for load balancing. @@ -240,12 +256,12 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { * Pick the host source to use, doing zone aware routing when the hosts are sufficiently healthy. * If no host is chosen (due to fail_traffic_on_panic being set), return absl::nullopt. */ - absl::optional hostSourceToUse(LoadBalancerContext* context); + absl::optional hostSourceToUse(LoadBalancerContext* context, uint64_t hash) const; /** * Index into priority_set via hosts source descriptor. 
*/ - const HostVector& hostSourceToHosts(HostsSource hosts_source); + const HostVector& hostSourceToHosts(HostsSource hosts_source) const; private: enum class LocalityRoutingState { @@ -273,7 +289,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { * Try to select upstream hosts from the same locality. * @param host_set the last host set returned by chooseHostSet() */ - uint32_t tryChooseLocalLocalityHosts(const HostSet& host_set); + uint32_t tryChooseLocalLocalityHosts(const HostSet& host_set) const; /** * @return (number of hosts in a given locality)/(total number of hosts) in `ret` param. @@ -359,6 +375,7 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); // Upstream::LoadBalancerBase + HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) override; HostConstSharedPtr chooseHostOnce(LoadBalancerContext* context) override; protected: @@ -383,6 +400,8 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { private: virtual void refreshHostSource(const HostsSource& source) PURE; virtual double hostWeight(const Host& host) PURE; + virtual HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, + const HostsSource& source) PURE; virtual HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, const HostsSource& source) PURE; @@ -411,10 +430,25 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { // already exists. Note that host sources will never be removed, but given how uncommon this // is it probably doesn't matter. rr_indexes_.insert({source, seed_}); + // If the list of hosts changes, the order of picks change. Discard the + // index. + peekahead_index_ = 0; } double hostWeight(const Host& host) override { return host.weight(); } + HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, + const HostsSource& source) override { + auto i = rr_indexes_.find(source); + if (i == rr_indexes_.end()) { + return nullptr; + } + return hosts_to_use[(i->second + (peekahead_index_)++) % hosts_to_use.size()]; + } + HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, const HostsSource& source) override { + if (peekahead_index_ > 0) { + --peekahead_index_; + } // To avoid storing the RR index in the base class, we end up using a second map here with // host source as the key. This means that each LB decision will require two map lookups in // the unweighted case. We might consider trying to optimize this in the future. 
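The peek/pick bookkeeping introduced above is easier to see in isolation: peeks walk ahead of the per-source round-robin index via peekahead_index_, each real pick consumes one outstanding peek so the peeked and picked hosts line up, and a host-set refresh discards the peeks (the stashed_random_ queue in LoadBalancerBase plays the same replay role for the random values that peeks consume). The snippet below is a minimal standalone sketch of that idea, not Envoy code; RoundRobinPicker and its members are hypothetical names.

```cpp
// Minimal sketch (not Envoy code) of the round-robin peek/pick bookkeeping.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

class RoundRobinPicker {
public:
  explicit RoundRobinPicker(std::vector<std::string> hosts) : hosts_(std::move(hosts)) {}

  // Peek the host a future pick() will return, without consuming it.
  const std::string& peek() { return hosts_[(rr_index_ + peekahead_index_++) % hosts_.size()]; }

  // Pick the next host; an outstanding peek is consumed so it matches the peeked host.
  const std::string& pick() {
    if (peekahead_index_ > 0) {
      --peekahead_index_;
    }
    return hosts_[rr_index_++ % hosts_.size()];
  }

  // When the host list changes the order of picks changes, so drop outstanding peeks.
  void onHostSetChange(std::vector<std::string> hosts) {
    hosts_ = std::move(hosts);
    peekahead_index_ = 0;
  }

private:
  std::vector<std::string> hosts_;
  uint64_t rr_index_{};
  uint64_t peekahead_index_{};
};

int main() {
  RoundRobinPicker picker({"a", "b", "c"});
  std::cout << picker.peek() << "\n"; // a
  std::cout << picker.peek() << "\n"; // b
  std::cout << picker.pick() << "\n"; // a (consumes the first peek)
  std::cout << picker.pick() << "\n"; // b
  std::cout << picker.pick() << "\n"; // c (no peeks outstanding)
  return 0;
}
```

This also shows why LeastRequestLoadBalancer::unweightedHostPeek above simply returns nullptr: a least-request choice depends on live rq_active counters that another thread can change between the peek and the eventual pick, so a peeked answer cannot be guaranteed to match.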
@@ -422,6 +456,7 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { return hosts_to_use[rr_indexes_[source]++ % hosts_to_use.size()]; } + uint64_t peekahead_index_{}; absl::node_hash_map rr_indexes_; }; @@ -510,6 +545,8 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase, return static_cast(host.weight()) / std::pow(host.stats().rq_active_.value() + 1, active_request_bias_); } + HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, + const HostsSource& source) override; HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, const HostsSource& source) override; @@ -536,6 +573,10 @@ class RandomLoadBalancer : public ZoneAwareLoadBalancerBase { // Upstream::LoadBalancerBase HostConstSharedPtr chooseHostOnce(LoadBalancerContext* context) override; + HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) override; + +protected: + HostConstSharedPtr peekOrChoose(LoadBalancerContext* context, bool peek); }; /** diff --git a/source/common/upstream/locality_endpoint.h b/source/common/upstream/locality_endpoint.h new file mode 100644 index 000000000000..a928cbde3ad0 --- /dev/null +++ b/source/common/upstream/locality_endpoint.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Upstream { + +using LocalityEndpointTuple = std::tuple; +struct LocalityEndpointHash { + size_t operator()(const LocalityEndpointTuple& values) const { + const auto locality_hash = MessageUtil::hash(std::get<0>(values)); + const auto endpoint_hash = MessageUtil::hash(std::get<1>(values)); + return locality_hash ^ endpoint_hash; + } +}; + +struct LocalityEndpointEqualTo { + bool operator()(const LocalityEndpointTuple& lhs, const LocalityEndpointTuple& rhs) const { + return Protobuf::util::MessageDifferencer::Equals(std::get<0>(lhs), std::get<0>(rhs)) && + Protobuf::util::MessageDifferencer::Equals(std::get<1>(lhs), std::get<1>(rhs)); + } +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index 709e801fb34a..fec807c928e4 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -62,7 +62,7 @@ LogicalDnsCluster::LogicalDnsCluster( : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts())) { failure_backoff_strategy_ = Config::Utility::prepareDnsRefreshStrategy( - cluster, dns_refresh_rate_ms_.count(), factory_context.random()); + cluster, dns_refresh_rate_ms_.count(), factory_context.api().randomGenerator()); const auto& locality_lb_endpoints = load_assignment_.endpoints(); if (locality_lb_endpoints.size() != 1 || locality_lb_endpoints[0].lb_endpoints().size() != 1) { diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index a84193ce64cb..2f2eec5e98ef 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -84,13 +84,15 @@ Network::Address::InstanceConstSharedPtr OriginalDstCluster::LoadBalancer::requestOverrideHost(LoadBalancerContext* context) { Network::Address::InstanceConstSharedPtr request_host; const Http::HeaderMap* downstream_headers = context->downstreamHeaders(); - if (downstream_headers && - downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost) != nullptr) { - const std::string 
request_override_host( - downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost) - ->value() - .getStringView()); + Http::HeaderMap::GetResult override_header; + if (downstream_headers) { + override_header = downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost); + } + if (!override_header.empty()) { try { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + const std::string request_override_host(override_header[0]->value().getStringView()); request_host = Network::Utility::parseInternetAddressAndPort(request_override_host, false); ENVOY_LOG(debug, "Using request override host {}.", request_override_host); } catch (const Envoy::EnvoyException& e) { diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index 14970a46094a..a5e6b6e96cef 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -56,6 +56,8 @@ class OriginalDstCluster : public ClusterImplBase { // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; + // Prefetching is not implemented for OriginalDstCluster + HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } private: Network::Address::InstanceConstSharedPtr requestOverrideHost(LoadBalancerContext* context); diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index f826d36fa198..4cbf371daf1d 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -24,7 +24,7 @@ struct ManagedResourceImpl : public BasicResourceLimitImpl { remaining_.set(max); } - // Upstream::Resource + // BasicResourceLimitImpl bool canCreate() override { return current_ < max(); } void inc() override { BasicResourceLimitImpl::inc(); diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index 279bf47bef27..328f2f7a79f8 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -21,7 +21,7 @@ StrictDnsClusterImpl::StrictDnsClusterImpl( respect_dns_ttl_(cluster.respect_dns_ttl()) { failure_backoff_strategy_ = Config::Utility::prepareDnsRefreshStrategy( - cluster, dns_refresh_rate_ms_.count(), factory_context.random()); + cluster, dns_refresh_rate_ms_.count(), factory_context.api().randomGenerator()); std::list resolve_targets; const envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment( diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index 0681d4526d26..d6a300cb318a 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -37,6 +37,8 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable; diff --git a/source/common/upstream/thread_aware_lb_impl.h b/source/common/upstream/thread_aware_lb_impl.h index e36554fcff86..8c02cd54284f 100644 --- a/source/common/upstream/thread_aware_lb_impl.h +++ b/source/common/upstream/thread_aware_lb_impl.h @@ -73,6 +73,8 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL HostConstSharedPtr chooseHostOnce(LoadBalancerContext*) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + // Prefetch not implemented for hash based load balancing + HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } protected: ThreadAwareLoadBalancerBase( @@ -95,6 +97,8 @@ class 
ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; + // Prefetch not implemented for hash based load balancing + HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } ClusterStats& stats_; Random::RandomGenerator& random_; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index a0bb6d3936be..8e6a6db3c507 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -346,6 +346,7 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu } else { connection_options = options; } + ASSERT(!address->envoyInternalAddress()); Network::ClientConnectionPtr connection = dispatcher.createClientConnection( address, cluster.sourceAddress(), socket_factory.createTransportSocket(std::move(transport_socket_options)), @@ -478,12 +479,12 @@ HostSetImpl::chooseLocality(EdfScheduler* locality_scheduler) { if (locality_scheduler == nullptr) { return {}; } - const std::shared_ptr locality = locality_scheduler->pick(); + const std::shared_ptr locality = locality_scheduler->pickAndAdd( + [](const LocalityEntry& locality) { return locality.effective_weight_; }); // We don't build a schedule if there are no weighted localities, so we should always succeed. ASSERT(locality != nullptr); // If we picked it before, its weight must have been positive. ASSERT(locality->effective_weight_ > 0); - locality_scheduler->add(locality->effective_weight_, locality); return locality->index_; } @@ -640,14 +641,12 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { FactoryContextImpl(Stats::Scope& stats_scope, Envoy::Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& c) : admin_(c.admin()), stats_scope_(stats_scope), cluster_manager_(c.clusterManager()), - local_info_(c.localInfo()), dispatcher_(c.dispatcher()), random_(c.random()), - runtime_(runtime), singleton_manager_(c.singletonManager()), tls_(c.threadLocal()), - api_(c.api()) {} + local_info_(c.localInfo()), dispatcher_(c.dispatcher()), runtime_(runtime), + singleton_manager_(c.singletonManager()), tls_(c.threadLocal()), api_(c.api()) {} Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } - Envoy::Random::RandomGenerator& random() override { return random_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } Stats::Scope& scope() override { return stats_scope_; } Singleton::Manager& singletonManager() override { return singleton_manager_; } @@ -666,7 +665,6 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Envoy::Random::RandomGenerator& random_; Envoy::Runtime::Loader& runtime_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; @@ -687,8 +685,10 @@ ClusterInfoImpl::ClusterInfoImpl( Http::DEFAULT_MAX_HEADERS_COUNT))), connect_timeout_( std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(config, connect_timeout))), - prefetch_ratio_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.prefetch_policy(), prefetch_ratio, 1.0)), + per_upstream_prefetch_ratio_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + config.prefetch_policy(), 
per_upstream_prefetch_ratio, 1.0)), + peekahead_ratio_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.prefetch_policy(), predictive_prefetch_ratio, 0)), per_connection_buffer_limit_bytes_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), socket_matcher_(std::move(socket_matcher)), stats_scope_(std::move(stats_scope)), diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index e905d8068d80..c74e489384f0 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -535,7 +535,8 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idleTimeout() const override { return idle_timeout_; } - float prefetchRatio() const override { return prefetch_ratio_; } + float perUpstreamPrefetchRatio() const override { return per_upstream_prefetch_ratio_; } + float peekaheadRatio() const override { return peekahead_ratio_; } uint32_t perConnectionBufferLimitBytes() const override { return per_connection_buffer_limit_bytes_; } @@ -662,7 +663,8 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idle_timeout_; - const float prefetch_ratio_; + const float per_upstream_prefetch_ratio_; + const float peekahead_ratio_; const uint32_t per_connection_buffer_limit_bytes_; TransportSocketMatcherPtr socket_matcher_; Stats::ScopePtr stats_scope_; diff --git a/source/common/watchdog/BUILD b/source/common/watchdog/BUILD new file mode 100644 index 000000000000..db6b6162ba75 --- /dev/null +++ b/source/common/watchdog/BUILD @@ -0,0 +1,38 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "abort_action_lib", + srcs = ["abort_action.cc"], + hdrs = ["abort_action.h"], + deps = [ + "//include/envoy/common:time_interface", + "//include/envoy/server:guarddog_config_interface", + "//include/envoy/thread:thread_interface", + "//source/common/common:assert_lib", + "//source/common/protobuf:utility_lib", + "//source/common/thread:terminate_thread_lib", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "abort_action_config", + srcs = ["abort_action_config.cc"], + hdrs = ["abort_action_config.h"], + deps = [ + ":abort_action_lib", + "//include/envoy/registry", + "//source/common/config:utility_lib", + "//source/common/protobuf", + "//source/common/protobuf:message_validator_lib", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + ], +) diff --git a/source/common/watchdog/README.md b/source/common/watchdog/README.md new file mode 100644 index 000000000000..c8433b9c05b5 --- /dev/null +++ b/source/common/watchdog/README.md @@ -0,0 +1,2 @@ +This contains watchdog actions that are part of core Envoy, and therefore cannot +be in the extensions directory. 
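Stepping back to the prefetch-policy change above: ClusterInfoImpl now reads two ratios from prefetch_policy, per_upstream_prefetch_ratio (default 1.0, exposed as perUpstreamPrefetchRatio()) and predictive_prefetch_ratio (default 0, exposed as peekaheadRatio()). As a rough illustration only, a per-upstream ratio of this kind can be read as "keep connection supply at ratio times current demand"; the sketch below is hypothetical (PoolState and shouldPrefetch are invented names) and is not the actual connection pool logic.

```cpp
// Hypothetical sketch of applying a per-upstream prefetch ratio; not the actual pool code.
#include <cstdint>
#include <iostream>

struct PoolState {
  uint32_t ready_connections{0};
  uint32_t connecting_connections{0};
  uint32_t pending_streams{0};
  uint32_t active_streams{0};
};

// Returns true while the pool should warm one more upstream connection.
bool shouldPrefetch(const PoolState& state, float per_upstream_prefetch_ratio) {
  const float supply = state.ready_connections + state.connecting_connections;
  const float demand = state.pending_streams + state.active_streams;
  return supply < demand * per_upstream_prefetch_ratio;
}

int main() {
  PoolState state;
  state.ready_connections = 1;
  state.pending_streams = 2;
  std::cout << shouldPrefetch(state, 1.0f) << "\n"; // 1: supply 1 < demand 2 * 1.0
  state.connecting_connections = 3;
  std::cout << shouldPrefetch(state, 2.0f) << "\n"; // 0: supply 4 >= demand 2 * 2.0
  return 0;
}
```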
diff --git a/source/common/watchdog/abort_action.cc b/source/common/watchdog/abort_action.cc new file mode 100644 index 000000000000..d629b0bce70e --- /dev/null +++ b/source/common/watchdog/abort_action.cc @@ -0,0 +1,54 @@ +#include "common/watchdog/abort_action.h" + +#include "envoy/thread/thread.h" + +#include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/common/logger.h" +#include "common/protobuf/utility.h" +#include "common/thread/terminate_thread.h" + +namespace Envoy { +namespace Watchdog { +namespace { +constexpr uint64_t DefaultWaitDurationMs = 5000; +} // end namespace + +AbortAction::AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, + Server::Configuration::GuardDogActionFactoryContext& /*context*/) + : wait_duration_(absl::Milliseconds( + PROTOBUF_GET_MS_OR_DEFAULT(config, wait_duration, DefaultWaitDurationMs))) {} + +void AbortAction::run( + envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/, + const std::vector>& thread_last_checkin_pairs, + MonotonicTime /*now*/) { + + if (thread_last_checkin_pairs.empty()) { + ENVOY_LOG_MISC(warn, "Watchdog AbortAction called without any thread."); + return; + } + + // The following lines of code won't be considered covered by code coverage + // tools since they would run in DEATH tests. + const auto& thread_id = thread_last_checkin_pairs[0].first; + const std::string tid_string = thread_id.debugString(); + ENVOY_LOG_MISC(error, "Watchdog AbortAction terminating thread with tid {}.", tid_string); + + if (Thread::terminateThread(thread_id)) { + // Successfully signaled the thread to terminate, sleep for wait_duration. + absl::SleepFor(wait_duration_); + } else { + ENVOY_LOG_MISC(error, "Failed to terminate tid {}", tid_string); + } + + // Abort from the action since the signaled thread hasn't yet crashed the process. + // Panicking in the action gives flexibility since it doesn't depend on + // external code to kill the process if the signal fails. + PANIC(fmt::format( + "Failed to terminate thread with id {}, aborting from Watchdog AbortAction instead.", + tid_string)); +} + +} // namespace Watchdog +} // namespace Envoy diff --git a/source/common/watchdog/abort_action.h b/source/common/watchdog/abort_action.h new file mode 100644 index 000000000000..5170c8bbea00 --- /dev/null +++ b/source/common/watchdog/abort_action.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/server/guarddog_config.h" +#include "envoy/thread/thread.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" + +namespace Envoy { +namespace Watchdog { +/** + * A GuardDogAction that will terminate the process by killing the + * stuck thread.
+ */ +class AbortAction : public Server::Configuration::GuardDogAction { +public: + AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, + Server::Configuration::GuardDogActionFactoryContext& context); + + void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, + const std::vector>& thread_last_checkin_pairs, + MonotonicTime now) override; + +private: + const absl::Duration wait_duration_; +}; + +using AbortActionPtr = std::unique_ptr; + +} // namespace Watchdog +} // namespace Envoy diff --git a/source/common/watchdog/abort_action_config.cc b/source/common/watchdog/abort_action_config.cc new file mode 100644 index 000000000000..916864386ace --- /dev/null +++ b/source/common/watchdog/abort_action_config.cc @@ -0,0 +1,27 @@ +#include "common/watchdog/abort_action_config.h" + +#include "envoy/registry/registry.h" + +#include "common/config/utility.h" +#include "common/protobuf/message_validator_impl.h" +#include "common/watchdog/abort_action.h" + +namespace Envoy { +namespace Watchdog { + +Server::Configuration::GuardDogActionPtr AbortActionFactory::createGuardDogActionFromProto( + const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config, + Server::Configuration::GuardDogActionFactoryContext& context) { + AbortActionConfig message; + Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufWkt::Struct(), + ProtobufMessage::getStrictValidationVisitor(), message); + return std::make_unique(message, context); +} + +/** + * Static registration for the Abort Action factory. @see RegisterFactory. + */ +REGISTER_FACTORY(AbortActionFactory, Server::Configuration::GuardDogActionFactory); + +} // namespace Watchdog +} // namespace Envoy diff --git a/source/common/watchdog/abort_action_config.h b/source/common/watchdog/abort_action_config.h new file mode 100644 index 000000000000..27f65ea16b60 --- /dev/null +++ b/source/common/watchdog/abort_action_config.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/server/guarddog_config.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Watchdog { + +class AbortActionFactory : public Server::Configuration::GuardDogActionFactory { +public: + AbortActionFactory() = default; + + Server::Configuration::GuardDogActionPtr createGuardDogActionFromProto( + const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config, + Server::Configuration::GuardDogActionFactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return "envoy.watchdog.abort_action"; } + + using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; +}; + +} // namespace Watchdog +} // namespace Envoy diff --git a/source/docs/filters/http/cache/cache_filter.md b/source/docs/filters/http/cache/cache_filter.md index 5b1e5c08314e..dbd1455767a0 100644 --- a/source/docs/filters/http/cache/cache_filter.md +++ b/source/docs/filters/http/cache/cache_filter.md @@ -31,7 +31,7 @@ which request methods and response codes are cacheable, subject to the cache-related headers they also define: *cache-control*, *range*, *if-match*, *if-none-match*, *if-modified-since*, *if-unmodified-since*, *if-range*, *authorization*, *date*, *age*, *expires*, and *vary*. Responses with a *vary* header will only be cached -if the named headers are listed in *allowed_vary_headers*. 
+if the named headers are accepted by one of the matching rules in *allowed_vary_headers*. ## Status * ready for developers to write cache storage plugins; please contribute them diff --git a/source/docs/repokitteh.md b/source/docs/repokitteh.md index 1d2b747bdacb..0d07ba9ddc26 100644 --- a/source/docs/repokitteh.md +++ b/source/docs/repokitteh.md @@ -75,17 +75,6 @@ Sets the label `waiting:any` on a PR. When a new commit is pushed or any comment [Demo PR](https://github.com/envoyproxy/envoybot/pull/15) -### [CircleCI Retest](https://github.com/repokitteh/modules/blob/master/circleci.star) -Restart failed CircleCI tests. - -Example: -``` -/retest-circle -``` -Restarts all failed CircleCI tests, as reported in the commit statuses. - -[Demo PR](https://github.com/envoyproxy/envoy/pull/12613#issuecomment-676141200) - ### [Azure Pipelines Retest](https://github.com/envoyproxy/envoy/blob/master/ci/repokitteh/modules/azure_pipelines.star) Restart failed Azure pipelines. diff --git a/source/docs/stats.md b/source/docs/stats.md index f80d1b46932f..418a04f628d7 100644 --- a/source/docs/stats.md +++ b/source/docs/stats.md @@ -180,23 +180,10 @@ showing the memory layout for a few scenarios of constructing and joining symbol ![Symbol Table Memory Diagram](symtab.png) -### Current State and Strategy To Deploy Symbol Tables - -As of September 5, 2019, the symbol table API has been integrated into the -production code, using a temporary ["fake" symbol table -implementation](https://github.com/envoyproxy/envoy/blob/master/source/common/stats/fake_symbol_table_impl.h). This -fake has enabled us to incrementally transform the codebase to pre-symbolize -names as much as possible, avoiding contention in the hot-path. - -There are no longer any explicit production calls to create counters -or gauges directly from a string via `Stats::Scope::counter(const -std::string&)`, though they are ubiquitous in tests. There is also a -`check_format` protection against reintroducting production calls to -`counter()`. - -However, there are still several ways to create hot-path contention -looking up stats by name, and there is no bulletproof way to prevent it from -occurring. +### Symbol Contention Risk + +There are several ways to create hot-path contention looking up stats by name, +and there is no bulletproof way to prevent it from occurring. * The [stats macros](https://github.com/envoyproxy/envoy/blob/master/include/envoy/stats/stats_macros.h) may be used in a data structure which is constructed in response to requests. * An explicit symbol-table lookup, via `StatNamePool` or `StatNameSet` can be made in the hot path. @@ -204,22 +191,21 @@ occurring. It is difficult to search for those scenarios in the source code or prevent them with a format-check, but we can determine whether symbol-table lookups are occurring during via an admin endpoint that shows 20 recent lookups by name, at -`ENVOY_HOST:ADMIN_PORT/stats?recentlookups`. This works only when real symbol -tables are enabled, via command-line option `--use-fake-symbol-table 0`. +`ENVOY_HOST:ADMIN_PORT/stats?recentlookups`. -Once we are confident we've removed all hot-path symbol-table lookups, ideally -through usage of real symbol tables in production, examining that endpoint, we -can enable real symbol tables by default. +As of October 6, 2020, the "fake" symbol table implementation has been removed +from the system, and the "--use-fake-symbol-table" option is now a no-op, +triggering a warning if set to "1". 
The option will be removed in a later +release. ### Symbol Table Class Overview Class | Superclass | Description -----| ---------- | --------- SymbolTable | | Abstract class providing an interface for symbol tables -FakeSymbolTableImpl | SymbolTable | Implementation of SymbolTable API where StatName is represented as a flat string SymbolTableImpl | SymbolTable | Implementation of SymbolTable API where StatName share symbols held in a table SymbolTableImpl::Encoding | | Helper class for incrementally encoding strings into symbols -StatName | | Provides an API and a view into a StatName (dynamic, symbolized, or fake). Like absl::string_view, the backing store must be separately maintained. +StatName | | Provides an API and a view into a StatName (dynamic or symbolized). Like absl::string_view, the backing store must be separately maintained. StatNameStorageBase | | Holds storage (an array of bytes) for a dynamic or symbolized StatName StatNameStorage | StatNameStorageBase | Holds storage for a symbolized StatName. Must be explicitly freed (not just destructed). StatNameManagedStorage | StatNameStorage | Like StatNameStorage, but is 8 bytes larger, and can be destructed without free(). diff --git a/source/exe/BUILD b/source/exe/BUILD index a88969a6ae21..3edb3dd917c2 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -67,7 +67,6 @@ envoy_cc_library( "//source/common/common:compiler_requirements_lib", "//source/common/common:perf_annotation_lib", "//source/common/grpc:google_grpc_context_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/server:hot_restart_lib", "//source/server:hot_restart_nop_lib", "//source/server/config_validation:server_lib", @@ -104,16 +103,16 @@ envoy_cc_library( envoy_cc_library( name = "envoy_main_common_with_core_extensions_lib", + srcs = ["main_common.cc"], + hdrs = ["main_common.h"], deps = [ ":envoy_common_with_core_extensions_lib", - ":main_common_lib", ":platform_impl_lib", ":process_wide_lib", "//source/common/api:os_sys_calls_lib", "//source/common/common:compiler_requirements_lib", "//source/common/common:perf_annotation_lib", "//source/common/grpc:google_grpc_context_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/server:hot_restart_lib", "//source/server:hot_restart_nop_lib", "//source/server/config_validation:server_lib", diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index 32b13d695306..a9e478871ad7 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -11,7 +11,6 @@ #include "common/common/logger.h" #include "common/common/perf_annotation.h" #include "common/network/utility.h" -#include "common/stats/symbol_table_creator.h" #include "common/stats/thread_local_store.h" #include "server/config_validation/server.h" @@ -51,9 +50,7 @@ MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& ti Filesystem::Instance& file_system, std::unique_ptr process_context) : options_(options), component_factory_(component_factory), thread_factory_(thread_factory), - file_system_(file_system), symbol_table_(Stats::SymbolTableCreator::initAndMakeSymbolTable( - options_.fakeSymbolTableEnabled())), - stats_allocator_(*symbol_table_) { + file_system_(file_system), stats_allocator_(symbol_table_) { // Process the option to disable extensions as early as possible, // before we do any configuration loading.
OptionsImpl::disableExtensions(options.disabledExtensions()); @@ -120,7 +117,8 @@ void MainCommonBase::configureHotRestarter(Random::RandomGenerator& random_gener base_id = static_cast(random_generator.random()) & 0x0FFFFFFF; try { - restarter = std::make_unique(base_id, 0); + restarter = std::make_unique(base_id, 0, options_.socketPath(), + options_.socketMode()); } catch (Server::HotRestartDomainSocketInUseException& ex) { // No luck, try again. ENVOY_LOG_MISC(debug, "dynamic base id: {}", ex.what()); @@ -133,7 +131,8 @@ void MainCommonBase::configureHotRestarter(Random::RandomGenerator& random_gener restarter_.swap(restarter); } else { - restarter_ = std::make_unique(base_id, options_.restartEpoch()); + restarter_ = std::make_unique( + base_id, options_.restartEpoch(), options_.socketPath(), options_.socketMode()); } // Write the base-id to the requested path whether we selected it diff --git a/source/exe/main_common.h b/source/exe/main_common.h index 91ea197def3c..a986d502fb42 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -6,7 +6,7 @@ #include "common/common/thread.h" #include "common/event/real_time_system.h" #include "common/grpc/google_grpc_context.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/stats/thread_local_store.h" #include "common/thread_local/thread_local_impl.h" @@ -75,7 +75,7 @@ class MainCommonBase { Server::ComponentFactory& component_factory_; Thread::ThreadFactory& thread_factory_; Filesystem::Instance& file_system_; - Stats::SymbolTablePtr symbol_table_; + Stats::SymbolTableImpl symbol_table_; Stats::AllocatorImpl stats_allocator_; ThreadLocal::InstanceImplPtr tls_; diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index 74b061cbad7c..86556f02ffc5 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -37,7 +37,7 @@ void Utility::responseFlagsToAccessLogResponseFlags( envoy::data::accesslog::v3::AccessLogCommon& common_access_log, const StreamInfo::StreamInfo& stream_info) { - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x200000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x400000, "A flag has been added. 
Fix this code."); if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) { @@ -125,6 +125,9 @@ void Utility::responseFlagsToAccessLogResponseFlags( if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound)) { common_access_log.mutable_response_flags()->set_no_filter_config_found(true); } + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DurationTimeout)) { + common_access_log.mutable_response_flags()->set_duration_timeout(true); + } } void Utility::extractCommonAccessLogProperties( diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index 92bc5e38ee73..4d81f7b19477 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -115,9 +115,11 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, auto* logged_headers = request_properties->mutable_request_headers(); for (const auto& header : request_headers_to_log_) { - const Http::HeaderEntry* entry = request_headers.get(header); - if (entry != nullptr) { - logged_headers->insert({header.get(), std::string(entry->value().getStringView())}); + const auto entry = request_headers.get(header); + if (!entry.empty()) { + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially log all header + // values. + logged_headers->insert({header.get(), std::string(entry[0]->value().getStringView())}); } } } @@ -136,9 +138,11 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, auto* logged_headers = response_properties->mutable_response_headers(); for (const auto& header : response_headers_to_log_) { - const Http::HeaderEntry* entry = response_headers.get(header); - if (entry != nullptr) { - logged_headers->insert({header.get(), std::string(entry->value().getStringView())}); + const auto entry = response_headers.get(header); + if (!entry.empty()) { + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially log all header + // values. + logged_headers->insert({header.get(), std::string(entry[0]->value().getStringView())}); } } } @@ -147,9 +151,11 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, auto* logged_headers = response_properties->mutable_response_trailers(); for (const auto& header : response_trailers_to_log_) { - const Http::HeaderEntry* entry = response_trailers.get(header); - if (entry != nullptr) { - logged_headers->insert({header.get(), std::string(entry->value().getStringView())}); + const auto entry = response_trailers.get(header); + if (!entry.empty()) { + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially log all header + // values. + logged_headers->insert({header.get(), std::string(entry[0]->value().getStringView())}); } } } diff --git a/source/extensions/access_loggers/wasm/BUILD b/source/extensions/access_loggers/wasm/BUILD new file mode 100644 index 000000000000..efb16906d78e --- /dev/null +++ b/source/extensions/access_loggers/wasm/BUILD @@ -0,0 +1,41 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +# Access log implementation that calls into a WASM VM. 
+ +envoy_cc_library( + name = "wasm_access_log_lib", + hdrs = ["wasm_access_log_impl.h"], + deps = [ + "//include/envoy/access_log:access_log_interface", + "//source/common/http:header_map_lib", + "//source/extensions/access_loggers:well_known_names", + "//source/extensions/common/wasm:wasm_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", + deps = [ + ":wasm_access_log_lib", + "//include/envoy/registry", + "//include/envoy/server:access_log_config_interface", + "//source/common/config:datasource_lib", + "//source/common/protobuf", + "//source/extensions/access_loggers:well_known_names", + "//source/extensions/common/wasm:wasm_lib", + "@envoy_api//envoy/extensions/access_loggers/wasm/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/access_loggers/wasm/config.cc b/source/extensions/access_loggers/wasm/config.cc new file mode 100644 index 000000000000..718adb0fad93 --- /dev/null +++ b/source/extensions/access_loggers/wasm/config.cc @@ -0,0 +1,87 @@ +#include "extensions/access_loggers/wasm/config.h" + +#include "envoy/extensions/access_loggers/wasm/v3/wasm.pb.validate.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "common/common/logger.h" +#include "common/protobuf/protobuf.h" + +#include "extensions/access_loggers/wasm/wasm_access_log_impl.h" +#include "extensions/access_loggers/well_known_names.h" +#include "extensions/common/wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace Wasm { + +AccessLog::InstanceSharedPtr +WasmAccessLogFactory::createAccessLogInstance(const Protobuf::Message& proto_config, + AccessLog::FilterPtr&& filter, + Server::Configuration::FactoryContext& context) { + const auto& config = MessageUtil::downcastAndValidate< + const envoy::extensions::access_loggers::wasm::v3::WasmAccessLog&>( + proto_config, context.messageValidationVisitor()); + auto access_log = + std::make_shared(config.config().root_id(), nullptr, std::move(filter)); + + // Create a base WASM to verify that the code loads before setting/cloning it for the + // individual threads. + auto plugin = std::make_shared( + config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(), + config.config().vm_config().runtime(), + Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(), + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, context.localInfo(), + nullptr /* listener_metadata */); + + auto callback = [access_log, &context, plugin](Common::Wasm::WasmHandleSharedPtr base_wasm) { + auto tls_slot = context.threadLocal().allocateSlot(); + + // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. + tls_slot->set( + [base_wasm, + plugin](Event::Dispatcher& dispatcher) -> std::shared_ptr { + if (!base_wasm) { + // There is no way to prevent the connection at this point. The user could choose to use + // an HTTP Wasm plugin and only handle onLog() which would correctly close the + // connection in onRequestHeaders().
+ if (!plugin->fail_open_) { + ENVOY_LOG(critical, "Plugin configured to fail closed failed to load"); + } + return nullptr; + } + return std::static_pointer_cast( + Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher)); + }); + access_log->setTlsSlot(std::move(tls_slot)); + }; + + if (!Common::Wasm::createWasm( + config.config().vm_config(), plugin, context.scope().createScope(""), + context.clusterManager(), context.initManager(), context.dispatcher(), context.api(), + context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { + throw Common::Wasm::WasmException( + fmt::format("Unable to create Wasm access log {}", plugin->name_)); + } + + return access_log; +} + +ProtobufTypes::MessagePtr WasmAccessLogFactory::createEmptyConfigProto() { + return ProtobufTypes::MessagePtr{ + new envoy::extensions::access_loggers::wasm::v3::WasmAccessLog()}; +} + +std::string WasmAccessLogFactory::name() const { return AccessLogNames::get().Wasm; } + +/** + * Static registration for the wasm access log. @see RegisterFactory. + */ +REGISTER_FACTORY(WasmAccessLogFactory, + Server::Configuration::AccessLogInstanceFactory){"envoy.wasm_access_log"}; + +} // namespace Wasm +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/access_loggers/wasm/config.h b/source/extensions/access_loggers/wasm/config.h new file mode 100644 index 000000000000..2fb297108c35 --- /dev/null +++ b/source/extensions/access_loggers/wasm/config.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/server/access_log_config.h" + +#include "common/config/datasource.h" + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace Wasm { + +/** + * Config registration for the file access log. @see AccessLogInstanceFactory. 
+ */ +class WasmAccessLogFactory : public Server::Configuration::AccessLogInstanceFactory, + Logger::Loggable { +public: + AccessLog::InstanceSharedPtr + createAccessLogInstance(const Protobuf::Message& config, AccessLog::FilterPtr&& filter, + Server::Configuration::FactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + std::string name() const override; + +private: + absl::flat_hash_map convertJsonFormatToMap(ProtobufWkt::Struct config); + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; +}; + +} // namespace Wasm +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/access_loggers/wasm/wasm_access_log_impl.h b/source/extensions/access_loggers/wasm/wasm_access_log_impl.h new file mode 100644 index 000000000000..5a7654b97bee --- /dev/null +++ b/source/extensions/access_loggers/wasm/wasm_access_log_impl.h @@ -0,0 +1,53 @@ +#pragma once + +#include "envoy/access_log/access_log.h" + +#include "common/common/logger.h" + +#include "extensions/access_loggers/well_known_names.h" +#include "extensions/common/wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace Wasm { + +using Envoy::Extensions::Common::Wasm::WasmHandle; + +class WasmAccessLog : public AccessLog::Instance { +public: + WasmAccessLog(absl::string_view root_id, ThreadLocal::SlotPtr tls_slot, + AccessLog::FilterPtr filter) + : root_id_(root_id), tls_slot_(std::move(tls_slot)), filter_(std::move(filter)) {} + void log(const Http::RequestHeaderMap* request_headers, + const Http::ResponseHeaderMap* response_headers, + const Http::ResponseTrailerMap* response_trailers, + const StreamInfo::StreamInfo& stream_info) override { + if (filter_ && request_headers && response_headers && response_trailers) { + if (!filter_->evaluate(stream_info, *request_headers, *response_headers, + *response_trailers)) { + return; + } + } + + if (tls_slot_->get()) { + tls_slot_->getTyped().wasm()->log(root_id_, request_headers, response_headers, + response_trailers, stream_info); + } + } + + void setTlsSlot(ThreadLocal::SlotPtr tls_slot) { + ASSERT(tls_slot_ == nullptr); + tls_slot_ = std::move(tls_slot); + } + +private: + std::string root_id_; + ThreadLocal::SlotPtr tls_slot_; + AccessLog::FilterPtr filter_; +}; + +} // namespace Wasm +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/access_loggers/well_known_names.h b/source/extensions/access_loggers/well_known_names.h index 56aca60de3f2..ea39ab3e400a 100644 --- a/source/extensions/access_loggers/well_known_names.h +++ b/source/extensions/access_loggers/well_known_names.h @@ -20,6 +20,8 @@ class AccessLogNameValues { const std::string HttpGrpc = "envoy.access_loggers.http_grpc"; // TCP gRPC access log const std::string TcpGrpc = "envoy.access_loggers.tcp_grpc"; + // WASM access log + const std::string Wasm = "envoy.access_loggers.wasm"; }; using AccessLogNames = ConstSingleton; diff --git a/source/extensions/bootstrap/wasm/BUILD b/source/extensions/bootstrap/wasm/BUILD new file mode 100644 index 000000000000..e23ac8fc84a2 --- /dev/null +++ b/source/extensions/bootstrap/wasm/BUILD @@ -0,0 +1,34 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +# WASM service. 
+ +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = [ + "config.h", + ], + security_posture = "unknown", + status = "alpha", + deps = [ + "//include/envoy/registry", + "//include/envoy/server:bootstrap_extension_config_interface", + "//include/envoy/server:factory_context_interface", + "//include/envoy/server:instance_interface", + "//source/common/common:assert_lib", + "//source/common/common:empty_string", + "//source/common/config:datasource_lib", + "//source/common/protobuf:utility_lib", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/grpc_credentials:well_known_names", + "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/bootstrap/wasm/config.cc b/source/extensions/bootstrap/wasm/config.cc new file mode 100644 index 000000000000..3cc0068b9a16 --- /dev/null +++ b/source/extensions/bootstrap/wasm/config.cc @@ -0,0 +1,88 @@ +#include "extensions/bootstrap/wasm/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/server/factory_context.h" + +#include "common/common/empty_string.h" +#include "common/config/datasource.h" +#include "common/protobuf/utility.h" + +#include "extensions/common/wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace Bootstrap { +namespace Wasm { + +static const std::string INLINE_STRING = ""; + +void WasmFactory::createWasm(const envoy::extensions::wasm::v3::WasmService& config, + Server::Configuration::ServerFactoryContext& context, + CreateWasmServiceCallback&& cb) { + auto plugin = std::make_shared( + config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(), + config.config().vm_config().runtime(), + Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(), + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, context.localInfo(), nullptr); + + bool singleton = config.singleton(); + auto callback = [&context, singleton, plugin, cb](Common::Wasm::WasmHandleSharedPtr base_wasm) { + if (!base_wasm) { + if (plugin->fail_open_) { + ENVOY_LOG(error, "Unable to create Wasm service {}", plugin->name_); + } else { + ENVOY_LOG(critical, "Unable to create Wasm service {}", plugin->name_); + } + return; + } + if (singleton) { + // Return a Wasm VM which will be stored as a singleton by the Server. + cb(std::make_unique( + Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, context.dispatcher()))); + return; + } + // Per-thread WASM VM. + // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. + auto tls_slot = context.threadLocal().allocateSlot(); + tls_slot->set([base_wasm, plugin](Event::Dispatcher& dispatcher) { + return std::static_pointer_cast( + Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher)); + }); + cb(std::make_unique(std::move(tls_slot))); + }; + + if (!Common::Wasm::createWasm( + config.config().vm_config(), plugin, context.scope().createScope(""), + context.clusterManager(), context.initManager(), context.dispatcher(), context.api(), + context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { + // NB: throw if we get a synchronous configuration failure as this is how such failures are + // reported to xDS.
+ throw Common::Wasm::WasmException( + fmt::format("Unable to create Wasm service {}", plugin->name_)); + } +} + +Server::BootstrapExtensionPtr +WasmFactory::createBootstrapExtension(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& context) { + auto typed_config = + MessageUtil::downcastAndValidate( + config, context.messageValidationContext().staticValidationVisitor()); + + auto wasm_service_extension = std::make_unique(); + createWasm(typed_config, context, + [extension = wasm_service_extension.get()](WasmServicePtr wasm) { + extension->wasm_service_ = std::move(wasm); + }); + return wasm_service_extension; +} + +// /** +// * Static registration for the wasm factory. @see RegistryFactory. +// */ +REGISTER_FACTORY(WasmFactory, Server::Configuration::BootstrapExtensionFactory); + +} // namespace Wasm +} // namespace Bootstrap +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/bootstrap/wasm/config.h b/source/extensions/bootstrap/wasm/config.h new file mode 100644 index 000000000000..e70306746389 --- /dev/null +++ b/source/extensions/bootstrap/wasm/config.h @@ -0,0 +1,66 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/extensions/wasm/v3/wasm.pb.h" +#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/server/bootstrap_extension_config.h" +#include "envoy/server/filter_config.h" +#include "envoy/server/instance.h" + +#include "common/protobuf/protobuf.h" + +#include "extensions/common/wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace Bootstrap { +namespace Wasm { + +class WasmService { +public: + WasmService(Common::Wasm::WasmHandleSharedPtr singleton) : singleton_(std::move(singleton)) {} + WasmService(ThreadLocal::SlotPtr tls_slot) : tls_slot_(std::move(tls_slot)) {} + +private: + Common::Wasm::WasmHandleSharedPtr singleton_; + ThreadLocal::SlotPtr tls_slot_; +}; + +using WasmServicePtr = std::unique_ptr; +using CreateWasmServiceCallback = std::function; + +class WasmFactory : public Server::Configuration::BootstrapExtensionFactory, + Logger::Loggable { +public: + ~WasmFactory() override = default; + std::string name() const override { return "envoy.bootstrap.wasm"; } + void createWasm(const envoy::extensions::wasm::v3::WasmService& config, + Server::Configuration::ServerFactoryContext& context, + CreateWasmServiceCallback&& cb); + Server::BootstrapExtensionPtr + createBootstrapExtension(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& context) override; + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + +private: + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; +}; + +class WasmServiceExtension : public Server::BootstrapExtension { +public: + WasmService& wasmService() { + ASSERT(wasm_service_ != nullptr); + return *wasm_service_; + } + +private: + WasmServicePtr wasm_service_; + friend class WasmFactory; +}; + +} // namespace Wasm +} // namespace Bootstrap +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index 836ef16627cc..958c678d0202 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -90,12 +90,16 @@ void Cluster::startPreInit() { void Cluster::refresh(const std::function& skip_predicate) { // Post the priority set to worker threads. 
- tls_->runOnAllThreads([this, skip_predicate, cluster_name = this->info()->name()]() { + // TODO(mattklein123): Remove "this" capture. + tls_->runOnAllThreads([this, skip_predicate, cluster_name = this->info()->name()]( + ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { PriorityContextPtr priority_context = linearizePrioritySet(skip_predicate); Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name); ASSERT(cluster != nullptr); dynamic_cast(cluster->loadBalancer()) .refresh(std::move(priority_context)); + return object; }); } @@ -172,9 +176,10 @@ ClusterFactory::createClusterWithConfig( Upstream::ClusterFactoryContext& context, Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context, Stats::ScopePtr&& stats_scope) { - auto new_cluster = std::make_shared( - cluster, proto_config, context.clusterManager(), context.runtime(), context.random(), - socket_factory_context, std::move(stats_scope), context.tls(), context.addedViaApi()); + auto new_cluster = + std::make_shared(cluster, proto_config, context.clusterManager(), context.runtime(), + context.api().randomGenerator(), socket_factory_context, + std::move(stats_scope), context.tls(), context.addedViaApi()); auto lb = std::make_unique(*new_cluster); return std::make_pair(new_cluster, std::move(lb)); } diff --git a/source/extensions/clusters/aggregate/cluster.h b/source/extensions/clusters/aggregate/cluster.h index 417a8e8de156..92adfe68f187 100644 --- a/source/extensions/clusters/aggregate/cluster.h +++ b/source/extensions/clusters/aggregate/cluster.h @@ -77,6 +77,10 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; + // Prefetching not yet implemented for extensions. + Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { + return nullptr; + } private: // Use inner class to extend LoadBalancerBase. When initializing AggregateClusterLoadBalancer, the @@ -92,6 +96,10 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; + // Prefetching not yet implemented for extensions. 
+ Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { + return nullptr; + } // Upstream::LoadBalancerBase Upstream::HostConstSharedPtr chooseHostOnce(Upstream::LoadBalancerContext*) override { diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index c5af40dfd401..c5215d894b57 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -197,8 +197,8 @@ ClusterFactory::createClusterWithConfig( Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context, Stats::ScopePtr&& stats_scope) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.tls(), context.random(), - context.runtime(), context.stats()); + context.singletonManager(), context.dispatcher(), context.tls(), + context.api().randomGenerator(), context.runtime(), context.stats()); envoy::config::cluster::v3::Cluster cluster_config = cluster; if (cluster_config.has_upstream_http_protocol_options()) { if (!proto_config.allow_insecure_cluster_options() && diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.h b/source/extensions/clusters/dynamic_forward_proxy/cluster.h index 7354c60de168..a34b8d6b1871 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.h +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.h @@ -59,6 +59,10 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; + // Prefetching not implemented. + Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { + return nullptr; + } const HostInfoMapSharedPtr host_map_; }; diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index db5f04d91807..b87c00a4982d 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -43,7 +43,7 @@ RedisCluster::RedisCluster( cluster.has_load_assignment() ? 
cluster.load_assignment() : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts())), - local_info_(factory_context.localInfo()), random_(factory_context.random()), + local_info_(factory_context.localInfo()), random_(api.randomGenerator()), redis_discovery_session_(*this, redis_client_factory), lb_factory_(std::move(lb_factory)), auth_username_( NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(info(), api)), @@ -397,7 +397,8 @@ RedisClusterFactory::createClusterWithConfig( std::move(stats_scope), context.addedViaApi(), nullptr), nullptr); } - auto lb_factory = std::make_shared(context.random()); + auto lb_factory = + std::make_shared(context.api().randomGenerator()); return std::make_pair(std::make_shared( cluster, proto_config, NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_, diff --git a/source/extensions/clusters/redis/redis_cluster_lb.h b/source/extensions/clusters/redis/redis_cluster_lb.h index 0c5142a8290a..561de3b681e5 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.h +++ b/source/extensions/clusters/redis/redis_cluster_lb.h @@ -189,6 +189,9 @@ class RedisClusterLoadBalancerFactory : public ClusterSlotUpdateCallBack, // Upstream::LoadBalancerBase Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext*) override; + Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { + return nullptr; + } private: const SlotArraySharedPtr slot_array_; diff --git a/source/extensions/common/aws/signer_impl.cc b/source/extensions/common/aws/signer_impl.cc index 86730647966b..ec0ae8d4b2f9 100644 --- a/source/extensions/common/aws/signer_impl.cc +++ b/source/extensions/common/aws/signer_impl.cc @@ -82,8 +82,8 @@ std::string SignerImpl::createContentHash(Http::RequestMessage& message, bool si return SignatureConstants::get().HashedEmptyString; } auto& crypto_util = Envoy::Common::Crypto::UtilitySingleton::get(); - const auto content_hash = message.body() - ? Hex::encode(crypto_util.getSha256Digest(*message.body())) + const auto content_hash = message.body().length() > 0 + ? 
Hex::encode(crypto_util.getSha256Digest(message.body())) : SignatureConstants::get().HashedEmptyString; return content_hash; } diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index b2e2d5defce1..20c8a62e7bcf 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -257,8 +257,10 @@ void DnsCacheImpl::updateTlsHostsMap() { } } - tls_slot_->runOnAllThreads([this, new_host_map]() { - tls_slot_->getTyped().updateHostMap(new_host_map); + tls_slot_->runOnAllThreads([new_host_map](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().updateHostMap(new_host_map); + return object; }); } diff --git a/source/extensions/common/tap/extension_config_base.cc b/source/extensions/common/tap/extension_config_base.cc index fda84e29fbeb..6578c02fc37b 100644 --- a/source/extensions/common/tap/extension_config_base.cc +++ b/source/extensions/common/tap/extension_config_base.cc @@ -63,15 +63,22 @@ const absl::string_view ExtensionConfigBase::adminId() { } void ExtensionConfigBase::clearTapConfig() { - tls_slot_->runOnAllThreads([this] { tls_slot_->getTyped().config_ = nullptr; }); + tls_slot_->runOnAllThreads([](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().config_ = nullptr; + return object; + }); } void ExtensionConfigBase::installNewTap(envoy::config::tap::v3::TapConfig&& proto_config, Sink* admin_streamer) { TapConfigSharedPtr new_config = config_factory_->createConfigFromProto(std::move(proto_config), admin_streamer); - tls_slot_->runOnAllThreads( - [this, new_config] { tls_slot_->getTyped().config_ = new_config; }); + tls_slot_->runOnAllThreads([new_config](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + object->asType().config_ = new_config; + return object; + }); } void ExtensionConfigBase::newTapConfig(envoy::config::tap::v3::TapConfig&& proto_config, diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index e594ac846209..02eb727951c1 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -16,32 +16,101 @@ envoy_cc_library( ], ) +# NB: Used to break the circular dependency between wasm_lib and null_plugin_lib. 
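Several changes in this patch (the aggregate cluster refresh, DnsCacheImpl::updateTlsHostsMap, and the tap ExtensionConfigBase updates above) adopt the new runOnAllThreads callback shape: the lambda receives the worker's ThreadLocalObjectSharedPtr and returns the object to store, instead of capturing this and calling getTyped(). The toy program below sketches that shape in plain C++; Slot, TlsConfig and the single-threaded loop are stand-ins for the real ThreadLocal machinery, which posts the callback to each worker dispatcher.

```cpp
// Toy, single-threaded sketch of the new runOnAllThreads callback shape; not the real API.
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct ThreadLocalObject {
  virtual ~ThreadLocalObject() = default;
};
using ThreadLocalObjectSharedPtr = std::shared_ptr<ThreadLocalObject>;
using UpdateCb = std::function<ThreadLocalObjectSharedPtr(ThreadLocalObjectSharedPtr)>;

// Per-worker object holding the config each worker thread reads.
struct TlsConfig : public ThreadLocalObject {
  std::string config_;
};

// Stand-in for ThreadLocal::Slot: one object per worker, updated via a callback.
class Slot {
public:
  explicit Slot(size_t workers) {
    for (size_t i = 0; i < workers; ++i) {
      per_worker_.push_back(std::make_shared<TlsConfig>());
    }
  }
  // In Envoy the callback is posted to each worker dispatcher; here we just loop.
  void runOnAllThreads(const UpdateCb& cb) {
    for (auto& object : per_worker_) {
      object = cb(object);
    }
  }
  const std::vector<ThreadLocalObjectSharedPtr>& perWorker() const { return per_worker_; }

private:
  std::vector<ThreadLocalObjectSharedPtr> per_worker_;
};

int main() {
  Slot slot(2);
  const std::string new_config = "v2";
  // Capture only the data needed; no `this`, and the worker object is passed in and returned.
  slot.runOnAllThreads([new_config](ThreadLocalObjectSharedPtr object) {
    std::static_pointer_cast<TlsConfig>(object)->config_ = new_config;
    return object;
  });
  for (const auto& object : slot.perWorker()) {
    std::cout << std::static_pointer_cast<TlsConfig>(object)->config_ << "\n"; // v2, twice
  }
  return 0;
}
```

Avoiding the `this` capture also means the update stays valid even if the owning object goes away while the cross-thread posts are still in flight, which is the motivation called out in the TODO above.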
envoy_cc_library( - name = "wasm_vm_interface", - hdrs = ["wasm_vm.h"], + name = "wasm_hdr", + hdrs = [ + "context.h", + "wasm.h", + "wasm_extension.h", + "wasm_state.h", + "wasm_vm.h", + ], + visibility = ["//visibility:public"], deps = [ ":well_known_names", - "//include/envoy/stats:stats_interface", - "//source/common/common:minimal_logger_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/server:lifecycle_notifier_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/config:datasource_lib", + "//source/common/singleton:const_singleton", + "//source/common/stats:stats_lib", + "//source/common/version:version_includes", + "//source/extensions/filters/common/expr:evaluator_lib", + "//source/extensions/filters/http:well_known_names", + "@com_google_cel_cpp//eval/public:activation", + "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + "@proxy_wasm_cpp_host//:include", + "@proxy_wasm_cpp_sdk//:common_lib", ], ) envoy_cc_library( - name = "wasm_vm_base", - hdrs = ["wasm_vm_base.h"], + name = "wasm_interoperation_lib", + srcs = [ + "wasm_state.cc", + ], + hdrs = [ + "wasm_state.h", + ], + visibility = ["//visibility:public"], deps = [ - ":wasm_vm_interface", - "//source/common/stats:stats_lib", + "//include/envoy/stream_info:filter_state_interface", + "//source/common/protobuf", + "//source/common/singleton:const_singleton", + "@com_github_google_flatbuffers//:flatbuffers", + "@com_google_cel_cpp//eval/public:cel_value", + "@com_google_cel_cpp//tools:flatbuffers_backed_impl", ], ) envoy_cc_library( - name = "wasm_vm_lib", - srcs = ["wasm_vm.cc"], - deps = [ - ":wasm_vm_interface", - "//source/common/common:assert_lib", - "//source/extensions/common/wasm/null:null_lib", - "//source/extensions/common/wasm/v8:v8_lib", + name = "wasm_lib", + srcs = [ + "context.cc", + "foreign.cc", + "wasm.cc", + "wasm_extension.cc", + "wasm_vm.cc", ], + copts = select({ + "//bazel:windows_x86_64": [], # TODO: fix the windows ANTLR build + "//conditions:default": [ + "-DWASM_USE_CEL_PARSER", + ], + }), + visibility = ["//visibility:public"], + deps = [ + ":wasm_hdr", + ":wasm_interoperation_lib", + "//external:abseil_base", + "//external:abseil_node_hash_map", + "//include/envoy/server:lifecycle_notifier_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:enum_to_int", + "//source/common/config:remote_data_fetcher_lib", + "//source/common/http:message_lib", + "//source/common/http:utility_lib", + "//source/common/tracing:http_tracer_lib", + "//source/extensions/common/wasm/ext:declare_property_cc_proto", + "//source/extensions/common/wasm/ext:envoy_null_vm_wasm_api", + "//source/extensions/filters/common/expr:context_lib", + "@com_google_cel_cpp//eval/eval:field_access", + "@com_google_cel_cpp//eval/eval:field_backed_list_impl", + "@com_google_cel_cpp//eval/eval:field_backed_map_impl", + "@com_google_cel_cpp//eval/public:builtin_func_registrar", + "@com_google_cel_cpp//eval/public:cel_expr_builder_factory", + "@com_google_cel_cpp//eval/public:cel_value", + "@com_google_cel_cpp//eval/public:value_export_util", + "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + "@proxy_wasm_cpp_host//:lib", + ] + select( + { + "//bazel:windows_x86_64": [], + "//conditions:default": [ + "@com_google_cel_cpp//parser", + ], + }, + ), ) diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc new file mode 100644 index 000000000000..e6e4f8ae0f05 --- /dev/null +++ 
b/source/extensions/common/wasm/context.cc @@ -0,0 +1,1853 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "envoy/common/exception.h" +#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/grpc/status.h" +#include "envoy/http/codes.h" +#include "envoy/local_info/local_info.h" +#include "envoy/network/filter.h" +#include "envoy/stats/sink.h" +#include "envoy/thread_local/thread_local.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" +#include "common/common/logger.h" +#include "common/http/header_map_impl.h" +#include "common/http/message_impl.h" +#include "common/http/utility.h" +#include "common/tracing/http_tracer_impl.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/common/wasm/well_known_names.h" +#include "extensions/filters/common/expr/context.h" + +#include "absl/base/casts.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/node_hash_map.h" +#include "absl/strings/str_cat.h" +#include "absl/synchronization/mutex.h" +#include "eval/eval/field_access.h" +#include "eval/eval/field_backed_list_impl.h" +#include "eval/eval/field_backed_map_impl.h" +#include "eval/public/cel_value.h" +#include "openssl/bytestring.h" +#include "openssl/hmac.h" +#include "openssl/sha.h" + +using proxy_wasm::MetricType; +using proxy_wasm::Word; + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +namespace { + +using HashPolicy = envoy::config::route::v3::RouteAction::HashPolicy; + +Http::RequestTrailerMapPtr buildRequestTrailerMapFromPairs(const Pairs& pairs) { + auto map = Http::RequestTrailerMapImpl::create(); + for (auto& p : pairs) { + // Note: because of the lack of a string_view interface for addCopy and + // the lack of an interface to add an entry with an empty value and return + // the entry, there is no efficient way to prevent either a double copy + // of the value or a double lookup of the entry. + map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); + } + return map; +} + +Http::RequestHeaderMapPtr buildRequestHeaderMapFromPairs(const Pairs& pairs) { + auto map = Http::RequestHeaderMapImpl::create(); + for (auto& p : pairs) { + // Note: because of the lack of a string_view interface for addCopy and + // the lack of an interface to add an entry with an empty value and return + // the entry, there is no efficient way to prevent either a double copy + // of the value or a double lookup of the entry. + map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); + } + return map; +} + +template static uint32_t headerSize(const P& p) { return p ? p->size() : 0; } + +constexpr absl::string_view FailStreamResponseDetails = "wasm_fail_stream"; + +} // namespace + +// Test support. 
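+
+// Note on the Buffer overrides below: the host can hand the VM three kinds of payloads
+// through the proxy-wasm BufferBase interface. Plain string data falls through to the
+// BufferBase base class; an immutable Envoy buffer (const_buffer_instance_) is read by
+// size() and copyTo(); and a mutable Envoy buffer (buffer_instance_) is what copyFrom()
+// may prepend to, replace, or append to.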
+ +size_t Buffer::size() const { + if (const_buffer_instance_) { + return const_buffer_instance_->length(); + } + return proxy_wasm::BufferBase::size(); +} + +WasmResult Buffer::copyTo(WasmBase* wasm, size_t start, size_t length, uint64_t ptr_ptr, + uint64_t size_ptr) const { + if (const_buffer_instance_) { + uint64_t pointer; + auto p = wasm->allocMemory(length, &pointer); + if (!p) { + return WasmResult::InvalidMemoryAccess; + } + const_buffer_instance_->copyOut(start, length, p); + if (!wasm->wasm_vm()->setWord(ptr_ptr, Word(pointer))) { + return WasmResult::InvalidMemoryAccess; + } + if (!wasm->wasm_vm()->setWord(size_ptr, Word(length))) { + return WasmResult::InvalidMemoryAccess; + } + return WasmResult::Ok; + } + return proxy_wasm::BufferBase::copyTo(wasm, start, length, ptr_ptr, size_ptr); +} + +WasmResult Buffer::copyFrom(size_t start, size_t length, absl::string_view data) { + if (buffer_instance_) { + if (start == 0) { + if (length == 0) { + buffer_instance_->prepend(data); + return WasmResult::Ok; + } else if (length >= buffer_instance_->length()) { + buffer_instance_->drain(buffer_instance_->length()); + buffer_instance_->add(data); + return WasmResult::Ok; + } else { + return WasmResult::BadArgument; + } + } else if (start >= buffer_instance_->length()) { + buffer_instance_->add(data); + return WasmResult::Ok; + } else { + return WasmResult::BadArgument; + } + } + if (const_buffer_instance_) { // This buffer is immutable. + return WasmResult::BadArgument; + } + return proxy_wasm::BufferBase::copyFrom(start, length, data); +} + +Context::Context() = default; +Context::Context(Wasm* wasm) : ContextBase(wasm) {} +Context::Context(Wasm* wasm, const PluginSharedPtr& plugin) : ContextBase(wasm, plugin) { + root_local_info_ = &std::static_pointer_cast(plugin)->local_info_; +} +Context::Context(Wasm* wasm, uint32_t root_context_id, const PluginSharedPtr& plugin) + : ContextBase(wasm, root_context_id, plugin) {} + +Wasm* Context::wasm() const { return static_cast(wasm_); } +Plugin* Context::plugin() const { return static_cast(plugin_.get()); } +Context* Context::rootContext() const { return static_cast(root_context()); } +Upstream::ClusterManager& Context::clusterManager() const { return wasm()->clusterManager(); } + +void Context::error(absl::string_view message) { ENVOY_LOG(trace, message); } + +uint64_t Context::getCurrentTimeNanoseconds() { + return std::chrono::duration_cast( + wasm()->time_source_.systemTime().time_since_epoch()) + .count(); +} + +void Context::onCloseTCP() { + if (tcp_connection_closed_ || !in_vm_context_created_) { + return; + } + tcp_connection_closed_ = true; + onDone(); + onLog(); + onDelete(); +} + +void Context::onResolveDns(uint32_t token, Envoy::Network::DnsResolver::ResolutionStatus status, + std::list&& response) { + proxy_wasm::DeferAfterCallActions actions(this); + if (wasm()->isFailed() || !wasm()->on_resolve_dns_) { + return; + } + if (status != Network::DnsResolver::ResolutionStatus::Success) { + buffer_.set(""); + wasm()->on_resolve_dns_(this, id_, token, 0); + return; + } + // buffer format: + // 4 bytes number of entries = N + // N * 4 bytes TTL for each entry + // N * null-terminated addresses + uint32_t s = 4; // length + for (auto& e : response) { + s += 4; // for TTL + s += e.address_->asStringView().size() + 1; // null terminated. 
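+    // Hypothetical example: two results "1.2.3.4" and "5.6.7.8" with TTLs 30 and 60
+    // would serialize as [N=2][30][60]"1.2.3.4\0""5.6.7.8\0", i.e.
+    // s = 4 + 2 * 4 + 8 + 8 = 28 bytes.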
+ } + auto buffer = std::unique_ptr(new char[s]); + char* b = buffer.get(); + uint32_t n = response.size(); + memcpy(b, &n, sizeof(uint32_t)); + b += sizeof(uint32_t); + for (auto& e : response) { + uint32_t ttl = e.ttl_.count(); + memcpy(b, &ttl, sizeof(uint32_t)); + b += sizeof(uint32_t); + }; + for (auto& e : response) { + memcpy(b, e.address_->asStringView().data(), e.address_->asStringView().size()); + b += e.address_->asStringView().size(); + *b++ = 0; + }; + buffer_.set(std::move(buffer), s); + wasm()->on_resolve_dns_(this, id_, token, s); +} + +template inline uint32_t align(uint32_t i) { + return (i + sizeof(I) - 1) & ~(sizeof(I) - 1); +} + +template inline char* align(char* p) { + return reinterpret_cast((reinterpret_cast(p) + sizeof(I) - 1) & + ~(sizeof(I) - 1)); +} + +void Context::onStatsUpdate(Envoy::Stats::MetricSnapshot& snapshot) { + proxy_wasm::DeferAfterCallActions actions(this); + if (wasm()->isFailed() || !wasm()->on_stats_update_) { + return; + } + // buffer format: + // uint32 size of block of this type + // uint32 type + // uint32 count + // uint32 length of name + // name + // 8 byte alignment padding + // 8 bytes of absolute value + // 8 bytes of delta (if appropriate, e.g. for counters) + // uint32 size of block of this type + + uint32_t counter_block_size = 3 * sizeof(uint32_t); // type of stat + uint32_t num_counters = snapshot.counters().size(); + uint32_t counter_type = 1; + + uint32_t gauge_block_size = 3 * sizeof(uint32_t); // type of stat + uint32_t num_gauges = snapshot.gauges().size(); + uint32_t gauge_type = 2; + + uint32_t n = 0; + uint64_t v = 0; + + for (const auto& counter : snapshot.counters()) { + if (counter.counter_.get().used()) { + counter_block_size += sizeof(uint32_t) + counter.counter_.get().name().size(); + counter_block_size = align(counter_block_size + 2 * sizeof(uint64_t)); + } + } + + for (const auto& gauge : snapshot.gauges()) { + if (gauge.get().used()) { + gauge_block_size += sizeof(uint32_t) + gauge.get().name().size(); + gauge_block_size += align(gauge_block_size + sizeof(uint64_t)); + } + } + + auto buffer = std::unique_ptr(new char[counter_block_size + gauge_block_size]); + char* b = buffer.get(); + + memcpy(b, &counter_block_size, sizeof(uint32_t)); + b += sizeof(uint32_t); + memcpy(b, &counter_type, sizeof(uint32_t)); + b += sizeof(uint32_t); + memcpy(b, &num_counters, sizeof(uint32_t)); + b += sizeof(uint32_t); + + for (const auto& counter : snapshot.counters()) { + if (counter.counter_.get().used()) { + n = counter.counter_.get().name().size(); + memcpy(b, &n, sizeof(uint32_t)); + b += sizeof(uint32_t); + memcpy(b, counter.counter_.get().name().data(), counter.counter_.get().name().size()); + b = align(b + counter.counter_.get().name().size()); + v = counter.counter_.get().value(); + memcpy(b, &v, sizeof(uint64_t)); + b += sizeof(uint64_t); + v = counter.delta_; + memcpy(b, &v, sizeof(uint64_t)); + b += sizeof(uint64_t); + } + } + + memcpy(b, &gauge_block_size, sizeof(uint32_t)); + b += sizeof(uint32_t); + memcpy(b, &gauge_type, sizeof(uint32_t)); + b += sizeof(uint32_t); + memcpy(b, &num_gauges, sizeof(uint32_t)); + b += sizeof(uint32_t); + + for (const auto& gauge : snapshot.gauges()) { + if (gauge.get().used()) { + n = gauge.get().name().size(); + memcpy(b, &n, sizeof(uint32_t)); + b += sizeof(uint32_t); + memcpy(b, gauge.get().name().data(), gauge.get().name().size()); + b = align(b + gauge.get().name().size()); + v = gauge.get().value(); + memcpy(b, &v, sizeof(uint64_t)); + b += sizeof(uint64_t); + } + } + 
buffer_.set(std::move(buffer), counter_block_size + gauge_block_size); + wasm()->on_stats_update_(this, id_, counter_block_size + gauge_block_size); +} + +// Native serializer carrying over bit representation from CEL value to the extension. +// This implementation assumes that the value type is static and known to the consumer. +WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result) { + using Filters::Common::Expr::CelValue; + int64_t out_int64; + uint64_t out_uint64; + double out_double; + bool out_bool; + const Protobuf::Message* out_message; + switch (value.type()) { + case CelValue::Type::kString: + result->assign(value.StringOrDie().value().data(), value.StringOrDie().value().size()); + return WasmResult::Ok; + case CelValue::Type::kBytes: + result->assign(value.BytesOrDie().value().data(), value.BytesOrDie().value().size()); + return WasmResult::Ok; + case CelValue::Type::kInt64: + out_int64 = value.Int64OrDie(); + result->assign(reinterpret_cast(&out_int64), sizeof(int64_t)); + return WasmResult::Ok; + case CelValue::Type::kUint64: + out_uint64 = value.Uint64OrDie(); + result->assign(reinterpret_cast(&out_uint64), sizeof(uint64_t)); + return WasmResult::Ok; + case CelValue::Type::kDouble: + out_double = value.DoubleOrDie(); + result->assign(reinterpret_cast(&out_double), sizeof(double)); + return WasmResult::Ok; + case CelValue::Type::kBool: + out_bool = value.BoolOrDie(); + result->assign(reinterpret_cast(&out_bool), sizeof(bool)); + return WasmResult::Ok; + case CelValue::Type::kDuration: + // Warning: loss of precision to nanoseconds + out_int64 = absl::ToInt64Nanoseconds(value.DurationOrDie()); + result->assign(reinterpret_cast(&out_int64), sizeof(int64_t)); + return WasmResult::Ok; + case CelValue::Type::kTimestamp: + // Warning: loss of precision to nanoseconds + out_int64 = absl::ToUnixNanos(value.TimestampOrDie()); + result->assign(reinterpret_cast(&out_int64), sizeof(int64_t)); + return WasmResult::Ok; + case CelValue::Type::kMessage: + out_message = value.MessageOrDie(); + result->clear(); + if (!out_message || out_message->SerializeToString(result)) { + return WasmResult::Ok; + } + return WasmResult::SerializationFailure; + case CelValue::Type::kMap: { + const auto& map = *value.MapOrDie(); + const auto& keys = *map.ListKeys(); + std::vector> pairs(map.size(), std::make_pair("", "")); + for (auto i = 0; i < map.size(); i++) { + if (serializeValue(keys[i], &pairs[i].first) != WasmResult::Ok) { + return WasmResult::SerializationFailure; + } + if (serializeValue(map[keys[i]].value(), &pairs[i].second) != WasmResult::Ok) { + return WasmResult::SerializationFailure; + } + } + auto size = proxy_wasm::exports::pairsSize(pairs); + // prevent string inlining which violates byte alignment + result->resize(std::max(size, static_cast(30))); + proxy_wasm::exports::marshalPairs(pairs, result->data()); + result->resize(size); + return WasmResult::Ok; + } + case CelValue::Type::kList: { + const auto& list = *value.ListOrDie(); + std::vector> pairs(list.size(), std::make_pair("", "")); + for (auto i = 0; i < list.size(); i++) { + if (serializeValue(list[i], &pairs[i].first) != WasmResult::Ok) { + return WasmResult::SerializationFailure; + } + } + auto size = proxy_wasm::exports::pairsSize(pairs); + // prevent string inlining which violates byte alignment + if (size < 30) { + result->reserve(30); + } + result->resize(size); + proxy_wasm::exports::marshalPairs(pairs, result->data()); + return WasmResult::Ok; + } + default: + break; + } + return 
WasmResult::SerializationFailure; +} + +#define PROPERTY_TOKENS(_f) \ + _f(METADATA) _f(REQUEST) _f(RESPONSE) _f(CONNECTION) _f(UPSTREAM) _f(NODE) _f(SOURCE) \ + _f(DESTINATION) _f(LISTENER_DIRECTION) _f(LISTENER_METADATA) _f(CLUSTER_NAME) \ + _f(CLUSTER_METADATA) _f(ROUTE_NAME) _f(ROUTE_METADATA) _f(PLUGIN_NAME) \ + _f(UPSTREAM_HOST_METADATA) _f(PLUGIN_ROOT_ID) _f(PLUGIN_VM_ID) _f(CONNECTION_ID) \ + _f(FILTER_STATE) + +static inline std::string downCase(std::string s) { + std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::tolower(c); }); + return s; +} + +#define _DECLARE(_t) _t, +enum class PropertyToken { PROPERTY_TOKENS(_DECLARE) }; +#undef _DECLARE + +#define _PAIR(_t) {downCase(#_t), PropertyToken::_t}, +static absl::flat_hash_map property_tokens = {PROPERTY_TOKENS(_PAIR)}; +#undef _PAIR + +absl::optional +Context::findValue(absl::string_view name, Protobuf::Arena* arena, bool last) const { + using google::api::expr::runtime::CelValue; + + const StreamInfo::StreamInfo* info = getConstRequestStreamInfo(); + + // Convert into a dense token to enable a jump table implementation. + auto part_token = property_tokens.find(name); + if (part_token == property_tokens.end()) { + if (info) { + std::string key; + absl::StrAppend(&key, WasmStateKeyPrefix, name); + const WasmState* state; + if (info->filterState().hasData(key)) { + state = &info->filterState().getDataReadOnly(key); + } else if (info->upstreamFilterState() && + info->upstreamFilterState()->hasData(key)) { + state = &info->upstreamFilterState()->getDataReadOnly(key); + } else { + return {}; + } + return state->exprValue(arena, last); + } + return {}; + } + + switch (part_token->second) { + case PropertyToken::METADATA: + if (info) { + return CelValue::CreateMessage(&info->dynamicMetadata(), arena); + } + break; + case PropertyToken::REQUEST: + if (info) { + return CelValue::CreateMap(Protobuf::Arena::Create( + arena, *arena, request_headers_ ? request_headers_ : access_log_request_headers_, *info)); + } + break; + case PropertyToken::RESPONSE: + if (info) { + return CelValue::CreateMap(Protobuf::Arena::Create( + arena, *arena, response_headers_ ? response_headers_ : access_log_response_headers_, + response_trailers_ ? 
response_trailers_ : access_log_response_trailers_, *info)); + } + break; + case PropertyToken::CONNECTION: + if (info) { + return CelValue::CreateMap( + Protobuf::Arena::Create(arena, *info)); + } + break; + case PropertyToken::CONNECTION_ID: { + auto conn = getConnection(); + if (conn) { + return CelValue::CreateUint64(conn->id()); + } + break; + } + case PropertyToken::UPSTREAM: + if (info) { + return CelValue::CreateMap( + Protobuf::Arena::Create(arena, *info)); + } + break; + case PropertyToken::NODE: + if (root_local_info_) { + return CelValue::CreateMessage(&root_local_info_->node(), arena); + } else if (plugin_) { + return CelValue::CreateMessage(&plugin()->local_info_.node(), arena); + } + break; + case PropertyToken::SOURCE: + if (info) { + return CelValue::CreateMap( + Protobuf::Arena::Create(arena, *info, false)); + } + break; + case PropertyToken::DESTINATION: + if (info) { + return CelValue::CreateMap( + Protobuf::Arena::Create(arena, *info, true)); + } + break; + case PropertyToken::LISTENER_DIRECTION: + if (plugin_) { + return CelValue::CreateInt64(plugin()->direction_); + } + break; + case PropertyToken::LISTENER_METADATA: + if (plugin_) { + return CelValue::CreateMessage(plugin()->listener_metadata_, arena); + } + break; + case PropertyToken::CLUSTER_NAME: + if (info && info->upstreamHost()) { + return CelValue::CreateString(&info->upstreamHost()->cluster().name()); + } else if (info && info->routeEntry()) { + return CelValue::CreateString(&info->routeEntry()->clusterName()); + } else if (info && info->upstreamClusterInfo().has_value() && + info->upstreamClusterInfo().value()) { + return CelValue::CreateString(&info->upstreamClusterInfo().value()->name()); + } + break; + case PropertyToken::CLUSTER_METADATA: + if (info && info->upstreamHost()) { + return CelValue::CreateMessage(&info->upstreamHost()->cluster().metadata(), arena); + } else if (info && info->upstreamClusterInfo().has_value() && + info->upstreamClusterInfo().value()) { + return CelValue::CreateMessage(&info->upstreamClusterInfo().value()->metadata(), arena); + } + break; + case PropertyToken::UPSTREAM_HOST_METADATA: + if (info && info->upstreamHost() && info->upstreamHost()->metadata()) { + return CelValue::CreateMessage(info->upstreamHost()->metadata().get(), arena); + } + break; + case PropertyToken::ROUTE_NAME: + if (info) { + return CelValue::CreateString(&info->getRouteName()); + } + break; + case PropertyToken::ROUTE_METADATA: + if (info && info->routeEntry()) { + return CelValue::CreateMessage(&info->routeEntry()->metadata(), arena); + } + break; + case PropertyToken::PLUGIN_NAME: + if (plugin_) { + return CelValue::CreateStringView(plugin()->name_); + } + break; + case PropertyToken::PLUGIN_ROOT_ID: + return CelValue::CreateStringView(root_id()); + case PropertyToken::PLUGIN_VM_ID: + return CelValue::CreateStringView(wasm()->vm_id()); + case PropertyToken::FILTER_STATE: + return Protobuf::Arena::Create(arena, + info->filterState()) + ->Produce(arena); + } + return {}; +} + +WasmResult Context::getProperty(absl::string_view path, std::string* result) { + using google::api::expr::runtime::CelValue; + + bool first = true; + CelValue value; + Protobuf::Arena arena; + + size_t start = 0; + while (true) { + if (start >= path.size()) { + break; + } + + size_t end = path.find('\0', start); + if (end == absl::string_view::npos) { + end = start + path.size(); + } + auto part = path.substr(start, end - start); + start = end + 1; + + if (first) { + // top-level identifier + first = false; + auto top_value = 
findValue(part, &arena, start >= path.size()); + if (!top_value.has_value()) { + return WasmResult::NotFound; + } + value = top_value.value(); + } else if (value.IsMap()) { + auto& map = *value.MapOrDie(); + auto field = map[CelValue::CreateStringView(part)]; + if (!field.has_value()) { + return WasmResult::NotFound; + } + value = field.value(); + } else if (value.IsMessage()) { + auto msg = value.MessageOrDie(); + if (msg == nullptr) { + return WasmResult::NotFound; + } + const Protobuf::Descriptor* desc = msg->GetDescriptor(); + const Protobuf::FieldDescriptor* field_desc = desc->FindFieldByName(std::string(part)); + if (field_desc == nullptr) { + return WasmResult::NotFound; + } + if (field_desc->is_map()) { + value = CelValue::CreateMap( + Protobuf::Arena::Create( + &arena, msg, field_desc, &arena)); + } else if (field_desc->is_repeated()) { + value = CelValue::CreateList( + Protobuf::Arena::Create( + &arena, msg, field_desc, &arena)); + } else { + auto status = + google::api::expr::runtime::CreateValueFromSingleField(msg, field_desc, &arena, &value); + if (!status.ok()) { + return WasmResult::InternalFailure; + } + } + } else if (value.IsList()) { + auto& list = *value.ListOrDie(); + int idx = 0; + if (!absl::SimpleAtoi(part, &idx)) { + return WasmResult::NotFound; + } + if (idx < 0 || idx >= list.size()) { + return WasmResult::NotFound; + } + value = list[idx]; + } else { + return WasmResult::NotFound; + } + } + + return serializeValue(value, result); +} + +// Header/Trailer/Metadata Maps. +Http::HeaderMap* Context::getMap(WasmHeaderMapType type) { + switch (type) { + case WasmHeaderMapType::RequestHeaders: + return request_headers_; + case WasmHeaderMapType::RequestTrailers: + return request_trailers_; + case WasmHeaderMapType::ResponseHeaders: + return response_headers_; + case WasmHeaderMapType::ResponseTrailers: + return response_trailers_; + default: + return nullptr; + } +} + +const Http::HeaderMap* Context::getConstMap(WasmHeaderMapType type) { + switch (type) { + case WasmHeaderMapType::RequestHeaders: + if (access_log_request_headers_) { + return access_log_request_headers_; + } + return request_headers_; + case WasmHeaderMapType::RequestTrailers: + return request_trailers_; + case WasmHeaderMapType::ResponseHeaders: + if (access_log_response_headers_) { + return access_log_response_headers_; + } + return response_headers_; + case WasmHeaderMapType::ResponseTrailers: + if (access_log_response_trailers_) { + return access_log_response_trailers_; + } + return response_trailers_; + case WasmHeaderMapType::GrpcReceiveInitialMetadata: + return rootContext()->grpc_receive_initial_metadata_.get(); + case WasmHeaderMapType::GrpcReceiveTrailingMetadata: + return rootContext()->grpc_receive_trailing_metadata_.get(); + case WasmHeaderMapType::HttpCallResponseHeaders: { + Envoy::Http::ResponseMessagePtr* response = rootContext()->http_call_response_; + if (response) { + return &(*response)->headers(); + } + return nullptr; + } + case WasmHeaderMapType::HttpCallResponseTrailers: { + Envoy::Http::ResponseMessagePtr* response = rootContext()->http_call_response_; + if (response) { + return (*response)->trailers(); + } + return nullptr; + } + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +WasmResult Context::addHeaderMapValue(WasmHeaderMapType type, absl::string_view key, + absl::string_view value) { + auto map = getMap(type); + if (!map) { + return WasmResult::BadArgument; + } + const Http::LowerCaseString lower_key{std::string(key)}; + map->addCopy(lower_key, std::string(value)); + return 
WasmResult::Ok; +} + +WasmResult Context::getHeaderMapValue(WasmHeaderMapType type, absl::string_view key, + absl::string_view* value) { + auto map = getConstMap(type); + if (!map) { + return WasmResult::BadArgument; + } + const Http::LowerCaseString lower_key{std::string(key)}; + const auto entry = map->get(lower_key); + if (entry.empty()) { + if (wasm()->abiVersion() == proxy_wasm::AbiVersion::ProxyWasm_0_1_0) { + *value = ""; + return WasmResult::Ok; + } else { + return WasmResult::NotFound; + } + } + // TODO(kyessenov, PiotrSikora): This needs to either return a concatenated list of values, or + // the ABI needs to be changed to return multiple values. This is a potential security issue. + *value = entry[0]->value().getStringView(); + return WasmResult::Ok; +} + +Pairs headerMapToPairs(const Http::HeaderMap* map) { + if (!map) { + return {}; + } + Pairs pairs; + pairs.reserve(map->size()); + map->iterate([&pairs](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + pairs.push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); + return pairs; +} + +WasmResult Context::getHeaderMapPairs(WasmHeaderMapType type, Pairs* result) { + *result = headerMapToPairs(getConstMap(type)); + return WasmResult::Ok; +} + +WasmResult Context::setHeaderMapPairs(WasmHeaderMapType type, const Pairs& pairs) { + auto map = getMap(type); + if (!map) { + return WasmResult::BadArgument; + } + std::vector keys; + map->iterate([&keys](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + keys.push_back(std::string(header.key().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); + for (auto& k : keys) { + const Http::LowerCaseString lower_key{k}; + map->remove(lower_key); + } + for (auto& p : pairs) { + const Http::LowerCaseString lower_key{std::string(p.first)}; + map->addCopy(lower_key, std::string(p.second)); + } + return WasmResult::Ok; +} + +WasmResult Context::removeHeaderMapValue(WasmHeaderMapType type, absl::string_view key) { + auto map = getMap(type); + if (!map) { + return WasmResult::BadArgument; + } + const Http::LowerCaseString lower_key{std::string(key)}; + map->remove(lower_key); + return WasmResult::Ok; +} + +WasmResult Context::replaceHeaderMapValue(WasmHeaderMapType type, absl::string_view key, + absl::string_view value) { + auto map = getMap(type); + if (!map) { + return WasmResult::BadArgument; + } + const Http::LowerCaseString lower_key{std::string(key)}; + map->setCopy(lower_key, value); + return WasmResult::Ok; +} + +WasmResult Context::getHeaderMapSize(WasmHeaderMapType type, uint32_t* result) { + auto map = getMap(type); + if (!map) { + return WasmResult::BadArgument; + } + *result = map->byteSize(); + return WasmResult::Ok; +} + +// Buffer + +BufferInterface* Context::getBuffer(WasmBufferType type) { + Envoy::Http::ResponseMessagePtr* response = nullptr; + switch (type) { + case WasmBufferType::CallData: + // Set before the call. + return &buffer_; + case WasmBufferType::VmConfiguration: + return buffer_.set(wasm()->vm_configuration()); + case WasmBufferType::PluginConfiguration: + if (plugin_) { + return buffer_.set(plugin_->plugin_configuration_); + } + return nullptr; + case WasmBufferType::HttpRequestBody: + if (buffering_request_body_) { + // We need the mutable version, so capture it using a callback. + // TODO: consider adding a mutableDecodingBuffer() interface. 
+ ::Envoy::Buffer::Instance* buffer_instance{}; + decoder_callbacks_->modifyDecodingBuffer( + [&buffer_instance](::Envoy::Buffer::Instance& buffer) { buffer_instance = &buffer; }); + return buffer_.set(buffer_instance); + } + return buffer_.set(request_body_buffer_); + case WasmBufferType::HttpResponseBody: + if (buffering_response_body_) { + // TODO: consider adding a mutableDecodingBuffer() interface. + ::Envoy::Buffer::Instance* buffer_instance{}; + encoder_callbacks_->modifyEncodingBuffer( + [&buffer_instance](::Envoy::Buffer::Instance& buffer) { buffer_instance = &buffer; }); + return buffer_.set(buffer_instance); + } + return buffer_.set(response_body_buffer_); + case WasmBufferType::NetworkDownstreamData: + return buffer_.set(network_downstream_data_buffer_); + case WasmBufferType::NetworkUpstreamData: + return buffer_.set(network_upstream_data_buffer_); + case WasmBufferType::HttpCallResponseBody: + response = rootContext()->http_call_response_; + if (response) { + auto& body = (*response)->body(); + return buffer_.set(absl::string_view(static_cast(body.linearize(body.length())), + body.length())); + } + return nullptr; + case WasmBufferType::GrpcReceiveBuffer: + return buffer_.set(rootContext()->grpc_receive_buffer_.get()); + default: + return nullptr; + } +} + +void Context::onDownstreamConnectionClose(CloseType close_type) { + ContextBase::onDownstreamConnectionClose(close_type); + downstream_closed_ = true; + // Call close on TCP connection, if upstream connection closed or there was a failure seen in + // this connection. + if (upstream_closed_ || getRequestStreamInfo()->hasAnyResponseFlag()) { + onCloseTCP(); + } +} + +void Context::onUpstreamConnectionClose(CloseType close_type) { + ContextBase::onUpstreamConnectionClose(close_type); + upstream_closed_ = true; + if (downstream_closed_) { + onCloseTCP(); + } +} + +uint32_t Context::nextHttpCallToken() { + uint32_t token = next_http_call_token_++; + // Handle rollover. + for (;;) { + if (token == 0) { + token = next_http_call_token_++; + } + if (!http_request_.count(token)) { + break; + } + token = next_http_call_token_++; + } + return token; +} + +// Async call via HTTP +WasmResult Context::httpCall(absl::string_view cluster, const Pairs& request_headers, + absl::string_view request_body, const Pairs& request_trailers, + int timeout_milliseconds, uint32_t* token_ptr) { + if (timeout_milliseconds < 0) { + return WasmResult::BadArgument; + } + auto cluster_string = std::string(cluster); + if (clusterManager().get(cluster_string) == nullptr) { + return WasmResult::BadArgument; + } + + Http::RequestMessagePtr message( + new Http::RequestMessageImpl(buildRequestHeaderMapFromPairs(request_headers))); + + // Check that we were provided certain headers. 
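+  // The async HTTP client cannot dispatch a request without the :method, :path and
+  // :authority (Host) pseudo-headers, so reject the call if the plugin omitted any of them.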
+ if (message->headers().Path() == nullptr || message->headers().Method() == nullptr || + message->headers().Host() == nullptr) { + return WasmResult::BadArgument; + } + + if (!request_body.empty()) { + message->body().add(request_body); + message->headers().setContentLength(request_body.size()); + } + + if (!request_trailers.empty()) { + message->trailers(buildRequestTrailerMapFromPairs(request_trailers)); + } + + absl::optional timeout; + if (timeout_milliseconds > 0) { + timeout = std::chrono::milliseconds(timeout_milliseconds); + } + + uint32_t token = nextHttpCallToken(); + auto& handler = http_request_[token]; + + // set default hash policy to be based on :authority to enable consistent hash + Http::AsyncClient::RequestOptions options; + options.setTimeout(timeout); + Protobuf::RepeatedPtrField hash_policy; + hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); + options.setHashPolicy(hash_policy); + auto http_request = clusterManager() + .httpAsyncClientForCluster(cluster_string) + .send(std::move(message), handler, options); + if (!http_request) { + http_request_.erase(token); + return WasmResult::InternalFailure; + } + handler.context_ = this; + handler.token_ = token; + handler.request_ = http_request; + *token_ptr = token; + return WasmResult::Ok; +} + +uint32_t Context::nextGrpcCallToken() { + uint32_t token = next_grpc_token_++; + if (isGrpcStreamToken(token)) { + token = next_grpc_token_++; + } + // Handle rollover. Note: token is always odd. + for (;;) { + if (!grpc_call_request_.count(token)) { + break; + } + next_grpc_token_++; // Skip stream token. + token = next_grpc_token_++; + } + return token; +} + +WasmResult Context::grpcCall(absl::string_view grpc_service, absl::string_view service_name, + absl::string_view method_name, const Pairs& initial_metadata, + absl::string_view request, std::chrono::milliseconds timeout, + uint32_t* token_ptr) { + GrpcService service_proto; + if (!service_proto.ParseFromArray(grpc_service.data(), grpc_service.size())) { + return WasmResult::ParseFailure; + } + uint32_t token = nextGrpcCallToken(); + auto& handler = grpc_call_request_[token]; + handler.context_ = this; + handler.token_ = token; + auto grpc_client = + clusterManager() + .grpcAsyncClientManager() + .factoryForGrpcService(service_proto, *wasm()->scope_, true /* skip_cluster_check */) + ->create(); + grpc_initial_metadata_ = buildRequestHeaderMapFromPairs(initial_metadata); + + // set default hash policy to be based on :authority to enable consistent hash + Http::AsyncClient::RequestOptions options; + options.setTimeout(timeout); + Protobuf::RepeatedPtrField hash_policy; + hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); + options.setHashPolicy(hash_policy); + + auto grpc_request = grpc_client->sendRaw(service_name, method_name, + std::make_unique<::Envoy::Buffer::OwnedImpl>(request), + handler, Tracing::NullSpan::instance(), options); + if (!grpc_request) { + grpc_call_request_.erase(token); + return WasmResult::InternalFailure; + } + handler.client_ = std::move(grpc_client); + handler.request_ = grpc_request; + *token_ptr = token; + return WasmResult::Ok; +} + +uint32_t Context::nextGrpcStreamToken() { + uint32_t token = next_grpc_token_++; + if (isGrpcCallToken(token)) { + token = next_grpc_token_++; + } + // Handle rollover. Note: token is always even. + for (;;) { + if (token == 0) { + next_grpc_token_++; // Skip call token. 
+ token = next_grpc_token_++; + } + if (!grpc_stream_.count(token)) { + break; + } + next_grpc_token_++; // Skip call token. + token = next_grpc_token_++; + } + return token; +} + +WasmResult Context::grpcStream(absl::string_view grpc_service, absl::string_view service_name, + absl::string_view method_name, const Pairs& initial_metadata, + uint32_t* token_ptr) { + GrpcService service_proto; + if (!service_proto.ParseFromArray(grpc_service.data(), grpc_service.size())) { + return WasmResult::ParseFailure; + } + uint32_t token = nextGrpcStreamToken(); + auto& handler = grpc_stream_[token]; + handler.context_ = this; + handler.token_ = token; + auto grpc_client = + clusterManager() + .grpcAsyncClientManager() + .factoryForGrpcService(service_proto, *wasm()->scope_, true /* skip_cluster_check */) + ->create(); + grpc_initial_metadata_ = buildRequestHeaderMapFromPairs(initial_metadata); + + // set default hash policy to be based on :authority to enable consistent hash + Http::AsyncClient::StreamOptions options; + Protobuf::RepeatedPtrField hash_policy; + hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); + options.setHashPolicy(hash_policy); + + auto grpc_stream = grpc_client->startRaw(service_name, method_name, handler, options); + if (!grpc_stream) { + grpc_stream_.erase(token); + return WasmResult::InternalFailure; + } + handler.client_ = std::move(grpc_client); + handler.stream_ = grpc_stream; + *token_ptr = token; + return WasmResult::Ok; +} + +// NB: this is currently called inline, so the token is known to be that of the currently +// executing grpcCall or grpcStream. +void Context::onGrpcCreateInitialMetadata(uint32_t /* token */, + Http::RequestHeaderMap& initial_metadata) { + if (grpc_initial_metadata_) { + initial_metadata = std::move(*grpc_initial_metadata_); + grpc_initial_metadata_.reset(); + } +} + +// StreamInfo +const StreamInfo::StreamInfo* Context::getConstRequestStreamInfo() const { + if (encoder_callbacks_) { + return &encoder_callbacks_->streamInfo(); + } else if (decoder_callbacks_) { + return &decoder_callbacks_->streamInfo(); + } else if (access_log_stream_info_) { + return access_log_stream_info_; + } else if (network_read_filter_callbacks_) { + return &network_read_filter_callbacks_->connection().streamInfo(); + } else if (network_write_filter_callbacks_) { + return &network_write_filter_callbacks_->connection().streamInfo(); + } + return nullptr; +} + +StreamInfo::StreamInfo* Context::getRequestStreamInfo() const { + if (encoder_callbacks_) { + return &encoder_callbacks_->streamInfo(); + } else if (decoder_callbacks_) { + return &decoder_callbacks_->streamInfo(); + } else if (network_read_filter_callbacks_) { + return &network_read_filter_callbacks_->connection().streamInfo(); + } else if (network_write_filter_callbacks_) { + return &network_write_filter_callbacks_->connection().streamInfo(); + } + return nullptr; +} + +const Network::Connection* Context::getConnection() const { + if (encoder_callbacks_) { + return encoder_callbacks_->connection(); + } else if (decoder_callbacks_) { + return decoder_callbacks_->connection(); + } else if (network_read_filter_callbacks_) { + return &network_read_filter_callbacks_->connection(); + } else if (network_write_filter_callbacks_) { + return &network_write_filter_callbacks_->connection(); + } + return nullptr; +} + +WasmResult Context::setProperty(absl::string_view path, absl::string_view value) { + auto* stream_info = getRequestStreamInfo(); + if (!stream_info) { + return 
WasmResult::NotFound; + } + std::string key; + absl::StrAppend(&key, WasmStateKeyPrefix, path); + WasmState* state; + if (stream_info->filterState()->hasData(key)) { + state = &stream_info->filterState()->getDataMutable(key); + } else { + const auto& it = rootContext()->state_prototypes_.find(path); + const WasmStatePrototype& prototype = it == rootContext()->state_prototypes_.end() + ? DefaultWasmStatePrototype::get() + : *it->second.get(); // NOLINT + auto state_ptr = std::make_unique(prototype); + state = state_ptr.get(); + stream_info->filterState()->setData(key, std::move(state_ptr), + StreamInfo::FilterState::StateType::Mutable, + prototype.life_span_); + } + if (!state->setValue(value)) { + return WasmResult::BadArgument; + } + return WasmResult::Ok; +} + +WasmResult Context::declareProperty(absl::string_view path, + std::unique_ptr state_prototype) { + // Do not delete existing schema since it can be referenced by state objects. + if (state_prototypes_.find(path) == state_prototypes_.end()) { + state_prototypes_[path] = std::move(state_prototype); + return WasmResult::Ok; + } + return WasmResult::BadArgument; +} + +WasmResult Context::log(uint32_t level, absl::string_view message) { + switch (static_cast(level)) { + case spdlog::level::trace: + ENVOY_LOG(trace, "wasm log{}: {}", log_prefix(), message); + return WasmResult::Ok; + case spdlog::level::debug: + ENVOY_LOG(debug, "wasm log{}: {}", log_prefix(), message); + return WasmResult::Ok; + case spdlog::level::info: + ENVOY_LOG(info, "wasm log{}: {}", log_prefix(), message); + return WasmResult::Ok; + case spdlog::level::warn: + ENVOY_LOG(warn, "wasm log{}: {}", log_prefix(), message); + return WasmResult::Ok; + case spdlog::level::err: + ENVOY_LOG(error, "wasm log{}: {}", log_prefix(), message); + return WasmResult::Ok; + case spdlog::level::critical: + ENVOY_LOG(critical, "wasm log{}: {}", log_prefix(), message); + return WasmResult::Ok; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +uint32_t Context::getLogLevel() { + // Like the "log" call above, assume that spdlog level as an int + // matches the enum in the SDK + return static_cast(ENVOY_LOGGER().level()); +} + +// +// Calls into the Wasm code. 
+// +bool Context::validateConfiguration(absl::string_view configuration, + const std::shared_ptr& plugin_base) { + auto plugin = std::static_pointer_cast(plugin_base); + if (!wasm()->validate_configuration_) { + return true; + } + plugin_ = plugin_base; + auto result = + wasm() + ->validate_configuration_(this, id_, static_cast(configuration.size())) + .u64_ != 0; + plugin_.reset(); + return result; +} + +absl::string_view Context::getConfiguration() { + if (plugin_) { + return plugin_->plugin_configuration_; + } else { + return wasm()->vm_configuration(); + } +}; + +std::pair Context::getStatus() { + return std::make_pair(status_code_, status_message_); +} + +void Context::onGrpcReceiveInitialMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata) { + grpc_receive_initial_metadata_ = std::move(metadata); + onGrpcReceiveInitialMetadata(token, headerSize(grpc_receive_initial_metadata_)); + grpc_receive_initial_metadata_ = nullptr; +} + +void Context::onGrpcReceiveTrailingMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata) { + grpc_receive_trailing_metadata_ = std::move(metadata); + onGrpcReceiveTrailingMetadata(token, headerSize(grpc_receive_trailing_metadata_)); + grpc_receive_trailing_metadata_ = nullptr; +} + +WasmResult Context::defineMetric(uint32_t metric_type, absl::string_view name, + uint32_t* metric_id_ptr) { + if (metric_type > static_cast(MetricType::Max)) { + return WasmResult::BadArgument; + } + auto type = static_cast(metric_type); + // TODO: Consider rethinking the scoping policy as it does not help in this case. + Stats::StatNameManagedStorage storage(name, wasm()->scope_->symbolTable()); + Stats::StatName stat_name = storage.statName(); + if (type == MetricType::Counter) { + auto id = wasm()->nextCounterMetricId(); + auto c = &wasm()->scope_->counterFromStatName(stat_name); + wasm()->counters_.emplace(id, c); + *metric_id_ptr = id; + return WasmResult::Ok; + } + if (type == MetricType::Gauge) { + auto id = wasm()->nextGaugeMetricId(); + auto g = &wasm()->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate); + wasm()->gauges_.emplace(id, g); + *metric_id_ptr = id; + return WasmResult::Ok; + } + // (type == MetricType::Histogram) { + auto id = wasm()->nextHistogramMetricId(); + auto h = &wasm()->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified); + wasm()->histograms_.emplace(id, h); + *metric_id_ptr = id; + return WasmResult::Ok; +} + +WasmResult Context::incrementMetric(uint32_t metric_id, int64_t offset) { + auto type = static_cast(metric_id & Wasm::kMetricTypeMask); + if (type == MetricType::Counter) { + auto it = wasm()->counters_.find(metric_id); + if (it != wasm()->counters_.end()) { + if (offset > 0) { + it->second->add(offset); + return WasmResult::Ok; + } else { + return WasmResult::BadArgument; + } + } + return WasmResult::NotFound; + } else if (type == MetricType::Gauge) { + auto it = wasm()->gauges_.find(metric_id); + if (it != wasm()->gauges_.end()) { + if (offset > 0) { + it->second->add(offset); + return WasmResult::Ok; + } else { + it->second->sub(-offset); + return WasmResult::Ok; + } + } + return WasmResult::NotFound; + } + return WasmResult::BadArgument; +} + +WasmResult Context::recordMetric(uint32_t metric_id, uint64_t value) { + auto type = static_cast(metric_id & Wasm::kMetricTypeMask); + if (type == MetricType::Counter) { + auto it = wasm()->counters_.find(metric_id); + if (it != wasm()->counters_.end()) { + it->second->add(value); + return WasmResult::Ok; + } + } else if (type == 
MetricType::Gauge) { + auto it = wasm()->gauges_.find(metric_id); + if (it != wasm()->gauges_.end()) { + it->second->set(value); + return WasmResult::Ok; + } + } else if (type == MetricType::Histogram) { + auto it = wasm()->histograms_.find(metric_id); + if (it != wasm()->histograms_.end()) { + it->second->recordValue(value); + return WasmResult::Ok; + } + } + return WasmResult::NotFound; +} + +WasmResult Context::getMetric(uint32_t metric_id, uint64_t* result_uint64_ptr) { + auto type = static_cast(metric_id & Wasm::kMetricTypeMask); + if (type == MetricType::Counter) { + auto it = wasm()->counters_.find(metric_id); + if (it != wasm()->counters_.end()) { + *result_uint64_ptr = it->second->value(); + return WasmResult::Ok; + } + return WasmResult::NotFound; + } else if (type == MetricType::Gauge) { + auto it = wasm()->gauges_.find(metric_id); + if (it != wasm()->gauges_.end()) { + *result_uint64_ptr = it->second->value(); + return WasmResult::Ok; + } + return WasmResult::NotFound; + } + return WasmResult::BadArgument; +} + +Context::~Context() { + // Cancel any outstanding requests. + for (auto& p : http_request_) { + p.second.request_->cancel(); + } + for (auto& p : grpc_call_request_) { + p.second.request_->cancel(); + } + for (auto& p : grpc_stream_) { + p.second.stream_->resetStream(); + } +} + +Network::FilterStatus convertNetworkFilterStatus(proxy_wasm::FilterStatus status) { + switch (status) { + default: + case proxy_wasm::FilterStatus::Continue: + return Network::FilterStatus::Continue; + case proxy_wasm::FilterStatus::StopIteration: + return Network::FilterStatus::StopIteration; + } +}; + +Http::FilterHeadersStatus convertFilterHeadersStatus(proxy_wasm::FilterHeadersStatus status) { + switch (status) { + default: + case proxy_wasm::FilterHeadersStatus::Continue: + return Http::FilterHeadersStatus::Continue; + case proxy_wasm::FilterHeadersStatus::StopIteration: + return Http::FilterHeadersStatus::StopIteration; + case proxy_wasm::FilterHeadersStatus::StopAllIterationAndBuffer: + return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + case proxy_wasm::FilterHeadersStatus::StopAllIterationAndWatermark: + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; + } +}; + +Http::FilterTrailersStatus convertFilterTrailersStatus(proxy_wasm::FilterTrailersStatus status) { + switch (status) { + default: + case proxy_wasm::FilterTrailersStatus::Continue: + return Http::FilterTrailersStatus::Continue; + case proxy_wasm::FilterTrailersStatus::StopIteration: + return Http::FilterTrailersStatus::StopIteration; + } +}; + +Http::FilterMetadataStatus convertFilterMetadataStatus(proxy_wasm::FilterMetadataStatus status) { + switch (status) { + default: + case proxy_wasm::FilterMetadataStatus::Continue: + return Http::FilterMetadataStatus::Continue; + } +}; + +Http::FilterDataStatus convertFilterDataStatus(proxy_wasm::FilterDataStatus status) { + switch (status) { + default: + case proxy_wasm::FilterDataStatus::Continue: + return Http::FilterDataStatus::Continue; + case proxy_wasm::FilterDataStatus::StopIterationAndBuffer: + return Http::FilterDataStatus::StopIterationAndBuffer; + case proxy_wasm::FilterDataStatus::StopIterationAndWatermark: + return Http::FilterDataStatus::StopIterationAndWatermark; + case proxy_wasm::FilterDataStatus::StopIterationNoBuffer: + return Http::FilterDataStatus::StopIterationNoBuffer; + } +}; + +Network::FilterStatus Context::onNewConnection() { + onCreate(); + return convertNetworkFilterStatus(onNetworkNewConnection()); +}; + +Network::FilterStatus 
Context::onData(::Envoy::Buffer::Instance& data, bool end_stream) { + if (!in_vm_context_created_) { + return Network::FilterStatus::Continue; + } + network_downstream_data_buffer_ = &data; + end_of_stream_ = end_stream; + auto result = convertNetworkFilterStatus(onDownstreamData(data.length(), end_stream)); + if (result == Network::FilterStatus::Continue) { + network_downstream_data_buffer_ = nullptr; + } + return result; +} + +Network::FilterStatus Context::onWrite(::Envoy::Buffer::Instance& data, bool end_stream) { + if (!in_vm_context_created_) { + return Network::FilterStatus::Continue; + } + network_upstream_data_buffer_ = &data; + end_of_stream_ = end_stream; + auto result = convertNetworkFilterStatus(onUpstreamData(data.length(), end_stream)); + if (result == Network::FilterStatus::Continue) { + network_upstream_data_buffer_ = nullptr; + } + if (end_stream) { + // This is called when seeing end_stream=true and not on an upstream connection event, + // because registering for latter requires replicating the whole TCP proxy extension. + onUpstreamConnectionClose(CloseType::Unknown); + } + return result; +} + +void Context::onEvent(Network::ConnectionEvent event) { + if (!in_vm_context_created_) { + return; + } + switch (event) { + case Network::ConnectionEvent::LocalClose: + onDownstreamConnectionClose(CloseType::Local); + break; + case Network::ConnectionEvent::RemoteClose: + onDownstreamConnectionClose(CloseType::Remote); + break; + default: + break; + } +} + +void Context::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { + network_read_filter_callbacks_ = &callbacks; + network_read_filter_callbacks_->connection().addConnectionCallbacks(*this); +} + +void Context::initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) { + network_write_filter_callbacks_ = &callbacks; +} + +void Context::log(const Http::RequestHeaderMap* request_headers, + const Http::ResponseHeaderMap* response_headers, + const Http::ResponseTrailerMap* response_trailers, + const StreamInfo::StreamInfo& stream_info) { + if (!in_vm_context_created_) { + // If the request is invalid then onRequestHeaders() will not be called and neither will + // onCreate() in cases like sendLocalReply who short-circuits envoy + // lifecycle. This is because Envoy does not have a well defined lifetime for the combined + // HTTP + // + AccessLog filter. Thus, to log these scenarios, we call onCreate() in log function below. + onCreate(); + } + + access_log_request_headers_ = request_headers; + // ? request_trailers ? + access_log_response_headers_ = response_headers; + access_log_response_trailers_ = response_trailers; + access_log_stream_info_ = &stream_info; + + onLog(); + + access_log_request_headers_ = nullptr; + // ? request_trailers ? 
+ access_log_response_headers_ = nullptr; + access_log_response_trailers_ = nullptr; + access_log_stream_info_ = nullptr; +} + +void Context::onDestroy() { + if (destroyed_ || !in_vm_context_created_) { + return; + } + destroyed_ = true; + onDone(); + onDelete(); +} + +WasmResult Context::continueStream(WasmStreamType stream_type) { + switch (stream_type) { + case WasmStreamType::Request: + if (decoder_callbacks_) { + decoder_callbacks_->continueDecoding(); + } + break; + case WasmStreamType::Response: + if (encoder_callbacks_) { + encoder_callbacks_->continueEncoding(); + } + break; + default: + return WasmResult::BadArgument; + } + request_headers_ = nullptr; + request_body_buffer_ = nullptr; + request_trailers_ = nullptr; + request_metadata_ = nullptr; + return WasmResult::Ok; +} + +WasmResult Context::closeStream(WasmStreamType stream_type) { + switch (stream_type) { + case WasmStreamType::Request: + if (decoder_callbacks_) { + if (!decoder_callbacks_->streamInfo().responseCodeDetails().has_value()) { + decoder_callbacks_->streamInfo().setResponseCodeDetails(FailStreamResponseDetails); + } + decoder_callbacks_->resetStream(); + } + return WasmResult::Ok; + case WasmStreamType::Response: + if (encoder_callbacks_) { + if (!encoder_callbacks_->streamInfo().responseCodeDetails().has_value()) { + encoder_callbacks_->streamInfo().setResponseCodeDetails(FailStreamResponseDetails); + } + encoder_callbacks_->resetStream(); + } + return WasmResult::Ok; + case WasmStreamType::Downstream: + if (network_read_filter_callbacks_) { + network_read_filter_callbacks_->connection().close( + Envoy::Network::ConnectionCloseType::FlushWrite); + } + return WasmResult::Ok; + case WasmStreamType::Upstream: + network_write_filter_callbacks_->connection().close( + Envoy::Network::ConnectionCloseType::FlushWrite); + return WasmResult::Ok; + } + return WasmResult::BadArgument; +} + +WasmResult Context::sendLocalResponse(uint32_t response_code, absl::string_view body_text, + Pairs additional_headers, uint32_t grpc_status, + absl::string_view details) { + // "additional_headers" is a collection of string_views. These will no longer + // be valid when "modify_headers" is finally called below, so we must + // make copies of all the headers. + std::vector> additional_headers_copy; + for (auto& p : additional_headers) { + const Http::LowerCaseString lower_key{std::string(p.first)}; + additional_headers_copy.emplace_back(lower_key, std::string(p.second)); + } + + auto modify_headers = [additional_headers_copy](Http::HeaderMap& headers) { + for (auto& p : additional_headers_copy) { + headers.addCopy(p.first, p.second); + } + }; + + if (decoder_callbacks_) { + // This is a bit subtle because proxy_on_delete() does call DeferAfterCallActions(), + // so in theory it could call this and the Context in the VM would be invalid, + // but because it only gets called after the connections have drained, the call to + // sendLocalReply() will fail. Net net, this is safe. 
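+    // Deferring through addAfterVmCallAction() also means sendLocalReply() only runs
+    // once the current Wasm call has unwound, so the local reply cannot re-enter the
+    // VM while plugin code is still on the stack.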
+ wasm()->addAfterVmCallAction([this, response_code, body_text = std::string(body_text), + modify_headers = std::move(modify_headers), grpc_status, + details = std::string(details)] { + decoder_callbacks_->sendLocalReply(static_cast(response_code), body_text, + modify_headers, grpc_status, details); + }); + } + return WasmResult::Ok; +} + +Http::FilterHeadersStatus Context::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { + onCreate(); + http_request_started_ = true; + request_headers_ = &headers; + end_of_stream_ = end_stream; + auto result = convertFilterHeadersStatus(onRequestHeaders(headerSize(&headers), end_stream)); + if (result == Http::FilterHeadersStatus::Continue) { + request_headers_ = nullptr; + } + return result; +} + +Http::FilterDataStatus Context::decodeData(::Envoy::Buffer::Instance& data, bool end_stream) { + if (!http_request_started_) { + return Http::FilterDataStatus::Continue; + } + request_body_buffer_ = &data; + end_of_stream_ = end_stream; + const auto buffer = getBuffer(WasmBufferType::HttpRequestBody); + const auto buffer_size = (buffer == nullptr) ? 0 : buffer->size(); + auto result = convertFilterDataStatus(onRequestBody(buffer_size, end_stream)); + buffering_request_body_ = false; + switch (result) { + case Http::FilterDataStatus::Continue: + request_body_buffer_ = nullptr; + break; + case Http::FilterDataStatus::StopIterationAndBuffer: + buffering_request_body_ = true; + break; + case Http::FilterDataStatus::StopIterationAndWatermark: + case Http::FilterDataStatus::StopIterationNoBuffer: + break; + } + return result; +} + +Http::FilterTrailersStatus Context::decodeTrailers(Http::RequestTrailerMap& trailers) { + if (!http_request_started_) { + return Http::FilterTrailersStatus::Continue; + } + request_trailers_ = &trailers; + auto result = convertFilterTrailersStatus(onRequestTrailers(headerSize(&trailers))); + if (result == Http::FilterTrailersStatus::Continue) { + request_trailers_ = nullptr; + } + return result; +} + +Http::FilterMetadataStatus Context::decodeMetadata(Http::MetadataMap& request_metadata) { + if (!http_request_started_) { + return Http::FilterMetadataStatus::Continue; + } + request_metadata_ = &request_metadata; + auto result = convertFilterMetadataStatus(onRequestMetadata(headerSize(&request_metadata))); + if (result == Http::FilterMetadataStatus::Continue) { + request_metadata_ = nullptr; + } + return result; +} + +void Context::setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) { + decoder_callbacks_ = &callbacks; +} + +Http::FilterHeadersStatus Context::encode100ContinueHeaders(Http::ResponseHeaderMap&) { + return Http::FilterHeadersStatus::Continue; +} + +Http::FilterHeadersStatus Context::encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) { + if (!http_request_started_) { + return Http::FilterHeadersStatus::Continue; + } + response_headers_ = &headers; + end_of_stream_ = end_stream; + auto result = convertFilterHeadersStatus(onResponseHeaders(headerSize(&headers), end_stream)); + if (result == Http::FilterHeadersStatus::Continue) { + response_headers_ = nullptr; + } + return result; +} + +Http::FilterDataStatus Context::encodeData(::Envoy::Buffer::Instance& data, bool end_stream) { + if (!http_request_started_) { + return Http::FilterDataStatus::Continue; + } + response_body_buffer_ = &data; + end_of_stream_ = end_stream; + const auto buffer = getBuffer(WasmBufferType::HttpResponseBody); + const auto buffer_size = (buffer == nullptr) ? 
0 : buffer->size(); + auto result = convertFilterDataStatus(onResponseBody(buffer_size, end_stream)); + buffering_response_body_ = false; + switch (result) { + case Http::FilterDataStatus::Continue: + request_body_buffer_ = nullptr; + break; + case Http::FilterDataStatus::StopIterationAndBuffer: + buffering_response_body_ = true; + break; + case Http::FilterDataStatus::StopIterationAndWatermark: + case Http::FilterDataStatus::StopIterationNoBuffer: + break; + } + return result; +} + +Http::FilterTrailersStatus Context::encodeTrailers(Http::ResponseTrailerMap& trailers) { + if (!http_request_started_) { + return Http::FilterTrailersStatus::Continue; + } + response_trailers_ = &trailers; + auto result = convertFilterTrailersStatus(onResponseTrailers(headerSize(&trailers))); + if (result == Http::FilterTrailersStatus::Continue) { + response_trailers_ = nullptr; + } + return result; +} + +Http::FilterMetadataStatus Context::encodeMetadata(Http::MetadataMap& response_metadata) { + if (!http_request_started_) { + return Http::FilterMetadataStatus::Continue; + } + response_metadata_ = &response_metadata; + auto result = convertFilterMetadataStatus(onResponseMetadata(headerSize(&response_metadata))); + if (result == Http::FilterMetadataStatus::Continue) { + response_metadata_ = nullptr; + } + return result; +} + +// Http::FilterMetadataStatus::Continue; + +void Context::setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) { + encoder_callbacks_ = &callbacks; +} + +void Context::onHttpCallSuccess(uint32_t token, Envoy::Http::ResponseMessagePtr&& response) { + // TODO: convert this into a function in proxy-wasm-cpp-host and use here. + if (proxy_wasm::current_context_ != nullptr) { + // We are in a reentrant call, so defer. + wasm()->addAfterVmCallAction([this, token, response = response.release()] { + onHttpCallSuccess(token, std::unique_ptr(response)); + }); + return; + } + http_call_response_ = &response; + uint32_t body_size = response->body().length(); + onHttpCallResponse(token, response->headers().size(), body_size, + headerSize(response->trailers())); + http_call_response_ = nullptr; + http_request_.erase(token); +} + +void Context::onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason reason) { + if (proxy_wasm::current_context_ != nullptr) { + // We are in a reentrant call, so defer. + wasm()->addAfterVmCallAction([this, token, reason] { onHttpCallFailure(token, reason); }); + return; + } + status_code_ = static_cast(WasmResult::BrokenConnection); + // This is the only value currently. + ASSERT(reason == Http::AsyncClient::FailureReason::Reset); + status_message_ = "reset"; + onHttpCallResponse(token, 0, 0, 0); + status_message_ = ""; + http_request_.erase(token); +} + +void Context::onGrpcReceiveWrapper(uint32_t token, ::Envoy::Buffer::InstancePtr response) { + ASSERT(proxy_wasm::current_context_ == nullptr); // Non-reentrant. + if (wasm()->on_grpc_receive_) { + grpc_receive_buffer_ = std::move(response); + uint32_t response_size = grpc_receive_buffer_->length(); + ContextBase::onGrpcReceive(token, response_size); + grpc_receive_buffer_.reset(); + } + if (isGrpcCallToken(token)) { + grpc_call_request_.erase(token); + } +} + +void Context::onGrpcCloseWrapper(uint32_t token, const Grpc::Status::GrpcStatus& status, + const absl::string_view message) { + if (proxy_wasm::current_context_ != nullptr) { + // We are in a reentrant call, so defer. 
+ wasm()->addAfterVmCallAction([this, token, status, message = std::string(message)] { + onGrpcCloseWrapper(token, status, message); + }); + return; + } + if (wasm()->on_grpc_close_) { + status_code_ = static_cast(status); + status_message_ = message; + onGrpcClose(token, status_code_); + status_message_ = ""; + } + if (isGrpcCallToken(token)) { + grpc_call_request_.erase(token); + } else { + auto it = grpc_stream_.find(token); + if (it != grpc_stream_.end()) { + if (it->second.local_closed_) { + grpc_stream_.erase(token); + } + } + } +} + +WasmResult Context::grpcSend(uint32_t token, absl::string_view message, bool end_stream) { + if (isGrpcCallToken(token)) { + return WasmResult::BadArgument; + } + auto it = grpc_stream_.find(token); + if (it == grpc_stream_.end()) { + return WasmResult::NotFound; + } + if (it->second.stream_) { + it->second.stream_->sendMessageRaw(::Envoy::Buffer::InstancePtr(new ::Envoy::Buffer::OwnedImpl( + message.data(), message.size())), + end_stream); + } + return WasmResult::Ok; +} + +WasmResult Context::grpcClose(uint32_t token) { + if (isGrpcCallToken(token)) { + auto it = grpc_call_request_.find(token); + if (it == grpc_call_request_.end()) { + return WasmResult::NotFound; + } + if (it->second.request_) { + it->second.request_->cancel(); + } + grpc_call_request_.erase(token); + } else { + auto it = grpc_stream_.find(token); + if (it == grpc_stream_.end()) { + return WasmResult::NotFound; + } + if (it->second.stream_) { + it->second.stream_->closeStream(); + } + if (it->second.remote_closed_) { + grpc_stream_.erase(token); + } else { + it->second.local_closed_ = true; + } + } + return WasmResult::Ok; +} + +WasmResult Context::grpcCancel(uint32_t token) { + if (isGrpcCallToken(token)) { + auto it = grpc_call_request_.find(token); + if (it == grpc_call_request_.end()) { + return WasmResult::NotFound; + } + if (it->second.request_) { + it->second.request_->cancel(); + } + grpc_call_request_.erase(token); + } else { + auto it = grpc_stream_.find(token); + if (it == grpc_stream_.end()) { + return WasmResult::NotFound; + } + if (it->second.stream_) { + it->second.stream_->resetStream(); + } + grpc_stream_.erase(token); + } + return WasmResult::Ok; +} + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/context.h b/source/extensions/common/wasm/context.h new file mode 100644 index 000000000000..e288c1e50602 --- /dev/null +++ b/source/extensions/common/wasm/context.h @@ -0,0 +1,485 @@ +#pragma once + +#include +#include +#include + +#include "envoy/access_log/access_log.h" +#include "envoy/buffer/buffer.h" +#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/http/filter.h" +#include "envoy/stats/sink.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/assert.h" +#include "common/common/logger.h" + +#include "extensions/common/wasm/wasm_state.h" +#include "extensions/filters/common/expr/evaluator.h" + +#include "eval/public/activation.h" +#include "include/proxy-wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +using proxy_wasm::BufferInterface; +using proxy_wasm::CloseType; +using proxy_wasm::ContextBase; +using proxy_wasm::Pairs; +using proxy_wasm::PairsWithStringValues; +using proxy_wasm::PluginBase; +using proxy_wasm::SharedQueueDequeueToken; +using proxy_wasm::SharedQueueEnqueueToken; +using proxy_wasm::WasmBase; +using proxy_wasm::WasmBufferType; +using proxy_wasm::WasmHandleBase; +using 
proxy_wasm::WasmHeaderMapType; +using proxy_wasm::WasmResult; +using proxy_wasm::WasmStreamType; + +using VmConfig = envoy::extensions::wasm::v3::VmConfig; +using GrpcService = envoy::config::core::v3::GrpcService; + +class Wasm; + +using WasmHandleBaseSharedPtr = std::shared_ptr; + +// Opaque context object. +class StorageObject { +public: + virtual ~StorageObject() = default; +}; + +class Buffer : public proxy_wasm::BufferBase { +public: + Buffer() = default; + + // proxy_wasm::BufferInterface + size_t size() const override; + WasmResult copyTo(WasmBase* wasm, size_t start, size_t length, uint64_t ptr_ptr, + uint64_t size_ptr) const override; + WasmResult copyFrom(size_t start, size_t length, absl::string_view data) override; + + // proxy_wasm::BufferBase + void clear() override { + proxy_wasm::BufferBase::clear(); + const_buffer_instance_ = nullptr; + buffer_instance_ = nullptr; + } + Buffer* set(absl::string_view data) { + return static_cast(proxy_wasm::BufferBase::set(data)); + } + Buffer* set(std::unique_ptr owned_data, uint32_t owned_data_size) { + return static_cast( + proxy_wasm::BufferBase::set(std::move(owned_data), owned_data_size)); + } + + Buffer* set(::Envoy::Buffer::Instance* buffer_instance) { + clear(); + buffer_instance_ = buffer_instance; + const_buffer_instance_ = buffer_instance; + return this; + } + Buffer* set(const ::Envoy::Buffer::Instance* buffer_instance) { + clear(); + const_buffer_instance_ = buffer_instance; + return this; + } + +private: + const ::Envoy::Buffer::Instance* const_buffer_instance_{}; + ::Envoy::Buffer::Instance* buffer_instance_{}; +}; + +// Plugin contains the information for a filter/service. +struct Plugin : public PluginBase { + Plugin(absl::string_view name, absl::string_view root_id, absl::string_view vm_id, + absl::string_view runtime, absl::string_view plugin_configuration, bool fail_open, + envoy::config::core::v3::TrafficDirection direction, + const LocalInfo::LocalInfo& local_info, + const envoy::config::core::v3::Metadata* listener_metadata) + : PluginBase(name, root_id, vm_id, runtime, plugin_configuration, fail_open), + direction_(direction), local_info_(local_info), listener_metadata_(listener_metadata) {} + + envoy::config::core::v3::TrafficDirection direction_; + const LocalInfo::LocalInfo& local_info_; + const envoy::config::core::v3::Metadata* listener_metadata_; +}; +using PluginSharedPtr = std::shared_ptr; + +// A context which will be the target of callbacks for a particular session +// e.g. a handler of a stream. +class Context : public proxy_wasm::ContextBase, + public Logger::Loggable, + public AccessLog::Instance, + public Http::StreamFilter, + public Network::ConnectionCallbacks, + public Network::Filter, + public google::api::expr::runtime::BaseActivation, + public std::enable_shared_from_this { +public: + Context(); // Testing. + Context(Wasm* wasm); // Vm Context. + Context(Wasm* wasm, const PluginSharedPtr& plugin); // Root Context. + Context(Wasm* wasm, uint32_t root_context_id, const PluginSharedPtr& plugin); // Stream context. + ~Context() override; + + Wasm* wasm() const; + Plugin* plugin() const; + Context* rootContext() const; + Upstream::ClusterManager& clusterManager() const; + + // proxy_wasm::ContextBase + void error(absl::string_view message) override; + + // Retrieves the stream info associated with the request (a.k.a active stream). 
+ // It selects a value based on the following order: encoder callback, decoder + // callback, log callback, network read filter callback, network write filter + // callback. As long as any one of the callbacks is invoked, the value should be + // available. + const StreamInfo::StreamInfo* getConstRequestStreamInfo() const; + StreamInfo::StreamInfo* getRequestStreamInfo() const; + + // Retrieves the connection object associated with the request (a.k.a active stream). + // It selects a value based on the following order: encoder callback, decoder + // callback. As long as any one of the callbacks is invoked, the value should be + // available. + const Network::Connection* getConnection() const; + + // + // VM level down-calls into the Wasm code on Context(id == 0). + // + virtual bool validateConfiguration(absl::string_view configuration, + const std::shared_ptr& plugin); // deprecated + + // AccessLog::Instance + void log(const Http::RequestHeaderMap* request_headers, + const Http::ResponseHeaderMap* response_headers, + const Http::ResponseTrailerMap* response_trailers, + const StreamInfo::StreamInfo& stream_info) override; + + uint32_t getLogLevel() override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + // Network::ReadFilter + Network::FilterStatus onNewConnection() override; + Network::FilterStatus onData(::Envoy::Buffer::Instance& data, bool end_stream) override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; + + // Network::WriteFilter + Network::FilterStatus onWrite(::Envoy::Buffer::Instance& data, bool end_stream) override; + void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override; + + // proxy_wasm::ContextBase + void onDownstreamConnectionClose(CloseType) override; + void onUpstreamConnectionClose(CloseType) override; + + // Http::StreamFilterBase. Note: This calls onDone() in Wasm. + void onDestroy() override; + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) override; + Http::FilterDataStatus decodeData(::Envoy::Buffer::Instance& data, bool end_stream) override; + Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override; + Http::FilterMetadataStatus decodeMetadata(Http::MetadataMap& metadata_map) override; + void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) override; + + // Http::StreamEncoderFilter + Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override; + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) override; + Http::FilterDataStatus encodeData(::Envoy::Buffer::Instance& data, bool end_stream) override; + Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override; + Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap& metadata_map) override; + void setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) override; + + // VM calls out to host. 
+ // proxy_wasm::ContextBase + + // General + WasmResult log(uint32_t level, absl::string_view message) override; + uint64_t getCurrentTimeNanoseconds() override; + absl::string_view getConfiguration() override; + std::pair getStatus() override; + + // State accessors + WasmResult getProperty(absl::string_view path, std::string* result) override; + WasmResult setProperty(absl::string_view path, absl::string_view value) override; + WasmResult declareProperty(absl::string_view path, + std::unique_ptr state_prototype); + + // Continue + WasmResult continueStream(WasmStreamType stream_type) override; + WasmResult closeStream(WasmStreamType stream_type) override; + WasmResult sendLocalResponse(uint32_t response_code, absl::string_view body_text, + Pairs additional_headers, uint32_t grpc_status, + absl::string_view details) override; + void clearRouteCache() override { + if (decoder_callbacks_) { + decoder_callbacks_->clearRouteCache(); + } + } + + // Header/Trailer/Metadata Maps + WasmResult addHeaderMapValue(WasmHeaderMapType type, absl::string_view key, + absl::string_view value) override; + WasmResult getHeaderMapValue(WasmHeaderMapType type, absl::string_view key, + absl::string_view* value) override; + WasmResult getHeaderMapPairs(WasmHeaderMapType type, Pairs* result) override; + WasmResult setHeaderMapPairs(WasmHeaderMapType type, const Pairs& pairs) override; + + WasmResult removeHeaderMapValue(WasmHeaderMapType type, absl::string_view key) override; + WasmResult replaceHeaderMapValue(WasmHeaderMapType type, absl::string_view key, + absl::string_view value) override; + + WasmResult getHeaderMapSize(WasmHeaderMapType type, uint32_t* size) override; + + // Buffer + BufferInterface* getBuffer(WasmBufferType type) override; + // TODO: use stream_type. 
+ bool endOfStream(WasmStreamType /* stream_type */) override { return end_of_stream_; } + + // HTTP + WasmResult httpCall(absl::string_view cluster, const Pairs& request_headers, + absl::string_view request_body, const Pairs& request_trailers, + int timeout_millisconds, uint32_t* token_ptr) override; + + // Stats/Metrics + WasmResult defineMetric(uint32_t type, absl::string_view name, uint32_t* metric_id_ptr) override; + WasmResult incrementMetric(uint32_t metric_id, int64_t offset) override; + WasmResult recordMetric(uint32_t metric_id, uint64_t value) override; + WasmResult getMetric(uint32_t metric_id, uint64_t* value_ptr) override; + + // gRPC + WasmResult grpcCall(absl::string_view grpc_service, absl::string_view service_name, + absl::string_view method_name, const Pairs& initial_metadata, + absl::string_view request, std::chrono::milliseconds timeout, + uint32_t* token_ptr) override; + WasmResult grpcStream(absl::string_view grpc_service, absl::string_view service_name, + absl::string_view method_name, const Pairs& initial_metadat, + uint32_t* token_ptr) override; + + WasmResult grpcClose(uint32_t token) override; + WasmResult grpcCancel(uint32_t token) override; + WasmResult grpcSend(uint32_t token, absl::string_view message, bool end_stream) override; + + // Envoy specific ABI + void onResolveDns(uint32_t token, Envoy::Network::DnsResolver::ResolutionStatus status, + std::list&& response); + + void onStatsUpdate(Envoy::Stats::MetricSnapshot& snapshot); + + // CEL evaluation + std::vector + FindFunctionOverloads(absl::string_view) const override { + return {}; + } + absl::optional + findValue(absl::string_view name, Protobuf::Arena* arena, bool last) const; + absl::optional + FindValue(absl::string_view name, Protobuf::Arena* arena) const override { + return findValue(name, arena, false); + } + bool IsPathUnknown(absl::string_view) const override { return false; } + const std::vector& + unknown_attribute_patterns() const override { + static const std::vector empty; + return empty; + } + const Protobuf::FieldMask& unknown_paths() const override { + return Protobuf::FieldMask::default_instance(); + } + + // Foreign function state + virtual void setForeignData(absl::string_view data_name, std::unique_ptr data) { + data_storage_[data_name] = std::move(data); + } + template T* getForeignData(absl::string_view data_name) { + const auto& it = data_storage_.find(data_name); + if (it == data_storage_.end()) { + return nullptr; + } + return dynamic_cast(it->second.get()); + } + + uint32_t nextGrpcCallToken(); + uint32_t nextGrpcStreamToken(); + uint32_t nextHttpCallToken(); + void setNextGrpcTokenForTesting(uint32_t token) { next_grpc_token_ = token; } + void setNextHttpCallTokenForTesting(uint32_t token) { next_http_call_token_ = token; } + +protected: + friend class Wasm; + + void addAfterVmCallAction(std::function f); + void onCloseTCP(); + + struct AsyncClientHandler : public Http::AsyncClient::Callbacks { + // Http::AsyncClient::Callbacks + void onSuccess(const Http::AsyncClient::Request&, + Envoy::Http::ResponseMessagePtr&& response) override { + context_->onHttpCallSuccess(token_, std::move(response)); + } + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) override { + context_->onHttpCallFailure(token_, reason); + } + void + onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span& /* span */, + const Http::ResponseHeaderMap* /* response_headers */) override {} + + Context* context_; + uint32_t token_; + Http::AsyncClient::Request* request_; + }; 
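The gRPC plumbing in this file keys everything off a single 32-bit token whose parity distinguishes unary calls from streams: grpcSend/grpcClose/grpcCancel earlier in this diff branch on isGrpcCallToken(), and the header declares isGrpcCallToken/isGrpcStreamToken alongside nextGrpcCallToken/nextGrpcStreamToken. A minimal, self-contained sketch of such a parity-based allocator follows, for illustration only; it mirrors the declared interface but is not the PR's implementation, and it omits the wraparound and in-use-token checks a real allocator needs.

#include <cstdint>

// Sketch: odd tokens identify unary gRPC calls, even tokens identify gRPC streams,
// matching isGrpcCallToken()/isGrpcStreamToken() declared in this header.
class GrpcTokenAllocator {
public:
  uint32_t nextCallToken() {
    if ((next_token_ & 1) == 0) {
      ++next_token_; // skip forward to an odd value
    }
    const uint32_t token = next_token_;
    next_token_ += 2;
    return token;
  }

  uint32_t nextStreamToken() {
    if ((next_token_ & 1) == 1) {
      ++next_token_; // skip forward to an even value
    }
    const uint32_t token = next_token_;
    next_token_ += 2;
    return token;
  }

  static bool isCallToken(uint32_t token) { return (token & 1) == 1; }
  static bool isStreamToken(uint32_t token) { return (token & 1) == 0; }

private:
  uint32_t next_token_ = 1; // calls and streams share one counter
};

On the host side the token doubles as the key into the pending-call and pending-stream maps, which is why grpcClose() earlier in this diff erases a stream entry only once both the local and remote halves have closed.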
+ + struct GrpcCallClientHandler : public Grpc::RawAsyncRequestCallbacks { + // Grpc::AsyncRequestCallbacks + void onCreateInitialMetadata(Http::RequestHeaderMap& initial_metadata) override { + context_->onGrpcCreateInitialMetadata(token_, initial_metadata); + } + void onSuccessRaw(::Envoy::Buffer::InstancePtr&& response, Tracing::Span& /* span */) override { + context_->onGrpcReceiveWrapper(token_, std::move(response)); + } + void onFailure(Grpc::Status::GrpcStatus status, const std::string& message, + Tracing::Span& /* span */) override { + context_->onGrpcCloseWrapper(token_, status, message); + } + + Context* context_; + uint32_t token_; + Grpc::RawAsyncClientPtr client_; + Grpc::AsyncRequest* request_; + }; + + struct GrpcStreamClientHandler : public Grpc::RawAsyncStreamCallbacks { + // Grpc::AsyncStreamCallbacks + void onCreateInitialMetadata(Http::RequestHeaderMap&) override {} + void onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata) override { + context_->onGrpcReceiveInitialMetadataWrapper(token_, std::move(metadata)); + } + bool onReceiveMessageRaw(::Envoy::Buffer::InstancePtr&& response) override { + context_->onGrpcReceiveWrapper(token_, std::move(response)); + return true; + } + void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) override { + context_->onGrpcReceiveTrailingMetadataWrapper(token_, std::move(metadata)); + } + void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override { + remote_closed_ = true; + context_->onGrpcCloseWrapper(token_, status, message); + } + + Context* context_; + uint32_t token_; + Grpc::RawAsyncClientPtr client_; + Grpc::RawAsyncStream* stream_; + bool local_closed_ = false; + bool remote_closed_ = false; + }; + + void onHttpCallSuccess(uint32_t token, Envoy::Http::ResponseMessagePtr&& response); + void onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason reason); + + void onGrpcCreateInitialMetadata(uint32_t token, Http::RequestHeaderMap& metadata); + void onGrpcReceiveInitialMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata); + void onGrpcReceiveWrapper(uint32_t token, ::Envoy::Buffer::InstancePtr response); + void onGrpcReceiveTrailingMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata); + void onGrpcCloseWrapper(uint32_t token, const Grpc::Status::GrpcStatus& status, + const absl::string_view message); + + bool isGrpcStreamToken(uint32_t token) { return (token & 1) == 0; } + bool isGrpcCallToken(uint32_t token) { return (token & 1) == 1; } + + Http::HeaderMap* getMap(WasmHeaderMapType type); + const Http::HeaderMap* getConstMap(WasmHeaderMapType type); + + const LocalInfo::LocalInfo* root_local_info_{nullptr}; // set only for root_context. + + uint32_t next_http_call_token_ = 1; + uint32_t next_grpc_token_ = 1; // Odd tokens are for Calls even for Streams. + + // Network callbacks. + Network::ReadFilterCallbacks* network_read_filter_callbacks_{}; + Network::WriteFilterCallbacks* network_write_filter_callbacks_{}; + + // HTTP callbacks. + Envoy::Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; + Envoy::Http::StreamEncoderFilterCallbacks* encoder_callbacks_{}; + + // Status. + uint32_t status_code_{0}; + absl::string_view status_message_; + + // Network filter state. + ::Envoy::Buffer::Instance* network_downstream_data_buffer_{}; + ::Envoy::Buffer::Instance* network_upstream_data_buffer_{}; + + // HTTP filter state. + bool http_request_started_ = false; // When decodeHeaders() is called the request is "started". 
+ Http::RequestHeaderMap* request_headers_{}; + Http::ResponseHeaderMap* response_headers_{}; + ::Envoy::Buffer::Instance* request_body_buffer_{}; + ::Envoy::Buffer::Instance* response_body_buffer_{}; + Http::RequestTrailerMap* request_trailers_{}; + Http::ResponseTrailerMap* response_trailers_{}; + Http::MetadataMap* request_metadata_{}; + Http::MetadataMap* response_metadata_{}; + + // Only available during onHttpCallResponse. + Envoy::Http::ResponseMessagePtr* http_call_response_{}; + + Http::HeaderMapPtr grpc_receive_initial_metadata_{}; + Http::HeaderMapPtr grpc_receive_trailing_metadata_{}; + + // Only available (non-nullptr) during onGrpcReceive. + ::Envoy::Buffer::InstancePtr grpc_receive_buffer_; + + // Only available (non-nullptr) during grpcCall and grpcStream. + Http::RequestHeaderMapPtr grpc_initial_metadata_; + + // Access log state. + const StreamInfo::StreamInfo* access_log_stream_info_{}; + const Http::RequestHeaderMap* access_log_request_headers_{}; + const Http::ResponseHeaderMap* access_log_response_headers_{}; + const Http::ResponseTrailerMap* access_log_response_trailers_{}; + + // Temporary state. + ProtobufWkt::Struct temporary_metadata_; + bool end_of_stream_; + bool buffering_request_body_ = false; + bool buffering_response_body_ = false; + Buffer buffer_; + + // MB: must be a node-type map as we take persistent references to the entries. + std::map http_request_; + std::map grpc_call_request_; + std::map grpc_stream_; + + // Opaque state. + absl::flat_hash_map> data_storage_; + + // TCP State. + bool upstream_closed_ = false; + bool downstream_closed_ = false; + bool tcp_connection_closed_ = false; + + // Filter state prototype declaration. + absl::flat_hash_map> state_prototypes_; +}; +using ContextSharedPtr = std::shared_ptr; + +WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result); + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/ext/BUILD b/source/extensions/common/wasm/ext/BUILD new file mode 100644 index 000000000000..286c0774edfe --- /dev/null +++ b/source/extensions/common/wasm/ext/BUILD @@ -0,0 +1,95 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "envoy_null_vm_wasm_api", + hdrs = [ + "envoy_null_vm_wasm_api.h", + "envoy_proxy_wasm_api.h", + ], + visibility = ["//visibility:public"], + deps = [ + "@proxy_wasm_cpp_sdk//:api_lib", + "@proxy_wasm_cpp_sdk//:common_lib", + ], +) + +envoy_cc_library( + name = "envoy_null_plugin", + hdrs = [ + "envoy_null_plugin.h", + "envoy_proxy_wasm_api.h", + ], + visibility = ["//visibility:public"], + deps = [ + ":declare_property_cc_proto", + "//source/common/grpc:async_client_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +# NB: this target is compiled to Wasm. Hence the generic rule. +cc_library( + name = "envoy_proxy_wasm_api_lib", + srcs = ["envoy_proxy_wasm_api.cc"], + hdrs = ["envoy_proxy_wasm_api.h"], + tags = ["manual"], + visibility = ["//visibility:public"], + deps = [ + ":declare_property_cc_proto", + ":node_subset_cc_proto", + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], + alwayslink = 1, +) + +# NB: this target is compiled both to native code and to Wasm. Hence the generic rule. 
+proto_library( + name = "declare_property_proto", + srcs = ["declare_property.proto"], + visibility = ["//visibility:public"], +) + +# NB: this target is compiled both to native code and to Wasm. Hence the generic rule. +cc_proto_library( + name = "declare_property_cc_proto", + visibility = ["//visibility:public"], + deps = [":declare_property_proto"], +) + +# NB: this target is compiled both to native code and to Wasm. Hence the generic rule. +proto_library( + name = "node_subset_proto", + srcs = ["node_subset.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_google_protobuf//:struct_proto", + ], +) + +# NB: this target is compiled both to native code and to Wasm. Hence the generic rule. +cc_proto_library( + name = "node_subset_cc_proto", + visibility = ["//visibility:public"], + deps = [ + ":node_subset_proto", + # "//external:protobuf_clib", + ], +) + +filegroup( + name = "jslib", + srcs = [ + "envoy_wasm_intrinsics.js", + ], + visibility = ["//visibility:public"], +) diff --git a/source/extensions/common/wasm/ext/README.md b/source/extensions/common/wasm/ext/README.md new file mode 100644 index 000000000000..b9e1e44d4dbc --- /dev/null +++ b/source/extensions/common/wasm/ext/README.md @@ -0,0 +1 @@ +# Envoy specific extensions to the proxy-wasm SDK diff --git a/source/extensions/common/wasm/ext/declare_property.proto b/source/extensions/common/wasm/ext/declare_property.proto new file mode 100644 index 000000000000..b08ce6375481 --- /dev/null +++ b/source/extensions/common/wasm/ext/declare_property.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.source.extensions.common.wasm; + +enum WasmType { + Bytes = 0; + String = 1; + FlatBuffers = 2; + Protobuf = 3; +}; + +enum LifeSpan { + FilterChain = 0; + DownstreamRequest = 1; + DownstreamConnection = 2; +}; + +message DeclarePropertyArguments { + string name = 1; + bool readonly = 2; + WasmType type = 3; + bytes schema = 4; + LifeSpan span = 5; +}; diff --git a/source/extensions/common/wasm/ext/envoy_null_plugin.h b/source/extensions/common/wasm/ext/envoy_null_plugin.h new file mode 100644 index 000000000000..1463e00e1ef5 --- /dev/null +++ b/source/extensions/common/wasm/ext/envoy_null_plugin.h @@ -0,0 +1,48 @@ +// NOLINT(namespace-envoy) +#pragma once + +#define PROXY_WASM_PROTOBUF 1 +#define PROXY_WASM_PROTOBUF_FULL 1 + +#include "envoy/config/core/v3/grpc_service.pb.h" + +#include "source/extensions/common/wasm/ext/declare_property.pb.h" + +#include "include/proxy-wasm/null_plugin.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +proxy_wasm::Word resolve_dns(void* raw_context, proxy_wasm::Word dns_address, + proxy_wasm::Word dns_address_size, proxy_wasm::Word token_ptr); + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy + +namespace proxy_wasm { +namespace null_plugin { + +#include "extensions/common/wasm/ext/envoy_proxy_wasm_api.h" +using GrpcService = envoy::config::core::v3::GrpcService; +using namespace proxy_wasm::null_plugin; + +#define WS(_x) Word(static_cast(_x)) +#define WR(_x) Word(reinterpret_cast(_x)) + +inline WasmResult envoy_resolve_dns(const char* dns_address, size_t dns_address_size, + uint32_t* token) { + return static_cast( + ::Envoy::Extensions::Common::Wasm::resolve_dns(proxy_wasm::current_context_, WR(dns_address), + WS(dns_address_size), WR(token)) + .u64_); +} + +#undef WS +#undef WR + +} // namespace null_plugin +} // namespace proxy_wasm diff --git 
a/source/extensions/common/wasm/ext/envoy_null_vm_wasm_api.h b/source/extensions/common/wasm/ext/envoy_null_vm_wasm_api.h new file mode 100644 index 000000000000..f6415b3fd2fa --- /dev/null +++ b/source/extensions/common/wasm/ext/envoy_null_vm_wasm_api.h @@ -0,0 +1,24 @@ +// NOLINT(namespace-envoy) +#pragma once + +namespace proxy_wasm { +namespace null_plugin { + +#include "proxy_wasm_common.h" +#include "proxy_wasm_enums.h" +#include "proxy_wasm_externs.h" + +/* + * The following headers are used in two different environments, in the Null VM and in Wasm code + * which require different headers to precede these such that they can not include the above + * headers directly. These macros prevent header reordering + */ +#define _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ 1 +#include "proxy_wasm_api.h" +#undef _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ +#define _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ 1 +#include "extensions/common/wasm/ext/envoy_proxy_wasm_api.h" +#undef _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.cc b/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.cc new file mode 100644 index 000000000000..cb0cb3429144 --- /dev/null +++ b/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.cc @@ -0,0 +1,40 @@ +// NOLINT(namespace-envoy) + +#include "proxy_wasm_intrinsics.h" + +/* + * These headers span repositories and therefor the following header can not include the above + * header to enforce the required order. This macros prevent header reordering. + */ +#define _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ 1 +#include "source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h" +#undef _THE_FOLLOWING_INCLUDE_MUST_COME_AFTER_THOSE_ABOVE_ + +EnvoyContextBase* getEnvoyContextBase(uint32_t context_id) { + auto context_base = getContextBase(context_id); + if (auto root = context_base->asRoot()) { + return static_cast(static_cast(root)); + } else { + return static_cast(static_cast(context_base->asContext())); + } +} + +EnvoyContext* getEnvoyContext(uint32_t context_id) { + auto context_base = getContextBase(context_id); + return static_cast(context_base->asContext()); +} + +EnvoyRootContext* getEnvoyRootContext(uint32_t context_id) { + auto context_base = getContextBase(context_id); + return static_cast(context_base->asRoot()); +} + +extern "C" PROXY_WASM_KEEPALIVE void envoy_on_resolve_dns(uint32_t context_id, uint32_t token, + uint32_t data_size) { + getEnvoyRootContext(context_id)->onResolveDns(token, data_size); +} + +extern "C" PROXY_WASM_KEEPALIVE void envoy_on_stats_update(uint32_t context_id, + uint32_t data_size) { + getEnvoyRootContext(context_id)->onStatsUpdate(data_size); +} diff --git a/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h b/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h new file mode 100644 index 000000000000..601713012c5a --- /dev/null +++ b/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h @@ -0,0 +1,131 @@ +// NOLINT(namespace-envoy) +#pragma once + +// Note that this file is included in emscripten and NullVM environments and thus depends on +// the context in which it is included, hence we need to disable clang-tidy warnings. 
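The header that begins here declares the Envoy-specific DNS ABI: envoy_resolve_dns starts a resolution and yields a token, the envoy_on_resolve_dns export above forwards the result size to EnvoyRootContext::onResolveDns, and parseDnsResults further down decodes the delivered buffer. As a reading aid, here is a minimal serializer for the layout that parseDnsResults expects (a leading uint32 entry count, then one uint32 TTL per entry, then the addresses as consecutive NUL-terminated strings). It is inferred from the parser alone; the host-side serialization in this PR is not shown in this hunk, and the names below are illustrative.

#include <cstdint>
#include <string>
#include <vector>

struct DnsEntry {
  uint32_t ttl_seconds;
  std::string address;
};

// Layout inferred from parseDnsResults(): [uint32 n][uint32 ttl x n][addr "\0" x n].
// Integers are written in host byte order, since the parser reads them via reinterpret_cast.
std::string serializeDnsResults(const std::vector<DnsEntry>& entries) {
  const uint32_t n = static_cast<uint32_t>(entries.size());
  std::string out;
  out.append(reinterpret_cast<const char*>(&n), sizeof(n));
  for (const auto& e : entries) {
    out.append(reinterpret_cast<const char*>(&e.ttl_seconds), sizeof(e.ttl_seconds));
  }
  for (const auto& e : entries) {
    out.append(e.address.data(), e.address.size());
    out.push_back('\0'); // parseDnsResults walks the addresses with strlen()
  }
  return out;
}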
+ +extern "C" WasmResult envoy_resolve_dns(const char* dns_address, size_t dns_address_size, + uint32_t* token); + +class EnvoyContextBase { +public: + virtual ~EnvoyContextBase() = default; +}; + +class EnvoyRootContext : public RootContext, public EnvoyContextBase { +public: + EnvoyRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {} + ~EnvoyRootContext() override = default; + + virtual void onResolveDns(uint32_t /* token */, uint32_t /* result_size */) {} + virtual void onStatsUpdate(uint32_t /* result_size */) {} +}; + +class EnvoyContext : public Context, public EnvoyContextBase { +public: + EnvoyContext(uint32_t id, RootContext* root) : Context(id, root) {} + ~EnvoyContext() override = default; +}; + +struct DnsResult { + uint32_t ttl_seconds; + std::string address; +}; + +struct CounterResult { + uint64_t delta; + std::string_view name; + uint64_t value; +}; + +struct GaugeResult { + uint64_t value; + std::string_view name; +}; + +struct StatResult { + std::vector counters; + std::vector gauges; +}; + +enum class StatType : uint32_t { + Counter = 1, + Gauge = 2, +}; + +inline std::vector parseDnsResults(std::string_view data) { + if (data.size() < 4) { + return {}; + } + const uint32_t* pn = reinterpret_cast(data.data()); + uint32_t n = *pn++; + std::vector results; + results.resize(n); + const char* pa = data.data() + (1 + n) * sizeof(uint32_t); // skip n + n TTLs + for (uint32_t i = 0; i < n; i++) { + auto& e = results[i]; + e.ttl_seconds = *pn++; + auto alen = strlen(pa); + e.address.assign(pa, alen); + pa += alen + 1; + } + return results; +} + +template inline uint32_t align(uint32_t i) { + return (i + sizeof(I) - 1) & ~(sizeof(I) - 1); +} + +inline StatResult parseStatResults(std::string_view data) { + StatResult results; + uint32_t data_len = 0; + while (data_len < data.length()) { + const uint32_t* n = reinterpret_cast(data.data() + data_len); + uint32_t block_size = *n++; + uint32_t block_type = *n++; + uint32_t num_stats = *n++; + if (static_cast(block_type) == StatType::Counter) { // counter + std::vector counters(num_stats); + uint32_t stat_index = data_len + 3 * sizeof(uint32_t); + for (uint32_t i = 0; i < num_stats; i++) { + const uint32_t* stat_name = reinterpret_cast(data.data() + stat_index); + uint32_t name_len = *stat_name; + stat_index += sizeof(uint32_t); + + auto& e = counters[i]; + e.name = {data.data() + stat_index, name_len}; + stat_index = align(stat_index + name_len); + + const uint64_t* stat_vals = reinterpret_cast(data.data() + stat_index); + e.value = *stat_vals++; + e.delta = *stat_vals++; + + stat_index += 2 * sizeof(uint64_t); + } + results.counters = counters; + } else if (static_cast(block_type) == StatType::Gauge) { // gauge + std::vector gauges(num_stats); + uint32_t stat_index = data_len + 3 * sizeof(uint32_t); + for (uint32_t i = 0; i < num_stats; i++) { + const uint32_t* stat_name = reinterpret_cast(data.data() + stat_index); + uint32_t name_len = *stat_name; + stat_index += sizeof(uint32_t); + + auto& e = gauges[i]; + e.name = {data.data() + stat_index, name_len}; + stat_index = align(stat_index + name_len); + + const uint64_t* stat_vals = reinterpret_cast(data.data() + stat_index); + e.value = *stat_vals++; + + stat_index += sizeof(uint64_t); + } + results.gauges = gauges; + } + data_len += block_size; + } + + return results; +} + +extern "C" WasmResult envoy_resolve_dns(const char* address, size_t address_size, uint32_t* token); diff --git a/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js 
b/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js new file mode 100644 index 000000000000..116a5b8a3867 --- /dev/null +++ b/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js @@ -0,0 +1,3 @@ +mergeInto(LibraryManager.library, { + envoy_resolve_dns: function() {}, +}); diff --git a/source/extensions/common/wasm/ext/node_subset.proto b/source/extensions/common/wasm/ext/node_subset.proto new file mode 100644 index 000000000000..9e766c4b12b0 --- /dev/null +++ b/source/extensions/common/wasm/ext/node_subset.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +import "google/protobuf/struct.proto"; + +package envoy.source.extensions.common.wasm; + +// A subset of message Node from api/envoy/config/core/v?/base.proto. +message NodeSubset { + string id = 1; + google.protobuf.Struct metadata = 3; +}; diff --git a/source/extensions/common/wasm/foreign.cc b/source/extensions/common/wasm/foreign.cc new file mode 100644 index 000000000000..565ca22adb9b --- /dev/null +++ b/source/extensions/common/wasm/foreign.cc @@ -0,0 +1,277 @@ +#include "common/common/logger.h" + +#include "source/extensions/common/wasm/ext/declare_property.pb.h" + +#include "extensions/common/wasm/wasm.h" + +#if defined(WASM_USE_CEL_PARSER) +#include "eval/public/builtin_func_registrar.h" +#include "eval/public/cel_expr_builder_factory.h" +#include "parser/parser.h" +#endif +#include "zlib.h" + +using proxy_wasm::RegisterForeignFunction; +using proxy_wasm::WasmForeignFunction; + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +template WasmForeignFunction createFromClass() { + auto c = std::make_shared(); + return c->create(c); +} + +RegisterForeignFunction registerCompressForeignFunction( + "compress", + [](WasmBase&, absl::string_view arguments, + const std::function& alloc_result) -> WasmResult { + unsigned long dest_len = compressBound(arguments.size()); + std::unique_ptr b(new unsigned char[dest_len]); + if (compress(b.get(), &dest_len, reinterpret_cast(arguments.data()), + arguments.size()) != Z_OK) { + return WasmResult::SerializationFailure; + } + auto result = alloc_result(dest_len); + memcpy(result, b.get(), dest_len); + return WasmResult::Ok; + }); + +RegisterForeignFunction registerUncompressForeignFunction( + "uncompress", + [](WasmBase&, absl::string_view arguments, + const std::function& alloc_result) -> WasmResult { + unsigned long dest_len = arguments.size() * 2 + 2; // output estimate. 
+ while (true) { + std::unique_ptr b(new unsigned char[dest_len]); + auto r = + uncompress(b.get(), &dest_len, reinterpret_cast(arguments.data()), + arguments.size()); + if (r == Z_OK) { + auto result = alloc_result(dest_len); + memcpy(result, b.get(), dest_len); + return WasmResult::Ok; + } + if (r != Z_BUF_ERROR) { + return WasmResult::SerializationFailure; + } + dest_len = dest_len * 2; + } + }); + +#if defined(WASM_USE_CEL_PARSER) +class ExpressionFactory : public Logger::Loggable { +protected: + struct ExpressionData { + google::api::expr::v1alpha1::ParsedExpr parsed_expr_; + Filters::Common::Expr::ExpressionPtr compiled_expr_; + }; + + class ExpressionContext : public StorageObject { + public: + friend class ExpressionFactory; + ExpressionContext(Filters::Common::Expr::BuilderPtr builder) : builder_(std::move(builder)) {} + uint32_t createToken() { + uint32_t token = next_expr_token_++; + for (;;) { + if (!expr_.count(token)) { + break; + } + token = next_expr_token_++; + } + return token; + } + bool hasExpression(uint32_t token) { return expr_.contains(token); } + ExpressionData& getExpression(uint32_t token) { return expr_[token]; } + void deleteExpression(uint32_t token) { expr_.erase(token); } + Filters::Common::Expr::Builder* builder() { return builder_.get(); } + + private: + Filters::Common::Expr::BuilderPtr builder_{}; + uint32_t next_expr_token_ = 0; + absl::flat_hash_map expr_; + }; + + static ExpressionContext& getOrCreateContext(ContextBase* context_base) { + auto context = static_cast(context_base); + std::string data_name = "cel"; + auto expr_context = context->getForeignData(data_name); + if (!expr_context) { + google::api::expr::runtime::InterpreterOptions options; + auto builder = google::api::expr::runtime::CreateCelExpressionBuilder(options); + auto status = + google::api::expr::runtime::RegisterBuiltinFunctions(builder->GetRegistry(), options); + if (!status.ok()) { + ENVOY_LOG(warn, "failed to register built-in functions: {}", status.message()); + } + auto new_context = std::make_unique(std::move(builder)); + expr_context = new_context.get(); + context->setForeignData(data_name, std::move(new_context)); + } + return *expr_context; + } +}; + +class CreateExpressionFactory : public ExpressionFactory { +public: + WasmForeignFunction create(std::shared_ptr self) const { + WasmForeignFunction f = + [self](WasmBase&, absl::string_view expr, + const std::function& alloc_result) -> WasmResult { + auto parse_status = google::api::expr::parser::Parse(std::string(expr)); + if (!parse_status.ok()) { + ENVOY_LOG(info, "expr_create parse error: {}", parse_status.status().message()); + return WasmResult::BadArgument; + } + + auto& expr_context = getOrCreateContext(proxy_wasm::current_context_->root_context()); + auto token = expr_context.createToken(); + auto& handler = expr_context.getExpression(token); + + handler.parsed_expr_ = parse_status.value(); + auto cel_expression_status = expr_context.builder()->CreateExpression( + &handler.parsed_expr_.expr(), &handler.parsed_expr_.source_info()); + if (!cel_expression_status.ok()) { + ENVOY_LOG(info, "expr_create compile error: {}", cel_expression_status.status().message()); + expr_context.deleteExpression(token); + return WasmResult::BadArgument; + } + + handler.compiled_expr_ = std::move(cel_expression_status.value()); + auto result = reinterpret_cast(alloc_result(sizeof(uint32_t))); + *result = token; + return WasmResult::Ok; + }; + return f; + } +}; +RegisterForeignFunction + 
registerCreateExpressionForeignFunction("expr_create", + createFromClass()); + +class EvaluateExpressionFactory : public ExpressionFactory { +public: + WasmForeignFunction create(std::shared_ptr self) const { + WasmForeignFunction f = + [self](WasmBase&, absl::string_view argument, + const std::function& alloc_result) -> WasmResult { + auto& expr_context = getOrCreateContext(proxy_wasm::current_context_->root_context()); + if (argument.size() != sizeof(uint32_t)) { + return WasmResult::BadArgument; + } + uint32_t token = *reinterpret_cast(argument.data()); + if (!expr_context.hasExpression(token)) { + return WasmResult::NotFound; + } + Protobuf::Arena arena; + auto& handler = expr_context.getExpression(token); + auto context = static_cast(proxy_wasm::current_context_); + auto eval_status = handler.compiled_expr_->Evaluate(*context, &arena); + if (!eval_status.ok()) { + ENVOY_LOG(debug, "expr_evaluate error: {}", eval_status.status().message()); + return WasmResult::InternalFailure; + } + auto value = eval_status.value(); + if (value.IsError()) { + ENVOY_LOG(debug, "expr_evaluate value error: {}", value.ErrorOrDie()->message()); + return WasmResult::InternalFailure; + } + std::string result; + auto serialize_status = serializeValue(value, &result); + if (serialize_status != WasmResult::Ok) { + return serialize_status; + } + auto output = alloc_result(result.size()); + memcpy(output, result.data(), result.size()); + return WasmResult::Ok; + }; + return f; + } +}; +RegisterForeignFunction + registerEvaluateExpressionForeignFunction("expr_evaluate", + createFromClass()); + +class DeleteExpressionFactory : public ExpressionFactory { +public: + WasmForeignFunction create(std::shared_ptr self) const { + WasmForeignFunction f = [self](WasmBase&, absl::string_view argument, + const std::function&) -> WasmResult { + auto& expr_context = getOrCreateContext(proxy_wasm::current_context_->root_context()); + if (argument.size() != sizeof(uint32_t)) { + return WasmResult::BadArgument; + } + uint32_t token = *reinterpret_cast(argument.data()); + expr_context.deleteExpression(token); + return WasmResult::Ok; + }; + return f; + } +}; +RegisterForeignFunction + registerDeleteExpressionForeignFunction("expr_delete", + createFromClass()); +#endif + +// TODO(kyessenov) The factories should be separated into individual compilation units. +// TODO(kyessenov) Leverage the host argument marshaller instead of the protobuf argument list. 
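The three factories above expose CEL evaluation to a Wasm module as foreign functions: expr_create takes the CEL source text and hands back a 4-byte token, expr_evaluate takes that 4-byte token and returns the serialized result value, and expr_delete takes the token to free the compiled expression. A small plugin-side sketch of that calling convention follows; the actual transport from Wasm to these functions goes through the proxy-wasm foreign-function ABI, which is abstracted here as a std::function so the example stays self-contained, and every name below is illustrative rather than part of this PR.

#include <cstdint>
#include <functional>
#include <optional>
#include <string>

// call(name, args, out) invokes a host foreign function and returns true on success.
using ForeignCall =
    std::function<bool(const std::string& name, const std::string& args, std::string* out)>;

// Compile a CEL expression, evaluate it once, and release the compiled expression.
std::optional<std::string> evaluateCelOnce(const ForeignCall& call, const std::string& expr) {
  std::string token; // expr_create returns the token as 4 raw bytes
  if (!call("expr_create", expr, &token) || token.size() != sizeof(uint32_t)) {
    return std::nullopt; // parse or compile failure reported by the host
  }
  std::string value;
  const bool ok = call("expr_evaluate", token, &value);
  std::string ignored;
  call("expr_delete", token, &ignored); // always free the host-side expression slot
  if (!ok) {
    return std::nullopt;
  }
  return value;
}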
+class DeclarePropertyFactory { +public: + WasmForeignFunction create(std::shared_ptr self) const { + WasmForeignFunction f = [self](WasmBase&, absl::string_view arguments, + const std::function&) -> WasmResult { + envoy::source::extensions::common::wasm::DeclarePropertyArguments args; + if (args.ParseFromArray(arguments.data(), arguments.size())) { + WasmType type = WasmType::Bytes; + switch (args.type()) { + case envoy::source::extensions::common::wasm::WasmType::Bytes: + type = WasmType::Bytes; + break; + case envoy::source::extensions::common::wasm::WasmType::Protobuf: + type = WasmType::Protobuf; + break; + case envoy::source::extensions::common::wasm::WasmType::String: + type = WasmType::String; + break; + case envoy::source::extensions::common::wasm::WasmType::FlatBuffers: + type = WasmType::FlatBuffers; + break; + default: + // do nothing + break; + } + StreamInfo::FilterState::LifeSpan span = StreamInfo::FilterState::LifeSpan::FilterChain; + switch (args.span()) { + case envoy::source::extensions::common::wasm::LifeSpan::FilterChain: + span = StreamInfo::FilterState::LifeSpan::FilterChain; + break; + case envoy::source::extensions::common::wasm::LifeSpan::DownstreamRequest: + span = StreamInfo::FilterState::LifeSpan::Request; + break; + case envoy::source::extensions::common::wasm::LifeSpan::DownstreamConnection: + span = StreamInfo::FilterState::LifeSpan::Connection; + break; + default: + // do nothing + break; + } + auto context = static_cast(proxy_wasm::current_context_); + return context->declareProperty( + args.name(), + std::make_unique(args.readonly(), type, args.schema(), span)); + } + return WasmResult::BadArgument; + }; + return f; + } +}; +RegisterForeignFunction + registerDeclarePropertyForeignFunction("declare_property", + createFromClass()); + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/null/BUILD b/source/extensions/common/wasm/null/BUILD deleted file mode 100644 index 31a33d8f4d49..000000000000 --- a/source/extensions/common/wasm/null/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_extension_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_extension_package() - -envoy_cc_library( - name = "null_vm_plugin_interface", - hdrs = ["null_vm_plugin.h"], - deps = [ - "//include/envoy/config:typed_config_interface", - "//source/extensions/common/wasm:wasm_vm_interface", - "//source/extensions/common/wasm:well_known_names", - ], -) - -envoy_cc_library( - name = "null_vm_lib", - srcs = ["null_vm.cc"], - hdrs = ["null_vm.h"], - deps = [ - ":null_vm_plugin_interface", - "//external:abseil_node_hash_map", - "//include/envoy/registry", - "//source/common/common:assert_lib", - "//source/extensions/common/wasm:wasm_vm_base", - "//source/extensions/common/wasm:wasm_vm_interface", - "//source/extensions/common/wasm:well_known_names", - ], -) - -envoy_cc_library( - name = "null_lib", - srcs = ["null.cc"], - hdrs = ["null.h"], - deps = [ - ":null_vm_lib", - ":null_vm_plugin_interface", - "//external:abseil_node_hash_map", - "//include/envoy/registry", - "//source/common/common:assert_lib", - "//source/extensions/common/wasm:wasm_vm_interface", - "//source/extensions/common/wasm:well_known_names", - ], -) diff --git a/source/extensions/common/wasm/null/null.cc b/source/extensions/common/wasm/null/null.cc deleted file mode 100644 index af2ba77d1dc5..000000000000 --- a/source/extensions/common/wasm/null/null.cc +++ /dev/null @@ 
-1,27 +0,0 @@ -#include "extensions/common/wasm/null/null.h" - -#include -#include -#include - -#include "envoy/registry/registry.h" - -#include "common/common/assert.h" - -#include "extensions/common/wasm/null/null_vm.h" -#include "extensions/common/wasm/null/null_vm_plugin.h" -#include "extensions/common/wasm/well_known_names.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Wasm { -namespace Null { - -WasmVmPtr createVm(const Stats::ScopeSharedPtr& scope) { return std::make_unique(scope); } - -} // namespace Null -} // namespace Wasm -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/wasm/null/null.h b/source/extensions/common/wasm/null/null.h deleted file mode 100644 index 285b13373fbc..000000000000 --- a/source/extensions/common/wasm/null/null.h +++ /dev/null @@ -1,20 +0,0 @@ -#pragma once - -#include - -#include "extensions/common/wasm/null/null_vm_plugin.h" -#include "extensions/common/wasm/wasm_vm.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Wasm { -namespace Null { - -WasmVmPtr createVm(const Stats::ScopeSharedPtr& scope); - -} // namespace Null -} // namespace Wasm -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/wasm/null/null_vm.cc b/source/extensions/common/wasm/null/null_vm.cc deleted file mode 100644 index abd4418fc76b..000000000000 --- a/source/extensions/common/wasm/null/null_vm.cc +++ /dev/null @@ -1,94 +0,0 @@ -#include "extensions/common/wasm/null/null_vm.h" - -#include -#include -#include - -#include "envoy/registry/registry.h" - -#include "common/common/assert.h" - -#include "extensions/common/wasm/null/null_vm_plugin.h" -#include "extensions/common/wasm/well_known_names.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Wasm { -namespace Null { - -WasmVmPtr NullVm::clone() { - auto cloned_null_vm = std::make_unique(*this); - cloned_null_vm->load(plugin_name_, false /* unused */); - return cloned_null_vm; -} - -// "Load" the plugin by obtaining a pointer to it from the factory. -bool NullVm::load(const std::string& name, bool /* allow_precompiled */) { - auto factory = Registry::FactoryRegistry::getFactory(name); - if (!factory) { - return false; - } - plugin_name_ = name; - plugin_ = factory->create(); - return true; -} - -void NullVm::link(absl::string_view /* name */) {} - -uint64_t NullVm::getMemorySize() { return std::numeric_limits::max(); } - -// NulVm pointers are just native pointers. 
-absl::optional NullVm::getMemory(uint64_t pointer, uint64_t size) { - if (pointer == 0 && size != 0) { - return absl::nullopt; - } - return absl::string_view(reinterpret_cast(pointer), static_cast(size)); -} - -bool NullVm::setMemory(uint64_t pointer, uint64_t size, const void* data) { - if ((pointer == 0 || data == nullptr)) { - if (size != 0) { - return false; - } else { - return true; - } - } - auto p = reinterpret_cast(pointer); - memcpy(p, data, size); - return true; -} - -bool NullVm::setWord(uint64_t pointer, Word data) { - if (pointer == 0) { - return false; - } - auto p = reinterpret_cast(pointer); - memcpy(p, &data.u64_, sizeof(data.u64_)); - return true; -} - -bool NullVm::getWord(uint64_t pointer, Word* data) { - if (pointer == 0) { - return false; - } - auto p = reinterpret_cast(pointer); - memcpy(&data->u64_, p, sizeof(data->u64_)); - return true; -} - -absl::string_view NullVm::getCustomSection(absl::string_view /* name */) { - // Return nothing: there is no WASM file. - return {}; -} - -absl::string_view NullVm::getPrecompiledSectionName() { - // Return nothing: there is no WASM file. - return {}; -} - -} // namespace Null -} // namespace Wasm -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/wasm/null/null_vm.h b/source/extensions/common/wasm/null/null_vm.h deleted file mode 100644 index 9bdaad668f8b..000000000000 --- a/source/extensions/common/wasm/null/null_vm.h +++ /dev/null @@ -1,65 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "envoy/registry/registry.h" - -#include "common/common/assert.h" - -#include "extensions/common/wasm/null/null_vm_plugin.h" -#include "extensions/common/wasm/wasm_vm_base.h" -#include "extensions/common/wasm/well_known_names.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Wasm { -namespace Null { - -// The NullVm wraps a C++ WASM plugin which has been compiled with the WASM API -// and linked directly into the Envoy process. This is useful for development -// in that it permits the debugger to set breakpoints in both Envoy and the plugin. -struct NullVm : public WasmVmBase { - NullVm(const Stats::ScopeSharedPtr& scope) : WasmVmBase(scope, WasmRuntimeNames::get().Null) {} - NullVm(const NullVm& other) - : WasmVmBase(other.scope_, WasmRuntimeNames::get().Null), plugin_name_(other.plugin_name_) {} - - // WasmVm - absl::string_view runtime() override { return WasmRuntimeNames::get().Null; } - Cloneable cloneable() override { return Cloneable::InstantiatedModule; }; - WasmVmPtr clone() override; - bool load(const std::string& code, bool allow_precompiled) override; - void link(absl::string_view debug_name) override; - uint64_t getMemorySize() override; - absl::optional getMemory(uint64_t pointer, uint64_t size) override; - bool setMemory(uint64_t pointer, uint64_t size, const void* data) override; - bool setWord(uint64_t pointer, Word data) override; - bool getWord(uint64_t pointer, Word* data) override; - absl::string_view getCustomSection(absl::string_view name) override; - absl::string_view getPrecompiledSectionName() override; - -#define _FORWARD_GET_FUNCTION(_T) \ - void getFunction(absl::string_view function_name, _T* f) override { \ - plugin_->getFunction(function_name, f); \ - } - FOR_ALL_WASM_VM_EXPORTS(_FORWARD_GET_FUNCTION) -#undef _FORWARD_GET_FUNCTION - - // These are not needed for NullVm which invokes the handlers directly. 
-#define _REGISTER_CALLBACK(_T) \ - void registerCallback(absl::string_view, absl::string_view, _T, \ - typename ConvertFunctionTypeWordToUint32<_T>::type) override{}; - FOR_ALL_WASM_VM_IMPORTS(_REGISTER_CALLBACK) -#undef _REGISTER_CALLBACK - - std::string plugin_name_; - NullVmPluginPtr plugin_; -}; - -} // namespace Null -} // namespace Wasm -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/wasm/null/null_vm_plugin.h b/source/extensions/common/wasm/null/null_vm_plugin.h deleted file mode 100644 index 1176c98c07c9..000000000000 --- a/source/extensions/common/wasm/null/null_vm_plugin.h +++ /dev/null @@ -1,51 +0,0 @@ -#pragma once - -#include - -#include "envoy/config/typed_config.h" - -#include "extensions/common/wasm/wasm_vm.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Wasm { -namespace Null { - -// A wrapper for the natively compiled NullVm plugin which implements the WASM ABI. -class NullVmPlugin { -public: - NullVmPlugin() = default; - virtual ~NullVmPlugin() = default; - - // NB: These are defined rather than declared PURE because gmock uses __LINE__ internally for - // uniqueness, making it impossible to use FOR_ALL_WASM_VM_EXPORTS with MOCK_METHOD. -#define _DEFINE_GET_FUNCTION(_T) \ - virtual void getFunction(absl::string_view, _T* f) { *f = nullptr; } - FOR_ALL_WASM_VM_EXPORTS(_DEFINE_GET_FUNCTION) -#undef _DEFIN_GET_FUNCTIONE -}; - -using NullVmPluginPtr = std::unique_ptr; - -/** - * Pseudo-WASM plugins using the NullVM should implement this factory and register via - * Registry::registerFactory or the convenience class RegisterFactory. - */ -class NullVmPluginFactory : public Config::UntypedFactory { -public: - ~NullVmPluginFactory() override = default; - - std::string category() const override { return "envoy.wasm.null_vms"; } - - /** - * Create an instance of the plugin. 
- */ - virtual NullVmPluginPtr create() const PURE; -}; - -} // namespace Null -} // namespace Wasm -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/wasm/v8/BUILD b/source/extensions/common/wasm/v8/BUILD deleted file mode 100644 index 4ff62d112f2f..000000000000 --- a/source/extensions/common/wasm/v8/BUILD +++ /dev/null @@ -1,24 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_extension_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_extension_package() - -envoy_cc_library( - name = "v8_lib", - srcs = ["v8.cc"], - hdrs = ["v8.h"], - external_deps = [ - "wee8", - ], - deps = [ - "//source/common/common:assert_lib", - "//source/extensions/common/wasm:wasm_vm_base", - "//source/extensions/common/wasm:wasm_vm_interface", - "//source/extensions/common/wasm:well_known_names", - ], -) diff --git a/source/extensions/common/wasm/v8/v8.cc b/source/extensions/common/wasm/v8/v8.cc deleted file mode 100644 index b9c3673315d2..000000000000 --- a/source/extensions/common/wasm/v8/v8.cc +++ /dev/null @@ -1,678 +0,0 @@ -#include "extensions/common/wasm/v8/v8.h" - -#include -#include -#include - -#include "common/common/assert.h" - -#include "extensions/common/wasm/wasm_vm_base.h" -#include "extensions/common/wasm/well_known_names.h" - -#include "absl/container/flat_hash_map.h" -#include "absl/strings/match.h" -#include "v8-version.h" -#include "wasm-api/wasm.hh" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Wasm { -namespace V8 { - -wasm::Engine* engine() { - static const auto engine = wasm::Engine::make(); - return engine.get(); -} - -struct FuncData { - FuncData(std::string name) : name_(std::move(name)) {} - - std::string name_; - wasm::own callback_; - void* raw_func_; -}; - -using FuncDataPtr = std::unique_ptr; - -class V8 : public WasmVmBase { -public: - V8(const Stats::ScopeSharedPtr& scope) : WasmVmBase(scope, WasmRuntimeNames::get().V8) {} - - // Extensions::Common::Wasm::WasmVm - absl::string_view runtime() override { return WasmRuntimeNames::get().V8; } - - bool load(const std::string& code, bool allow_precompiled) override; - absl::string_view getCustomSection(absl::string_view name) override; - absl::string_view getPrecompiledSectionName() override; - void link(absl::string_view debug_name) override; - - Cloneable cloneable() override { return Cloneable::CompiledBytecode; } - WasmVmPtr clone() override; - - uint64_t getMemorySize() override; - absl::optional getMemory(uint64_t pointer, uint64_t size) override; - bool setMemory(uint64_t pointer, uint64_t size, const void* data) override; - bool getWord(uint64_t pointer, Word* word) override; - bool setWord(uint64_t pointer, Word word) override; - -#define _REGISTER_HOST_FUNCTION(T) \ - void registerCallback(absl::string_view module_name, absl::string_view function_name, T, \ - typename ConvertFunctionTypeWordToUint32::type f) override { \ - registerHostFunctionImpl(module_name, function_name, f); \ - }; - FOR_ALL_WASM_VM_IMPORTS(_REGISTER_HOST_FUNCTION) -#undef _REGISTER_HOST_FUNCTION - -#define _GET_MODULE_FUNCTION(T) \ - void getFunction(absl::string_view function_name, T* f) override { \ - getModuleFunctionImpl(function_name, f); \ - }; - FOR_ALL_WASM_VM_EXPORTS(_GET_MODULE_FUNCTION) -#undef _GET_MODULE_FUNCTION - -private: - wasm::vec getStrippedSource(); - - template - void registerHostFunctionImpl(absl::string_view module_name, absl::string_view function_name, - void (*function)(void*, Args...)); - - template 
- void registerHostFunctionImpl(absl::string_view module_name, absl::string_view function_name, - R (*function)(void*, Args...)); - - template - void getModuleFunctionImpl(absl::string_view function_name, - std::function* function); - - template - void getModuleFunctionImpl(absl::string_view function_name, - std::function* function); - - wasm::vec source_ = wasm::vec::invalid(); - wasm::own store_; - wasm::own module_; - wasm::own> shared_module_; - wasm::own instance_; - wasm::own memory_; - wasm::own table_; - - absl::flat_hash_map host_functions_; - absl::flat_hash_map> module_functions_; -}; - -// Helper functions. - -static std::string printValue(const wasm::Val& value) { - switch (value.kind()) { - case wasm::I32: - return std::to_string(value.get()); - case wasm::I64: - return std::to_string(value.get()); - case wasm::F32: - return std::to_string(value.get()); - case wasm::F64: - return std::to_string(value.get()); - default: - return "unknown"; - } -} - -static std::string printValues(const wasm::Val values[], size_t size) { - if (size == 0) { - return ""; - } - - std::string s; - for (size_t i = 0; i < size; i++) { - if (i) { - s.append(", "); - } - s.append(printValue(values[i])); - } - return s; -} - -static const char* printValKind(wasm::ValKind kind) { - switch (kind) { - case wasm::I32: - return "i32"; - case wasm::I64: - return "i64"; - case wasm::F32: - return "f32"; - case wasm::F64: - return "f64"; - case wasm::ANYREF: - return "anyref"; - case wasm::FUNCREF: - return "funcref"; - default: - return "unknown"; - } -} - -static std::string printValTypes(const wasm::ownvec& types) { - if (types.size() == 0) { - return "void"; - } - - std::string s; - s.reserve(types.size() * 8 /* max size + " " */ - 1); - for (size_t i = 0; i < types.size(); i++) { - if (i) { - s.append(" "); - } - s.append(printValKind(types[i]->kind())); - } - return s; -} - -static bool equalValTypes(const wasm::ownvec& left, - const wasm::ownvec& right) { - if (left.size() != right.size()) { - return false; - } - for (size_t i = 0; i < left.size(); i++) { - if (left[i]->kind() != right[i]->kind()) { - return false; - } - } - return true; -} - -static uint32_t parseVarint(const byte_t*& pos, const byte_t* end) { - uint32_t n = 0; - uint32_t shift = 0; - byte_t b; - do { - if (pos + 1 > end) { - throw WasmVmException("Failed to parse corrupted WASM module"); - } - b = *pos++; - n += (b & 0x7f) << shift; - shift += 7; - } while ((b & 0x80) != 0); - return n; -} - -// Template magic. 
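// A brief gloss on the "template magic" below (added for readability; it summarizes the code that
// follows rather than adding behavior): convertArgsTupleToValTypes<std::tuple<Args...>>() builds
// the wasm::ValType list describing a host or module function signature,
// convertValTypesToArgsTuple<> rebuilds the C++ argument tuple from an incoming wasm::Val array,
// and Word arguments are carried as i32 / narrowed to uint32_t for the 32-bit Wasm target.
// For example, a signature taking (Word, int64_t) maps to the value types [i32, i64].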
- -template struct ConvertWordType { - using type = T; // NOLINT(readability-identifier-naming) -}; -template <> struct ConvertWordType { - using type = uint32_t; // NOLINT(readability-identifier-naming) -}; - -template wasm::Val makeVal(T t) { return wasm::Val::make(t); } -template <> wasm::Val makeVal(Word t) { return wasm::Val::make(static_cast(t.u64_)); } - -template constexpr auto convertArgToValKind(); -template <> constexpr auto convertArgToValKind() { return wasm::I32; }; -template <> constexpr auto convertArgToValKind() { return wasm::I32; }; -template <> constexpr auto convertArgToValKind() { return wasm::I32; }; -template <> constexpr auto convertArgToValKind() { return wasm::I64; }; -template <> constexpr auto convertArgToValKind() { return wasm::I64; }; -template <> constexpr auto convertArgToValKind() { return wasm::F32; }; -template <> constexpr auto convertArgToValKind() { return wasm::F64; }; - -template -constexpr auto convertArgsTupleToValTypesImpl(absl::index_sequence) { - return wasm::ownvec::make( - wasm::ValType::make(convertArgToValKind::type>())...); -} - -template constexpr auto convertArgsTupleToValTypes() { - return convertArgsTupleToValTypesImpl(absl::make_index_sequence::value>()); -} - -template -constexpr T convertValTypesToArgsTupleImpl(const U& arr, absl::index_sequence) { - return std::make_tuple( - (arr[I] - .template get< - typename ConvertWordType::type>::type>())...); -} - -template constexpr T convertValTypesToArgsTuple(const U& arr) { - return convertValTypesToArgsTupleImpl(arr, - absl::make_index_sequence::value>()); -} - -// V8 implementation. - -bool V8::load(const std::string& code, bool allow_precompiled) { - ENVOY_LOG(trace, "load()"); - store_ = wasm::Store::make(engine()); - - // Wasm file header is 8 bytes (magic number + version). - static const uint8_t magic_number[4] = {0x00, 0x61, 0x73, 0x6d}; - if (code.size() < 8 || ::memcmp(code.data(), magic_number, 4) != 0) { - return false; - } - - source_ = wasm::vec::make_uninitialized(code.size()); - ::memcpy(source_.get(), code.data(), code.size()); - - if (allow_precompiled) { - const auto section_name = getPrecompiledSectionName(); - if (!section_name.empty()) { - const auto precompiled = getCustomSection(section_name); - if (!precompiled.empty()) { - auto vec = wasm::vec::make_uninitialized(precompiled.size()); - ::memcpy(vec.get(), precompiled.data(), precompiled.size()); - - // TODO(PiotrSikora): fuzz loading of precompiled Wasm modules. - // See: https://github.com/envoyproxy/envoy/issues/9731 - module_ = wasm::Module::deserialize(store_.get(), vec); - if (!module_) { - // Precompiled module that cannot be loaded is considered a hard error, - // so don't fallback to compiling the bytecode. - return false; - } - } - } - } - - if (!module_) { - // TODO(PiotrSikora): fuzz loading of Wasm modules. - // See: https://github.com/envoyproxy/envoy/issues/9731 - const auto stripped_source = getStrippedSource(); - module_ = wasm::Module::make(store_.get(), stripped_source ? stripped_source : source_); - } - - if (module_) { - shared_module_ = module_->share(); - } - - return module_ != nullptr; -} - -WasmVmPtr V8::clone() { - ENVOY_LOG(trace, "clone()"); - ASSERT(shared_module_ != nullptr); - - auto clone = std::make_unique(scope_); - clone->store_ = wasm::Store::make(engine()); - - clone->module_ = wasm::Module::obtain(clone->store_.get(), shared_module_.get()); - - return clone; -} - -// Get Wasm module without Custom Sections to save some memory in workers. 
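// For reference (standard Wasm binary layout, which the parsing below relies on): after the
// 8-byte header (\0asm magic + version), a module is a sequence of sections, each encoded as
//   [1-byte section id][LEB128 u32 payload size][payload]
// Custom sections use id 0 and their payload begins with a LEB128 length-prefixed name. That is
// how getCustomSection() locates a named section such as the precompiled one, and why the
// stripping pass can simply skip every id-0 section.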
-wasm::vec V8::getStrippedSource() { - ENVOY_LOG(trace, "getStrippedSource()"); - ASSERT(source_.get() != nullptr); - - std::vector stripped; - - const byte_t* pos = source_.get() + 8 /* Wasm header */; - const byte_t* end = source_.get() + source_.size(); - while (pos < end) { - const auto section_start = pos; - if (pos + 1 > end) { - return wasm::vec::invalid(); - } - const auto section_type = *pos++; - const auto section_len = parseVarint(pos, end); - if (section_len == static_cast(-1) || pos + section_len > end) { - return wasm::vec::invalid(); - } - pos += section_len; - if (section_type == 0 /* custom section */) { - if (stripped.empty()) { - const byte_t* start = source_.get(); - stripped.insert(stripped.end(), start, section_start); - } - } else if (!stripped.empty()) { - stripped.insert(stripped.end(), section_start, pos /* section end */); - } - } - - // No custom sections found, use the original source. - if (stripped.empty()) { - return wasm::vec::invalid(); - } - - // Return stripped source, without custom sections. - return wasm::vec::make(stripped.size(), stripped.data()); -} - -absl::string_view V8::getCustomSection(absl::string_view name) { - ENVOY_LOG(trace, "getCustomSection(\"{}\")", name); - ASSERT(source_.get() != nullptr); - - const byte_t* pos = source_.get() + 8 /* Wasm header */; - const byte_t* end = source_.get() + source_.size(); - while (pos < end) { - if (pos + 1 > end) { - throw WasmVmException("Failed to parse corrupted WASM module"); - } - const auto section_type = *pos++; - const auto section_len = parseVarint(pos, end); - if (section_len == static_cast(-1) || pos + section_len > end) { - throw WasmVmException("Failed to parse corrupted WASM module"); - } - if (section_type == 0 /* custom section */) { - const auto section_data_start = pos; - const auto section_name_len = parseVarint(pos, end); - if (section_name_len == static_cast(-1) || pos + section_name_len > end) { - throw WasmVmException("Failed to parse corrupted WASM module"); - } - if (section_name_len == name.size() && ::memcmp(pos, name.data(), section_name_len) == 0) { - pos += section_name_len; - ENVOY_LOG(trace, "getCustomSection(\"{}\") found, size: {}", name, - section_data_start + section_len - pos); - return {pos, static_cast(section_data_start + section_len - pos)}; - } - pos = section_data_start + section_len; - } else { - pos += section_len; - } - } - return ""; -} - -#if defined(__linux__) && defined(__x86_64__) -#define WEE8_WASM_PRECOMPILE_PLATFORM "linux_x86_64" -#endif - -absl::string_view V8::getPrecompiledSectionName() { -#ifndef WEE8_WASM_PRECOMPILE_PLATFORM - return ""; -#else - static const auto name = - absl::StrCat("precompiled_v8_v", V8_MAJOR_VERSION, ".", V8_MINOR_VERSION, ".", - V8_BUILD_NUMBER, ".", V8_PATCH_LEVEL, "_", WEE8_WASM_PRECOMPILE_PLATFORM); - return name; -#endif -} - -void V8::link(absl::string_view debug_name) { - ENVOY_LOG(trace, "link(\"{}\")", debug_name); - ASSERT(module_ != nullptr); - - const auto import_types = module_.get()->imports(); - std::vector imports; - - for (size_t i = 0; i < import_types.size(); i++) { - absl::string_view module(import_types[i]->module().get(), import_types[i]->module().size()); - absl::string_view name(import_types[i]->name().get(), import_types[i]->name().size()); - auto import_type = import_types[i]->type(); - - switch (import_type->kind()) { - - case wasm::EXTERN_FUNC: { - ENVOY_LOG(trace, "link(), export host func: {}.{} ({} -> {})", module, name, - printValTypes(import_type->func()->params()), - 
printValTypes(import_type->func()->results())); - - auto it = host_functions_.find(absl::StrCat(module, ".", name)); - if (it == host_functions_.end()) { - throw WasmVmException( - fmt::format("Failed to load WASM module due to a missing import: {}.{}", module, name)); - } - auto func = it->second->callback_.get(); - if (!equalValTypes(import_type->func()->params(), func->type()->params()) || - !equalValTypes(import_type->func()->results(), func->type()->results())) { - throw WasmVmException(fmt::format( - "Failed to load WASM module due to an import type mismatch: {}.{}, " - "want: {} -> {}, but host exports: {} -> {}", - module, name, printValTypes(import_type->func()->params()), - printValTypes(import_type->func()->results()), printValTypes(func->type()->params()), - printValTypes(func->type()->results()))); - } - imports.push_back(func); - } break; - - case wasm::EXTERN_GLOBAL: { - // TODO(PiotrSikora): add support when/if needed. - ENVOY_LOG(trace, "link(), export host global: {}.{} ({})", module, name, - printValKind(import_type->global()->content()->kind())); - - throw WasmVmException( - fmt::format("Failed to load WASM module due to a missing import: {}.{}", module, name)); - } break; - - case wasm::EXTERN_MEMORY: { - ENVOY_LOG(trace, "link(), export host memory: {}.{} (min: {} max: {})", module, name, - import_type->memory()->limits().min, import_type->memory()->limits().max); - - ASSERT(memory_ == nullptr); - auto type = wasm::MemoryType::make(import_type->memory()->limits()); - memory_ = wasm::Memory::make(store_.get(), type.get()); - imports.push_back(memory_.get()); - } break; - - case wasm::EXTERN_TABLE: { - ENVOY_LOG(trace, "link(), export host table: {}.{} (min: {} max: {})", module, name, - import_type->table()->limits().min, import_type->table()->limits().max); - - ASSERT(table_ == nullptr); - auto type = - wasm::TableType::make(wasm::ValType::make(import_type->table()->element()->kind()), - import_type->table()->limits()); - table_ = wasm::Table::make(store_.get(), type.get()); - imports.push_back(table_.get()); - } break; - } - } - - ASSERT(import_types.size() == imports.size()); - - instance_ = wasm::Instance::make(store_.get(), module_.get(), imports.data()); - - const auto export_types = module_.get()->exports(); - const auto exports = instance_.get()->exports(); - ASSERT(export_types.size() == exports.size()); - - for (size_t i = 0; i < export_types.size(); i++) { - absl::string_view name(export_types[i]->name().get(), export_types[i]->name().size()); - auto export_type = export_types[i]->type(); - auto export_item = exports[i].get(); - ASSERT(export_type->kind() == export_item->kind()); - - switch (export_type->kind()) { - - case wasm::EXTERN_FUNC: { - ENVOY_LOG(trace, "link(), import module func: {} ({} -> {})", name, - printValTypes(export_type->func()->params()), - printValTypes(export_type->func()->results())); - - ASSERT(export_item->func() != nullptr); - module_functions_.insert_or_assign(name, export_item->func()->copy()); - } break; - - case wasm::EXTERN_GLOBAL: { - // TODO(PiotrSikora): add support when/if needed. 
- ENVOY_LOG(trace, "link(), import module global: {} ({}) --- IGNORED", name, - printValKind(export_type->global()->content()->kind())); - } break; - - case wasm::EXTERN_MEMORY: { - ENVOY_LOG(trace, "link(), import module memory: {} (min: {} max: {})", name, - export_type->memory()->limits().min, export_type->memory()->limits().max); - - ASSERT(export_item->memory() != nullptr); - ASSERT(memory_ == nullptr); - memory_ = exports[i]->memory()->copy(); - } break; - - case wasm::EXTERN_TABLE: { - // TODO(PiotrSikora): add support when/if needed. - ENVOY_LOG(trace, "link(), import module table: {} (min: {} max: {}) --- IGNORED", name, - export_type->table()->limits().min, export_type->table()->limits().max); - } break; - } - } -} - -uint64_t V8::getMemorySize() { - ENVOY_LOG(trace, "getMemorySize()"); - return memory_->data_size(); -} - -absl::optional V8::getMemory(uint64_t pointer, uint64_t size) { - ENVOY_LOG(trace, "getMemory({}, {})", pointer, size); - ASSERT(memory_ != nullptr); - if (pointer + size > memory_->data_size()) { - return absl::nullopt; - } - return absl::string_view(memory_->data() + pointer, size); -} - -bool V8::setMemory(uint64_t pointer, uint64_t size, const void* data) { - ENVOY_LOG(trace, "setMemory({}, {})", pointer, size); - ASSERT(memory_ != nullptr); - if (pointer + size > memory_->data_size()) { - return false; - } - ::memcpy(memory_->data() + pointer, data, size); - return true; -} - -bool V8::getWord(uint64_t pointer, Word* word) { - ENVOY_LOG(trace, "getWord({})", pointer); - constexpr auto size = sizeof(uint32_t); - if (pointer + size > memory_->data_size()) { - return false; - } - uint32_t word32; - ::memcpy(&word32, memory_->data() + pointer, size); - word->u64_ = word32; - return true; -} - -bool V8::setWord(uint64_t pointer, Word word) { - ENVOY_LOG(trace, "setWord({}, {})", pointer, word.u64_); - constexpr auto size = sizeof(uint32_t); - if (pointer + size > memory_->data_size()) { - return false; - } - uint32_t word32 = word.u32(); - ::memcpy(memory_->data() + pointer, &word32, size); - return true; -} - -template -void V8::registerHostFunctionImpl(absl::string_view module_name, absl::string_view function_name, - void (*function)(void*, Args...)) { - ENVOY_LOG(trace, "registerHostFunction(\"{}.{}\")", module_name, function_name); - auto data = std::make_unique(absl::StrCat(module_name, ".", function_name)); - auto type = wasm::FuncType::make(convertArgsTupleToValTypes>(), - convertArgsTupleToValTypes>()); - auto func = wasm::Func::make( - store_.get(), type.get(), - [](void* data, const wasm::Val params[], wasm::Val[]) -> wasm::own { - auto func_data = reinterpret_cast(data); - ENVOY_LOG(trace, "[vm->host] {}({})", func_data->name_, - printValues(params, std::tuple_size>::value)); - auto args_tuple = convertValTypesToArgsTuple>(params); - auto args = std::tuple_cat(std::make_tuple(current_context_), args_tuple); - auto function = reinterpret_cast(func_data->raw_func_); - absl::apply(function, args); - ENVOY_LOG(trace, "[vm<-host] {} return: void", func_data->name_); - return nullptr; - }, - data.get()); - data->callback_ = std::move(func); - data->raw_func_ = reinterpret_cast(function); - host_functions_.insert_or_assign(absl::StrCat(module_name, ".", function_name), std::move(data)); -} - -template -void V8::registerHostFunctionImpl(absl::string_view module_name, absl::string_view function_name, - R (*function)(void*, Args...)) { - ENVOY_LOG(trace, "registerHostFunction(\"{}.{}\")", module_name, function_name); - auto data = 
std::make_unique(absl::StrCat(module_name, ".", function_name)); - auto type = wasm::FuncType::make(convertArgsTupleToValTypes>(), - convertArgsTupleToValTypes>()); - auto func = wasm::Func::make( - store_.get(), type.get(), - [](void* data, const wasm::Val params[], wasm::Val results[]) -> wasm::own { - auto func_data = reinterpret_cast(data); - ENVOY_LOG(trace, "[vm->host] {}({})", func_data->name_, - printValues(params, sizeof...(Args))); - auto args_tuple = convertValTypesToArgsTuple>(params); - auto args = std::tuple_cat(std::make_tuple(current_context_), args_tuple); - auto function = reinterpret_cast(func_data->raw_func_); - R rvalue = absl::apply(function, args); - results[0] = makeVal(rvalue); - ENVOY_LOG(trace, "[vm<-host] {} return: {}", func_data->name_, rvalue); - return nullptr; - }, - data.get()); - data->callback_ = std::move(func); - data->raw_func_ = reinterpret_cast(function); - host_functions_.insert_or_assign(absl::StrCat(module_name, ".", function_name), std::move(data)); -} - -template -void V8::getModuleFunctionImpl(absl::string_view function_name, - std::function* function) { - ENVOY_LOG(trace, "getModuleFunction(\"{}\")", function_name); - auto it = module_functions_.find(function_name); - if (it == module_functions_.end()) { - *function = nullptr; - return; - } - const wasm::Func* func = it->second.get(); - if (!equalValTypes(func->type()->params(), convertArgsTupleToValTypes>()) || - !equalValTypes(func->type()->results(), convertArgsTupleToValTypes>())) { - throw WasmVmException(fmt::format("Bad function signature for: {}", function_name)); - } - *function = [func, function_name](Context* context, Args... args) -> void { - wasm::Val params[] = {makeVal(args)...}; - ENVOY_LOG(trace, "[host->vm] {}({})", function_name, printValues(params, sizeof...(Args))); - SaveRestoreContext saved_context(context); - auto trap = func->call(params, nullptr); - if (trap) { - throw WasmException( - fmt::format("Function: {} failed: {}", function_name, - absl::string_view(trap->message().get(), trap->message().size()))); - } - ENVOY_LOG(trace, "[host<-vm] {} return: void", function_name); - }; -} - -template -void V8::getModuleFunctionImpl(absl::string_view function_name, - std::function* function) { - ENVOY_LOG(trace, "getModuleFunction(\"{}\")", function_name); - auto it = module_functions_.find(function_name); - if (it == module_functions_.end()) { - *function = nullptr; - return; - } - const wasm::Func* func = it->second.get(); - if (!equalValTypes(func->type()->params(), convertArgsTupleToValTypes>()) || - !equalValTypes(func->type()->results(), convertArgsTupleToValTypes>())) { - throw WasmVmException(fmt::format("Bad function signature for: {}", function_name)); - } - *function = [func, function_name](Context* context, Args... 
args) -> R { - wasm::Val params[] = {makeVal(args)...}; - wasm::Val results[1]; - ENVOY_LOG(trace, "[host->vm] {}({})", function_name, printValues(params, sizeof...(Args))); - SaveRestoreContext saved_context(context); - auto trap = func->call(params, results); - if (trap) { - throw WasmException( - fmt::format("Function: {} failed: {}", function_name, - absl::string_view(trap->message().get(), trap->message().size()))); - } - R rvalue = results[0].get::type>(); - ENVOY_LOG(trace, "[host<-vm] {} return: {}", function_name, rvalue); - return rvalue; - }; -} - -WasmVmPtr createVm(const Stats::ScopeSharedPtr& scope) { return std::make_unique(scope); } - -} // namespace V8 -} // namespace Wasm -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/wasm/v8/v8.h b/source/extensions/common/wasm/v8/v8.h deleted file mode 100644 index a7288f0004a5..000000000000 --- a/source/extensions/common/wasm/v8/v8.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include - -#include "extensions/common/wasm/wasm_vm.h" - -namespace Envoy { -namespace Extensions { -namespace Common { -namespace Wasm { -namespace V8 { - -WasmVmPtr createVm(const Stats::ScopeSharedPtr& scope); - -} // namespace V8 -} // namespace Wasm -} // namespace Common -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc new file mode 100644 index 000000000000..ab2a45f0aaf7 --- /dev/null +++ b/source/extensions/common/wasm/wasm.cc @@ -0,0 +1,489 @@ +#include "extensions/common/wasm/wasm.h" + +#include +#include + +#include "envoy/event/deferred_deletable.h" + +#include "common/common/logger.h" + +#include "extensions/common/wasm/wasm_extension.h" + +#include "absl/strings/str_cat.h" + +#define WASM_CONTEXT(_c) \ + static_cast(proxy_wasm::exports::ContextOrEffectiveContext( \ + static_cast((void)_c, proxy_wasm::current_context_))) + +using proxy_wasm::FailState; +using proxy_wasm::Word; + +namespace Envoy { + +using ScopeWeakPtr = std::weak_ptr; + +namespace Extensions { +namespace Common { +namespace Wasm { +namespace { + +using WasmEvent = EnvoyWasm::WasmEvent; + +struct CodeCacheEntry { + std::string code; + bool in_progress; + MonotonicTime use_time; + MonotonicTime fetch_time; +}; + +class RemoteDataFetcherAdapter : public Config::DataFetcher::RemoteDataFetcherCallback, + public Event::DeferredDeletable { +public: + RemoteDataFetcherAdapter(std::function cb) : cb_(cb) {} + ~RemoteDataFetcherAdapter() override = default; + void onSuccess(const std::string& data) override { cb_(data); } + void onFailure(Config::DataFetcher::FailureReason) override { cb_(""); } + void setFetcher(std::unique_ptr&& fetcher) { + fetcher_ = std::move(fetcher); + } + +private: + std::function cb_; + std::unique_ptr fetcher_; +}; + +const std::string INLINE_STRING = ""; +const int CODE_CACHE_SECONDS_NEGATIVE_CACHING = 10; +const int CODE_CACHE_SECONDS_CACHING_TTL = 24 * 3600; // 24 hours. +MonotonicTime::duration cache_time_offset_for_testing{}; + +std::atomic active_wasms; +std::mutex code_cache_mutex; +absl::flat_hash_map* code_cache = nullptr; + +// Downcast WasmBase to the actual Wasm. 
+inline Wasm* getWasm(WasmHandleSharedPtr& base_wasm_handle) { + return static_cast(base_wasm_handle->wasm().get()); +} + +} // namespace + +std::string anyToBytes(const ProtobufWkt::Any& any) { + if (any.Is()) { + ProtobufWkt::StringValue s; + MessageUtil::unpackTo(any, s); + return s.value(); + } + if (any.Is()) { + Protobuf::BytesValue b; + MessageUtil::unpackTo(any, b); + return b.value(); + } + return any.value(); +} + +void Wasm::initializeStats() { + active_wasms++; + wasm_stats_.active_.set(active_wasms); + wasm_stats_.created_.inc(); +} + +void Wasm::initializeLifecycle(Server::ServerLifecycleNotifier& lifecycle_notifier) { + auto weak = std::weak_ptr(std::static_pointer_cast(shared_from_this())); + lifecycle_notifier.registerCallback(Server::ServerLifecycleNotifier::Stage::ShutdownExit, + [this, weak](Event::PostCb post_cb) { + auto lock = weak.lock(); + if (lock) { // See if we are still alive. + server_shutdown_post_cb_ = post_cb; + } + }); +} + +Wasm::Wasm(absl::string_view runtime, absl::string_view vm_id, absl::string_view vm_configuration, + absl::string_view vm_key, const Stats::ScopeSharedPtr& scope, + Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher) + : WasmBase(createWasmVm(runtime, scope), vm_id, vm_configuration, vm_key), scope_(scope), + cluster_manager_(cluster_manager), dispatcher_(dispatcher), + time_source_(dispatcher.timeSource()), + wasm_stats_(WasmStats{ + ALL_WASM_STATS(POOL_COUNTER_PREFIX(*scope_, absl::StrCat("wasm.", runtime, ".")), + POOL_GAUGE_PREFIX(*scope_, absl::StrCat("wasm.", runtime, ".")))}) { + initializeStats(); + ENVOY_LOG(debug, "Base Wasm created {} now active", active_wasms); +} + +Wasm::Wasm(WasmHandleSharedPtr base_wasm_handle, Event::Dispatcher& dispatcher) + : WasmBase(base_wasm_handle, + [&base_wasm_handle]() { + return createWasmVm( + getEnvoyWasmIntegration(*base_wasm_handle->wasm()->wasm_vm()).runtime(), + getWasm(base_wasm_handle)->scope_); + }), + scope_(getWasm(base_wasm_handle)->scope_), + cluster_manager_(getWasm(base_wasm_handle)->clusterManager()), dispatcher_(dispatcher), + time_source_(dispatcher.timeSource()), wasm_stats_(getWasm(base_wasm_handle)->wasm_stats_) { + initializeStats(); + ENVOY_LOG(debug, "Thread-Local Wasm created {} now active", active_wasms); +} + +void Wasm::error(absl::string_view message) { ENVOY_LOG(error, "Wasm VM failed {}", message); } + +void Wasm::setTimerPeriod(uint32_t context_id, std::chrono::milliseconds new_period) { + auto& period = timer_period_[context_id]; + auto& timer = timer_[context_id]; + bool was_running = timer && period.count() > 0; + period = new_period; + if (was_running) { + timer->disableTimer(); + } + if (period.count() > 0) { + timer = dispatcher_.createTimer( + [weak = std::weak_ptr(std::static_pointer_cast(shared_from_this())), + context_id]() { + auto shared = weak.lock(); + if (shared) { + shared->tickHandler(context_id); + } + }); + timer->enableTimer(period); + } +} + +void Wasm::tickHandler(uint32_t root_context_id) { + auto period = timer_period_.find(root_context_id); + auto timer = timer_.find(root_context_id); + if (period == timer_period_.end() || timer == timer_.end() || !on_tick_) { + return; + } + auto context = getContext(root_context_id); + if (context) { + context->onTick(0); + } + if (timer->second && period->second.count() > 0) { + timer->second->enableTimer(period->second); + } +} + +Wasm::~Wasm() { + active_wasms--; + wasm_stats_.active_.set(active_wasms); + ENVOY_LOG(debug, "~Wasm {} remaining active", active_wasms); + if 
(server_shutdown_post_cb_) { + dispatcher_.post(server_shutdown_post_cb_); + } +} + +// NOLINTNEXTLINE(readability-identifier-naming) +Word resolve_dns(void* raw_context, Word dns_address_ptr, Word dns_address_size, Word token_ptr) { + auto context = WASM_CONTEXT(raw_context); + auto root_context = context->isRootContext() ? context : context->rootContext(); + auto address = context->wasmVm()->getMemory(dns_address_ptr, dns_address_size); + if (!address) { + return WasmResult::InvalidMemoryAccess; + } + // Verify set and verify token_ptr before initiating the async resolve. + uint32_t token = context->wasm()->nextDnsToken(); + if (!context->wasm()->setDatatype(token_ptr, token)) { + return WasmResult::InvalidMemoryAccess; + } + auto callback = [weak_wasm = std::weak_ptr(context->wasm()->sharedThis()), root_context, + context_id = context->id(), + token](Envoy::Network::DnsResolver::ResolutionStatus status, + std::list&& response) { + auto wasm = weak_wasm.lock(); + if (!wasm) { + return; + } + root_context->onResolveDns(token, status, std::move(response)); + }; + if (!context->wasm()->dnsResolver()) { + context->wasm()->dnsResolver() = context->wasm()->dispatcher().createDnsResolver({}, false); + } + context->wasm()->dnsResolver()->resolve(std::string(address.value()), + Network::DnsLookupFamily::Auto, callback); + return WasmResult::Ok; +} + +void Wasm::registerCallbacks() { + WasmBase::registerCallbacks(); +#define _REGISTER(_fn) \ + wasm_vm_->registerCallback( \ + "env", "envoy_" #_fn, &_fn, \ + &proxy_wasm::ConvertFunctionWordToUint32::convertFunctionWordToUint32) + _REGISTER(resolve_dns); +#undef _REGISTER +} + +void Wasm::getFunctions() { + WasmBase::getFunctions(); +#define _GET(_fn) wasm_vm_->getFunction("envoy_" #_fn, &_fn##_); + _GET(on_resolve_dns) + _GET(on_stats_update) +#undef _GET +} + +proxy_wasm::CallOnThreadFunction Wasm::callOnThreadFunction() { + auto& dispatcher = dispatcher_; + return [&dispatcher](const std::function& f) { return dispatcher.post(f); }; +} + +ContextBase* Wasm::createContext(const std::shared_ptr& plugin) { + if (create_context_for_testing_) { + return create_context_for_testing_(this, std::static_pointer_cast(plugin)); + } + return new Context(this, std::static_pointer_cast(plugin)); +} + +ContextBase* Wasm::createRootContext(const std::shared_ptr& plugin) { + if (create_root_context_for_testing_) { + return create_root_context_for_testing_(this, std::static_pointer_cast(plugin)); + } + return new Context(this, std::static_pointer_cast(plugin)); +} + +ContextBase* Wasm::createVmContext() { return new Context(this); } + +void Wasm::log(absl::string_view root_id, const Http::RequestHeaderMap* request_headers, + const Http::ResponseHeaderMap* response_headers, + const Http::ResponseTrailerMap* response_trailers, + const StreamInfo::StreamInfo& stream_info) { + auto context = getRootContext(root_id); + context->log(request_headers, response_headers, response_trailers, stream_info); +} + +void Wasm::onStatsUpdate(absl::string_view root_id, Envoy::Stats::MetricSnapshot& snapshot) { + auto context = getRootContext(root_id); + context->onStatsUpdate(snapshot); +} + +void clearCodeCacheForTesting() { + std::lock_guard guard(code_cache_mutex); + if (code_cache) { + delete code_cache; + code_cache = nullptr; + } + getWasmExtension()->resetStatsForTesting(); +} + +// TODO: remove this post #4160: Switch default to SimulatedTimeSystem. 
+void setTimeOffsetForCodeCacheForTesting(MonotonicTime::duration d) { + cache_time_offset_for_testing = d; +} + +static proxy_wasm::WasmHandleCloneFactory +getCloneFactory(WasmExtension* wasm_extension, Event::Dispatcher& dispatcher, + CreateContextFn create_root_context_for_testing) { + auto wasm_clone_factory = wasm_extension->wasmCloneFactory(); + return [&dispatcher, create_root_context_for_testing, wasm_clone_factory]( + WasmHandleBaseSharedPtr base_wasm) -> std::shared_ptr { + return wasm_clone_factory(std::static_pointer_cast(base_wasm), dispatcher, + create_root_context_for_testing); + }; +} + +WasmEvent toWasmEvent(const std::shared_ptr& wasm) { + if (!wasm) { + return WasmEvent::UnableToCreateVM; + } + switch (wasm->wasm()->fail_state()) { + case FailState::Ok: + return WasmEvent::Ok; + case FailState::UnableToCreateVM: + return WasmEvent::UnableToCreateVM; + case FailState::UnableToCloneVM: + return WasmEvent::UnableToCloneVM; + case FailState::MissingFunction: + return WasmEvent::MissingFunction; + case FailState::UnableToInitializeCode: + return WasmEvent::UnableToInitializeCode; + case FailState::StartFailed: + return WasmEvent::StartFailed; + case FailState::ConfigureFailed: + return WasmEvent::ConfigureFailed; + case FailState::RuntimeError: + return WasmEvent::RuntimeError; + } + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +} + +static bool createWasmInternal(const VmConfig& vm_config, const PluginSharedPtr& plugin, + const Stats::ScopeSharedPtr& scope, + Upstream::ClusterManager& cluster_manager, + Init::Manager& init_manager, Event::Dispatcher& dispatcher, + Api::Api& api, Server::ServerLifecycleNotifier& lifecycle_notifier, + Config::DataSource::RemoteAsyncDataProviderPtr& remote_data_provider, + CreateWasmCallback&& cb, + CreateContextFn create_root_context_for_testing = nullptr) { + auto wasm_extension = getWasmExtension(); + std::string source, code; + bool fetch = false; + if (vm_config.code().has_remote()) { + auto now = dispatcher.timeSource().monotonicTime() + cache_time_offset_for_testing; + source = vm_config.code().remote().http_uri().uri(); + std::lock_guard guard(code_cache_mutex); + if (!code_cache) { + code_cache = new std::remove_reference::type; + } + Stats::ScopeSharedPtr create_wasm_stats_scope = + wasm_extension->lockAndCreateStats(scope, plugin); + // Remove entries older than CODE_CACHE_SECONDS_CACHING_TTL except for our target. 
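// Cache policy as implemented below: unused entries are evicted after
// CODE_CACHE_SECONDS_CACHING_TTL (24h); an entry with empty code (a previous fetch failure) is
// treated as a negative result for CODE_CACHE_SECONDS_NEGATIVE_CACHING (10s) before a refetch is
// attempted; and in_progress prevents duplicate concurrent fetches of the same sha256.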
+ for (auto it = code_cache->begin(); it != code_cache->end();) { + if (now - it->second.use_time > std::chrono::seconds(CODE_CACHE_SECONDS_CACHING_TTL) && + it->first != vm_config.code().remote().sha256()) { + code_cache->erase(it++); + } else { + ++it; + } + } + wasm_extension->onRemoteCacheEntriesChanged(code_cache->size()); + auto it = code_cache->find(vm_config.code().remote().sha256()); + if (it != code_cache->end()) { + it->second.use_time = now; + if (it->second.in_progress) { + wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheMiss, plugin); + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn, + "createWasm: failed to load (in progress) from {}", source); + cb(nullptr); + } + code = it->second.code; + if (code.empty()) { + if (now - it->second.fetch_time < + std::chrono::seconds(CODE_CACHE_SECONDS_NEGATIVE_CACHING)) { + wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheNegativeHit, plugin); + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn, + "createWasm: failed to load (cached) from {}", source); + cb(nullptr); + } + fetch = true; // Fetch failed, retry. + it->second.in_progress = true; + it->second.fetch_time = now; + } else { + wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheHit, plugin); + } + } else { + fetch = true; // Not in cache, fetch. + auto& e = (*code_cache)[vm_config.code().remote().sha256()]; + e.in_progress = true; + e.use_time = e.fetch_time = now; + wasm_extension->onRemoteCacheEntriesChanged(code_cache->size()); + wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheMiss, plugin); + } + } else if (vm_config.code().has_local()) { + code = Config::DataSource::read(vm_config.code().local(), true, api); + source = Config::DataSource::getPath(vm_config.code().local()) + .value_or(code.empty() ? 
EMPTY_STRING : INLINE_STRING); + } + + auto complete_cb = [cb, vm_config, plugin, scope, &cluster_manager, &dispatcher, + &lifecycle_notifier, create_root_context_for_testing, + wasm_extension](std::string code) -> bool { + if (code.empty()) { + cb(nullptr); + return false; + } + auto vm_key = + proxy_wasm::makeVmKey(vm_config.vm_id(), anyToBytes(vm_config.configuration()), code); + auto wasm_factory = wasm_extension->wasmFactory(); + proxy_wasm::WasmHandleFactory proxy_wasm_factory = + [&vm_config, scope, &cluster_manager, &dispatcher, &lifecycle_notifier, + wasm_factory](absl::string_view vm_key) -> WasmHandleBaseSharedPtr { + return wasm_factory(vm_config, scope, cluster_manager, dispatcher, lifecycle_notifier, + vm_key); + }; + auto wasm = proxy_wasm::createWasm( + vm_key, code, plugin, proxy_wasm_factory, + getCloneFactory(wasm_extension, dispatcher, create_root_context_for_testing), + vm_config.allow_precompiled()); + Stats::ScopeSharedPtr create_wasm_stats_scope = + wasm_extension->lockAndCreateStats(scope, plugin); + wasm_extension->onEvent(toWasmEvent(wasm), plugin); + if (!wasm || wasm->wasm()->isFailed()) { + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), trace, + "Unable to create Wasm"); + cb(nullptr); + return false; + } + cb(std::static_pointer_cast(wasm)); + return true; + }; + + if (fetch) { + auto holder = std::make_shared>(); + auto fetch_callback = [vm_config, complete_cb, source, &dispatcher, scope, holder, plugin, + wasm_extension](const std::string& code) { + { + std::lock_guard guard(code_cache_mutex); + auto& e = (*code_cache)[vm_config.code().remote().sha256()]; + e.in_progress = false; + e.code = code; + Stats::ScopeSharedPtr create_wasm_stats_scope = + wasm_extension->lockAndCreateStats(scope, plugin); + if (code.empty()) { + wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheFetchFailure, plugin); + } else { + wasm_extension->onEvent(WasmExtension::WasmEvent::RemoteLoadCacheFetchSuccess, plugin); + } + wasm_extension->onRemoteCacheEntriesChanged(code_cache->size()); + } + // NB: xDS currently does not support failing asynchronously, so we fail immediately + // if remote Wasm code is not cached and do a background fill. + if (!vm_config.nack_on_code_cache_miss()) { + if (code.empty()) { + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), trace, + "Failed to load Wasm code (fetch failed) from {}", source); + } + complete_cb(code); + } + // NB: must be deleted explicitly. 
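// One plausible reading of the lifetime handling here (a sketch, not authoritative): the adapter
// is parked in `holder` so it outlives the in-flight fetch, and since it cannot safely destroy
// itself from inside its own completion callback, it is handed to the dispatcher's
// deferred-deletion list instead.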
+ if (*holder) { + dispatcher.deferredDelete(Envoy::Event::DeferredDeletablePtr{holder->release()}); + } + }; + if (vm_config.nack_on_code_cache_miss()) { + auto adapter = std::make_unique(fetch_callback); + auto fetcher = std::make_unique( + cluster_manager, vm_config.code().remote().http_uri(), vm_config.code().remote().sha256(), + *adapter); + auto fetcher_ptr = fetcher.get(); + adapter->setFetcher(std::move(fetcher)); + *holder = std::move(adapter); + fetcher_ptr->fetch(); + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), trace, + fmt::format("Failed to load Wasm code (fetching) from {}", source)); + cb(nullptr); + return false; + } else { + remote_data_provider = std::make_unique( + cluster_manager, init_manager, vm_config.code().remote(), dispatcher, + api.randomGenerator(), true, fetch_callback); + } + } else { + return complete_cb(code); + } + return true; +} + +bool createWasm(const VmConfig& vm_config, const PluginSharedPtr& plugin, + const Stats::ScopeSharedPtr& scope, Upstream::ClusterManager& cluster_manager, + Init::Manager& init_manager, Event::Dispatcher& dispatcher, Api::Api& api, + Envoy::Server::ServerLifecycleNotifier& lifecycle_notifier, + Config::DataSource::RemoteAsyncDataProviderPtr& remote_data_provider, + CreateWasmCallback&& cb, CreateContextFn create_root_context_for_testing) { + return createWasmInternal(vm_config, plugin, scope, cluster_manager, init_manager, dispatcher, + api, lifecycle_notifier, remote_data_provider, std::move(cb), + create_root_context_for_testing); +} + +WasmHandleSharedPtr getOrCreateThreadLocalWasm(const WasmHandleSharedPtr& base_wasm, + const PluginSharedPtr& plugin, + Event::Dispatcher& dispatcher, + CreateContextFn create_root_context_for_testing) { + return std::static_pointer_cast(proxy_wasm::getOrCreateThreadLocalWasm( + std::static_pointer_cast(base_wasm), plugin, + getCloneFactory(getWasmExtension(), dispatcher, create_root_context_for_testing))); +} + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm.h b/source/extensions/common/wasm/wasm.h new file mode 100644 index 000000000000..a812d1a1a522 --- /dev/null +++ b/source/extensions/common/wasm/wasm.h @@ -0,0 +1,166 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/common/exception.h" +#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/http/filter.h" +#include "envoy/server/lifecycle_notifier.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/assert.h" +#include "common/common/logger.h" +#include "common/config/datasource.h" +#include "common/stats/symbol_table_impl.h" +#include "common/version/version.h" + +#include "extensions/common/wasm/context.h" +#include "extensions/common/wasm/wasm_extension.h" +#include "extensions/common/wasm/wasm_vm.h" +#include "extensions/common/wasm/well_known_names.h" + +#include "include/proxy-wasm/exports.h" +#include "include/proxy-wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +#define ALL_WASM_STATS(COUNTER, GAUGE) \ + COUNTER(created) \ + GAUGE(active, NeverImport) + +class WasmHandle; + +struct WasmStats { + ALL_WASM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) +}; + +// Wasm execution instance. Manages the Envoy side of the Wasm interface. 
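// In addition to proxy-wasm's WasmBase, the class below wires in the Envoy-side facilities
// visible in its members: per-root-context timers driven by the dispatcher (setTimerPeriod /
// tickHandler), DNS resolution with per-call tokens (nextDnsToken / dnsResolver), host stats
// (wasm_stats_ plus the per-plugin counter/gauge/histogram maps), and a server-lifecycle hook
// (initializeLifecycle / server_shutdown_post_cb_) used at shutdown.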
+class Wasm : public WasmBase, Logger::Loggable { +public: + Wasm(absl::string_view runtime, absl::string_view vm_id, absl::string_view vm_configuration, + absl::string_view vm_key, const Stats::ScopeSharedPtr& scope, + Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher); + Wasm(std::shared_ptr other, Event::Dispatcher& dispatcher); + ~Wasm() override; + + Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } + Event::Dispatcher& dispatcher() { return dispatcher_; } + Context* getRootContext(absl::string_view root_id) { + return static_cast(WasmBase::getRootContext(root_id)); + } + void setTimerPeriod(uint32_t root_context_id, std::chrono::milliseconds period) override; + virtual void tickHandler(uint32_t root_context_id); + std::shared_ptr sharedThis() { return std::static_pointer_cast(shared_from_this()); } + Network::DnsResolverSharedPtr& dnsResolver() { return dns_resolver_; } + + // WasmBase + void error(absl::string_view message) override; + proxy_wasm::CallOnThreadFunction callOnThreadFunction() override; + ContextBase* createContext(const std::shared_ptr& plugin) override; + ContextBase* createRootContext(const std::shared_ptr& plugin) override; + ContextBase* createVmContext() override; + void registerCallbacks() override; + void getFunctions() override; + + // AccessLog::Instance + void log(absl::string_view root_id, const Http::RequestHeaderMap* request_headers, + const Http::ResponseHeaderMap* response_headers, + const Http::ResponseTrailerMap* response_trailers, + const StreamInfo::StreamInfo& stream_info); + + void onStatsUpdate(absl::string_view root_id, Envoy::Stats::MetricSnapshot& snapshot); + virtual std::string buildVersion() { return BUILD_VERSION_NUMBER; } + + void initializeLifecycle(Server::ServerLifecycleNotifier& lifecycle_notifier); + uint32_t nextDnsToken() { + do { + dns_token_++; + } while (!dns_token_); + return dns_token_; + } + + void setCreateContextForTesting(CreateContextFn create_context, + CreateContextFn create_root_context) { + create_context_for_testing_ = create_context; + create_root_context_for_testing_ = create_root_context; + } + void setFailStateForTesting(proxy_wasm::FailState fail_state) { failed_ = fail_state; } + +protected: + friend class Context; + + void initializeStats(); + // Calls into the VM. + proxy_wasm::WasmCallVoid<3> on_resolve_dns_; + proxy_wasm::WasmCallVoid<2> on_stats_update_; + + Stats::ScopeSharedPtr scope_; + Upstream::ClusterManager& cluster_manager_; + Event::Dispatcher& dispatcher_; + Event::PostCb server_shutdown_post_cb_; + absl::flat_hash_map timer_; // per root_id. + TimeSource& time_source_; + + // Host Stats/Metrics + WasmStats wasm_stats_; + + // Plugin Stats/Metrics + absl::flat_hash_map counters_; + absl::flat_hash_map gauges_; + absl::flat_hash_map histograms_; + + CreateContextFn create_context_for_testing_; + CreateContextFn create_root_context_for_testing_; + Network::DnsResolverSharedPtr dns_resolver_; + uint32_t dns_token_ = 1; +}; +using WasmSharedPtr = std::shared_ptr; + +class WasmHandle : public WasmHandleBase, public ThreadLocal::ThreadLocalObject { +public: + explicit WasmHandle(const WasmSharedPtr& wasm) + : WasmHandleBase(std::static_pointer_cast(wasm)), wasm_(wasm) {} + + WasmSharedPtr& wasm() { return wasm_; } + +private: + WasmSharedPtr wasm_; +}; + +using CreateWasmCallback = std::function; + +// Returns false if createWasm failed synchronously. 
This is necessary because xDS *MUST* report +// all failures synchronously as it has no facility to report configuration update failures +// asynchronously. Callers should throw an exception if they are part of a synchronous xDS update +// because that is the mechanism for reporting configuration errors. +bool createWasm(const VmConfig& vm_config, const PluginSharedPtr& plugin, + const Stats::ScopeSharedPtr& scope, Upstream::ClusterManager& cluster_manager, + Init::Manager& init_manager, Event::Dispatcher& dispatcher, Api::Api& api, + Envoy::Server::ServerLifecycleNotifier& lifecycle_notifier, + Config::DataSource::RemoteAsyncDataProviderPtr& remote_data_provider, + CreateWasmCallback&& callback, + CreateContextFn create_root_context_for_testing = nullptr); + +WasmHandleSharedPtr +getOrCreateThreadLocalWasm(const WasmHandleSharedPtr& base_wasm, const PluginSharedPtr& plugin, + Event::Dispatcher& dispatcher, + CreateContextFn create_root_context_for_testing = nullptr); + +void clearCodeCacheForTesting(); +std::string anyToBytes(const ProtobufWkt::Any& any); +void setTimeOffsetForCodeCacheForTesting(MonotonicTime::duration d); +EnvoyWasm::WasmEvent toWasmEvent(const std::shared_ptr& wasm); + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm_extension.cc b/source/extensions/common/wasm/wasm_extension.cc new file mode 100644 index 000000000000..c75168f1761c --- /dev/null +++ b/source/extensions/common/wasm/wasm_extension.cc @@ -0,0 +1,114 @@ +#include "extensions/common/wasm/wasm_extension.h" + +#include "extensions/common/wasm/context.h" +#include "extensions/common/wasm/wasm.h" +#include "extensions/common/wasm/wasm_vm.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { +namespace { + +WasmExtension* wasm_extension = nullptr; + +} // namespace + +Stats::ScopeSharedPtr WasmExtension::lockAndCreateStats(const Stats::ScopeSharedPtr& scope, + const PluginSharedPtr& plugin) { + absl::MutexLock l(&mutex_); + Stats::ScopeSharedPtr lock; + if (!(lock = scope_.lock())) { + resetStats(); + createStats(scope, plugin); + scope_ = ScopeWeakPtr(scope); + return scope; + } + createStats(scope, plugin); + return lock; +} + +void WasmExtension::resetStatsForTesting() { + absl::MutexLock l(&mutex_); + resetStats(); +} + +// Register a Wasm extension. Note: only one extension may be registered. 
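// Hypothetical embedder usage (illustrative only; the class name is made up): a custom extension
// is registered once at static-initialization time via the macro declared in wasm_extension.h,
// and getWasmExtension() falls back to the built-in EnvoyWasm when nothing has been registered:
//
//   class MyWasmExtension : public EnvoyWasm { /* override onEvent(), createStats(), ... */ };
//   REGISTER_WASM_EXTENSION(MyWasmExtension)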
+RegisterWasmExtension::RegisterWasmExtension(WasmExtension* extension) { + RELEASE_ASSERT(!wasm_extension, "Multiple Wasm extensions registered."); + wasm_extension = extension; +} + +std::unique_ptr +EnvoyWasm::createEnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope, + absl::string_view runtime, + absl::string_view short_runtime) { + return std::make_unique(scope, runtime, short_runtime); +} + +WasmHandleExtensionFactory EnvoyWasm::wasmFactory() { + return [](const VmConfig vm_config, const Stats::ScopeSharedPtr& scope, + Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, + Server::ServerLifecycleNotifier& lifecycle_notifier, + absl::string_view vm_key) -> WasmHandleBaseSharedPtr { + auto wasm = std::make_shared(vm_config.runtime(), vm_config.vm_id(), + anyToBytes(vm_config.configuration()), vm_key, scope, + cluster_manager, dispatcher); + wasm->initializeLifecycle(lifecycle_notifier); + return std::static_pointer_cast(std::make_shared(std::move(wasm))); + }; +} + +WasmHandleExtensionCloneFactory EnvoyWasm::wasmCloneFactory() { + return [](const WasmHandleSharedPtr& base_wasm, Event::Dispatcher& dispatcher, + CreateContextFn create_root_context_for_testing) -> WasmHandleBaseSharedPtr { + auto wasm = std::make_shared(base_wasm, dispatcher); + wasm->setCreateContextForTesting(nullptr, create_root_context_for_testing); + return std::static_pointer_cast(std::make_shared(std::move(wasm))); + }; +} + +void EnvoyWasm::onEvent(WasmEvent event, const PluginSharedPtr&) { + switch (event) { + case WasmEvent::RemoteLoadCacheHit: + create_wasm_stats_->remote_load_cache_hits_.inc(); + break; + case WasmEvent::RemoteLoadCacheNegativeHit: + create_wasm_stats_->remote_load_cache_negative_hits_.inc(); + break; + case WasmEvent::RemoteLoadCacheMiss: + create_wasm_stats_->remote_load_cache_misses_.inc(); + break; + case WasmEvent::RemoteLoadCacheFetchSuccess: + create_wasm_stats_->remote_load_fetch_successes_.inc(); + break; + case WasmEvent::RemoteLoadCacheFetchFailure: + create_wasm_stats_->remote_load_fetch_failures_.inc(); + break; + default: + break; + } +} + +void EnvoyWasm::onRemoteCacheEntriesChanged(int entries) { + create_wasm_stats_->remote_load_cache_entries_.set(entries); +} + +void EnvoyWasm::createStats(const Stats::ScopeSharedPtr& scope, const PluginSharedPtr&) { + if (!create_wasm_stats_) { + create_wasm_stats_.reset(new CreateWasmStats{CREATE_WASM_STATS( // NOLINT + POOL_COUNTER_PREFIX(*scope, "wasm."), POOL_GAUGE_PREFIX(*scope, "wasm."))}); + } +} + +void EnvoyWasm::resetStats() { create_wasm_stats_.reset(); } + +WasmExtension* getWasmExtension() { + static WasmExtension* extension = wasm_extension ? 
wasm_extension : new EnvoyWasm(); + return extension; +} + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm_extension.h b/source/extensions/common/wasm/wasm_extension.h new file mode 100644 index 000000000000..5d41a58bb337 --- /dev/null +++ b/source/extensions/common/wasm/wasm_extension.h @@ -0,0 +1,126 @@ +#pragma once + +#include + +#include "envoy/server/lifecycle_notifier.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/logger.h" +#include "common/stats/symbol_table_impl.h" + +#include "extensions/common/wasm/context.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +#define CREATE_WASM_STATS(COUNTER, GAUGE) \ + COUNTER(remote_load_cache_hits) \ + COUNTER(remote_load_cache_negative_hits) \ + COUNTER(remote_load_cache_misses) \ + COUNTER(remote_load_fetch_successes) \ + COUNTER(remote_load_fetch_failures) \ + GAUGE(remote_load_cache_entries, NeverImport) + +class WasmHandle; +class EnvoyWasmVmIntegration; + +using WasmHandleSharedPtr = std::shared_ptr; +using CreateContextFn = + std::function& plugin)>; +using WasmHandleExtensionFactory = std::function; +using WasmHandleExtensionCloneFactory = std::function; +using ScopeWeakPtr = std::weak_ptr; + +struct CreateWasmStats { + CREATE_WASM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) +}; + +// Extension point for Wasm clients in embedded Envoy. +class WasmExtension : Logger::Loggable { +public: + WasmExtension() = default; + virtual ~WasmExtension() = default; + + virtual void initialize() = 0; + virtual std::unique_ptr + createEnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope, absl::string_view runtime, + absl::string_view short_runtime) = 0; + virtual WasmHandleExtensionFactory wasmFactory() = 0; + virtual WasmHandleExtensionCloneFactory wasmCloneFactory() = 0; + enum class WasmEvent : int { + Ok, + RemoteLoadCacheHit, + RemoteLoadCacheNegativeHit, + RemoteLoadCacheMiss, + RemoteLoadCacheFetchSuccess, + RemoteLoadCacheFetchFailure, + UnableToCreateVM, + UnableToCloneVM, + MissingFunction, + UnableToInitializeCode, + StartFailed, + ConfigureFailed, + RuntimeError, + }; + virtual void onEvent(WasmEvent event, const PluginSharedPtr& plugin) = 0; + virtual void onRemoteCacheEntriesChanged(int remote_cache_entries) = 0; + virtual void createStats(const Stats::ScopeSharedPtr& scope, const PluginSharedPtr& plugin) + EXCLUSIVE_LOCKS_REQUIRED(mutex_) = 0; + virtual void resetStats() EXCLUSIVE_LOCKS_REQUIRED(mutex_) = 0; // Delete stats pointers + + // NB: the Scope can become invalid if, for example, the owning FilterChain is deleted. When that + // happens the stats must be recreated. This hook verifies the Scope of any existing stats and if + // necessary recreates the stats with the newly provided scope. + // This call takes out the mutex_ and calls createStats and possibly resetStats(). + Stats::ScopeSharedPtr lockAndCreateStats(const Stats::ScopeSharedPtr& scope, + const PluginSharedPtr& plugin); + + void resetStatsForTesting(); + +protected: + absl::Mutex mutex_; + ScopeWeakPtr scope_; +}; + +// The default Envoy Wasm implementation. 
+class EnvoyWasm : public WasmExtension { +public: + EnvoyWasm() = default; + ~EnvoyWasm() override = default; + void initialize() override {} + std::unique_ptr + createEnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope, absl::string_view runtime, + absl::string_view short_runtime) override; + WasmHandleExtensionFactory wasmFactory() override; + WasmHandleExtensionCloneFactory wasmCloneFactory() override; + void onEvent(WasmEvent event, const PluginSharedPtr& plugin) override; + void onRemoteCacheEntriesChanged(int remote_cache_entries) override; + void createStats(const Stats::ScopeSharedPtr& scope, const PluginSharedPtr& plugin) override; + void resetStats() override; + +private: + std::unique_ptr create_wasm_stats_; +}; + +// Register a Wasm extension. Note: only one extension may be registered. +struct RegisterWasmExtension { + RegisterWasmExtension(WasmExtension* extension); +}; +#define REGISTER_WASM_EXTENSION(_class) \ + ::Envoy::Extensions::Common::Wasm::RegisterWasmExtension register_wasm_extension(new _class()); + +WasmExtension* getWasmExtension(); + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm_state.cc b/source/extensions/common/wasm/wasm_state.cc new file mode 100644 index 000000000000..573523f1d83e --- /dev/null +++ b/source/extensions/common/wasm/wasm_state.cc @@ -0,0 +1,59 @@ +#include "extensions/common/wasm/wasm_state.h" + +#include "flatbuffers/reflection.h" +#include "tools/flatbuffers_backed_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +using google::api::expr::runtime::CelValue; + +CelValue WasmState::exprValue(Protobuf::Arena* arena, bool last) const { + if (initialized_) { + switch (type_) { + case WasmType::String: + return CelValue::CreateString(&value_); + case WasmType::Bytes: + return CelValue::CreateBytes(&value_); + case WasmType::Protobuf: { + if (last) { + return CelValue::CreateBytes(&value_); + } + // Note that this is very expensive since it incurs a de-serialization + const auto any = serializeAsProto(); + return CelValue::CreateMessage(any.get(), arena); + } + case WasmType::FlatBuffers: + if (last) { + return CelValue::CreateBytes(&value_); + } + return CelValue::CreateMap(google::api::expr::runtime::CreateFlatBuffersBackedObject( + reinterpret_cast(value_.data()), *reflection::GetSchema(schema_.data()), + arena)); + } + } + return CelValue::CreateNull(); +} + +ProtobufTypes::MessagePtr WasmState::serializeAsProto() const { + auto any = std::make_unique(); + + if (type_ != WasmType::Protobuf) { + ProtobufWkt::BytesValue value; + value.set_value(value_); + any->PackFrom(value); + } else { + // The Wasm extension serialized in its own type. + any->set_type_url(std::string(schema_)); + any->set_value(value_); + } + + return any; +} + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm_state.h b/source/extensions/common/wasm/wasm_state.h new file mode 100644 index 000000000000..ee0371550f36 --- /dev/null +++ b/source/extensions/common/wasm/wasm_state.h @@ -0,0 +1,87 @@ +/* + * Wasm State Class available to Wasm/Non-Wasm modules. 
+ */ + +#pragma once + +#include + +#include "envoy/stream_info/filter_state.h" + +#include "common/protobuf/protobuf.h" +#include "common/singleton/const_singleton.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "eval/public/cel_value.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +// FilterState prefix for WasmState values. +const absl::string_view WasmStateKeyPrefix = "wasm."; + +// WasmState content declaration. +enum class WasmType { + Bytes, + String, + // Schema contains the reflection flatbuffer + FlatBuffers, + // Schema contains the type URL + Protobuf, +}; + +// WasmState type declaration. +class WasmStatePrototype { +public: + WasmStatePrototype(bool readonly, WasmType type, absl::string_view schema, + StreamInfo::FilterState::LifeSpan life_span) + : readonly_(readonly), type_(type), schema_(schema), life_span_(life_span) {} + WasmStatePrototype() = default; + const bool readonly_{false}; + const WasmType type_{WasmType::Bytes}; + const std::string schema_{""}; + const StreamInfo::FilterState::LifeSpan life_span_{ + StreamInfo::FilterState::LifeSpan::FilterChain}; +}; + +using DefaultWasmStatePrototype = ConstSingleton; + +// A simple wrapper around generic values +class WasmState : public StreamInfo::FilterState::Object { +public: + explicit WasmState(const WasmStatePrototype& proto) + : readonly_(proto.readonly_), type_(proto.type_), schema_(proto.schema_) {} + + const std::string& value() const { return value_; } + + // Create a value from the state, given an arena. Last argument indicates whether the value + // is de-referenced. + google::api::expr::runtime::CelValue exprValue(Protobuf::Arena* arena, bool last) const; + + bool setValue(absl::string_view value) { + if (initialized_ && readonly_) { + return false; + } + value_.assign(value.data(), value.size()); + initialized_ = true; + return true; + } + + ProtobufTypes::MessagePtr serializeAsProto() const override; + absl::optional serializeAsString() const override { return value_; } + +private: + const bool readonly_; + const WasmType type_; + absl::string_view schema_; + std::string value_{}; + bool initialized_{false}; +}; + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/wasm/wasm_vm.cc b/source/extensions/common/wasm/wasm_vm.cc index 9299eceba2d1..9f888b18f8f5 100644 --- a/source/extensions/common/wasm/wasm_vm.cc +++ b/source/extensions/common/wasm/wasm_vm.cc @@ -1,30 +1,94 @@ #include "extensions/common/wasm/wasm_vm.h" +#include #include -#include "extensions/common/wasm/null/null.h" -#include "extensions/common/wasm/v8/v8.h" +#include "extensions/common/wasm/context.h" +#include "extensions/common/wasm/ext/envoy_null_vm_wasm_api.h" +#include "extensions/common/wasm/wasm_extension.h" #include "extensions/common/wasm/well_known_names.h" +#include "include/proxy-wasm/null.h" +#include "include/proxy-wasm/null_plugin.h" + +#if defined(ENVOY_WASM_V8) +#include "include/proxy-wasm/v8.h" +#endif +#if defined(ENVOY_WASM_WAVM) +#include "include/proxy-wasm/wavm.h" +#endif + +using ContextBase = proxy_wasm::ContextBase; +using Word = proxy_wasm::Word; + namespace Envoy { namespace Extensions { namespace Common { namespace Wasm { -thread_local Envoy::Extensions::Common::Wasm::Context* current_context_ = nullptr; -thread_local uint32_t effective_context_id_ = 0; +void EnvoyWasmVmIntegration::error(absl::string_view message) { ENVOY_LOG(trace, message); } + +bool 
EnvoyWasmVmIntegration::getNullVmFunction(absl::string_view function_name, bool returns_word, + int number_of_arguments, + proxy_wasm::NullPlugin* plugin, + void* ptr_to_function_return) { + if (function_name == "envoy_on_resolve_dns" && returns_word == false && + number_of_arguments == 3) { + *reinterpret_cast*>(ptr_to_function_return) = + [plugin](ContextBase* context, Word context_id, Word token, Word result_size) { + proxy_wasm::SaveRestoreContext saved_context(context); + // Need to add a new API header available to both .wasm and null vm targets. + auto context_base = plugin->getContextBase(context_id); + if (auto root = context_base->asRoot()) { + static_cast(root)->onResolveDns( + token, result_size); + } + }; + return true; + } else if (function_name == "envoy_on_stats_update" && returns_word == false && + number_of_arguments == 2) { + *reinterpret_cast*>( + ptr_to_function_return) = [plugin](ContextBase* context, Word context_id, + Word result_size) { + proxy_wasm::SaveRestoreContext saved_context(context); + // Need to add a new API header available to both .wasm and null vm targets. + auto context_base = plugin->getContextBase(context_id); + if (auto root = context_base->asRoot()) { + static_cast(root)->onStatsUpdate(result_size); + } + }; + return true; + } + return false; +} WasmVmPtr createWasmVm(absl::string_view runtime, const Stats::ScopeSharedPtr& scope) { if (runtime.empty()) { - throw WasmVmException("Failed to create WASM VM with unspecified runtime."); + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn, + "Failed to create Wasm VM with unspecified runtime"); + return nullptr; } else if (runtime == WasmRuntimeNames::get().Null) { - return Null::createVm(scope); + auto wasm = proxy_wasm::createNullVm(); + wasm->integration() = getWasmExtension()->createEnvoyWasmVmIntegration(scope, runtime, "null"); + return wasm; +#if defined(ENVOY_WASM_V8) } else if (runtime == WasmRuntimeNames::get().V8) { - return V8::createVm(scope); + auto wasm = proxy_wasm::createV8Vm(); + wasm->integration() = getWasmExtension()->createEnvoyWasmVmIntegration(scope, runtime, "v8"); + return wasm; +#endif +#if defined(ENVOY_WASM_WAVM) + } else if (runtime == WasmRuntimeNames::get().Wavm) { + auto wasm = proxy_wasm::createWavmVm(); + wasm->integration() = getWasmExtension()->createEnvoyWasmVmIntegration(scope, runtime, "wavm"); + return wasm; +#endif } else { - throw WasmVmException(fmt::format( - "Failed to create WASM VM using {} runtime. Envoy was compiled without support for it.", - runtime)); + ENVOY_LOG_TO_LOGGER( + Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm), warn, + "Failed to create Wasm VM using {} runtime. Envoy was compiled without support for it", + runtime); + return nullptr; } } diff --git a/source/extensions/common/wasm/wasm_vm.h b/source/extensions/common/wasm/wasm_vm.h index 3506eaaa0966..0099e63d1144 100644 --- a/source/extensions/common/wasm/wasm_vm.h +++ b/source/extensions/common/wasm/wasm_vm.h @@ -4,266 +4,73 @@ #include "envoy/common/exception.h" #include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/stats_macros.h" #include "common/common/logger.h" -#include "absl/types/optional.h" +#include "absl/strings/str_cat.h" +#include "include/proxy-wasm/wasm_vm.h" +#include "include/proxy-wasm/word.h" namespace Envoy { namespace Extensions { namespace Common { namespace Wasm { -class Context; +/** + * Wasm host stats. 
+ */ +#define ALL_VM_STATS(COUNTER, GAUGE) \ + COUNTER(created) \ + COUNTER(cloned) \ + GAUGE(active, NeverImport) -// Represents a WASM-native word-sized datum. On 32-bit VMs, the high bits are always zero. -// The WASM/VM API treats all bits as significant. -struct Word { - Word(uint64_t w) : u64_(w) {} // Implicit conversion into Word. - uint32_t u32() const { return static_cast(u64_); } - uint64_t u64_; +struct VmStats { + ALL_VM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; -inline std::ostream& operator<<(std::ostream& os, const Word& w) { return os << w.u64_; } - -// Convert Word type for use by 32-bit VMs. -template struct ConvertWordTypeToUint32 { - using type = T; // NOLINT(readability-identifier-naming) -}; -template <> struct ConvertWordTypeToUint32 { - using type = uint32_t; // NOLINT(readability-identifier-naming) -}; - -// Convert Word-based function types for 32-bit VMs. -template struct ConvertFunctionTypeWordToUint32 {}; -template struct ConvertFunctionTypeWordToUint32 { - using type = typename ConvertWordTypeToUint32::type (*)( - typename ConvertWordTypeToUint32::type...); -}; - -template inline auto convertWordToUint32(T t) { return t; } -template <> inline auto convertWordToUint32(Word t) { return static_cast(t.u64_); } - -// Convert a function of the form Word(Word...) to one of the form uint32_t(uint32_t...). -template struct ConvertFunctionWordToUint32 { - static void convertFunctionWordToUint32() {} -}; -template R> -struct ConvertFunctionWordToUint32 { - static typename ConvertWordTypeToUint32::type - convertFunctionWordToUint32(typename ConvertWordTypeToUint32::type... args) { - return convertWordToUint32(F(std::forward(args)...)); +// Wasm VM data providing stats. +class EnvoyWasmVmIntegration : public proxy_wasm::WasmVmIntegration, + Logger::Loggable { +public: + EnvoyWasmVmIntegration(const Stats::ScopeSharedPtr& scope, absl::string_view runtime, + absl::string_view short_runtime) + : scope_(scope), runtime_(std::string(runtime)), short_runtime_(std::string(short_runtime)), + runtime_prefix_(absl::StrCat("wasm_vm.", short_runtime, ".")), + stats_(VmStats{ALL_VM_STATS(POOL_COUNTER_PREFIX(*scope_, runtime_prefix_), + POOL_GAUGE_PREFIX(*scope_, runtime_prefix_))}) { + stats_.created_.inc(); + stats_.active_.inc(); + ENVOY_LOG(debug, "WasmVm created {} now active", runtime_, stats_.active_.value()); } -}; -template void> -struct ConvertFunctionWordToUint32 { - static void convertFunctionWordToUint32(typename ConvertWordTypeToUint32::type... args) { - F(std::forward(args)...); + ~EnvoyWasmVmIntegration() override { + stats_.active_.dec(); + ENVOY_LOG(debug, "~WasmVm {} {} remaining active", runtime_, stats_.active_.value()); } -}; - -#define CONVERT_FUNCTION_WORD_TO_UINT32(_f) \ - &ConvertFunctionWordToUint32::convertFunctionWordToUint32 - -// These are templates and its helper for constructing signatures of functions calling into and out -// of WASM VMs. -// - WasmFuncTypeHelper is a helper for WasmFuncType and shouldn't be used anywhere else than -// WasmFuncType definition. -// - WasmFuncType takes 4 template parameter which are number of argument, return type, context type -// and param type respectively, resolve to a function type. 
-// For example `WasmFuncType<3, void, Context*, Word>` resolves to `void(Context*, Word, Word, -// Word)` -template -struct WasmFuncTypeHelper {}; - -template -struct WasmFuncTypeHelper { - // NOLINTNEXTLINE(readability-identifier-naming) - using type = typename WasmFuncTypeHelper::type; -}; - -template -struct WasmFuncTypeHelper<0, ReturnType, ContextType, ParamType, ReturnType(ContextType, Args...)> { - using type = ReturnType(ContextType, Args...); // NOLINT(readability-identifier-naming) -}; - -template -using WasmFuncType = typename WasmFuncTypeHelper::type; - -// Calls into the WASM VM. -// 1st arg is always a pointer to Context (Context*). -template using WasmCallVoid = std::function>; -template using WasmCallWord = std::function>; - -#define FOR_ALL_WASM_VM_EXPORTS(_f) \ - _f(WasmCallVoid<0>) _f(WasmCallVoid<1>) _f(WasmCallVoid<2>) _f(WasmCallVoid<3>) \ - _f(WasmCallVoid<5>) _f(WasmCallWord<1>) _f(WasmCallWord<2>) _f(WasmCallWord<3>) - -// Calls out of the WASM VM. -// 1st arg is always a pointer to raw_context (void*). -template using WasmCallbackVoid = WasmFuncType*; -template using WasmCallbackWord = WasmFuncType*; - -// Using the standard g++/clang mangling algorithm: -// https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-builtin -// Extended with W = Word -// Z = void, j = uint32_t, l = int64_t, m = uint64_t -using WasmCallback_WWl = Word (*)(void*, Word, int64_t); -using WasmCallback_WWlWW = Word (*)(void*, Word, int64_t, Word, Word); -using WasmCallback_WWm = Word (*)(void*, Word, uint64_t); -using WasmCallback_dd = double (*)(void*, double); - -#define FOR_ALL_WASM_VM_IMPORTS(_f) \ - _f(WasmCallbackVoid<0>) _f(WasmCallbackVoid<1>) _f(WasmCallbackVoid<2>) _f(WasmCallbackVoid<3>) \ - _f(WasmCallbackVoid<4>) _f(WasmCallbackWord<0>) _f(WasmCallbackWord<1>) \ - _f(WasmCallbackWord<2>) _f(WasmCallbackWord<3>) _f(WasmCallbackWord<4>) \ - _f(WasmCallbackWord<5>) _f(WasmCallbackWord<6>) _f(WasmCallbackWord<7>) \ - _f(WasmCallbackWord<8>) _f(WasmCallbackWord<9>) _f(WasmCallbackWord<10>) \ - _f(WasmCallback_WWl) _f(WasmCallback_WWlWW) _f(WasmCallback_WWm) \ - _f(WasmCallback_dd) - -enum class Cloneable { - NotCloneable, // VMs can not be cloned and should be created from scratch. - CompiledBytecode, // VMs can be cloned with compiled bytecode. - InstantiatedModule // VMs can be cloned from an instantiated module. -}; - -// Wasm VM instance. Provides the low level WASM interface. -class WasmVm : public Logger::Loggable { -public: - using WasmVmPtr = std::unique_ptr; - - virtual ~WasmVm() = default; - /** - * Return the runtime identifier. - * @return one of WasmRuntimeValues from well_known_names.h (e.g. "envoy.wasm.runtime.null"). - */ - virtual absl::string_view runtime() PURE; - - /** - * Whether or not the VM implementation supports cloning. Cloning is VM system dependent. - * When a VM is configured a single VM is instantiated to check that the .wasm file is valid and - * to do VM system specific initialization. In the case of WAVM this is potentially ahead-of-time - * compilation. Then, if cloning is supported, we clone that VM for each worker, potentially - * copying and sharing the initialized data structures for efficiency. Otherwise we create an new - * VM from scratch for each worker. - * @return one of enum Cloneable with the VMs cloneability. - */ - virtual Cloneable cloneable() PURE; - - /** - * Make a worker/thread-specific copy if supported by the underlying VM system (see cloneable() - * above). 
If not supported, the caller will need to create a new VM from scratch. If supported, - * the clone may share compiled code and other read-only data with the source VM. - * @return a clone of 'this' (e.g. for a different worker/thread). - */ - virtual WasmVmPtr clone() PURE; - - /** - * Load the WASM code from a file. Return true on success. Once the module is loaded it can be - * queried, e.g. to see which version of emscripten support is required. After loading, the - * appropriate ABI callbacks can be registered and then the module can be link()ed (see below). - * @param code the WASM binary code (or registered NullVm plugin name). - * @param allow_precompiled if true, allows supporting VMs (e.g. WAVM) to load the binary - * machine code from a user-defined section of the WASM file. Because that code is not verified by - * the envoy process it is up to the user to ensure that the code is both safe and is built for - * the linked in version of WAVM. - * @return whether or not the load was successful. - */ - virtual bool load(const std::string& code, bool allow_precompiled) PURE; - - /** - * Link the WASM code to the host-provided functions, e.g. the ABI. Prior to linking, the module - * should be loaded and the ABI callbacks registered (see above). Linking should be done once - * after load(). - * @param debug_name user-provided name for use in log and error messages. - */ - virtual void link(absl::string_view debug_name) PURE; - - /** - * Get size of the currently allocated memory in the VM. - * @return the size of memory in bytes. - */ - virtual uint64_t getMemorySize() PURE; - - /** - * Convert a block of memory in the VM to a string_view. - * @param pointer the offset into VM memory of the requested VM memory block. - * @param size the size of the requested VM memory block. - * @return if std::nullopt then the pointer/size pair were invalid, otherwise returns - * a host string_view pointing to the pointer/size pair in VM memory. - */ - virtual absl::optional getMemory(uint64_t pointer, uint64_t size) PURE; - - /** - * Set a block of memory in the VM, returns true on success, false if the pointer/size is invalid. - * @param pointer the offset into VM memory describing the start of a region of VM memory. - * @param size the size of the region of VM memory. - * @return whether or not the pointer/size pair was a valid VM memory block. - */ - virtual bool setMemory(uint64_t pointer, uint64_t size, const void* data) PURE; - - /** - * Get a VM native Word (e.g. sizeof(void*) or sizeof(size_t)) from VM memory, returns true on - * success, false if the pointer is invalid. WASM-32 VMs have 32-bit native words and WASM-64 VMs - * (not yet supported) will have 64-bit words as does the Null VM (compiled into 64-bit Envoy). - * This function can be used to chase pointers in VM memory. - * @param pointer the offset into VM memory describing the start of VM native word size block. - * @param data a pointer to a Word whose contents will be filled from the VM native word at - * 'pointer'. - * @return whether or not the pointer was to a valid VM memory block of VM native word size. - */ - virtual bool getWord(uint64_t pointer, Word* data) PURE; - /** - * Set a Word in the VM, returns true on success, false if the pointer is invalid. - * See getWord above for details. This function can be used (for example) to set indirect pointer - * return values (e.g. proxy_getHeaderHapValue(... const char** value_ptr, size_t* value_size). 
- * @param pointer the offset into VM memory describing the start of VM native word size block. - * @param data a Word whose contents will be written in VM native word size at 'pointer'. - * @return whether or not the pointer was to a valid VM memory block of VM native word size. - */ - virtual bool setWord(uint64_t pointer, Word data) PURE; - - /** - * Get the contents of the custom section with the given name or "" if it does not exist. - * @param name the name of the custom section to get. - * @return the contents of the custom section (if any). The result will be empty if there - * is no such section. - */ - virtual absl::string_view getCustomSection(absl::string_view name) PURE; - - /** - * Get the name of the custom section that contains precompiled module. - * @return the name of the custom section that contains precompiled module. - */ - virtual absl::string_view getPrecompiledSectionName() PURE; + // proxy_wasm::WasmVmIntegration + proxy_wasm::WasmVmIntegration* clone() override { + return new EnvoyWasmVmIntegration(scope_, runtime_, short_runtime_); + } + bool getNullVmFunction(absl::string_view function_name, bool returns_word, + int number_of_arguments, proxy_wasm::NullPlugin* plugin, + void* ptr_to_function_return) override; + void error(absl::string_view message) override; - /** - * Get typed function exported by the WASM module. - */ -#define _GET_FUNCTION(_T) virtual void getFunction(absl::string_view function_name, _T* f) PURE; - FOR_ALL_WASM_VM_EXPORTS(_GET_FUNCTION) -#undef _GET_FUNCTION + const std::string& runtime() const { return runtime_; } - /** - * Register typed callbacks exported by the host environment. - */ -#define _REGISTER_CALLBACK(_T) \ - virtual void registerCallback(absl::string_view moduleName, absl::string_view function_name, \ - _T f, typename ConvertFunctionTypeWordToUint32<_T>::type) PURE; - FOR_ALL_WASM_VM_IMPORTS(_REGISTER_CALLBACK) -#undef _REGISTER_CALLBACK -}; -using WasmVmPtr = std::unique_ptr; +protected: + const Stats::ScopeSharedPtr scope_; + const std::string runtime_; + const std::string short_runtime_; + const std::string runtime_prefix_; + VmStats stats_; +}; // namespace Wasm -// Exceptions for issues with the WasmVm. -class WasmVmException : public EnvoyException { -public: - using EnvoyException::EnvoyException; -}; +inline EnvoyWasmVmIntegration& getEnvoyWasmIntegration(proxy_wasm::WasmVm& wasm_vm) { + return *static_cast(wasm_vm.integration().get()); +} // Exceptions for issues with the WebAssembly code. class WasmException : public EnvoyException { @@ -271,36 +78,9 @@ class WasmException : public EnvoyException { using EnvoyException::EnvoyException; }; -// Thread local state set during a call into a WASM VM so that calls coming out of the -// VM can be attributed correctly to calling Filter. We use thread_local instead of ThreadLocal -// because this state is live only during the calls and does not need to be initialized consistently -// over all workers as with ThreadLocal data. -extern thread_local Envoy::Extensions::Common::Wasm::Context* current_context_; - -// Requested effective context set by code within the VM to request that the calls coming out of the -// VM be attributed to another filter, for example if a control plane gRPC comes back to the -// RootContext which effects some set of waiting filters. -extern thread_local uint32_t effective_context_id_; - -// Helper to save and restore thread local VM call context information to support reentrant calls. 
-// NB: this happens for example when a call from the VM invokes a handler which needs to _malloc -// memory in the VM. -struct SaveRestoreContext { - explicit SaveRestoreContext(Context* context) { - saved_context = current_context_; - saved_effective_context_id_ = effective_context_id_; - current_context_ = context; - effective_context_id_ = 0; // No effective context id. - } - ~SaveRestoreContext() { - current_context_ = saved_context; - effective_context_id_ = saved_effective_context_id_; - } - Context* saved_context; - uint32_t saved_effective_context_id_; -}; +using WasmVmPtr = std::unique_ptr; -// Create a new low-level WASM VM using runtime of the given type (e.g. "envoy.wasm.runtime.wavm"). +// Create a new low-level Wasm VM using runtime of the given type (e.g. "envoy.wasm.runtime.wavm"). WasmVmPtr createWasmVm(absl::string_view runtime, const Stats::ScopeSharedPtr& scope); } // namespace Wasm diff --git a/source/extensions/common/wasm/well_known_names.h b/source/extensions/common/wasm/well_known_names.h index 3fb39ed54a4d..5fb8602bf831 100644 --- a/source/extensions/common/wasm/well_known_names.h +++ b/source/extensions/common/wasm/well_known_names.h @@ -15,11 +15,16 @@ namespace Wasm { */ class WasmRuntimeValues { public: + // WAVM (https://github.com/WAVM/WAVM) Wasm VM. + const std::string Wavm = "envoy.wasm.runtime.wavm"; // Null sandbox: modules must be compiled into envoy and registered name is given in the // DataSource.inline_string. const std::string Null = "envoy.wasm.runtime.null"; // V8-based (https://v8.dev) WebAssembly runtime. const std::string V8 = "envoy.wasm.runtime.v8"; + + // Filter state name + const std::string FilterState = "envoy.wasm"; }; using WasmRuntimeNames = ConstSingleton; diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 4e9f82d850b1..e3ec724d9339 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -7,6 +7,7 @@ EXTENSIONS = { "envoy.access_loggers.file": "//source/extensions/access_loggers/file:config", "envoy.access_loggers.http_grpc": "//source/extensions/access_loggers/grpc:http_config", "envoy.access_loggers.tcp_grpc": "//source/extensions/access_loggers/grpc:tcp_config", + "envoy.access_loggers.wasm": "//source/extensions/access_loggers/wasm:config", # # Clusters @@ -30,6 +31,11 @@ EXTENSIONS = { "envoy.grpc_credentials.file_based_metadata": "//source/extensions/grpc_credentials/file_based_metadata:config", "envoy.grpc_credentials.aws_iam": "//source/extensions/grpc_credentials/aws_iam:config", + # + # WASM + # + "envoy.bootstrap.wasm": "//source/extensions/bootstrap/wasm:config", + # # Health checkers # @@ -46,6 +52,7 @@ EXTENSIONS = { "envoy.filters.http.aws_request_signing": "//source/extensions/filters/http/aws_request_signing:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", "envoy.filters.http.cache": "//source/extensions/filters/http/cache:config", + "envoy.filters.http.cdn_loop": "//source/extensions/filters/http/cdn_loop:config", "envoy.filters.http.compressor": "//source/extensions/filters/http/compressor:config", "envoy.filters.http.cors": "//source/extensions/filters/http/cors:config", "envoy.filters.http.csrf": "//source/extensions/filters/http/csrf:config", @@ -64,6 +71,7 @@ EXTENSIONS = { "envoy.filters.http.health_check": "//source/extensions/filters/http/health_check:config", "envoy.filters.http.ip_tagging": 
"//source/extensions/filters/http/ip_tagging:config", "envoy.filters.http.jwt_authn": "//source/extensions/filters/http/jwt_authn:config", + "envoy.filters.http.local_ratelimit": "//source/extensions/filters/http/local_ratelimit:config", "envoy.filters.http.lua": "//source/extensions/filters/http/lua:config", "envoy.filters.http.oauth2": "//source/extensions/filters/http/oauth2:config", "envoy.filters.http.on_demand": "//source/extensions/filters/http/on_demand:config", @@ -73,6 +81,7 @@ EXTENSIONS = { "envoy.filters.http.router": "//source/extensions/filters/http/router:config", "envoy.filters.http.squash": "//source/extensions/filters/http/squash:config", "envoy.filters.http.tap": "//source/extensions/filters/http/tap:config", + "envoy.filters.http.wasm": "//source/extensions/filters/http/wasm:config", # # Listener filters @@ -112,6 +121,7 @@ EXTENSIONS = { "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", "envoy.filters.network.sni_dynamic_forward_proxy": "//source/extensions/filters/network/sni_dynamic_forward_proxy:config", + "envoy.filters.network.wasm": "//source/extensions/filters/network/wasm:config", "envoy.filters.network.zookeeper_proxy": "//source/extensions/filters/network/zookeeper_proxy:config", # @@ -136,6 +146,7 @@ EXTENSIONS = { "envoy.stat_sinks.hystrix": "//source/extensions/stat_sinks/hystrix:config", "envoy.stat_sinks.metrics_service": "//source/extensions/stat_sinks/metrics_service:config", "envoy.stat_sinks.statsd": "//source/extensions/stat_sinks/statsd:config", + "envoy.stat_sinks.wasm": "//source/extensions/stat_sinks/wasm:config", # # Thrift filters @@ -161,7 +172,7 @@ EXTENSIONS = { # "envoy.transport_sockets.alts": "//source/extensions/transport_sockets/alts:config", - "envoy.transport_sockets.upstream_proxy_protocol": "//source/extensions/transport_sockets/proxy_protocol:upstream_proxy_protocol", + "envoy.transport_sockets.upstream_proxy_protocol": "//source/extensions/transport_sockets/proxy_protocol:upstream_config", "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tap": "//source/extensions/transport_sockets/tap:config", "envoy.transport_sockets.quic": "//source/extensions/quic_listeners/quiche:quic_factory_lib", diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 17a0bd88a570..9313a550695e 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -23,6 +23,19 @@ absl::optional convertHeaderEntry(const Http::HeaderEntry* header) { return CelValue::CreateStringView(header->value().getStringView()); } +absl::optional +convertHeaderEntry(Protobuf::Arena& arena, + Http::HeaderUtility::GetAllOfHeaderAsStringResult&& result) { + if (!result.result().has_value()) { + return {}; + } else if (!result.backingString().empty()) { + return CelValue::CreateString( + Protobuf::Arena::Create(&arena, result.backingString())); + } else { + return CelValue::CreateStringView(result.result().value()); + } +} + namespace { absl::optional extractSslInfo(const Ssl::ConnectionInfo& ssl_info, @@ -129,6 +142,7 @@ absl::optional ResponseWrapper::operator[](CelValue key) const { if (code.has_value()) { return CelValue::CreateInt64(code.value()); } + return {}; } else if (value == Size) { return CelValue::CreateInt64(info_.bytesSent()); } else if 
(value == Headers) { @@ -150,6 +164,12 @@ absl::optional ResponseWrapper::operator[](CelValue key) const { return CelValue::CreateInt64(info_.bytesSent() + (headers_.value_ ? headers_.value_->byteSize() : 0) + (trailers_.value_ ? trailers_.value_->byteSize() : 0)); + } else if (value == CodeDetails) { + const absl::optional& details = info_.responseCodeDetails(); + if (details.has_value()) { + return CelValue::CreateString(&details.value()); + } + return {}; } return {}; } @@ -164,6 +184,12 @@ absl::optional ConnectionWrapper::operator[](CelValue key) const { info_.downstreamSslConnection()->peerCertificatePresented()); } else if (value == RequestedServerName) { return CelValue::CreateString(&info_.requestedServerName()); + } else if (value == ID) { + auto id = info_.connectionID(); + if (id.has_value()) { + return CelValue::CreateUint64(id.value()); + } + return {}; } auto ssl_info = info_.downstreamSslConnection(); @@ -233,6 +259,22 @@ absl::optional PeerWrapper::operator[](CelValue key) const { return {}; } +absl::optional FilterStateWrapper::operator[](CelValue key) const { + if (!key.IsString()) { + return {}; + } + auto value = key.StringOrDie().value(); + if (filter_state_.hasDataWithName(value)) { + const StreamInfo::FilterState::Object* object = filter_state_.getDataReadOnlyGeneric(value); + absl::optional serialized = object->serializeAsString(); + if (serialized.has_value()) { + std::string* out = ProtobufWkt::Arena::Create(arena_, serialized.value()); + return CelValue::CreateBytes(out); + } + } + return {}; +} + } // namespace Expr } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/expr/context.h b/source/extensions/filters/common/expr/context.h index 70da3d85c30f..fd4b386a9a32 100644 --- a/source/extensions/filters/common/expr/context.h +++ b/source/extensions/filters/common/expr/context.h @@ -4,6 +4,7 @@ #include "envoy/stream_info/stream_info.h" #include "common/grpc/status.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "eval/public/cel_value.h" @@ -37,6 +38,7 @@ constexpr absl::string_view Protocol = "protocol"; // Symbols for traversing the response properties constexpr absl::string_view Response = "response"; constexpr absl::string_view Code = "code"; +constexpr absl::string_view CodeDetails = "code_details"; constexpr absl::string_view Trailers = "trailers"; constexpr absl::string_view Flags = "flags"; constexpr absl::string_view GrpcStatus = "grpc_status"; @@ -44,6 +46,9 @@ constexpr absl::string_view GrpcStatus = "grpc_status"; // Per-request or per-connection metadata constexpr absl::string_view Metadata = "metadata"; +// Per-request or per-connection filter state +constexpr absl::string_view FilterState = "filter_state"; + // Connection properties constexpr absl::string_view Connection = "connection"; constexpr absl::string_view MTLS = "mtls"; @@ -72,10 +77,13 @@ constexpr absl::string_view UpstreamTransportFailureReason = "transport_failure_ class RequestWrapper; absl::optional convertHeaderEntry(const Http::HeaderEntry* header); +absl::optional +convertHeaderEntry(Protobuf::Arena& arena, + Http::HeaderUtility::GetAllOfHeaderAsStringResult&& result); template class HeadersWrapper : public google::api::expr::runtime::CelMap { public: - HeadersWrapper(const T* value) : value_(value) {} + HeadersWrapper(Protobuf::Arena& arena, const T* value) : arena_(arena), value_(value) {} absl::optional operator[](CelValue key) const override { if (value_ == nullptr || !key.IsString()) { return {}; @@ 
-85,8 +93,8 @@ template class HeadersWrapper : public google::api::expr::runtime::Cel // Reject key if it is an invalid header string return {}; } - auto out = value_->get(Http::LowerCaseString(str)); - return convertHeaderEntry(out); + return convertHeaderEntry( + arena_, Http::HeaderUtility::getAllOfHeaderAsString(*value_, Http::LowerCaseString(str))); } int size() const override { return value_ == nullptr ? 0 : value_->size(); } bool empty() const override { return value_ == nullptr ? true : value_->empty(); } @@ -97,9 +105,13 @@ template class HeadersWrapper : public google::api::expr::runtime::Cel private: friend class RequestWrapper; friend class ResponseWrapper; + Protobuf::Arena& arena_; const T* value_; }; +// Wrapper for accessing properties from internal data structures. +// Note that CEL assumes no ownership of the underlying data, so temporary +// data must be arena-allocated. class BaseWrapper : public google::api::expr::runtime::CelMap, public google::api::expr::runtime::CelValueProducer { public: @@ -108,13 +120,21 @@ class BaseWrapper : public google::api::expr::runtime::CelMap, const google::api::expr::runtime::CelList* ListKeys() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - CelValue Produce(ProtobufWkt::Arena*) override { return CelValue::CreateMap(this); } + CelValue Produce(ProtobufWkt::Arena* arena) override { + // Producer is unique per evaluation arena since activation is re-created. + arena_ = arena; + return CelValue::CreateMap(this); + } + +protected: + ProtobufWkt::Arena* arena_; }; class RequestWrapper : public BaseWrapper { public: - RequestWrapper(const Http::RequestHeaderMap* headers, const StreamInfo::StreamInfo& info) - : headers_(headers), info_(info) {} + RequestWrapper(Protobuf::Arena& arena, const Http::RequestHeaderMap* headers, + const StreamInfo::StreamInfo& info) + : headers_(arena, headers), info_(info) {} absl::optional operator[](CelValue key) const override; private: @@ -124,9 +144,9 @@ class RequestWrapper : public BaseWrapper { class ResponseWrapper : public BaseWrapper { public: - ResponseWrapper(const Http::ResponseHeaderMap* headers, const Http::ResponseTrailerMap* trailers, - const StreamInfo::StreamInfo& info) - : headers_(headers), trailers_(trailers), info_(info) {} + ResponseWrapper(Protobuf::Arena& arena, const Http::ResponseHeaderMap* headers, + const Http::ResponseTrailerMap* trailers, const StreamInfo::StreamInfo& info) + : headers_(arena, headers), trailers_(arena, trailers), info_(info) {} absl::optional operator[](CelValue key) const override; private: @@ -174,6 +194,15 @@ class MetadataProducer : public google::api::expr::runtime::CelValueProducer { const envoy::config::core::v3::Metadata& metadata_; }; +class FilterStateWrapper : public BaseWrapper { +public: + FilterStateWrapper(const StreamInfo::FilterState& filter_state) : filter_state_(filter_state) {} + absl::optional operator[](CelValue key) const override; + +private: + const StreamInfo::FilterState& filter_state_; +}; + } // namespace Expr } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/expr/evaluator.cc b/source/extensions/filters/common/expr/evaluator.cc index e4920fd21fda..d2c376684f72 100644 --- a/source/extensions/filters/common/expr/evaluator.cc +++ b/source/extensions/filters/common/expr/evaluator.cc @@ -11,20 +11,23 @@ namespace Filters { namespace Common { namespace Expr { -ActivationPtr createActivation(const StreamInfo::StreamInfo& info, +ActivationPtr createActivation(Protobuf::Arena& arena, const 
StreamInfo::StreamInfo& info, const Http::RequestHeaderMap* request_headers, const Http::ResponseHeaderMap* response_headers, const Http::ResponseTrailerMap* response_trailers) { auto activation = std::make_unique(); - activation->InsertValueProducer(Request, std::make_unique(request_headers, info)); - activation->InsertValueProducer( - Response, std::make_unique(response_headers, response_trailers, info)); + activation->InsertValueProducer(Request, + std::make_unique(arena, request_headers, info)); + activation->InsertValueProducer(Response, std::make_unique( + arena, response_headers, response_trailers, info)); activation->InsertValueProducer(Connection, std::make_unique(info)); activation->InsertValueProducer(Upstream, std::make_unique(info)); activation->InsertValueProducer(Source, std::make_unique(info, false)); activation->InsertValueProducer(Destination, std::make_unique(info, true)); activation->InsertValueProducer(Metadata, std::make_unique(info.dynamicMetadata())); + activation->InsertValueProducer(FilterState, + std::make_unique(info.filterState())); return activation; } @@ -65,13 +68,14 @@ ExpressionPtr createExpression(Builder& builder, const google::api::expr::v1alph return std::move(cel_expression_status.value()); } -absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena, +absl::optional evaluate(const Expression& expr, Protobuf::Arena& arena, const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap* request_headers, const Http::ResponseHeaderMap* response_headers, const Http::ResponseTrailerMap* response_trailers) { - auto activation = createActivation(info, request_headers, response_headers, response_trailers); - auto eval_status = expr.Evaluate(*activation, arena); + auto activation = + createActivation(arena, info, request_headers, response_headers, response_trailers); + auto eval_status = expr.Evaluate(*activation, &arena); if (!eval_status.ok()) { return {}; } @@ -82,7 +86,7 @@ absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena bool matches(const Expression& expr, const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& headers) { Protobuf::Arena arena; - auto eval_status = Expr::evaluate(expr, &arena, info, &headers, nullptr, nullptr); + auto eval_status = Expr::evaluate(expr, arena, info, &headers, nullptr, nullptr); if (!eval_status.has_value()) { return false; } diff --git a/source/extensions/filters/common/expr/evaluator.h b/source/extensions/filters/common/expr/evaluator.h index 116239fde1a5..37fdf63ab1bf 100644 --- a/source/extensions/filters/common/expr/evaluator.h +++ b/source/extensions/filters/common/expr/evaluator.h @@ -25,7 +25,7 @@ using ExpressionPtr = std::unique_ptr; // Creates an activation providing the common context attributes. // The activation lazily creates wrappers during an evaluation using the evaluation arena. -ActivationPtr createActivation(const StreamInfo::StreamInfo& info, +ActivationPtr createActivation(Protobuf::Arena& arena, const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap* request_headers, const Http::ResponseHeaderMap* response_headers, const Http::ResponseTrailerMap* response_trailers); @@ -41,7 +41,7 @@ ExpressionPtr createExpression(Builder& builder, const google::api::expr::v1alph // Evaluates an expression for a request. The arena is used to hold intermediate computational // results and potentially the final value. 
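As a usage sketch of the pass-by-reference arena convention described above (it mirrors the matches() helper earlier in evaluator.cc; the expression, stream-info, and header variables are assumed to exist in the caller and are not part of this patch):

// Illustrative only: evaluate a precompiled CEL expression for one request.
// `expr` (Expr::Expression), `info` (StreamInfo::StreamInfo) and `headers`
// (Http::RequestHeaderMap) are assumed to be available in the caller.
Protobuf::Arena arena;
const absl::optional<CelValue> result =
    Expr::evaluate(expr, arena, info, &headers, nullptr, nullptr);
if (result.has_value() && result.value().IsBool()) {
  const bool matched = result.value().BoolOrDie();
  // ... act on `matched` ...
}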
-absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena, +absl::optional evaluate(const Expression& expr, Protobuf::Arena& arena, const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap* request_headers, const Http::ResponseHeaderMap* response_headers, diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 977560fefb20..fdd81e0081d8 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -14,6 +14,7 @@ envoy_cc_library( deps = [ "//include/envoy/http:codes_interface", "//include/envoy/stream_info:stream_info_interface", + "//source/common/http:headers_lib", "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], @@ -44,7 +45,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], ) @@ -74,6 +75,7 @@ envoy_cc_library( srcs = ["check_request_utils.cc"], hdrs = ["check_request_utils.h"], deps = [ + ":ext_authz_interface", "//include/envoy/grpc:async_client_interface", "//include/envoy/grpc:async_client_manager_interface", "//include/envoy/http:filter_interface", diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index 55847df1a946..4bd09a1ab5c9 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -21,6 +21,8 @@ #include "common/network/utility.h" #include "common/protobuf/protobuf.h" +#include "extensions/filters/common/ext_authz/ext_authz.h" + #include "absl/strings/str_cat.h" namespace Envoy { @@ -102,7 +104,7 @@ void CheckRequestUtils::setRequestTime(envoy::service::auth::v3::AttributeContex void CheckRequestUtils::setHttpRequest( envoy::service::auth::v3::AttributeContext::HttpRequest& httpreq, uint64_t stream_id, const StreamInfo::StreamInfo& stream_info, const Buffer::Instance* decoding_buffer, - const Envoy::Http::RequestHeaderMap& headers, uint64_t max_request_bytes) { + const Envoy::Http::RequestHeaderMap& headers, uint64_t max_request_bytes, bool pack_as_bytes) { httpreq.set_id(std::to_string(stream_id)); httpreq.set_method(getHeaderStr(headers.Method())); httpreq.set_path(getHeaderStr(headers.Path())); @@ -118,7 +120,7 @@ void CheckRequestUtils::setHttpRequest( auto* mutable_headers = httpreq.mutable_headers(); headers.iterate([mutable_headers](const Envoy::Http::HeaderEntry& e) { // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use. 
- if (e.key().getStringView() != Http::Headers::get().EnvoyAuthPartialBody.get()) { + if (e.key().getStringView() != Headers::get().EnvoyAuthPartialBody.get()) { (*mutable_headers)[std::string(e.key().getStringView())] = std::string(e.value().getStringView()); } @@ -130,10 +132,18 @@ void CheckRequestUtils::setHttpRequest( const uint64_t length = std::min(decoding_buffer->length(), max_request_bytes); std::string data(length, 0); decoding_buffer->copyOut(0, length, &data[0]); - httpreq.set_body(std::move(data)); + + // This pack_as_bytes flag allows us to switch the content type (bytes or string) of "body" to + // be sent to the external authorization server without doing string encoding check (in this + // case UTF-8 check). + if (pack_as_bytes) { + httpreq.set_raw_body(std::move(data)); + } else { + httpreq.set_body(std::move(data)); + } // Add in a header to detect when a partial body is used. - (*mutable_headers)[Http::Headers::get().EnvoyAuthPartialBody.get()] = + (*mutable_headers)[Headers::get().EnvoyAuthPartialBody.get()] = length != decoding_buffer->length() ? "true" : "false"; } } @@ -141,10 +151,10 @@ void CheckRequestUtils::setHttpRequest( void CheckRequestUtils::setAttrContextRequest( envoy::service::auth::v3::AttributeContext::Request& req, const uint64_t stream_id, const StreamInfo::StreamInfo& stream_info, const Buffer::Instance* decoding_buffer, - const Envoy::Http::RequestHeaderMap& headers, uint64_t max_request_bytes) { + const Envoy::Http::RequestHeaderMap& headers, uint64_t max_request_bytes, bool pack_as_bytes) { setRequestTime(req, stream_info); setHttpRequest(*req.mutable_http(), stream_id, stream_info, decoding_buffer, headers, - max_request_bytes); + max_request_bytes, pack_as_bytes); } void CheckRequestUtils::createHttpCheck( @@ -152,7 +162,7 @@ void CheckRequestUtils::createHttpCheck( const Envoy::Http::RequestHeaderMap& headers, Protobuf::Map&& context_extensions, envoy::config::core::v3::Metadata&& metadata_context, - envoy::service::auth::v3::CheckRequest& request, uint64_t max_request_bytes, + envoy::service::auth::v3::CheckRequest& request, uint64_t max_request_bytes, bool pack_as_bytes, bool include_peer_certificate) { auto attrs = request.mutable_attributes(); @@ -163,10 +173,10 @@ void CheckRequestUtils::createHttpCheck( auto* cb = const_cast(callbacks); setAttrContextPeer(*attrs->mutable_source(), *cb->connection(), service, false, include_peer_certificate); - setAttrContextPeer(*attrs->mutable_destination(), *cb->connection(), "", true, + setAttrContextPeer(*attrs->mutable_destination(), *cb->connection(), EMPTY_STRING, true, include_peer_certificate); setAttrContextRequest(*attrs->mutable_request(), cb->streamId(), cb->streamInfo(), - cb->decodingBuffer(), headers, max_request_bytes); + cb->decodingBuffer(), headers, max_request_bytes, pack_as_bytes); // Fill in the context extensions and metadata context. (*attrs->mutable_context_extensions()) = std::move(context_extensions); diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.h b/source/extensions/filters/common/ext_authz/check_request_utils.h index c8d165cd5200..f0272d674166 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.h +++ b/source/extensions/filters/common/ext_authz/check_request_utils.h @@ -46,6 +46,7 @@ class CheckRequestUtils { * check request. * @param request is the reference to the check request that will be filled up. * @param with_request_body when true, will add the request body to the check request. 
+ * @param pack_as_bytes when true, will set the check request body as bytes. * @param include_peer_certificate whether to include the peer certificate in the check request. */ static void createHttpCheck(const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, @@ -53,7 +54,8 @@ class CheckRequestUtils { Protobuf::Map&& context_extensions, envoy::config::core::v3::Metadata&& metadata_context, envoy::service::auth::v3::CheckRequest& request, - uint64_t max_request_bytes, bool include_peer_certificate); + uint64_t max_request_bytes, bool pack_as_bytes, + bool include_peer_certificate); /** * createTcpCheck is used to extract the attributes from the network layer and fill them up @@ -76,13 +78,13 @@ class CheckRequestUtils { const uint64_t stream_id, const StreamInfo::StreamInfo& stream_info, const Buffer::Instance* decoding_buffer, const Envoy::Http::RequestHeaderMap& headers, - uint64_t max_request_bytes); + uint64_t max_request_bytes, bool pack_as_bytes); static void setAttrContextRequest(envoy::service::auth::v3::AttributeContext::Request& req, const uint64_t stream_id, const StreamInfo::StreamInfo& stream_info, const Buffer::Instance* decoding_buffer, const Envoy::Http::RequestHeaderMap& headers, - uint64_t max_request_bytes); + uint64_t max_request_bytes, bool pack_as_bytes); static std::string getHeaderStr(const Envoy::Http::HeaderEntry* entry); static Envoy::Http::HeaderMap::Iterate fillHttpHeaders(const Envoy::Http::HeaderEntry&, void*); }; diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index ba34d2e8a9fc..babfa52d0e7c 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -6,11 +6,14 @@ #include #include "envoy/common/pure.h" +#include "envoy/event/dispatcher.h" #include "envoy/http/codes.h" #include "envoy/service/auth/v3/external_auth.pb.h" #include "envoy/stream_info/stream_info.h" #include "envoy/tracing/http_tracer.h" +#include "common/http/headers.h" +#include "common/runtime/runtime_features.h" #include "common/singleton/const_singleton.h" namespace Envoy { @@ -31,6 +34,21 @@ struct TracingConstantValues { using TracingConstants = ConstSingleton; +/** + * Constant auth related HTTP headers. All lower case. This group of headers can + * contain prefix override headers. + */ +class HeaderValues { +public: + const char* prefix() const { return ThreadSafeSingleton::get().prefix(); } + + const Http::LowerCaseString EnvoyAuthPartialBody{absl::StrCat(prefix(), "-auth-partial-body")}; + const Http::LowerCaseString EnvoyAuthHeadersToRemove{ + absl::StrCat(prefix(), "-auth-headers-to-remove")}; +}; + +using Headers = ConstSingleton; + /** * Possible async results for a check call. */ @@ -43,12 +61,27 @@ enum class CheckStatus { Denied }; +/** + * Possible error kind for Error status.. + */ +enum class ErrorKind { + // Other error. + Other, + // The request timed out. This will only be set if the timeout is measure when the check request + // was created. + Timedout, +}; + /** * Authorization response object for a RequestCallback. */ struct Response { // Call status. CheckStatus status; + + // In case status is Error, this will contain the kind of error that occurred. + ErrorKind error_kind{ErrorKind::Other}; + // A set of HTTP headers returned by the authorization server, that will be optionally appended // to the request to the upstream server. 
Http::HeaderVector headers_to_append; @@ -58,6 +91,9 @@ struct Response { // A set of HTTP headers returned by the authorization server, will be optionally added // (using "addCopy") to the request to the upstream server. Http::HeaderVector headers_to_add; + // A set of HTTP headers consumed by the authorization server, will be removed + // from the request to the upstream server. + std::vector headers_to_remove; // Optional http body used only on denied response. std::string body; // Optional http status used only on denied response. @@ -98,13 +134,23 @@ class Client { * passed request parameters to make a permit/deny decision. * @param callback supplies the completion callbacks. * NOTE: The callback may happen within the calling stack. + * @param dispatcher is the dispatcher of the current thread. * @param request is the proto message with the attributes of the specific payload. * @param parent_span source for generating an egress child span as part of the trace. * @param stream_info supplies the client's stream info. */ - virtual void check(RequestCallbacks& callback, + virtual void check(RequestCallbacks& callback, Event::Dispatcher& dispatcher, const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) PURE; + +protected: + /** + * @return should we start the request time out when the check request is created. + */ + static bool timeoutStartsAtCheckCreation() { + return Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.ext_authz_measure_timeout_on_check_created"); + } }; using ClientPtr = std::unique_ptr; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index 2f720ca26f8a..cfb417d6c927 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -1,7 +1,6 @@ #include "extensions/filters/common/ext_authz/ext_authz_grpc_impl.h" #include "envoy/config/core/v3/base.pb.h" -#include "envoy/service/auth/v2alpha/external_auth.pb.h" #include "envoy/service/auth/v3/external_auth.pb.h" #include "common/common/assert.h" @@ -17,15 +16,13 @@ namespace Filters { namespace Common { namespace ExtAuthz { -GrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, +GrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientSharedPtr async_client, const absl::optional& timeout, - envoy::config::core::v3::ApiVersion transport_api_version, - bool use_alpha) - : async_client_(std::move(async_client)), timeout_(timeout), + envoy::config::core::v3::ApiVersion transport_api_version) + : async_client_(async_client), timeout_(timeout), service_method_(Grpc::VersionedMethods("envoy.service.auth.v3.Authorization.Check", - "envoy.service.auth.v2.Authorization.Check", - "envoy.service.auth.v2alpha.Authorization.Check") - .getMethodDescriptorForVersion(transport_api_version, use_alpha)), + "envoy.service.auth.v2.Authorization.Check") + .getMethodDescriptorForVersion(transport_api_version)), transport_api_version_(transport_api_version) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -34,17 +31,33 @@ void GrpcClientImpl::cancel() { ASSERT(callbacks_ != nullptr); request_->cancel(); callbacks_ = nullptr; + timeout_timer_.reset(); } -void GrpcClientImpl::check(RequestCallbacks& callbacks, +void GrpcClientImpl::check(RequestCallbacks& callbacks, Event::Dispatcher& dispatcher, const envoy::service::auth::v3::CheckRequest& request, - 
Tracing::Span& parent_span, const StreamInfo::StreamInfo&) { + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) { ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; + Http::AsyncClient::RequestOptions options; + if (timeout_.has_value()) { + if (timeoutStartsAtCheckCreation()) { + // TODO(yuval-k): We currently use dispatcher based timeout even if the underlying client is + // Google gRPC client, which has its own timeout mechanism. We may want to change that in the + // future if the implementations converge. + timeout_timer_ = dispatcher.createTimer([this]() -> void { onTimeout(); }); + timeout_timer_->enableTimer(timeout_.value()); + } else { + // not starting timer on check creation, set the timeout on the request. + options.setTimeout(timeout_); + } + } + + options.setParentContext(Http::AsyncClient::ParentContext{&stream_info}); + ENVOY_LOG(trace, "Sending CheckRequest: {}", request.DebugString()); - request_ = async_client_->send(service_method_, request, *this, parent_span, - Http::AsyncClient::RequestOptions().setTimeout(timeout_), + request_ = async_client_->send(service_method_, request, *this, parent_span, options, transport_api_version_); } @@ -57,6 +70,11 @@ void GrpcClientImpl::onSuccess(std::unique_ptrstatus = CheckStatus::OK; if (response->has_ok_response()) { toAuthzResponseHeader(authz_response, response->ok_response().headers()); + if (response->ok_response().headers_to_remove_size() > 0) { + for (const auto& header : response->ok_response().headers_to_remove()) { + authz_response->headers_to_remove.push_back(Http::LowerCaseString(header)); + } + } } } else { span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz); @@ -81,6 +99,7 @@ void GrpcClientImpl::onSuccess(std::unique_ptronComplete(std::move(authz_response)); callbacks_ = nullptr; + timeout_timer_.reset(); } void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string&, @@ -88,9 +107,23 @@ void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::strin ENVOY_LOG(trace, "CheckRequest call failed with status: {}", Grpc::Utility::grpcStatusToString(status)); ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok); + timeout_timer_.reset(); + respondFailure(ErrorKind::Other); +} + +void GrpcClientImpl::onTimeout() { + ENVOY_LOG(trace, "CheckRequest timed-out"); + ASSERT(request_ != nullptr); + request_->cancel(); + // let the client know of failure: + respondFailure(ErrorKind::Timedout); +} + +void GrpcClientImpl::respondFailure(ErrorKind kind) { Response response{}; response.status = CheckStatus::Error; response.status_code = Http::Code::Forbidden; + response.error_kind = kind; callbacks_->onComplete(std::make_unique(response)); callbacks_ = nullptr; } @@ -109,6 +142,23 @@ void GrpcClientImpl::toAuthzResponseHeader( } } +const Grpc::RawAsyncClientSharedPtr AsyncClientCache::getOrCreateAsyncClient( + const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& proto_config) { + // The cache stores Google gRPC client, so channel is not created for each request. 
+ ASSERT(proto_config.has_grpc_service() && proto_config.grpc_service().has_google_grpc()); + auto& cache = tls_slot_->getTyped(); + const std::size_t cache_key = MessageUtil::hash(proto_config.grpc_service().google_grpc()); + const auto it = cache.async_clients_.find(cache_key); + if (it != cache.async_clients_.end()) { + return it->second; + } + const Grpc::AsyncClientFactoryPtr factory = + async_client_manager_.factoryForGrpcService(proto_config.grpc_service(), scope_, true); + const Grpc::RawAsyncClientSharedPtr async_client = factory->create(); + cache.async_clients_.emplace(cache_key, async_client); + return async_client; +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index da1ed1d2ebf1..792f7bb748cc 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -7,6 +7,7 @@ #include #include "envoy/config/core/v3/base.pb.h" +#include "envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h" #include "envoy/grpc/async_client.h" #include "envoy/grpc/async_client_manager.h" #include "envoy/http/filter.h" @@ -43,16 +44,16 @@ class GrpcClientImpl : public Client, public ExtAuthzAsyncCallbacks, public Logger::Loggable { public: - // TODO(gsagula): remove `use_alpha` param when V2Alpha gets deprecated. - GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, + GrpcClientImpl(Grpc::RawAsyncClientSharedPtr async_client, const absl::optional& timeout, - envoy::config::core::v3::ApiVersion transport_api_version, bool use_alpha); + envoy::config::core::v3::ApiVersion transport_api_version); ~GrpcClientImpl() override; // ExtAuthz::Client void cancel() override; - void check(RequestCallbacks& callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) override; + void check(RequestCallbacks& callbacks, Event::Dispatcher& dispatcher, + const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span, + const StreamInfo::StreamInfo& stream_info) override; // Grpc::AsyncRequestCallbacks void onCreateInitialMetadata(Http::RequestHeaderMap&) override {} @@ -62,9 +63,12 @@ class GrpcClientImpl : public Client, Tracing::Span& span) override; private: + void onTimeout(); + void respondFailure(Filters::Common::ExtAuthz::ErrorKind kind); void toAuthzResponseHeader( ResponsePtr& response, const Protobuf::RepeatedPtrField& headers); + Grpc::AsyncClient async_client_; Grpc::AsyncRequest* request_{}; @@ -72,10 +76,44 @@ class GrpcClientImpl : public Client, RequestCallbacks* callbacks_{}; const Protobuf::MethodDescriptor& service_method_; const envoy::config::core::v3::ApiVersion transport_api_version_; + Event::TimerPtr timeout_timer_; }; using GrpcClientImplPtr = std::unique_ptr; +// The client cache for RawAsyncClient for Google grpc so channel is not created for each request. +// TODO(fpliu233): The cache will cause resource leak that a new channel is created every time a new +// config is pushed. Improve gRPC channel cache with better solution. 
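A minimal usage sketch for the cache class declared just below; the config, timeout, and API-version values are assumed to come from the filter factory context and are not shown in this patch:

// Illustrative only: look up (or lazily create) the shared Google gRPC raw client for this
// configuration and hand it to a GrpcClientImpl. `cache` is an AsyncClientCacheSharedPtr;
// `proto_config`, `timeout` and `transport_api_version` are caller-supplied (assumed).
Grpc::RawAsyncClientSharedPtr shared_client = cache->getOrCreateAsyncClient(proto_config);
auto grpc_client =
    std::make_unique<GrpcClientImpl>(shared_client, timeout, transport_api_version);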
+class AsyncClientCache : public Singleton::Instance { +public: + AsyncClientCache(Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope, + ThreadLocal::SlotAllocator& tls) + : async_client_manager_(async_client_manager), scope_(scope), tls_slot_(tls.allocateSlot()) { + tls_slot_->set([](Event::Dispatcher&) { return std::make_shared(); }); + } + + const Grpc::RawAsyncClientSharedPtr getOrCreateAsyncClient( + const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& proto_config); + +private: + /** + * Per-thread cache. + */ + struct ThreadLocalCache : public ThreadLocal::ThreadLocalObject { + ThreadLocalCache() = default; + // The client cache stored with key as hash of + // envoy::config::core::v3::GrpcService::GoogleGrpc config. + // TODO(fpliu233): Remove when the cleaner and generic solution for gRPC is live. + absl::flat_hash_map async_clients_; + }; + + Grpc::AsyncClientManager& async_client_manager_; + Stats::Scope& scope_; + ThreadLocal::SlotPtr tls_slot_; +}; + +using AsyncClientCacheSharedPtr = std::shared_ptr; + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index fe7207bdfbd3..e2e27ab6894d 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -31,10 +31,15 @@ const Http::HeaderMap& lengthZeroHeader() { // Static response used for creating authorization ERROR responses. const Response& errorResponse() { - CONSTRUCT_ON_FIRST_USE(Response, - Response{CheckStatus::Error, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, EMPTY_STRING, Http::Code::Forbidden, - ProtobufWkt::Struct{}}); + CONSTRUCT_ON_FIRST_USE(Response, Response{CheckStatus::Error, + ErrorKind::Other, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + {{}}, + EMPTY_STRING, + Http::Code::Forbidden, + ProtobufWkt::Struct{}}); } // SuccessResponse used for creating either DENIED or OK authorization responses. 
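Callers can distinguish a timed-out check from other failures via the error_kind field carried on the error responses built above; a hypothetical sketch of such a caller, not the filter's actual handling:

// Illustrative only: branching on ErrorKind when a check completes with CheckStatus::Error.
void onAuthzComplete(ResponsePtr&& response) {
  if (response->status == CheckStatus::Error &&
      response->error_kind == ErrorKind::Timedout) {
    // e.g. record a dedicated timeout stat before applying the configured failure mode.
  }
}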
@@ -70,40 +75,11 @@ struct SuccessResponse { ResponsePtr response_; }; -envoy::type::matcher::v3::StringMatcher -ignoreCaseStringMatcher(const envoy::type::matcher::v3::StringMatcher& matcher) { - const auto& match_pattern_case = matcher.match_pattern_case(); - if (match_pattern_case == envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex || - match_pattern_case == - envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex) { - return matcher; - } - - envoy::type::matcher::v3::StringMatcher ignore_case; - ignore_case.set_ignore_case(true); - switch (matcher.match_pattern_case()) { - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact: - ignore_case.set_exact(matcher.exact()); - break; - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kPrefix: - ignore_case.set_prefix(matcher.prefix()); - break; - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSuffix: - ignore_case.set_suffix(matcher.suffix()); - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - return ignore_case; -} - std::vector -createStringMatchers(const envoy::type::matcher::v3::ListStringMatcher& list, - const bool disable_lowercase_string_matcher) { +createStringMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { std::vector matchers; for (const auto& matcher : list.patterns()) { - matchers.push_back(std::make_unique( - disable_lowercase_string_matcher ? matcher : ignoreCaseStringMatcher(matcher))); + matchers.push_back(std::make_unique(matcher)); } return matchers; } @@ -127,20 +103,14 @@ bool NotHeaderKeyMatcher::matches(absl::string_view key) const { return !matcher // Config ClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& config, uint32_t timeout, absl::string_view path_prefix) - : enable_case_sensitive_string_matcher_(Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher")), - request_header_matchers_( - toRequestMatchers(config.http_service().authorization_request().allowed_headers(), - enable_case_sensitive_string_matcher_)), - client_header_matchers_( - toClientMatchers(config.http_service().authorization_response().allowed_client_headers(), - enable_case_sensitive_string_matcher_)), + : request_header_matchers_( + toRequestMatchers(config.http_service().authorization_request().allowed_headers())), + client_header_matchers_(toClientMatchers( + config.http_service().authorization_response().allowed_client_headers())), upstream_header_matchers_(toUpstreamMatchers( - config.http_service().authorization_response().allowed_upstream_headers(), - enable_case_sensitive_string_matcher_)), + config.http_service().authorization_response().allowed_upstream_headers())), upstream_header_to_append_matchers_(toUpstreamMatchers( - config.http_service().authorization_response().allowed_upstream_headers_to_append(), - enable_case_sensitive_string_matcher_)), + config.http_service().authorization_response().allowed_upstream_headers_to_append())), cluster_name_(config.http_service().server_uri().cluster()), timeout_(timeout), path_prefix_(path_prefix), tracing_name_(fmt::format("async {} egress", config.http_service().server_uri().cluster())), @@ -148,14 +118,12 @@ ClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3 config.http_service().authorization_request().headers_to_add(), false)) {} MatcherSharedPtr -ClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& 
list, - const bool disable_lowercase_string_matcher) { +ClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { const std::vector keys{ {Http::CustomHeaders::get().Authorization, Http::Headers::get().Method, Http::Headers::get().Path, Http::Headers::get().Host}}; - std::vector matchers( - createStringMatchers(list, disable_lowercase_string_matcher)); + std::vector matchers(createStringMatchers(list)); for (const auto& key : keys) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact(key.get()); @@ -166,10 +134,8 @@ ClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatche } MatcherSharedPtr -ClientConfig::toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& list, - const bool disable_lowercase_string_matcher) { - std::vector matchers( - createStringMatchers(list, disable_lowercase_string_matcher)); +ClientConfig::toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { + std::vector matchers(createStringMatchers(list)); // If list is empty, all authorization response headers, except Host, should be added to // the client response. @@ -197,10 +163,8 @@ ClientConfig::toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher } MatcherSharedPtr -ClientConfig::toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& list, - const bool disable_lowercase_string_matcher) { - return std::make_unique( - createStringMatchers(list, disable_lowercase_string_matcher)); +ClientConfig::toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { + return std::make_unique(createStringMatchers(list)); } RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config) @@ -212,10 +176,11 @@ void RawHttpClientImpl::cancel() { ASSERT(callbacks_ != nullptr); request_->cancel(); callbacks_ = nullptr; + timeout_timer_.reset(); } // Client -void RawHttpClientImpl::check(RequestCallbacks& callbacks, +void RawHttpClientImpl::check(RequestCallbacks& callbacks, Event::Dispatcher& dispatcher, const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) { @@ -252,8 +217,7 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, Http::RequestMessagePtr message = std::make_unique(std::move(headers)); if (request_length > 0) { - message->body() = - std::make_unique(request.attributes().request().http().body()); + message->body().add(request.attributes().request().http().body()); } const std::string& cluster = config_->cluster(); @@ -267,16 +231,23 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, callbacks_ = nullptr; } else { auto options = Http::AsyncClient::RequestOptions() - .setTimeout(config_->timeout()) .setParentSpan(parent_span) .setChildSpanName(config_->tracingName()); + if (timeoutStartsAtCheckCreation()) { + timeout_timer_ = dispatcher.createTimer([this]() -> void { onTimeout(); }); + timeout_timer_->enableTimer(config_->timeout()); + } else { + options.setTimeout(config_->timeout()); + } + request_ = cm_.httpAsyncClientForCluster(cluster).send(std::move(message), *this, options); } } void RawHttpClientImpl::onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) { + timeout_timer_.reset(); callbacks_->onComplete(toResponse(std::move(message))); callbacks_ = nullptr; } @@ -284,6 +255,7 @@ void RawHttpClientImpl::onSuccess(const Http::AsyncClient::Request&, void RawHttpClientImpl::onFailure(const Http::AsyncClient::Request&, 
Http::AsyncClient::FailureReason reason) { ASSERT(reason == Http::AsyncClient::FailureReason::Reset); + timeout_timer_.reset(); callbacks_->onComplete(std::make_unique(errorResponse())); callbacks_ = nullptr; } @@ -300,6 +272,18 @@ void RawHttpClientImpl::onBeforeFinalizeUpstreamSpan( } } +void RawHttpClientImpl::onTimeout() { + ENVOY_LOG(trace, "CheckRequest timed-out"); + ASSERT(request_ != nullptr); + request_->cancel(); + // let the client know of failure: + ASSERT(callbacks_ != nullptr); + Response response = errorResponse(); + response.error_kind = ErrorKind::Timedout; + callbacks_->onComplete(std::make_unique(response)); + callbacks_ = nullptr; +} + ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { const uint64_t status_code = Http::Utility::getResponseStatus(message->headers()); @@ -310,12 +294,38 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { return std::make_unique(errorResponse()); } + // Extract headers-to-remove from the storage header coming from the + // authorization server. + const auto& storage_header_name = Headers::get().EnvoyAuthHeadersToRemove; + // If we are going to construct an Ok response we need to save the + // headers_to_remove in a variable first. + std::vector headers_to_remove; + if (status_code == enumToInt(Http::Code::OK)) { + const auto& get_result = message->headers().get(storage_header_name); + for (size_t i = 0; i < get_result.size(); ++i) { + const Http::HeaderEntry* entry = get_result[i]; + if (entry != nullptr) { + absl::string_view storage_header_value = entry->value().getStringView(); + std::vector header_names = StringUtil::splitToken( + storage_header_value, ",", /*keep_empty_string=*/false, /*trim_whitespace=*/true); + headers_to_remove.reserve(headers_to_remove.size() + header_names.size()); + for (const auto& header_name : header_names) { + headers_to_remove.push_back(Http::LowerCaseString(std::string(header_name))); + } + } + } + } + // Now remove the storage header from the authz server response headers before + // we reuse them to construct an Ok/Denied authorization response below. + message->headers().remove(storage_header_name); + // Create an Ok authorization response. if (status_code == enumToInt(Http::Code::OK)) { SuccessResponse ok{message->headers(), config_->upstreamHeaderMatchers(), config_->upstreamHeaderToAppendMatchers(), - Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, EMPTY_STRING, Http::Code::OK, + Response{CheckStatus::OK, ErrorKind::Other, Http::HeaderVector{}, + Http::HeaderVector{}, Http::HeaderVector{}, + std::move(headers_to_remove), EMPTY_STRING, Http::Code::OK, ProtobufWkt::Struct{}}}; return std::move(ok.response_); } @@ -323,9 +333,15 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { // Create a Denied authorization response. 
SuccessResponse denied{message->headers(), config_->clientHeaderMatchers(), config_->upstreamHeaderToAppendMatchers(), - Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, message->bodyAsString(), - static_cast(status_code), ProtobufWkt::Struct{}}}; + Response{CheckStatus::Denied, + ErrorKind::Other, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + {{}}, + message->bodyAsString(), + static_cast(status_code), + ProtobufWkt::Struct{}}}; return std::move(denied.response_); } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index 8f5abd684379..218a01f6c179 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -119,16 +119,11 @@ class ClientConfig { private: static MatcherSharedPtr - toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher, - bool enable_case_sensitive_string_matcher); + toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); + static MatcherSharedPtr toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); static MatcherSharedPtr - toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher, - bool enable_case_sensitive_string_matcher); - static MatcherSharedPtr - toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher, - bool enable_case_sensitive_string_matcher); + toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); - const bool enable_case_sensitive_string_matcher_; const MatcherSharedPtr request_header_matchers_; const MatcherSharedPtr client_header_matchers_; const MatcherSharedPtr upstream_header_matchers_; @@ -159,8 +154,9 @@ class RawHttpClientImpl : public Client, // ExtAuthz::Client void cancel() override; - void check(RequestCallbacks& callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) override; + void check(RequestCallbacks& callbacks, Event::Dispatcher& dispatcher, + const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span, + const StreamInfo::StreamInfo& stream_info) override; // Http::AsyncClient::Callbacks void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) override; @@ -170,12 +166,14 @@ class RawHttpClientImpl : public Client, const Http::ResponseHeaderMap* response_headers) override; private: + void onTimeout(); ResponsePtr toResponse(Http::ResponseMessagePtr message); Upstream::ClusterManager& cm_; ClientConfigSharedPtr config_; Http::AsyncClient::Request* request_{}; RequestCallbacks* callbacks_{}; + Event::TimerPtr timeout_timer_; }; } // namespace ExtAuthz diff --git a/source/extensions/filters/common/fault/fault_config.cc b/source/extensions/filters/common/fault/fault_config.cc index 4053cc0b9006..db7f6028152c 100644 --- a/source/extensions/filters/common/fault/fault_config.cc +++ b/source/extensions/filters/common/fault/fault_config.cc @@ -18,12 +18,14 @@ HeaderPercentageProvider::percentage(const Http::RequestHeaderMap* request_heade return percentage_; } const auto header = request_headers->get(header_name_); - if (header == nullptr) { + if (header.empty()) { return percentage_; } uint32_t header_numerator; - if (!absl::SimpleAtoi(header->value().getStringView(), &header_numerator)) { + // This is an implicitly untrusted 
header, so per the API documentation only the first + // value is used. + if (!absl::SimpleAtoi(header[0]->value().getStringView(), &header_numerator)) { return percentage_; } @@ -58,12 +60,14 @@ absl::optional FaultAbortConfig::HeaderAbortProvider::httpStatusCode const Http::RequestHeaderMap* request_headers) const { absl::optional ret = absl::nullopt; auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortRequest); - if (header == nullptr) { + if (header.empty()) { return ret; } uint64_t code; - if (!absl::SimpleAtoi(header->value().getStringView(), &code)) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + if (!absl::SimpleAtoi(header[0]->value().getStringView(), &code)) { return ret; } @@ -77,12 +81,14 @@ absl::optional FaultAbortConfig::HeaderAbortProvider::httpStatusCode absl::optional FaultAbortConfig::HeaderAbortProvider::grpcStatusCode( const Http::RequestHeaderMap* request_headers) const { auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortGrpcRequest); - if (header == nullptr) { + if (header.empty()) { return absl::nullopt; } uint64_t code; - if (!absl::SimpleAtoi(header->value().getStringView(), &code)) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + if (!absl::SimpleAtoi(header[0]->value().getStringView(), &code)) { return absl::nullopt; } @@ -111,12 +117,14 @@ FaultDelayConfig::FaultDelayConfig( absl::optional FaultDelayConfig::HeaderDelayProvider::duration( const Http::RequestHeaderMap* request_headers) const { const auto header = request_headers->get(HeaderNames::get().DelayRequest); - if (header == nullptr) { + if (header.empty()) { return absl::nullopt; } uint64_t value; - if (!absl::SimpleAtoi(header->value().getStringView(), &value)) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + if (!absl::SimpleAtoi(header[0]->value().getStringView(), &value)) { return absl::nullopt; } @@ -142,12 +150,14 @@ FaultRateLimitConfig::FaultRateLimitConfig( absl::optional FaultRateLimitConfig::HeaderRateLimitProvider::rateKbps( const Http::RequestHeaderMap* request_headers) const { const auto header = request_headers->get(HeaderNames::get().ThroughputResponse); - if (header == nullptr) { + if (header.empty()) { return absl::nullopt; } uint64_t value; - if (!absl::SimpleAtoi(header->value().getStringView(), &value)) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. 
+ if (!absl::SimpleAtoi(header[0]->value().getStringView(), &value)) { return absl::nullopt; } diff --git a/source/extensions/filters/common/local_ratelimit/BUILD b/source/extensions/filters/common/local_ratelimit/BUILD new file mode 100644 index 000000000000..1a201025ca3f --- /dev/null +++ b/source/extensions/filters/common/local_ratelimit/BUILD @@ -0,0 +1,20 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "local_ratelimit_lib", + srcs = ["local_ratelimit_impl.cc"], + hdrs = ["local_ratelimit_impl.h"], + deps = [ + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:timer_interface", + "//source/common/common:thread_synchronizer_lib", + ], +) diff --git a/source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.cc b/source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.cc new file mode 100644 index 000000000000..2adee384673e --- /dev/null +++ b/source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.cc @@ -0,0 +1,78 @@ +#include "extensions/filters/common/local_ratelimit/local_ratelimit_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace LocalRateLimit { + +LocalRateLimiterImpl::LocalRateLimiterImpl(const std::chrono::milliseconds fill_interval, + const uint32_t max_tokens, + const uint32_t tokens_per_fill, + Event::Dispatcher& dispatcher) + : fill_interval_(fill_interval), max_tokens_(max_tokens), tokens_per_fill_(tokens_per_fill), + fill_timer_(fill_interval_ > std::chrono::milliseconds(0) + ? dispatcher.createTimer([this] { onFillTimer(); }) + : nullptr) { + if (fill_timer_ && fill_interval_ < std::chrono::milliseconds(50)) { + throw EnvoyException("local rate limit token bucket fill timer must be >= 50ms"); + } + + tokens_ = max_tokens; + + if (fill_timer_) { + fill_timer_->enableTimer(fill_interval_); + } +} + +LocalRateLimiterImpl::~LocalRateLimiterImpl() { + if (fill_timer_ != nullptr) { + fill_timer_->disableTimer(); + } +} + +void LocalRateLimiterImpl::onFillTimer() { + // Relaxed consistency is used for all operations because we don't care about ordering, just the + // final atomic correctness. + uint32_t expected_tokens = tokens_.load(std::memory_order_relaxed); + uint32_t new_tokens_value; + do { + // expected_tokens is either initialized above or reloaded during the CAS failure below. + new_tokens_value = std::min(max_tokens_, expected_tokens + tokens_per_fill_); + + // Testing hook. + synchronizer_.syncPoint("on_fill_timer_pre_cas"); + + // Loop while the weak CAS fails trying to update the tokens value. + } while ( + !tokens_.compare_exchange_weak(expected_tokens, new_tokens_value, std::memory_order_relaxed)); + + fill_timer_->enableTimer(fill_interval_); +} + +bool LocalRateLimiterImpl::requestAllowed() const { + // Relaxed consistency is used for all operations because we don't care about ordering, just the + // final atomic correctness. + uint32_t expected_tokens = tokens_.load(std::memory_order_relaxed); + do { + // expected_tokens is either initialized above or reloaded during the CAS failure below. + if (expected_tokens == 0) { + return false; + } + + // Testing hook. + synchronizer_.syncPoint("allowed_pre_cas"); + + // Loop while the weak CAS fails trying to subtract 1 from expected. 
+ } while (!tokens_.compare_exchange_weak(expected_tokens, expected_tokens - 1, + std::memory_order_relaxed)); + + // We successfully decremented the counter by 1. + return true; +} + +} // namespace LocalRateLimit +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.h b/source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.h new file mode 100644 index 000000000000..2e35dc5b0ef4 --- /dev/null +++ b/source/extensions/filters/common/local_ratelimit/local_ratelimit_impl.h @@ -0,0 +1,41 @@ +#pragma once + +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" + +#include "common/common/thread_synchronizer.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace LocalRateLimit { + +class LocalRateLimiterImpl { +public: + LocalRateLimiterImpl(const std::chrono::milliseconds fill_interval, const uint32_t max_tokens, + const uint32_t tokens_per_fill, Event::Dispatcher& dispatcher); + ~LocalRateLimiterImpl(); + + bool requestAllowed() const; + +private: + void onFillTimer(); + + const std::chrono::milliseconds fill_interval_; + const uint32_t max_tokens_; + const uint32_t tokens_per_fill_; + const Event::TimerPtr fill_timer_; + mutable std::atomic tokens_; + mutable Thread::ThreadSynchronizer synchronizer_; // Used for testing only. + + friend class LocalRateLimiterImplTest; +}; + +} // namespace LocalRateLimit +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/lua/wrappers.cc b/source/extensions/filters/common/lua/wrappers.cc index 02e4db6ca2a8..8c26d9e10bec 100644 --- a/source/extensions/filters/common/lua/wrappers.cc +++ b/source/extensions/filters/common/lua/wrappers.cc @@ -65,28 +65,30 @@ int BufferWrapper::luaGetBytes(lua_State* state) { return 1; } +int BufferWrapper::luaSetBytes(lua_State* state) { + data_.drain(data_.length()); + absl::string_view bytes = luaL_checkstring(state, 2); + data_.add(bytes); + lua_pushnumber(state, data_.length()); + return 1; +} + void MetadataMapHelper::setValue(lua_State* state, const ProtobufWkt::Value& value) { ProtobufWkt::Value::KindCase kind = value.kind_case(); switch (kind) { case ProtobufWkt::Value::kNullValue: return lua_pushnil(state); - case ProtobufWkt::Value::kNumberValue: return lua_pushnumber(state, value.number_value()); - case ProtobufWkt::Value::kBoolValue: return lua_pushboolean(state, value.bool_value()); - + case ProtobufWkt::Value::kStructValue: + return createTable(state, value.struct_value().fields()); case ProtobufWkt::Value::kStringValue: { const auto& string_value = value.string_value(); return lua_pushstring(state, string_value.c_str()); } - - case ProtobufWkt::Value::kStructValue: { - return createTable(state, value.struct_value().fields()); - } - case ProtobufWkt::Value::kListValue: { const auto& list = value.list_value(); const int values_size = list.values_size(); diff --git a/source/extensions/filters/common/lua/wrappers.h b/source/extensions/filters/common/lua/wrappers.h index 09ea9b44467a..2b0f39970c74 100644 --- a/source/extensions/filters/common/lua/wrappers.h +++ b/source/extensions/filters/common/lua/wrappers.h @@ -13,14 +13,16 @@ namespace Common { namespace Lua { /** - * A wrapper for a constant buffer which cannot be modified by Lua. + * A wrapper for a buffer. 
*/ class BufferWrapper : public BaseLuaObject { public: - BufferWrapper(const Buffer::Instance& data) : data_(data) {} + BufferWrapper(Buffer::Instance& data) : data_(data) {} static ExportedFunctions exportedFunctions() { - return {{"length", static_luaLength}, {"getBytes", static_luaGetBytes}}; + return {{"length", static_luaLength}, + {"getBytes", static_luaGetBytes}, + {"setBytes", static_luaSetBytes}}; } private: @@ -37,7 +39,14 @@ class BufferWrapper : public BaseLuaObject { */ DECLARE_LUA_FUNCTION(BufferWrapper, luaGetBytes); - const Buffer::Instance& data_; + /** + * Set the wrapped data with the input string. + * @param 1 (string) input string. + * @return int the length of the input string. + */ + DECLARE_LUA_FUNCTION(BufferWrapper, luaSetBytes); + + Buffer::Instance& data_; }; class MetadataMapWrapper; diff --git a/source/extensions/filters/common/ratelimit/ratelimit.h b/source/extensions/filters/common/ratelimit/ratelimit.h index 4ad48e7a87ab..068cd369b643 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit.h +++ b/source/extensions/filters/common/ratelimit/ratelimit.h @@ -9,6 +9,7 @@ #include "envoy/ratelimit/ratelimit.h" #include "envoy/service/ratelimit/v3/rls.pb.h" #include "envoy/singleton/manager.h" +#include "envoy/stream_info/stream_info.h" #include "envoy/tracing/http_tracer.h" #include "absl/types/optional.h" @@ -77,7 +78,7 @@ class Client { */ virtual void limit(RequestCallbacks& callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span) PURE; + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) PURE; }; using ClientPtr = std::unique_ptr; diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 5a93471af903..d4c3f5afdaa3 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -61,16 +61,18 @@ void GrpcClientImpl::createRequest(envoy::service::ratelimit::v3::RateLimitReque void GrpcClientImpl::limit(RequestCallbacks& callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span) { + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) { ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; envoy::service::ratelimit::v3::RateLimitRequest request; createRequest(request, domain, descriptors); - request_ = async_client_->send(service_method_, request, *this, parent_span, - Http::AsyncClient::RequestOptions().setTimeout(timeout_), - transport_api_version_); + request_ = + async_client_->send(service_method_, request, *this, parent_span, + Http::AsyncClient::RequestOptions().setTimeout(timeout_).setParentContext( + Http::AsyncClient::ParentContext{&stream_info}), + transport_api_version_); } void GrpcClientImpl::onSuccess( diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.h b/source/extensions/filters/common/ratelimit/ratelimit_impl.h index 4108ec2b45c0..4386102a21ca 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.h +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.h @@ -58,7 +58,7 @@ class GrpcClientImpl : public Client, void cancel() override; void limit(RequestCallbacks& callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span) override; + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) override; // Grpc::AsyncRequestCallbacks void 
onCreateInitialMetadata(Http::RequestHeaderMap&) override {} diff --git a/source/extensions/filters/common/rbac/utility.cc b/source/extensions/filters/common/rbac/utility.cc index 0b96bd785947..0895e0156e13 100644 --- a/source/extensions/filters/common/rbac/utility.cc +++ b/source/extensions/filters/common/rbac/utility.cc @@ -1,5 +1,9 @@ #include "extensions/filters/common/rbac/utility.h" +#include + +#include "absl/strings/str_replace.h" + namespace Envoy { namespace Extensions { namespace Filters { @@ -11,6 +15,15 @@ RoleBasedAccessControlFilterStats generateStats(const std::string& prefix, Stats return {ALL_RBAC_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; } +std::string responseDetail(const std::string& policy_id) { + // Replace whitespaces in policy_id with '_' to avoid breaking the access log (inconsistent number + // of segments between log entries when the separator is whitespace). + const absl::flat_hash_map replacement{ + {" ", "_"}, {"\t", "_"}, {"\f", "_"}, {"\v", "_"}, {"\n", "_"}, {"\r", "_"}}; + std::string sanitized = absl::StrReplaceAll(policy_id, replacement); + return fmt::format("rbac_access_denied_matched_policy[{}]", sanitized); +} + } // namespace RBAC } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/rbac/utility.h b/source/extensions/filters/common/rbac/utility.h index 04635eb37411..e690b3a297ec 100644 --- a/source/extensions/filters/common/rbac/utility.h +++ b/source/extensions/filters/common/rbac/utility.h @@ -2,6 +2,7 @@ #include "envoy/stats/stats_macros.h" +#include "common/common/fmt.h" #include "common/singleton/const_singleton.h" #include "extensions/filters/common/rbac/engine_impl.h" @@ -44,6 +45,8 @@ std::unique_ptr createShadowEngine(const Confi : nullptr; } +std::string responseDetail(const std::string& policy_id); + } // namespace RBAC } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc index b2478d040896..1b2d7400f771 100644 --- a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc +++ b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc @@ -38,8 +38,8 @@ Http::FilterHeadersStatus AdaptiveConcurrencyFilter::decodeHeaders(Http::Request } if (controller_->forwardingDecision() == Controller::RequestForwardingAction::Block) { - decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "", nullptr, absl::nullopt, - "reached concurrency limit"); + decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "reached concurrency limit", + nullptr, absl::nullopt, "reached_concurrency_limit"); return Http::FilterHeadersStatus::StopIteration; } diff --git a/source/extensions/filters/http/adaptive_concurrency/config.cc b/source/extensions/filters/http/adaptive_concurrency/config.cc index 63a3d2d7f369..45783f034de1 100644 --- a/source/extensions/filters/http/adaptive_concurrency/config.cc +++ b/source/extensions/filters/http/adaptive_concurrency/config.cc @@ -26,7 +26,7 @@ Http::FilterFactoryCb AdaptiveConcurrencyFilterFactory::createFilterFactoryFromP Controller::GradientControllerConfig(config.gradient_controller_config(), context.runtime()); controller = std::make_shared( std::move(gradient_controller_config), context.dispatcher(), context.runtime(), - acc_stats_prefix + "gradient_controller.", context.scope(), context.random(), + acc_stats_prefix + 
"gradient_controller.", context.scope(), context.api().randomGenerator(), context.timeSource()); AdaptiveConcurrencyFilterConfigSharedPtr filter_config( diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc index fe880fefc47b..0875429bd2df 100644 --- a/source/extensions/filters/http/admission_control/admission_control.cc +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -29,7 +29,8 @@ namespace AdmissionControl { using GrpcStatus = Grpc::Status::GrpcStatus; -static constexpr double defaultAggression = 2.0; +static constexpr double defaultAggression = 1.0; +static constexpr double defaultSuccessRateThreshold = 95.0; AdmissionControlFilterConfig::AdmissionControlFilterConfig( const AdmissionControlProto& proto_config, Runtime::Loader& runtime, @@ -37,23 +38,29 @@ AdmissionControlFilterConfig::AdmissionControlFilterConfig( std::shared_ptr response_evaluator) : random_(random), scope_(scope), tls_(std::move(tls)), admission_control_feature_(proto_config.enabled(), runtime), - aggression_( - proto_config.has_aggression_coefficient() - ? std::make_unique(proto_config.aggression_coefficient(), runtime) - : nullptr), + aggression_(proto_config.has_aggression() + ? std::make_unique(proto_config.aggression(), runtime) + : nullptr), + sr_threshold_(proto_config.has_sr_threshold() ? std::make_unique( + proto_config.sr_threshold(), runtime) + : nullptr), response_evaluator_(std::move(response_evaluator)) {} double AdmissionControlFilterConfig::aggression() const { return std::max(1.0, aggression_ ? aggression_->value() : defaultAggression); } +double AdmissionControlFilterConfig::successRateThreshold() const { + const double pct = sr_threshold_ ? sr_threshold_->value() : defaultSuccessRateThreshold; + return std::min(pct, 100.0) / 100.0; +} + AdmissionControlFilter::AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config, const std::string& stats_prefix) : config_(std::move(config)), stats_(generateStats(config_->scope(), stats_prefix)), - expect_grpc_status_in_trailer_(false), record_request_(true) {} + record_request_(true) {} Http::FilterHeadersStatus AdmissionControlFilter::decodeHeaders(Http::RequestHeaderMap&, bool) { - // TODO(tonya11en): Ensure we document the fact that healthchecks are ignored. if (!config_->filterEnabled() || decoder_callbacks_->streamInfo().healthCheck()) { // We must forego recording the success/failure of this request during encoding. record_request_ = false; @@ -61,9 +68,15 @@ Http::FilterHeadersStatus AdmissionControlFilter::decodeHeaders(Http::RequestHea } if (shouldRejectRequest()) { + // We do not want to sample requests that we are rejecting, since this taints the measurements + // that should be describing the upstreams. In addition, if we were to record the requests + // rejected, the rejection probabilities would not converge back to 0 even if the upstream + // success rate returns to 100%. 
+ record_request_ = false; + + stats_.rq_rejected_.inc(); decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "", nullptr, absl::nullopt, "denied by admission control"); - stats_.rq_rejected_.inc(); return Http::FilterHeadersStatus::StopIteration; } @@ -123,10 +136,17 @@ AdmissionControlFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) { } bool AdmissionControlFilter::shouldRejectRequest() const { + // This formula is documented in the admission control filter documentation: + // https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/admission_control_filter.html const auto request_counts = config_->getController().requestCounts(); - const double total = request_counts.requests; - const double success = request_counts.successes; - const double probability = (total - config_->aggression() * success) / (total + 1); + const double total_requests = request_counts.requests; + const double successful_requests = request_counts.successes; + double probability = total_requests - successful_requests / config_->successRateThreshold(); + probability = probability / (total_requests + 1); + const auto aggression = config_->aggression(); + if (aggression != 1.0) { + probability = std::pow(probability, 1.0 / aggression); + } // Choosing an accuracy of 4 significant figures for the probability. static constexpr uint64_t accuracy = 1e4; diff --git a/source/extensions/filters/http/admission_control/admission_control.h b/source/extensions/filters/http/admission_control/admission_control.h index 54d793236e6b..99554d5e5e76 100644 --- a/source/extensions/filters/http/admission_control/admission_control.h +++ b/source/extensions/filters/http/admission_control/admission_control.h @@ -15,6 +15,7 @@ #include "envoy/stats/stats_macros.h" #include "common/common/cleanup.h" +#include "common/common/logger.h" #include "common/grpc/common.h" #include "common/grpc/status.h" #include "common/http/codes.h" @@ -32,7 +33,10 @@ namespace AdmissionControl { /** * All stats for the admission control filter. */ -#define ALL_ADMISSION_CONTROL_STATS(COUNTER) COUNTER(rq_rejected) +#define ALL_ADMISSION_CONTROL_STATS(COUNTER) \ + COUNTER(rq_rejected) \ + COUNTER(rq_success) \ + COUNTER(rq_failure) /** * Wrapper struct for admission control filter stats. 
@see stats_macros.h @@ -63,6 +67,7 @@ class AdmissionControlFilterConfig { bool filterEnabled() const { return admission_control_feature_.enabled(); } Stats::Scope& scope() const { return scope_; } double aggression() const; + double successRateThreshold() const; ResponseEvaluator& responseEvaluator() const { return *response_evaluator_; } private: @@ -71,6 +76,7 @@ class AdmissionControlFilterConfig { const ThreadLocal::SlotPtr tls_; Runtime::FeatureFlag admission_control_feature_; std::unique_ptr aggression_; + std::unique_ptr sr_threshold_; std::shared_ptr response_evaluator_; }; @@ -80,7 +86,7 @@ using AdmissionControlFilterConfigSharedPtr = std::shared_ptr { + protected Logger::Loggable { public: AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config, const std::string& stats_prefix); @@ -100,13 +106,19 @@ class AdmissionControlFilter : public Http::PassThroughFilter, bool shouldRejectRequest() const; - void recordSuccess() { config_->getController().recordSuccess(); } + void recordSuccess() { + stats_.rq_success_.inc(); + config_->getController().recordSuccess(); + } - void recordFailure() { config_->getController().recordFailure(); } + void recordFailure() { + stats_.rq_failure_.inc(); + config_->getController().recordFailure(); + } const AdmissionControlFilterConfigSharedPtr config_; AdmissionControlStats stats_; - bool expect_grpc_status_in_trailer_; + bool expect_grpc_status_in_trailer_{false}; // If false, the filter will forego recording a request success or failure during encoding. bool record_request_; diff --git a/source/extensions/filters/http/admission_control/config.cc b/source/extensions/filters/http/admission_control/config.cc index 297fabf4f6d7..01aef0125bbf 100644 --- a/source/extensions/filters/http/admission_control/config.cc +++ b/source/extensions/filters/http/admission_control/config.cc @@ -1,5 +1,6 @@ #include "extensions/filters/http/admission_control/config.h" +#include "envoy/common/exception.h" #include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" #include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" #include "envoy/registry/registry.h" @@ -15,12 +16,16 @@ namespace Extensions { namespace HttpFilters { namespace AdmissionControl { -static constexpr std::chrono::seconds defaultSamplingWindow{120}; +static constexpr std::chrono::seconds defaultSamplingWindow{30}; Http::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + if (config.has_sr_threshold() && config.sr_threshold().default_value().value() == 0) { + throw EnvoyException("Success Rate Threshold cannot be zero percent"); + } + const std::string prefix = stats_prefix + "admission_control."; // Create the thread-local controller. 
@@ -43,9 +48,9 @@ Http::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProt } AdmissionControlFilterConfigSharedPtr filter_config = - std::make_shared(config, context.runtime(), context.random(), - context.scope(), std::move(tls), - std::move(response_evaluator)); + std::make_shared( + config, context.runtime(), context.api().randomGenerator(), context.scope(), + std::move(tls), std::move(response_evaluator)); return [filter_config, prefix](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config, prefix)); diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc index e6b93b3f90e2..8822bbf3a53f 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc @@ -198,7 +198,7 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers } // Just the existence of this header means we have an error, so skip. - if (headers.get(LambdaFilterNames::get().FunctionErrorHeader)) { + if (!headers.get(LambdaFilterNames::get().FunctionErrorHeader).empty()) { skip_ = true; return Http::FilterHeadersStatus::Continue; } diff --git a/source/extensions/filters/http/buffer/config.h b/source/extensions/filters/http/buffer/config.h index afbf4ad8d226..e31cd5dbf75b 100644 --- a/source/extensions/filters/http/buffer/config.h +++ b/source/extensions/filters/http/buffer/config.h @@ -30,6 +30,8 @@ class BufferFilterFactory Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override; }; +DECLARE_FACTORY(BufferFilterFactory); + } // namespace BufferFilter } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 1f24a71d332a..f7617abeecf3 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -79,8 +79,12 @@ envoy_cc_library( ":inline_headers_handles", "//include/envoy/common:time_interface", "//include/envoy/http:header_map_interface", + "//source/common/common:matchers_lib", "//source/common/http:header_map_lib", "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/protobuf", + "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 3ee022cf3eeb..867c102a75f5 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -30,11 +30,11 @@ struct CacheResponseCodeDetailValues { using CacheResponseCodeDetails = ConstSingleton; -CacheFilter::CacheFilter(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig&, - const std::string&, Stats::Scope&, TimeSource& time_source, - HttpCache& http_cache) +CacheFilter::CacheFilter( + const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, const std::string&, + Stats::Scope&, TimeSource& time_source, HttpCache& http_cache) : time_source_(time_source), cache_(http_cache), - allowed_vary_headers_(VaryHeader::parseAllowlist()) {} + vary_allow_list_(config.allowed_vary_headers()) {} void CacheFilter::onDestroy() { filter_state_ = FilterState::Destroyed; @@ -63,7 +63,7 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea 
} ASSERT(decoder_callbacks_); - LookupRequest lookup_request(headers, time_source_.systemTime(), allowed_vary_headers_); + LookupRequest lookup_request(headers, time_source_.systemTime(), vary_allow_list_); request_allows_inserts_ = !lookup_request.requestCacheControl().no_store_; lookup_ = cache_.makeLookupContext(std::move(lookup_request)); @@ -97,7 +97,7 @@ Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& he // Either a cache miss or a cache entry that is no longer valid. // Check if the new response can be cached. if (request_allows_inserts_ && - CacheabilityUtils::isCacheableResponse(headers, allowed_vary_headers_)) { + CacheabilityUtils::isCacheableResponse(headers, vary_allow_list_)) { ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting headers", *encoder_callbacks_); insert_ = cache_.makeInsertContext(std::move(lookup_)); // Add metadata associated with the cached response. Right now this is only response_time; @@ -373,7 +373,7 @@ void CacheFilter::processSuccessfulValidation(Http::ResponseHeaderMap& response_ // TODO(yosrym93): Try to avoid copying the header key twice. Http::LowerCaseString key(std::string(cached_header.key().getStringView())); absl::string_view value = cached_header.value().getStringView(); - if (!response_headers.get(key)) { + if (response_headers.get(key).empty()) { response_headers.setCopy(key, value); } return Http::HeaderMap::Iterate::Continue; @@ -458,7 +458,8 @@ void CacheFilter::encodeCachedResponse() { // If the filter is encoding, 304 response headers and cached headers are merged in encodeHeaders. // If the filter is decoding, we need to serve response headers from cache directly. if (filter_state_ == FilterState::DecodeServingFromCache) { - decoder_callbacks_->encodeHeaders(std::move(lookup_result_->headers_), end_stream); + decoder_callbacks_->encodeHeaders(std::move(lookup_result_->headers_), end_stream, + CacheResponseCodeDetails::get().ResponseFromCacheFilter); } if (lookup_result_->content_length_ > 0) { diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index b77dbbf9076d..935128e8154b 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -86,10 +86,11 @@ class CacheFilter : public Http::PassThroughFilter, // onHeaders for Range Responses, otherwise initialized by encodeCachedResponse. std::vector remaining_ranges_; - // TODO(#12901): The allowlist could be constructed only once directly from the config, instead of - // doing it per-request. - // Stores the headers that can be used to vary responses. - absl::flat_hash_set allowed_vary_headers_; + // TODO(#12901): The allow list could be constructed only once directly from the config, instead + // of doing it per-request. A good example of such config is found in the gzip filter: + // source/extensions/filters/http/gzip/gzip_filter.h. + // Stores the allow list rules that decide if a header can be varied upon. + VaryHeader vary_allow_list_; // True if the response has trailers. // TODO(toddmgreer): cache trailers. 
diff --git a/source/extensions/filters/http/cache/cache_headers_utils.cc b/source/extensions/filters/http/cache/cache_headers_utils.cc index 7762c3091a5f..7a412e9a131e 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.cc +++ b/source/extensions/filters/http/cache/cache_headers_utils.cc @@ -184,7 +184,7 @@ absl::optional CacheHeadersUtils::readAndRemoveLeadingDigits(absl::str } uint64_t new_val = (val * 10) + (cur - '0'); if (new_val / 8 < val) { - // Overflow occurred. + // Overflow occurred return absl::nullopt; } val = new_val; @@ -192,33 +192,78 @@ absl::optional CacheHeadersUtils::readAndRemoveLeadingDigits(absl::str } if (bytes_consumed) { - // Consume some digits. + // Consume some digits str.remove_prefix(bytes_consumed); return val; } return absl::nullopt; } -absl::flat_hash_set VaryHeader::parseAllowlist() { - // TODO(cbdm): Populate the hash_set from - // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. - // Need to make sure that the headers we add here are valid values (i.e., not malformed). That - // way, we won't have to check this again in isAllowed. - return {"x-temporary-standin-header-name"}; +void CacheHeadersUtils::getAllMatchingHeaderNames( + const Http::HeaderMap& headers, const std::vector& ruleset, + absl::flat_hash_set& out) { + headers.iterate([&ruleset, &out](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + absl::string_view header_name = header.key().getStringView(); + for (const auto& rule : ruleset) { + if (rule->match(header_name)) { + out.emplace(header_name); + break; + } + } + return Http::HeaderMap::Iterate::Continue; + }); } -bool VaryHeader::isAllowed(const absl::flat_hash_set& allowed_headers, - const Http::ResponseHeaderMap& headers) { - if (!hasVary(headers)) { +std::vector +CacheHeadersUtils::parseCommaDelimitedList(const Http::HeaderMap::GetResult& entry) { + if (entry.empty()) { + return {}; + } + + // TODO(mattklein123): Consider multiple header values? + std::vector header_values = absl::StrSplit(entry[0]->value().getStringView(), ','); + for (std::string& value : header_values) { + // TODO(cbdm): Might be able to improve the performance here by using StringUtil::trim to + // remove whitespace. + absl::StripAsciiWhitespace(&value); + } + + return header_values; +} + +VaryHeader::VaryHeader( + const Protobuf::RepeatedPtrField& allow_list) { + + for (const auto& rule : allow_list) { + allow_list_.emplace_back(std::make_unique(rule)); + } +} + +bool VaryHeader::isAllowed(const Http::ResponseHeaderMap& headers) const { + if (!VaryHeader::hasVary(headers)) { return true; } std::vector varied_headers = - parseHeaderValue(headers.get(Http::Headers::get().Vary)); + CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary)); - // If the vary value was malformed, it will not be contained in allowed_headers. 
for (const std::string& header : varied_headers) { - if (!allowed_headers.contains(header)) { + bool valid = false; + + // "Vary: *" should never be cached per: + // https://tools.ietf.org/html/rfc7231#section-7.1.4 + if (header == "*") { + return false; + } + + for (const auto& rule : allow_list_) { + if (rule->match(header)) { + valid = true; + break; + } + } + + if (!valid) { return false; } } @@ -227,8 +272,9 @@ bool VaryHeader::isAllowed(const absl::flat_hash_set& allowed_heade } bool VaryHeader::hasVary(const Http::ResponseHeaderMap& headers) { - const Http::HeaderEntry* vary_header = headers.get(Http::Headers::get().Vary); - return vary_header != nullptr && !vary_header->value().empty(); + // TODO(mattklein123): Support multiple vary headers and/or just make the vary header inline. + const auto vary_header = headers.get(Http::Headers::get().Vary); + return !vary_header.empty() && !vary_header[0]->value().empty(); } namespace { @@ -237,22 +283,23 @@ namespace { // https://tools.ietf.org/html/rfc2616#section-4.2. // Used to separate the values of different headers. -constexpr std::string_view header_separator = "\n"; +constexpr absl::string_view header_separator = "\n"; // Used to separate multiple values of a same header. -constexpr std::string_view in_value_separator = "\r"; +constexpr absl::string_view in_value_separator = "\r"; }; // namespace -std::string VaryHeader::createVaryKey(const Http::HeaderEntry* vary_header, +std::string VaryHeader::createVaryKey(const Http::HeaderMap::GetResult& vary_header, const Http::RequestHeaderMap& entry_headers) { - if (vary_header == nullptr) { + if (vary_header.empty()) { return ""; } - ASSERT(vary_header->key() == "vary"); + // TODO(mattklein123): Support multiple vary headers and/or just make the vary header inline. + ASSERT(vary_header[0]->key() == "vary"); std::string vary_key = "vary-key\n"; - for (const std::string& header : parseHeaderValue(vary_header)) { + for (const std::string& header : CacheHeadersUtils::parseCommaDelimitedList(vary_header)) { // TODO(cbdm): Can add some bucketing logic here based on header. For example, we could // normalize the values for accept-language by making all of {en-CA, en-GB, en-US} into // "en". This way we would not need to store multiple versions of the same payload, and any @@ -260,49 +307,29 @@ std::string VaryHeader::createVaryKey(const Http::HeaderEntry* vary_header, // bucket UserAgent values into android/ios/desktop; UserAgent::initializeFromHeaders tries to // do that normalization and could be used as an inspiration for some bucketing configuration. // The config should enable and control the bucketing wanted. - std::vector header_values; - Http::HeaderUtility::getAllOfHeader(entry_headers, header, header_values); + const auto all_values = Http::HeaderUtility::getAllOfHeaderAsString( + entry_headers, Http::LowerCaseString(header), in_value_separator); absl::StrAppend(&vary_key, header, in_value_separator, - absl::StrJoin(header_values, in_value_separator), header_separator); + all_values.result().has_value() ? 
all_values.result().value() : "", + header_separator); } return vary_key; } -std::vector VaryHeader::parseHeaderValue(const Http::HeaderEntry* vary_header) { - if (!vary_header) { - return {}; - } - - ASSERT(vary_header->key() == "vary"); - - // Vary header value should follow rules set per: - // https://tools.ietf.org/html/rfc7231#section-7.1.4 - - std::vector header_values = - absl::StrSplit(vary_header->value().getStringView(), ','); - for (std::string& value : header_values) { - // TODO(cbdm): Might be able to improve the performance here: (1) could use StringUtil::trim to - // remove whitespace; (2) lowering the case might not be necessary depending on the - // functionality of isAllowed (e.g., if a hash-set, could hash ignoring case). - absl::StripAsciiWhitespace(&value); - absl::AsciiStrToLower(&value); - } - - return header_values; -} - Http::RequestHeaderMapPtr -VaryHeader::possibleVariedHeaders(const absl::flat_hash_set& allowed_headers, - const Http::RequestHeaderMap& request_headers) { +VaryHeader::possibleVariedHeaders(const Http::RequestHeaderMap& request_headers) const { Http::RequestHeaderMapPtr possible_headers = Http::createHeaderMap({}); - for (const std::string& header : allowed_headers) { - std::vector values; - Http::HeaderUtility::getAllOfHeader(request_headers, header, values); - for (const absl::string_view& value : values) { - possible_headers->addCopy(Http::LowerCaseString(header), value); + absl::flat_hash_set header_names; + CacheHeadersUtils::getAllMatchingHeaderNames(request_headers, allow_list_, header_names); + + for (const absl::string_view& header : header_names) { + const auto lower_case_header = Http::LowerCaseString(std::string{header}); + const auto value = request_headers.get(lower_case_header); + for (size_t i = 0; i < value.size(); i++) { + possible_headers->addCopy(lower_case_header, value[i]->value().getStringView()); } } diff --git a/source/extensions/filters/http/cache/cache_headers_utils.h b/source/extensions/filters/http/cache/cache_headers_utils.h index e64752d846e4..aec5c78e4bcf 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.h +++ b/source/extensions/filters/http/cache/cache_headers_utils.h @@ -1,8 +1,14 @@ #pragma once #include "envoy/common/time.h" +#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/http/header_map.h" +#include "common/common/matchers.h" +#include "common/http/header_map_impl.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" +#include "common/protobuf/protobuf.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" @@ -103,32 +109,40 @@ class CacheHeadersUtils { * absl::nullopt without advancing "*str". */ static absl::optional readAndRemoveLeadingDigits(absl::string_view& str); + + // Add to out all header names from the given map that match any of the given rules. + static void getAllMatchingHeaderNames(const Http::HeaderMap& headers, + const std::vector& ruleset, + absl::flat_hash_set& out); + + // Parses the values of a comma-delimited list as defined per + // https://tools.ietf.org/html/rfc7230#section-7. + static std::vector parseCommaDelimitedList(const Http::HeaderMap::GetResult& entry); }; class VaryHeader { public: - // Checks if the headers contain an allowed value in the Vary header. - static bool isAllowed(const absl::flat_hash_set& allowed_headers, - const Http::ResponseHeaderMap& headers); - // Checks if the headers contain a non-empty value in the Vary header. 
static bool hasVary(const Http::ResponseHeaderMap& headers); // Creates a single string combining the values of the varied headers from entry_headers. - static std::string createVaryKey(const Http::HeaderEntry* vary_header, + static std::string createVaryKey(const Http::HeaderMap::GetResult& vary_header, const Http::RequestHeaderMap& entry_headers); - // Parses the header names that are in the Vary header value. - static std::vector parseHeaderValue(const Http::HeaderEntry* vary_header); + // Parses the allow list from the Cache Config into the object's private allow_list_. + VaryHeader(const Protobuf::RepeatedPtrField& allow_list); + + // Checks if the headers contain an allowed value in the Vary header. + bool isAllowed(const Http::ResponseHeaderMap& headers) const; // Returns a header map containing the subset of the original headers that can be varied from the // request. - static Http::RequestHeaderMapPtr - possibleVariedHeaders(const absl::flat_hash_set& allowed_headers, - const Http::RequestHeaderMap& request_headers); + Http::RequestHeaderMapPtr + possibleVariedHeaders(const Http::RequestHeaderMap& request_headers) const; - // Parses the allowlist of header values that can be used to create varied responses. - static absl::flat_hash_set parseAllowlist(); +private: + // Stores the matching rules that define whether a header is allowed to be varied. + std::vector allow_list_; }; } // namespace Cache diff --git a/source/extensions/filters/http/cache/cacheability_utils.cc b/source/extensions/filters/http/cache/cacheability_utils.cc index 48e02a04d20e..48f0d48369cd 100644 --- a/source/extensions/filters/http/cache/cacheability_utils.cc +++ b/source/extensions/filters/http/cache/cacheability_utils.cc @@ -43,7 +43,7 @@ bool CacheabilityUtils::isCacheableRequest(const Http::RequestHeaderMap& headers // If needed to be handled properly refer to: // https://httpwg.org/specs/rfc7234.html#validation.received for (auto conditional_header : conditionalHeaders()) { - if (headers.get(*conditional_header)) { + if (!headers.get(*conditional_header).empty()) { return false; } } @@ -56,9 +56,8 @@ bool CacheabilityUtils::isCacheableRequest(const Http::RequestHeaderMap& headers forwarded_proto == header_values.SchemeValues.Https); } -bool CacheabilityUtils::isCacheableResponse( - const Http::ResponseHeaderMap& headers, - const absl::flat_hash_set& allowed_vary_headers) { +bool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers, + const VaryHeader& vary_allow_list) { absl::string_view cache_control = headers.getInlineValue(response_cache_control_handle.handle()); ResponseCacheControl response_cache_control(cache_control); @@ -74,7 +73,7 @@ bool CacheabilityUtils::isCacheableResponse( return !response_cache_control.no_store_ && cacheableStatusCodes().contains((headers.getStatusValue())) && has_validation_data && - VaryHeader::isAllowed(allowed_vary_headers, headers); + vary_allow_list.isAllowed(headers); } } // namespace Cache diff --git a/source/extensions/filters/http/cache/cacheability_utils.h b/source/extensions/filters/http/cache/cacheability_utils.h index 9554c3d3cfd0..88c2b1cd75fe 100644 --- a/source/extensions/filters/http/cache/cacheability_utils.h +++ b/source/extensions/filters/http/cache/cacheability_utils.h @@ -23,7 +23,7 @@ class CacheabilityUtils { // Therefore, isCacheableRequest, isCacheableResponse and CacheFilter::request_allows_inserts_ // together should cover https://httpwg.org/specs/rfc7234.html#response.cacheability. 
static bool isCacheableResponse(const Http::ResponseHeaderMap& headers, - const absl::flat_hash_set& allowed_vary_headers); + const VaryHeader& vary_allow_list); }; } // namespace Cache } // namespace HttpFilters diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index fb5934491f8c..453da7ecd3bd 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -24,7 +24,7 @@ namespace HttpFilters { namespace Cache { LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp, - const absl::flat_hash_set& allowed_vary_headers) + const VaryHeader& vary_allow_list) : timestamp_(timestamp) { // These ASSERTs check prerequisites. A request without these headers can't be looked up in cache; // CacheFilter doesn't create LookupRequests for such requests. @@ -54,7 +54,7 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst key_.set_path(std::string(request_headers.getPathValue())); key_.set_clear_http(forwarded_proto == scheme_values.Http); - vary_headers_ = VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers); + vary_headers_ = vary_allow_list.possibleVariedHeaders(request_headers); } // Unless this API is still alpha, calls to stableHashKey() must always return @@ -214,15 +214,13 @@ std::vector RangeRequests::parseRanges(const Http::RequestHeaderMa // Multiple instances of range headers are invalid. // https://tools.ietf.org/html/rfc7230#section-3.2.2 - std::vector range_headers; - Http::HeaderUtility::getAllOfHeader(request_headers, Http::Headers::get().Range.get(), - range_headers); + const auto range_header = request_headers.get(Http::Headers::get().Range); absl::string_view header_value; - if (range_headers.size() == 1) { - header_value = range_headers.front(); + if (range_header.size() == 1) { + header_value = range_header[0]->value().getStringView(); } else { - if (range_headers.size() > 1) { + if (range_header.size() > 1) { ENVOY_LOG(debug, "Multiple range headers provided in request. Ignoring all range headers."); } return {}; diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 49afbf74bd4b..efde461d146a 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -188,7 +188,7 @@ class LookupRequest { public: // Prereq: request_headers's Path(), Scheme(), and Host() are non-null. LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp, - const absl::flat_hash_set& allowed_vary_headers); + const VaryHeader& vary_allow_list); const RequestCacheControl& requestCacheControl() const { return request_cache_control_; } @@ -220,7 +220,7 @@ class LookupRequest { std::vector request_range_spec_; // Time when this LookupRequest was created (in response to an HTTP request). SystemTime timestamp_; - // The subset of this request's headers that are listed in + // The subset of this request's headers that match one of the rules in // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. If a cache // storage implementation forwards lookup requests to a remote cache server that supports *vary* // headers, that server may need to see these headers. 
For local implementations, it may be diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc index 0fea6205801c..dcea0e448d4b 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc @@ -144,8 +144,8 @@ SimpleHttpCache::varyLookup(const LookupRequest& request, // This method should be called from lookup, which holds the mutex for reading. mutex_.AssertReaderHeld(); - const Http::HeaderEntry* vary_header = response_headers->get(Http::Headers::get().Vary); - ASSERT(vary_header); + const auto vary_header = response_headers->get(Http::Headers::get().Vary); + ASSERT(!vary_header.empty()); Key varied_request_key = request.key(); const std::string vary_key = VaryHeader::createVaryKey(vary_header, request.getVaryHeaders()); @@ -168,8 +168,8 @@ void SimpleHttpCache::varyInsert(const Key& request_key, const Http::RequestHeaderMap& request_vary_headers) { absl::WriterMutexLock lock(&mutex_); - const Http::HeaderEntry* vary_header = response_headers->get(Http::Headers::get().Vary); - ASSERT(vary_header); + const auto vary_header = response_headers->get(Http::Headers::get().Vary); + ASSERT(!vary_header.empty()); // Insert the varied response. Key varied_request_key = request_key; @@ -183,7 +183,8 @@ void SimpleHttpCache::varyInsert(const Key& request_key, if (iter == map_.end()) { Http::ResponseHeaderMapPtr vary_only_map = Http::createHeaderMap({}); - vary_only_map->setCopy(Http::Headers::get().Vary, vary_header->value().getStringView()); + // TODO(mattklein123): Support multiple vary headers and/or just make the vary header inline. + vary_only_map->setCopy(Http::Headers::get().Vary, vary_header[0]->value().getStringView()); // TODO(cbdm): In a cache that evicts entries, we could maintain a list of the "varykey"s that // we have inserted as the body for this first lookup. This way, we would know which keys we // have inserted for that resource. 
For the first entry simply use vary_key as the entry_list, diff --git a/source/extensions/filters/http/cdn_loop/BUILD b/source/extensions/filters/http/cdn_loop/BUILD new file mode 100644 index 000000000000..ff6a8c26bfdf --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/BUILD @@ -0,0 +1,61 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "filter_lib", + srcs = ["filter.cc"], + hdrs = ["filter.h"], + deps = [ + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/http:header_map_interface", + "//source/common/common:statusor_lib", + "//source/common/http:headers_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + +envoy_cc_library( + name = "parser_lib", + srcs = ["parser.cc"], + hdrs = ["parser.h"], + deps = ["//source/common/common:statusor_lib"], +) + +envoy_cc_library( + name = "utils_lib", + srcs = ["utils.cc"], + hdrs = ["utils.h"], + deps = [ + ":parser_lib", + "//source/common/common:statusor_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", + deps = [ + ":filter_lib", + ":parser_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:factory_context_interface", + "//source/common/common:statusor_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/cdn_loop/config.cc b/source/extensions/filters/http/cdn_loop/config.cc new file mode 100644 index 000000000000..001c884a9114 --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/config.cc @@ -0,0 +1,44 @@ +#include "extensions/filters/http/cdn_loop/config.h" + +#include + +#include "envoy/common/exception.h" +#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/factory_context.h" + +#include "common/common/statusor.h" + +#include "extensions/filters/http/cdn_loop/filter.h" +#include "extensions/filters/http/cdn_loop/parser.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { + +using ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::parseCdnId; +using ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParseContext; +using ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParsedCdnId; + +Http::FilterFactoryCb CdnLoopFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config, + const std::string& /*stats_prefix*/, Server::Configuration::FactoryContext& /*context*/) { + StatusOr context = parseCdnId(ParseContext(config.cdn_id())); + if (!context.ok()) { + throw EnvoyException(fmt::format("Provided cdn_id \"{}\" is not a valid CDN identifier: {}", + config.cdn_id(), context.status())); + } + return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter( + std::make_shared(config.cdn_id(), config.max_allowed_occurrences())); + }; +} + +REGISTER_FACTORY(CdnLoopFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace 
CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cdn_loop/config.h b/source/extensions/filters/http/cdn_loop/config.h new file mode 100644 index 000000000000..1e03a123649f --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.validate.h" +#include "envoy/http/filter.h" +#include "envoy/server/factory_context.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { + +class CdnLoopFilterFactory + : public Common::FactoryBase< + envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig> { +public: + CdnLoopFilterFactory() : FactoryBase(HttpFilterNames::get().CdnLoop) {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cdn_loop/filter.cc b/source/extensions/filters/http/cdn_loop/filter.cc new file mode 100644 index 000000000000..9cc81ca0a928 --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/filter.cc @@ -0,0 +1,54 @@ +#include "extensions/filters/http/cdn_loop/filter.h" + +#include "envoy/http/codes.h" +#include "envoy/http/filter.h" +#include "envoy/http/header_map.h" + +#include "common/common/statusor.h" +#include "common/http/headers.h" + +#include "extensions/filters/http/cdn_loop/utils.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { + +namespace { + +Http::RegisterCustomInlineHeader + cdn_loop_handle(Http::CustomHeaders::get().CdnLoop); + +constexpr absl::string_view ParseErrorMessage = "Invalid CDN-Loop header in request."; +constexpr absl::string_view ParseErrorDetails = "invalid_cdn_loop_header"; +constexpr absl::string_view LoopDetectedMessage = "The server has detected a loop between CDNs."; +constexpr absl::string_view LoopDetectedDetails = "cdn_loop_detected"; + +} // namespace + +Http::FilterHeadersStatus CdnLoopFilter::decodeHeaders(Http::RequestHeaderMap& headers, + bool /*end_stream*/) { + + if (const Http::HeaderEntry* header_entry = headers.getInline(cdn_loop_handle.handle()); + header_entry != nullptr) { + if (StatusOr count = + countCdnLoopOccurrences(header_entry->value().getStringView(), cdn_id_); + !count) { + decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, ParseErrorMessage, nullptr, + absl::nullopt, ParseErrorDetails); + return Http::FilterHeadersStatus::StopIteration; + } else if (*count > max_allowed_occurrences_) { + decoder_callbacks_->sendLocalReply(Http::Code::BadGateway, LoopDetectedMessage, nullptr, + absl::nullopt, LoopDetectedDetails); + return Http::FilterHeadersStatus::StopIteration; + } + } + + headers.appendCopy(Http::CustomHeaders::get().CdnLoop, cdn_id_); + return Http::FilterHeadersStatus::Continue; +} + +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cdn_loop/filter.h 
b/source/extensions/filters/http/cdn_loop/filter.h new file mode 100644 index 000000000000..77216b798d50 --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/filter.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#include "envoy/http/filter.h" +#include "envoy/http/header_map.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { + +class CdnLoopFilter : public Http::PassThroughDecoderFilter { +public: + CdnLoopFilter(std::string cdn_id, int max_allowed_occurrences) + : cdn_id_(std::move(cdn_id)), max_allowed_occurrences_(max_allowed_occurrences) {} + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) override; + +private: + const std::string cdn_id_; + const int max_allowed_occurrences_; +}; + +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cdn_loop/parser.cc b/source/extensions/filters/http/cdn_loop/parser.cc new file mode 100644 index 000000000000..e686406c2fd3 --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/parser.cc @@ -0,0 +1,381 @@ +#include "extensions/filters/http/cdn_loop/parser.h" + +#include "common/common/statusor.h" + +#include "absl/status/status.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { +namespace Parser { + +namespace { + +// RFC 5234 Appendix B.1 says: +// +// ALPHA = %x41-5A / %x61-7A ; A-Z / a-z +constexpr bool isAlpha(char c) { + return ('\x41' <= c && c <= '\x5a') || ('\x61' <= c && c <= '\x7a'); +} + +// RFC 5234 Appendix B.1 says: +// +// DIGIT = %x30-39 ; 0-9 +constexpr bool isDigit(char c) { return '\x30' <= c && c <= '\x39'; } + +// RFC 2234 Section 6.1 defines HEXDIG as: +// +// HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" +// +// This rule allows lower case letters too in violation of the RFC since IPv6 +// addresses commonly contain lower-case hex digits. 
+constexpr bool isHexDigitCaseInsensitive(char c) { + return isDigit(c) || ('A' <= c && c <= 'F') || ('a' <= c && c <= 'f'); +} + +// RFC 7230 Section 3.2.6 defines obs-text as: +// +// obs-text = %x80-FF +constexpr bool isObsText(char c) { return 0x80 & c; } + +// RFC 7230 Section 3.2.6 defines qdtext as: +// +// qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text +constexpr bool isQdText(char c) { + return c == '\t' || c == ' ' || c == '\x21' || ('\x23' <= c && c <= '\x5B') || + ('\x5D' <= c && c <= '\x7E') || isObsText(c); +} + +// RFC 5234 Appendix B.1 says: +// +// VCHAR = %x21-7E +// ; visible (printing) characters +constexpr bool isVChar(char c) { return '\x21' <= c && c <= '\x7e'; } + +} // namespace + +ParseContext skipOptionalWhitespace(const ParseContext& input) { + ParseContext context = input; + while (!context.atEnd()) { + const char c = context.peek(); + if (!(c == ' ' || c == '\t')) { + break; + } + context.increment(); + } + return context; +} + +StatusOr parseQuotedPair(const ParseContext& input) { + ParseContext context = input; + if (context.atEnd()) { + return absl::InvalidArgumentError( + absl::StrFormat("expected backslash at position %d; found end-of-input", context.next())); + } + + if (context.peek() != '\\') { + return absl::InvalidArgumentError(absl::StrFormat( + "expected backslash at position %d; found '%c'", input.next(), context.peek())); + } + context.increment(); + + if (context.atEnd()) { + return absl::InvalidArgumentError(absl::StrFormat( + "expected escaped character at position %d; found end-of-input", context.next())); + } + + const char c = context.peek(); + if (!(c == '\t' || c == ' ' || isVChar(c) || isObsText(c))) { + return absl::InvalidArgumentError( + absl::StrFormat("expected escapable character at position %d; found '\\x%x'", input.next(), + context.peek())); + } + context.increment(); + + return context; +} + +StatusOr parseQuotedString(const ParseContext& input) { + ParseContext context = input; + + if (context.atEnd()) { + return absl::InvalidArgumentError(absl::StrFormat( + "expected opening '\"' at position %d; found end-of-input", context.next())); + } + + if (context.peek() != '"') { + return absl::InvalidArgumentError(absl::StrFormat( + "expected opening quote at position %d; found '%c'", context.next(), context.peek())); + } + context.increment(); + + while (!context.atEnd() && context.peek() != '"') { + if (isQdText(context.peek())) { + context.increment(); + continue; + } else if (context.peek() == '\\') { + if (StatusOr quoted_pair_context = parseQuotedPair(context); + !quoted_pair_context) { + return quoted_pair_context.status(); + } else { + context.setNext(*quoted_pair_context); + continue; + } + } else { + break; + } + } + + if (context.atEnd()) { + return absl::InvalidArgumentError(absl::StrFormat( + "expected closing quote at position %d; found end-of-input", context.next())); + } + + if (context.peek() != '"') { + return absl::InvalidArgumentError(absl::StrFormat( + "expected closing quote at position %d; found '%c'", input.next(), context.peek())); + } + context.increment(); + + return context; +} + +StatusOr parseToken(const ParseContext& input) { + ParseContext context = input; + while (!context.atEnd()) { + const char c = context.peek(); + // Put alphanumeric, -, and _ characters at the head of the list since + // they're likely to be used most often. + if (isAlpha(c) || isDigit(c) || c == '-' || c == '_' || c == '!' || c == '#' || c == '$' || + c == '%' || c == '&' || c == '\'' || c == '*' || c == '+' || c == '.' 
|| c == '^' || + c == '`' || c == '|' || c == '~') { + context.increment(); + } else { + break; + } + } + if (context.next() == input.next()) { + if (context.atEnd()) { + return absl::InvalidArgumentError(absl::StrFormat( + "expected token starting at position %d; found end of input", input.next())); + } else { + return absl::InvalidArgumentError(absl::StrFormat( + "expected token starting at position %d; found '%c'", input.next(), context.peek())); + } + } + + return context; +} + +StatusOr parsePlausibleIpV6(const ParseContext& input) { + ParseContext context = input; + if (context.atEnd()) { + return absl::InvalidArgumentError(absl::StrFormat( + "expected IPv6 literal at position %d; found end-of-input", context.next())); + } + + if (context.peek() != '[') { + return absl::InvalidArgumentError(absl::StrFormat("expected opening '[' of IPv6 literal at " + "position %d; found '%c'", + context.next(), context.peek())); + } + context.increment(); + + while (true) { + if (context.atEnd()) { + break; + } + const char c = context.peek(); + if (!(isHexDigitCaseInsensitive(c) || c == ':' || c == '.')) { + break; + } + context.increment(); + } + + if (context.atEnd()) { + return absl::InvalidArgumentError( + absl::StrFormat("expected closing ']' of IPv6 literal at position %d " + "found end-of-input", + context.next())); + } + if (context.peek() != ']') { + return absl::InvalidArgumentError(absl::StrFormat("expected closing ']' of IPv6 literal at " + "position %d; found '%c'", + context.next(), context.peek())); + } + context.increment(); + + return context; +} + +StatusOr parseCdnId(const ParseContext& input) { + ParseContext context = input; + + if (context.atEnd()) { + return absl::InvalidArgumentError( + absl::StrFormat("expected cdn-id at position %d; found end-of-input", context.next())); + } + + // Optimization: dispatch on the next character to avoid the StrFormat in the + // error path of an IPv6 parser when the value has a token (and vice versa). 
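+  // For example, "[2001:db8::1]:8080" dispatches to the IPv6 branch and
+  // "cdn.example.com" to the token branch; either form may then be followed
+  // by a ":" and port digits.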
+ if (context.peek() == '[') { + if (StatusOr ipv6 = parsePlausibleIpV6(context); !ipv6) { + return ipv6.status(); + } else { + context.setNext(*ipv6); + } + } else { + if (StatusOr token = parseToken(context); !token) { + return token.status(); + } else { + context.setNext(*token); + } + } + + if (context.atEnd()) { + return ParsedCdnId(context, + context.value().substr(input.next(), context.next() - input.next())); + } + + if (context.peek() != ':') { + return ParsedCdnId(context, + context.value().substr(input.next(), context.next() - input.next())); + } + context.increment(); + + while (!context.atEnd()) { + if (isDigit(context.value()[context.next()])) { + context.increment(); + } else { + break; + } + } + + return ParsedCdnId(context, context.value().substr(input.next(), context.next() - input.next())); +} + +StatusOr parseParameter(const ParseContext& input) { + ParseContext context = input; + + if (StatusOr parsed_token = parseToken(context); !parsed_token) { + return parsed_token.status(); + } else { + context.setNext(*parsed_token); + } + + if (context.atEnd()) { + return absl::InvalidArgumentError( + absl::StrFormat("expected '=' at position %d; found end-of-input", context.next())); + } + + if (context.peek() != '=') { + return absl::InvalidArgumentError( + absl::StrFormat("expected '=' at position %d; found '%c'", context.next(), context.peek())); + } + context.increment(); + + if (context.atEnd()) { + return absl::InvalidArgumentError(absl::StrCat( + "expected token or quoted-string at position %d; found end-of-input", context.next())); + } + + // Optimization: dispatch on the next character to avoid the StrFormat in the + // error path of an quoted string parser when the next item is a token (and + // vice versa). + if (context.peek() == '"') { + if (StatusOr value_quote = parseQuotedString(context); !value_quote) { + return value_quote.status(); + } else { + return *value_quote; + } + } else { + if (StatusOr value_token = parseToken(context); !value_token) { + return value_token.status(); + } else { + return *value_token; + } + } +} + +StatusOr parseCdnInfo(const ParseContext& input) { + absl::string_view cdn_id; + ParseContext context = input; + if (StatusOr parsed_id = parseCdnId(input); !parsed_id) { + return parsed_id.status(); + } else { + context.setNext(parsed_id->context()); + cdn_id = parsed_id->cdnId(); + } + + context.setNext(skipOptionalWhitespace(context)); + + while (!context.atEnd()) { + if (context.peek() != ';') { + break; + } + context.increment(); + + context.setNext(skipOptionalWhitespace(context)); + + if (StatusOr parameter = parseParameter(context); !parameter) { + return parameter.status(); + } else { + context.setNext(*parameter); + } + + context.setNext(skipOptionalWhitespace(context)); + } + + return ParsedCdnInfo(context, cdn_id); +} + +StatusOr parseCdnInfoList(const ParseContext& input) { + std::vector cdn_infos; + ParseContext context = input; + + context.setNext(skipOptionalWhitespace(context)); + + while (!context.atEnd()) { + // Loop invariant: we're always at the beginning of a new element. 
+ + if (context.peek() == ',') { + // Empty element case + context.increment(); + context.setNext(skipOptionalWhitespace(context)); + continue; + } + + if (StatusOr parsed_cdn_info = parseCdnInfo(context); !parsed_cdn_info) { + return parsed_cdn_info.status(); + } else { + cdn_infos.push_back(parsed_cdn_info->cdnId()); + context.setNext(parsed_cdn_info->context()); + } + + context.setNext(skipOptionalWhitespace(context)); + + if (context.atEnd()) { + break; + } + + if (context.peek() != ',') { + return absl::InvalidArgumentError(absl::StrFormat("expected ',' at position %d; found '%c'", + context.next(), context.peek())); + } else { + context.increment(); + } + + context.setNext(skipOptionalWhitespace(context)); + } + + return ParsedCdnInfoList(context, std::move(cdn_infos)); +} + +} // namespace Parser +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cdn_loop/parser.h b/source/extensions/filters/http/cdn_loop/parser.h new file mode 100644 index 000000000000..5b9aad5175b2 --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/parser.h @@ -0,0 +1,298 @@ +#pragma once + +#include + +#include "common/common/statusor.h" + +#include "absl/strings/str_join.h" +#include "absl/strings/string_view.h" + +// This file defines a parser for the CDN-Loop header value. +// +// RFC 8586 Section 2 defined the CDN-Loop header as: +// +// CDN-Loop = #cdn-info +// cdn-info = cdn-id *( OWS ";" OWS parameter ) +// cdn-id = ( uri-host [ ":" port ] ) / pseudonym +// pseudonym = token +// +// Each of those productions rely on definitions in RFC 3986, RFC 5234, RFC +// 7230, and RFC 7231. Their use is noted in the individual parse functions. +// +// The parser is a top-down combined parser and lexer that implements just +// enough of the RFC spec to make it possible count the number of times a +// particular CDN value appears. The main differences between the RFC's grammar +// and the parser defined here are: +// +// 1. the parser has a more lax interpretation of what's a valid uri-host. See +// ParseCdnId for details. +// +// 2. the parser allows leading and trailing whitespace around the header +// value. See ParseCdnInfoList for details. +// +// Each parse function takes as input a ParseContext that tells the +// function where to start. Parse functions that just need to parse a portion +// of the CDN-Loop header, but don't need to return a value, should return a +// ParseContext pointing to the next character to parse. Parse functions that +// need to return a value should return something that contains a ParseContext. +// +// Parse functions that can fail (most of them!) wrap their return value in an +// Envoy::StatusOr. +// +// In the interest of performance, this parser works with string_views and +// references instead of copying std::strings. The string_view passed into the +// ParseContext of a parse function must outlive the return value of the +// function. + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { +namespace Parser { + +// A ParseContext contains the state of the recursive descent parser and some +// helper methods. +class ParseContext { +public: + ParseContext(absl::string_view value) : value_(value), next_(0) {} + ParseContext(absl::string_view value, absl::string_view::size_type next) + : value_(value), next_(next) {} + + // Returns true if we have reached the end of value. 
+ constexpr bool atEnd() const { return value_.length() <= next_; } + + // Returns the value we're parsing + constexpr absl::string_view value() const { return value_; } + + // Returns the position of the next character to process. + constexpr absl::string_view::size_type next() const { return next_; } + + // Returns the character at next. + // + // REQUIRES: !at_end() + constexpr char peek() const { return value_[next_]; } + + // Moves to the next character. + constexpr void increment() { ++next_; } + + // Sets next from another context. + constexpr void setNext(const ParseContext& other) { next_ = other.next_; } + + constexpr bool operator==(const ParseContext& other) const { + return value_ == other.value_ && next_ == other.next_; + } + constexpr bool operator!=(const ParseContext& other) const { return !(*this == other); } + + friend std::ostream& operator<<(std::ostream& os, ParseContext arg) { + return os << "ParseContext{next=" << arg.next_ << "}"; + } + +private: + // The item we're parsing. + const absl::string_view value_; + + // A pointer to the next value we should parse. + absl::string_view::size_type next_; +}; + +// A ParsedCdnId holds an extracted CDN-Loop cdn-id. +class ParsedCdnId { +public: + ParsedCdnId(ParseContext context, absl::string_view cdn_id) + : context_(context), cdn_id_(cdn_id) {} + + ParseContext context() const { return context_; } + + absl::string_view cdnId() const { return cdn_id_; } + + constexpr bool operator==(const ParsedCdnId& other) const { + return context_ == other.context_ && cdn_id_ == other.cdn_id_; + } + constexpr bool operator!=(const ParsedCdnId& other) const { return !(*this == other); } + + friend std::ostream& operator<<(std::ostream& os, ParsedCdnId arg) { + return os << "ParsedCdnId{context=" << arg.context_ << ", cdn_id=" << arg.cdn_id_ << "}"; + } + +private: + ParseContext context_; + absl::string_view cdn_id_; +}; + +// A ParsedCdnInfo holds the extracted cdn-id after parsing an entire cdn-info. +struct ParsedCdnInfo { + ParsedCdnInfo(ParseContext context, absl::string_view cdn_id) + : context_(context), cdn_id_(cdn_id) {} + + ParseContext context() const { return context_; } + + absl::string_view cdnId() const { return cdn_id_; } + + constexpr bool operator==(const ParsedCdnInfo& other) const { + return context_ == other.context_ && cdn_id_ == other.cdn_id_; + } + constexpr bool operator!=(const ParsedCdnInfo& other) const { return !(*this == other); } + + friend std::ostream& operator<<(std::ostream& os, ParsedCdnInfo arg) { + return os << "ParsedCdnInfo{context=" << arg.context_ << ", cdn_id=" << arg.cdn_id_ << "}"; + } + +private: + ParseContext context_; + absl::string_view cdn_id_; +}; + +// A ParsedCdnInfoList contains list of cdn-ids after parsing the entire +// CDN-Loop production. 
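+// For example, parsing "cdn-a; version=2, [2001:db8::1]" yields the cdn-ids
+// "cdn-a" and "[2001:db8::1]"; parameters are consumed but not retained.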
+struct ParsedCdnInfoList { + ParsedCdnInfoList(ParseContext context, std::vector cdn_ids) + : context_(context), cdn_ids_(std::move(cdn_ids)) {} + + constexpr const std::vector& cdnIds() { return cdn_ids_; } + + constexpr bool operator==(const ParsedCdnInfoList& other) const { + return context_ == other.context_ && cdn_ids_ == other.cdn_ids_; + } + constexpr bool operator!=(const ParsedCdnInfoList& other) const { return !(*this == other); } + + friend std::ostream& operator<<(std::ostream& os, ParsedCdnInfoList arg) { + return os << "ParsedCdnInfoList{context=" << arg.context_ << ", cdn_ids=[" + << absl::StrJoin(arg.cdn_ids_, ", ") << "]}"; + } + +private: + ParseContext context_; + std::vector cdn_ids_; +}; + +// Skips optional whitespace according to RFC 7230 Section 3.2.3. +// +// OWS = *( SP / HTAB ) +// +// Since this is completely optional, there's no way this call can fail. +ParseContext skipOptionalWhitespace(const ParseContext& input); + +// Parses a quoted-pair according to RFC 7230 Section 3.2.6. +// +// quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) +StatusOr parseQuotedPair(const ParseContext& input); + +// Parses a quoted-string according to RFC 7230 Section 3.2.6. +// +// quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE +// qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text +// obs-text = %x80-FF +// +// quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) +StatusOr parseQuotedString(const ParseContext& input); + +// Parses a token according to RFC 7320 Section 3.2.6. +// +// token = 1*tchar +// +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +// / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +// / DIGIT / ALPHA +// ; any VCHAR, except delimiters +// +// According to RFC 5234 Appendix B.1: +// +// ALPHA = %x41-5A / %x61-7A ; A-Z / a-z +// +// DIGIT = %x30-39 +StatusOr parseToken(const ParseContext& input); + +// Parses something that looks like an IPv6 address literal. +// +// A proper IPv6 address literal is defined in RFC 3986, Section 3.2.2 as part +// of the host rule. We're going to allow something simpler: +// +// plausible-ipv6 = "[" *( HEXDIGIT | "." | ":" ) "] +// HEXDIGIT = DIGIT | %x41-46 | %x61-66 ; 0-9 | A-F | a-f +// +// Compared to the real rule, our rule: +// +// - allows lower-case hex digits +// - allows address sections with more than 4 hex digits in a row +// - allows embedded IPv4 addresses multiple times rather than just at the end. +StatusOr parsePlausibleIpV6(const ParseContext& input); + +// Parses a cdn-id in a lax way. +// +// According to to RFC 8586 Section 2, the cdn-id is: +// +// cdn-id = ( uri-host [ ":" port ] ) / pseudonym +// pseudonym = token +// +// The uri-host portion of the cdn-id is the "host" rule from RFC 3986 Section +// 3.2.2. Parsing the host rule is remarkably difficult because the host rule +// tries to parse exactly valid IP addresses (e.g., disallowing values greater +// than 255 in an IPv4 address or only allowing one instance of "::" in IPv6 +// addresses) and needs to deal with % escaping in names. +// +// Worse, the uri-host reg-name rule admits ',' and ';' as members of sub-delim +// rule, making parsing ambiguous in some cases! RFC 3986 does this in order to +// be "future-proof" for naming schemes we haven't dreamed up yet. RFC 8586 +// says that if a CDN uses a uri-host as its cdn-id, the uri-host must be a +// "hostname under its control". The only global naming system we have is DNS, +// so the only really valid reg-name an Internet-facing Envoy should see is a +// DNS name. 
+// +// Luckily, the token rule more or less covers the uri-host rule for DNS names +// and for IPv4 addresses. We just a new rule to parse IPv6 addresses. See +// ParsePlausibleIpV6 for the rule we'll follow. +// +// The definition of port comes from RFC 3986 Section +// 3.2.3 as: +// +// port = *DIGIT +// +// In other words, any number of digits is allowed. +// +// In all, this function will parse cdn-id as: +// +// cdn-id = ( plausible-ipv6-address / token ) [ ":" *DIGIT ] +StatusOr parseCdnId(const ParseContext& input); + +// Parses a parameter according RFC 7231 Appendix D. +// +// parameter = token "=" ( token / quoted-string ) +StatusOr parseParameter(const ParseContext& input); + +// Parses a cdn-info according to RFC 8586 Section 2. +// +// cdn-info = cdn-id *( OWS ";" OWS parameter ) +StatusOr parseCdnInfo(const ParseContext& input); + +// Parses the top-level cdn-info according to RFC 8586 Section 2. +// +// CDN-Loop = #cdn-info +// +// The # rule is defined by RFC 7230 Section 7. The # is different for senders +// and recipients. We're a recipient, so: +// +// For compatibility with legacy list rules, a recipient MUST parse and +// ignore a reasonable number of empty list elements: enough to handle +// common mistakes by senders that merge values, but not so much that +// they could be used as a denial-of-service mechanism. In other words, +// a recipient MUST accept lists that satisfy the following syntax: +// +// #element => [ ( "," / element ) *( OWS "," [ OWS element ] ) ] +// +// 1#element => *( "," OWS ) element *( OWS "," [ OWS element ] ) +// +// Empty elements do not contribute to the count of elements present. +// +// Since #cdn-info uses the #element form, we have to parse (but not count) +// blank entries. +// +// In a divergence with the RFC's grammar, this function will also ignore +// leading and trailing OWS. This function expects to consume the entire input +// and will return an error if there is something it cannot parse. 
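+//
+// For example, ",, cdn-a ,, cdn-b ," parses to the elements "cdn-a" and
+// "cdn-b"; the empty entries and surrounding whitespace are ignored.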
+StatusOr parseCdnInfoList(const ParseContext& input); + +} // namespace Parser +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cdn_loop/utils.cc b/source/extensions/filters/http/cdn_loop/utils.cc new file mode 100644 index 000000000000..2ea1a6a1945d --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/utils.cc @@ -0,0 +1,32 @@ +#include "extensions/filters/http/cdn_loop/utils.h" + +#include + +#include "common/common/statusor.h" + +#include "extensions/filters/http/cdn_loop/parser.h" + +#include "absl/status/status.h" +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { + +StatusOr countCdnLoopOccurrences(absl::string_view header, absl::string_view cdn_id) { + if (cdn_id.empty()) { + return absl::InvalidArgumentError("cdn_id cannot be empty"); + } + + if (absl::StatusOr parsed = Parser::parseCdnInfoList(header); parsed) { + return std::count(parsed->cdnIds().begin(), parsed->cdnIds().end(), cdn_id); + } else { + return parsed.status(); + } +} + +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cdn_loop/utils.h b/source/extensions/filters/http/cdn_loop/utils.h new file mode 100644 index 000000000000..ba486edadc16 --- /dev/null +++ b/source/extensions/filters/http/cdn_loop/utils.h @@ -0,0 +1,24 @@ +#pragma once + +#include "common/common/statusor.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { + +// Count the number of times cdn_id appears as a cdn-id element in header. +// +// According to RFC 8586, a cdn-id is either a uri-host[:port] or a pseudonym. +// In either case, cdn_id must be at least one character long. +// +// If the header is unparseable or if cdn_id is the empty string, this function +// will return an InvalidArgument status. +StatusOr countCdnLoopOccurrences(absl::string_view header, absl::string_view cdn_id); + +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/common/compressor/compressor.cc b/source/extensions/filters/http/common/compressor/compressor.cc index 4e0a1b48ce9b..e8e604295198 100644 --- a/source/extensions/filters/http/common/compressor/compressor.cc +++ b/source/extensions/filters/http/common/compressor/compressor.cc @@ -224,7 +224,7 @@ CompressorFilter::chooseEncoding(const Http::ResponseHeaderMap& headers) const { } // Find all encodings accepted by the user agent and adjust the list of allowed compressors. 
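+  // For example, "gzip;q=0.8, br" splits into the tokens "gzip;q=0.8" and "br";
+  // each token is then cropped at ";" to separate the encoding name from its
+  // q-value parameters.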
- for (const auto token : StringUtil::splitToken(*accept_encoding_, ",", false /* keep_empty */)) { + for (const auto& token : StringUtil::splitToken(*accept_encoding_, ",", false /* keep_empty */)) { EncPair pair = std::make_pair(StringUtil::trim(StringUtil::cropRight(token, ";")), static_cast(1)); const auto params = StringUtil::cropLeft(token, ";"); diff --git a/source/extensions/filters/http/common/jwks_fetcher.cc b/source/extensions/filters/http/common/jwks_fetcher.cc index 9f53fd32e21e..c4a8931d8e34 100644 --- a/source/extensions/filters/http/common/jwks_fetcher.cc +++ b/source/extensions/filters/http/common/jwks_fetcher.cc @@ -69,9 +69,8 @@ class JwksFetcherImpl : public JwksFetcher, const uint64_t status_code = Http::Utility::getResponseStatus(response->headers()); if (status_code == enumToInt(Http::Code::OK)) { ENVOY_LOG(debug, "{}: fetch pubkey [uri = {}]: success", __func__, uri_->uri()); - if (response->body()) { - const auto len = response->body()->length(); - const auto body = std::string(static_cast(response->body()->linearize(len)), len); + if (response->body().length() != 0) { + const auto body = response->bodyAsString(); auto jwks = google::jwt_verify::Jwks::createFrom(body, google::jwt_verify::Jwks::Type::JWKS); if (jwks->getStatus() == google::jwt_verify::Status::Ok) { diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index 574a0f36bfc1..e936b16c8f24 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -109,9 +109,8 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head response_headers->setInline(access_control_max_age_handle.handle(), maxAge()); } - decoder_callbacks_->streamInfo().setResponseCodeDetails( - HttpResponseCodeDetails::get().CorsResponse); - decoder_callbacks_->encodeHeaders(std::move(response_headers), true); + decoder_callbacks_->encodeHeaders(std::move(response_headers), true, + HttpResponseCodeDetails::get().CorsResponse); return Http::FilterHeadersStatus::StopIteration; } diff --git a/source/extensions/filters/http/csrf/BUILD b/source/extensions/filters/http/csrf/BUILD index 47bea6f6bbf2..383b805580f0 100644 --- a/source/extensions/filters/http/csrf/BUILD +++ b/source/extensions/filters/http/csrf/BUILD @@ -22,7 +22,6 @@ envoy_cc_library( "//source/common/common:matchers_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", - "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/csrf/csrf_filter.cc b/source/extensions/filters/http/csrf/csrf_filter.cc index bb7db21b36eb..01fe2f38127a 100644 --- a/source/extensions/filters/http/csrf/csrf_filter.cc +++ b/source/extensions/filters/http/csrf/csrf_filter.cc @@ -6,7 +6,6 @@ #include "common/common/empty_string.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" -#include "common/http/url_utility.h" #include "common/http/utility.h" #include "extensions/filters/http/well_known_names.h" diff --git a/source/extensions/filters/http/dynamic_forward_proxy/config.cc b/source/extensions/filters/http/dynamic_forward_proxy/config.cc index 30c984da4840..1907801bfa1d 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/config.cc @@ -15,8 +15,8 @@ 
Http::FilterFactoryCb DynamicForwardProxyFilterFactory::createFilterFactoryFromP const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), context.random(), - context.runtime(), context.scope()); + context.singletonManager(), context.dispatcher(), context.threadLocal(), + context.api().randomGenerator(), context.runtime(), context.scope()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc index b41b0cf07d91..f684bad7571d 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc @@ -108,9 +108,11 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& hea const auto& host_rewrite_header = config->hostRewriteHeader(); if (!host_rewrite_header.get().empty()) { - const auto* header = headers.get(host_rewrite_header); - if (header != nullptr) { - const auto& header_value = header->value().getStringView(); + const auto header = headers.get(host_rewrite_header); + if (!header.empty()) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + const auto& header_value = header[0]->value().getStringView(); headers.setHost(header_value); } } diff --git a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc index dd34cbc81b0a..6fd0a1d42a84 100644 --- a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc +++ b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc @@ -65,10 +65,12 @@ const std::vector RequestParser::TRANSACT_ITEM_OPERATIONS{"Conditio std::string RequestParser::parseOperation(const Http::HeaderMap& header_map) { std::string operation; - const Http::HeaderEntry* x_amz_target = header_map.get(X_AMZ_TARGET); - if (x_amz_target) { + const auto x_amz_target = header_map.get(X_AMZ_TARGET); + if (!x_amz_target.empty()) { // Normally x-amz-target contains Version.Operation, e.g., DynamoDB_20160101.GetItem - auto version_and_operation = StringUtil::splitToken(x_amz_target->value().getStringView(), "."); + // AWS is trusted. Using the first value is fine. 
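+  // For example, "DynamoDB_20160101.GetItem" yields the operation "GetItem".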
+ auto version_and_operation = + StringUtil::splitToken(x_amz_target[0]->value().getStringView(), "."); if (version_and_operation.size() == 2) { operation = std::string{version_and_operation[1]}; } diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index f5808ee7fdf5..788740278af1 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -9,6 +9,7 @@ #include "envoy/registry/registry.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" #include "extensions/filters/common/ext_authz/ext_authz_grpc_impl.h" #include "extensions/filters/common/ext_authz/ext_authz_http_impl.h" @@ -22,9 +23,8 @@ namespace ExtAuthz { Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { - const auto filter_config = - std::make_shared(proto_config, context.localInfo(), context.scope(), - context.runtime(), context.httpContext(), stats_prefix); + const auto filter_config = std::make_shared( + proto_config, context.scope(), context.runtime(), context.httpContext(), stats_prefix); Http::FilterFactoryCb callback; if (proto_config.has_http_service()) { @@ -41,27 +41,54 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; + } else if (proto_config.grpc_service().has_google_grpc()) { + // Google gRPC client. + + // The use_alpha field was there select the v2alpha api version, which is + // long deprecated and should not be used anymore. + if (proto_config.hidden_envoy_deprecated_use_alpha()) { + throw EnvoyException("The use_alpha field is deprecated and is no longer supported."); + } + + const uint32_t timeout_ms = + PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); + auto async_client_cache = getAsyncClientCacheSingleton(context); + callback = [async_client_cache, filter_config, timeout_ms, proto_config, + transport_api_version = proto_config.transport_api_version()]( + Http::FilterChainFactoryCallbacks& callbacks) { + auto client = std::make_unique( + async_client_cache->getOrCreateAsyncClient(proto_config), + std::chrono::milliseconds(timeout_ms), transport_api_version); + callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ + std::make_shared(filter_config, std::move(client))}); + }; } else { - // gRPC client. + // Envoy gRPC client. + + // The use_alpha field was there select the v2alpha api version, which is + // long deprecated and should not be used anymore. 
+ if (proto_config.hidden_envoy_deprecated_use_alpha()) { + throw EnvoyException("The use_alpha field is deprecated and is no longer supported."); + } + const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); callback = [grpc_service = proto_config.grpc_service(), &context, filter_config, timeout_ms, - transport_api_version = proto_config.transport_api_version(), - use_alpha = proto_config.hidden_envoy_deprecated_use_alpha()]( + transport_api_version = proto_config.transport_api_version()]( Http::FilterChainFactoryCallbacks& callbacks) { const auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); auto client = std::make_unique( async_client_factory->create(), std::chrono::milliseconds(timeout_ms), - transport_api_version, use_alpha); + transport_api_version); callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; } return callback; -}; +} Router::RouteSpecificFilterConfigConstSharedPtr ExtAuthzFilterConfig::createRouteSpecificFilterConfigTyped( @@ -76,6 +103,18 @@ ExtAuthzFilterConfig::createRouteSpecificFilterConfigTyped( REGISTER_FACTORY(ExtAuthzFilterConfig, Server::Configuration::NamedHttpFilterConfigFactory){"envoy.ext_authz"}; +SINGLETON_MANAGER_REGISTRATION(google_grpc_async_client_cache); + +Filters::Common::ExtAuthz::AsyncClientCacheSharedPtr +getAsyncClientCacheSingleton(Server::Configuration::FactoryContext& context) { + return context.singletonManager().getTyped( + SINGLETON_MANAGER_REGISTERED_NAME(google_grpc_async_client_cache), [&context] { + return std::make_shared( + context.clusterManager().grpcAsyncClientManager(), context.scope(), + context.threadLocal()); + }); +} + } // namespace ExtAuthz } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_authz/config.h b/source/extensions/filters/http/ext_authz/config.h index cf6dc6ddf914..5ca66d423805 100644 --- a/source/extensions/filters/http/ext_authz/config.h +++ b/source/extensions/filters/http/ext_authz/config.h @@ -3,6 +3,7 @@ #include "envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h" #include "envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.validate.h" +#include "extensions/filters/common/ext_authz/ext_authz_grpc_impl.h" #include "extensions/filters/http/common/factory_base.h" #include "extensions/filters/http/well_known_names.h" @@ -33,6 +34,9 @@ class ExtAuthzFilterConfig ProtobufMessage::ValidationVisitor& validator) override; }; +Filters::Common::ExtAuthz::AsyncClientCacheSharedPtr +getAsyncClientCacheSingleton(Server::Configuration::FactoryContext& context); + } // namespace ExtAuthz } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 156cdf3d11af..ba050edeaa24 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -4,6 +4,7 @@ #include "common/common/assert.h" #include "common/common/enum_to_int.h" +#include "common/common/matchers.h" #include "common/http/utility.h" #include "common/router/config_impl.h" @@ -23,7 +24,8 @@ struct RcDetailsValues { using RcDetails = ConstSingleton; void FilterConfigPerRoute::merge(const FilterConfigPerRoute& other) { - disabled_ = other.disabled_; + // We only merge context extensions here, and leave boolean 
flags untouched since those flags are + // not used from the merged config. auto begin_it = other.context_extensions_.begin(); auto end_it = other.context_extensions_.end(); for (auto it = begin_it; it != end_it; ++it) { @@ -61,7 +63,8 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers, Filters::Common::ExtAuthz::CheckRequestUtils::createHttpCheck( callbacks_, headers, std::move(context_extensions), std::move(metadata_context), - check_request_, config_->maxRequestBytes(), config_->includePeerCertificate()); + check_request_, config_->maxRequestBytes(), config_->packAsBytes(), + config_->includePeerCertificate()); ENVOY_STREAM_LOG(trace, "ext_authz filter calling authorization server", *callbacks_); state_ = State::Calling; @@ -69,18 +72,21 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers, // going to invoke check call. cluster_ = callbacks_->clusterInfo(); initiating_call_ = true; - client_->check(*this, check_request_, callbacks_->activeSpan(), callbacks_->streamInfo()); + client_->check(*this, callbacks_->dispatcher(), check_request_, callbacks_->activeSpan(), + callbacks_->streamInfo()); initiating_call_ = false; } Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { Router::RouteConstSharedPtr route = callbacks_->route(); - skip_check_ = skipCheckForRoute(route); + const auto per_route_flags = getPerRouteFlags(route); + skip_check_ = per_route_flags.skip_check_; + if (skip_check_) { + return Http::FilterHeadersStatus::Continue; + } - if (!config_->filterEnabled() || skip_check_) { - if (skip_check_) { - return Http::FilterHeadersStatus::Continue; - } + if (!config_->filterEnabled(callbacks_->streamInfo().dynamicMetadata())) { + stats_.disabled_.inc(); if (config_->denyAtDisable()) { ENVOY_STREAM_LOG(trace, "ext_authz filter is disabled. Deny the request.", *callbacks_); callbacks_->streamInfo().setResponseFlag( @@ -93,9 +99,10 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, } request_headers_ = &headers; - buffer_data_ = config_->withRequestBody() && + buffer_data_ = config_->withRequestBody() && !per_route_flags.skip_request_body_buffering_ && !(end_stream || Http::Utility::isWebSocketUpgradeRequest(headers) || Http::Utility::isH2UpgradeRequest(headers)); + if (buffer_data_) { ENVOY_STREAM_LOG(debug, "ext_authz filter is buffering the request", *callbacks_); if (!config_->allowPartialMessage()) { @@ -164,8 +171,12 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { switch (response->status) { case CheckStatus::OK: { + // Any changes to request headers can affect how the request is going to be + // routed. If we are changing the headers we also need to clear the route + // cache. 
if (config_->clearRouteCache() && - (!response->headers_to_set.empty() || !response->headers_to_append.empty())) { + (!response->headers_to_set.empty() || !response->headers_to_append.empty() || + !response->headers_to_remove.empty())) { ENVOY_STREAM_LOG(debug, "ext_authz is clearing route cache", *callbacks_); callbacks_->clearRouteCache(); } @@ -180,7 +191,7 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { request_headers_->addCopy(header.first, header.second); } for (const auto& header : response->headers_to_append) { - const Http::HeaderEntry* header_to_modify = request_headers_->get(header.first); + const auto header_to_modify = request_headers_->get(header.first); // TODO(dio): Add a flag to allow appending non-existent headers, without setting it first // (via `headers_to_add`). For example, given: // 1. Original headers {"original": "true"} @@ -189,7 +200,7 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { // Currently it is not possible to add {{"append": "1"}, {"append": "2"}} (the intended // combined headers: {{"original": "true"}, {"append": "1"}, {"append": "2"}}) to the request // to upstream server by only sets `headers_to_append`. - if (header_to_modify != nullptr) { + if (!header_to_modify.empty()) { ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second); // The current behavior of appending is by combining entries with the same key, into one // entry. The value of that combined entry is separated by ",". @@ -198,6 +209,18 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { } } + ENVOY_STREAM_LOG(trace, "ext_authz filter removed header(s) from the request:", *callbacks_); + for (const auto& header : response->headers_to_remove) { + // We don't allow removing any :-prefixed headers, nor Host, as removing + // them would make the request malformed. + if (absl::StartsWithIgnoreCase(absl::string_view(header.get()), ":") || + header == Http::Headers::get().HostLegacy) { + continue; + } + ENVOY_STREAM_LOG(trace, "'{}'", *callbacks_, header.get()); + request_headers_->remove(header); + } + if (!response->dynamic_metadata.fields().empty()) { callbacks_->streamInfo().setDynamicMetadata(HttpFilterNames::get().ExtAuthorization, response->dynamic_metadata); @@ -258,8 +281,14 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { case CheckStatus::Error: { if (cluster_) { config_->incCounter(cluster_->statsScope(), config_->ext_authz_error_); + if (response->error_kind == Filters::Common::ExtAuthz::ErrorKind::Timedout) { + config_->incCounter(cluster_->statsScope(), config_->ext_authz_timeout_); + } } stats_.error_.inc(); + if (response->error_kind == Filters::Common::ExtAuthz::ErrorKind::Timedout) { + stats_.timeout_.inc(); + } if (config_->failureModeAllow()) { ENVOY_STREAM_LOG(trace, "ext_authz filter allowed the request with error", *callbacks_); stats_.failure_mode_allowed_.inc(); @@ -294,25 +323,29 @@ bool Filter::isBufferFull() const { } void Filter::continueDecoding() { + // After sending the check request, we don't need to buffer the data anymore. 
+ buffer_data_ = false; + filter_return_ = FilterReturn::ContinueDecoding; if (!initiating_call_) { callbacks_->continueDecoding(); } } -bool Filter::skipCheckForRoute(const Router::RouteConstSharedPtr& route) const { +Filter::PerRouteFlags Filter::getPerRouteFlags(const Router::RouteConstSharedPtr& route) const { if (route == nullptr || route->routeEntry() == nullptr) { - return true; + return PerRouteFlags{true /*skip_check_*/, false /*skip_request_body_buffering_*/}; } const auto* specific_per_route_config = Http::Utility::resolveMostSpecificPerFilterConfig( HttpFilterNames::get().ExtAuthorization, route); if (specific_per_route_config != nullptr) { - return specific_per_route_config->disabled(); + return PerRouteFlags{specific_per_route_config->disabled(), + specific_per_route_config->disableRequestBodyBuffering()}; } - return false; + return PerRouteFlags{false /*skip_check_*/, false /*skip_request_body_buffering_*/}; } } // namespace ExtAuthz diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 9a24436c7c18..6dd15619df42 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -7,7 +7,6 @@ #include "envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h" #include "envoy/http/filter.h" -#include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" #include "envoy/service/auth/v3/external_auth.pb.h" #include "envoy/stats/scope.h" @@ -38,6 +37,8 @@ namespace ExtAuthz { COUNTER(ok) \ COUNTER(denied) \ COUNTER(error) \ + COUNTER(timeout) \ + COUNTER(disabled) \ COUNTER(failure_mode_allowed) /** @@ -53,18 +54,23 @@ struct ExtAuthzFilterStats { class FilterConfig { public: FilterConfig(const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& config, - const LocalInfo::LocalInfo&, Stats::Scope& scope, Runtime::Loader& runtime, - Http::Context& http_context, const std::string& stats_prefix) + Stats::Scope& scope, Runtime::Loader& runtime, Http::Context& http_context, + const std::string& stats_prefix) : allow_partial_message_(config.with_request_body().allow_partial_message()), failure_mode_allow_(config.failure_mode_allow()), clear_route_cache_(config.clear_route_cache()), max_request_bytes_(config.with_request_body().max_request_bytes()), + pack_as_bytes_(config.with_request_body().pack_as_bytes()), status_on_error_(toErrorCode(config.status_on_error().code())), scope_(scope), runtime_(runtime), http_context_(http_context), filter_enabled_(config.has_filter_enabled() ? absl::optional( Runtime::FractionalPercent(config.filter_enabled(), runtime_)) : absl::nullopt), + filter_enabled_metadata_( + config.has_filter_enabled_metadata() + ? absl::optional(config.filter_enabled_metadata()) + : absl::nullopt), deny_at_disable_(config.has_deny_at_disable() ? 
absl::optional( Runtime::FeatureFlag(config.deny_at_disable(), runtime_)) @@ -73,10 +79,13 @@ class FilterConfig { metadata_context_namespaces_(config.metadata_context_namespaces().begin(), config.metadata_context_namespaces().end()), include_peer_certificate_(config.include_peer_certificate()), - stats_(generateStats(stats_prefix, scope)), ext_authz_ok_(pool_.add("ext_authz.ok")), - ext_authz_denied_(pool_.add("ext_authz.denied")), - ext_authz_error_(pool_.add("ext_authz.error")), - ext_authz_failure_mode_allowed_(pool_.add("ext_authz.failure_mode_allowed")) {} + stats_(generateStats(stats_prefix, config.stat_prefix(), scope)), + ext_authz_ok_(pool_.add(createPoolStatName(config.stat_prefix(), "ok"))), + ext_authz_denied_(pool_.add(createPoolStatName(config.stat_prefix(), "denied"))), + ext_authz_error_(pool_.add(createPoolStatName(config.stat_prefix(), "error"))), + ext_authz_timeout_(pool_.add(createPoolStatName(config.stat_prefix(), "timeout"))), + ext_authz_failure_mode_allowed_( + pool_.add(createPoolStatName(config.stat_prefix(), "failure_mode_allowed"))) {} bool allowPartialMessage() const { return allow_partial_message_; } @@ -88,9 +97,16 @@ class FilterConfig { uint32_t maxRequestBytes() const { return max_request_bytes_; } + bool packAsBytes() const { return pack_as_bytes_; } + Http::Code statusOnError() const { return status_on_error_; } - bool filterEnabled() { return filter_enabled_.has_value() ? filter_enabled_->enabled() : true; } + bool filterEnabled(const envoy::config::core::v3::Metadata& metadata) { + const bool enabled = filter_enabled_.has_value() ? filter_enabled_->enabled() : true; + const bool enabled_metadata = + filter_enabled_metadata_.has_value() ? filter_enabled_metadata_->match(metadata) : true; + return enabled && enabled_metadata; + } bool denyAtDisable() { return deny_at_disable_.has_value() ? deny_at_disable_->enabled() : false; @@ -121,21 +137,34 @@ class FilterConfig { return Http::Code::Forbidden; } - ExtAuthzFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) { - const std::string final_prefix = prefix + "ext_authz."; + ExtAuthzFilterStats generateStats(const std::string& prefix, + const std::string& filter_stats_prefix, Stats::Scope& scope) { + const std::string final_prefix = absl::StrCat(prefix, "ext_authz.", filter_stats_prefix); return {ALL_EXT_AUTHZ_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; } + // This generates ext_authz..name, for example: ext_authz.waf.ok + // when filter_stats_prefix is "waf", and ext_authz.ok when filter_stats_prefix is empty. + const std::string createPoolStatName(const std::string& filter_stats_prefix, + const std::string& name) { + return absl::StrCat("ext_authz", + filter_stats_prefix.empty() ? EMPTY_STRING + : absl::StrCat(".", filter_stats_prefix), + ".", name); + } + const bool allow_partial_message_; const bool failure_mode_allow_; const bool clear_route_cache_; const uint32_t max_request_bytes_; + const bool pack_as_bytes_; const Http::Code status_on_error_; Stats::Scope& scope_; Runtime::Loader& runtime_; Http::Context& http_context_; const absl::optional filter_enabled_; + const absl::optional filter_enabled_metadata_; const absl::optional deny_at_disable_; // TODO(nezdolik): stop using pool as part of deprecating cluster scope stats. 
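The createPoolStatName() helper above composes stat names from the optional per-filter prefix. A tiny standalone sketch of that composition, using plain std::string instead of Envoy's stat name pool (illustrative only):

#include <iostream>
#include <string>

// Mirrors the naming rule described above: "ext_authz.<prefix>.<name>" when a
// filter stat prefix is configured, plain "ext_authz.<name>" otherwise.
std::string poolStatName(const std::string& filter_stats_prefix, const std::string& name) {
  if (filter_stats_prefix.empty()) {
    return "ext_authz." + name;
  }
  return "ext_authz." + filter_stats_prefix + "." + name;
}

int main() {
  std::cout << poolStatName("waf", "ok") << "\n"; // prints ext_authz.waf.ok
  std::cout << poolStatName("", "ok") << "\n";    // prints ext_authz.ok
  return 0;
}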
@@ -154,6 +183,7 @@ class FilterConfig { const Stats::StatName ext_authz_ok_; const Stats::StatName ext_authz_denied_; const Stats::StatName ext_authz_error_; + const Stats::StatName ext_authz_timeout_; const Stats::StatName ext_authz_failure_mode_allowed_; }; @@ -172,6 +202,8 @@ class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { : context_extensions_(config.has_check_settings() ? config.check_settings().context_extensions() : ContextExtensionsMap()), + disable_request_body_buffering_(config.has_check_settings() && + config.check_settings().disable_request_body_buffering()), disabled_(config.disabled()) {} void merge(const FilterConfigPerRoute& other); @@ -185,10 +217,13 @@ class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { bool disabled() const { return disabled_; } + bool disableRequestBodyBuffering() const { return disable_request_body_buffering_; } + private: // We save the context extensions as a protobuf map instead of an std::map as this allows us to // move it to the CheckRequest, thus avoiding a copy that would incur by converting it. ContextExtensionsMap context_extensions_; + bool disable_request_body_buffering_; bool disabled_; }; @@ -222,7 +257,13 @@ class Filter : public Logger::Loggable, const Router::RouteConstSharedPtr& route); void continueDecoding(); bool isBufferFull() const; - bool skipCheckForRoute(const Router::RouteConstSharedPtr& route) const; + + // This holds a set of flags defined in per-route configuration. + struct PerRouteFlags { + const bool skip_check_; + const bool skip_request_body_buffering_; + }; + PerRouteFlags getPerRouteFlags(const Router::RouteConstSharedPtr& route) const; // State of this filter's communication with the external authorization service. // The filter has either not started calling the external service, in the middle of calling diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index d13b360ee07f..b8119639e8c1 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -136,17 +136,11 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers // If the response from upstream does not have the correct content-type, // perform an early return with a useful error message in grpc-message. if (content_type != upstream_content_type_) { - headers.setGrpcMessage(badContentTypeMessage(headers)); - headers.setGrpcStatus(Envoy::Grpc::Status::WellKnownGrpcStatus::Unknown); - headers.setStatus(enumToInt(Http::Code::OK)); - - if (!content_type.empty()) { - headers.setContentType(content_type_); - } + decoder_callbacks_->sendLocalReply(Http::Code::OK, badContentTypeMessage(headers), nullptr, + Grpc::Status::WellKnownGrpcStatus::Unknown, + RcDetails::get().GrpcBridgeFailedContentType); - decoder_callbacks_->streamInfo().setResponseCodeDetails( - RcDetails::get().GrpcBridgeFailedContentType); - return Http::FilterHeadersStatus::ContinueAndEndStream; + return Http::FilterHeadersStatus::StopIteration; } // Restore the content-type to match what the downstream sent. 
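A small self-contained sketch of the extract-then-remove ordering that HeaderValueSelector::extract() above relies on; the std::map stand-in and function name are hypothetical, not Envoy's header map API.

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Copy the header value into an owned string *before* removing the entry,
// since a view into the map entry would dangle after removal.
std::optional<std::string> extractAndRemove(std::map<std::string, std::string>& headers,
                                            const std::string& key, bool remove) {
  const auto it = headers.find(key);
  if (it == headers.end()) {
    return std::nullopt;
  }
  std::optional<std::string> value = it->second; // owned copy taken first
  if (remove) {
    headers.erase(it);
  }
  return value;
}

int main() {
  std::map<std::string, std::string> headers{{"x-version", "1.2.3"}};
  const auto v = extractAndRemove(headers, "x-version", /*remove=*/true);
  std::cout << (v ? *v : "<missing>") << ", remaining=" << headers.size() << "\n";
}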
diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h index a0fabc85bfdd..2de7578c944e 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h @@ -5,7 +5,6 @@ #include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" #include "envoy/http/filter.h" #include "envoy/http/header_map.h" -#include "envoy/json/json_object.h" #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" diff --git a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h index 6afb1861aece..0734d10e8d3d 100644 --- a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h +++ b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h @@ -24,6 +24,10 @@ struct GrpcStatsObject : public StreamInfo::FilterState::Object { msg->set_response_message_count(response_message_count); return msg; } + + absl::optional serializeAsString() const override { + return absl::StrCat(request_message_count, ",", response_message_count); + } }; class GrpcStatsFilterConfigFactory diff --git a/source/extensions/filters/http/header_to_metadata/BUILD b/source/extensions/filters/http/header_to_metadata/BUILD index 1bbe574312e6..ad4f9bcf8cfe 100644 --- a/source/extensions/filters/http/header_to_metadata/BUILD +++ b/source/extensions/filters/http/header_to_metadata/BUILD @@ -19,6 +19,7 @@ envoy_cc_library( deps = [ "//include/envoy/server:filter_config_interface", "//source/common/common:base64_lib", + "//source/common/http:header_utility_lib", "//source/common/http:utility_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index 9550e243c6a1..2d5358f89aeb 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -5,6 +5,7 @@ #include "common/common/base64.h" #include "common/common/regex.h" #include "common/config/well_known_names.h" +#include "common/http/header_utility.h" #include "common/http/utility.h" #include "common/protobuf/protobuf.h" @@ -20,12 +21,12 @@ namespace HeaderToMetadataFilter { // Extract the value of the header. absl::optional HeaderValueSelector::extract(Http::HeaderMap& map) const { - const Http::HeaderEntry* header_entry = map.get(header_); - if (header_entry == nullptr) { + const auto header_value = Http::HeaderUtility::getAllOfHeaderAsString(map, header_); + if (!header_value.result().has_value()) { return absl::nullopt; } // Catch the value in the header before removing. 
- absl::optional value = std::string(header_entry->value().getStringView()); + absl::optional value = std::string(header_value.result().value()); if (remove_) { map.remove(header_); } diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index f0249b014ea1..1df6425b585f 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -14,6 +14,7 @@ envoy_cc_library( srcs = ["extractor.cc"], hdrs = ["extractor.h"], deps = [ + "//source/common/http:header_utility_lib", "//source/common/http:utility_lib", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index 0e86ea6d9518..1b73eeaf08b2 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -140,6 +140,7 @@ void AuthenticatorImpl::startVerify() { tokens_.pop_back(); jwt_ = std::make_unique<::google::jwt_verify::Jwt>(); + ENVOY_LOG(debug, "{}: Parse Jwt {}", name(), curr_token_->token()); const Status status = jwt_->parseFromString(curr_token_->token()); if (status != Status::Ok) { doneWithStatus(status); diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index b84f9fb4178f..b1ec12058ea9 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -5,6 +5,7 @@ #include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" #include "common/common/utility.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/utility.h" #include "common/singleton/const_singleton.h" @@ -187,9 +188,10 @@ ExtractorImpl::extract(const Http::RequestHeaderMap& headers) const { for (const auto& location_it : header_locations_) { const auto& location_spec = location_it.second; ENVOY_LOG(debug, "extract {}", location_it.first); - const Http::HeaderEntry* entry = headers.get(location_spec->header_); - if (entry) { - auto value_str = entry->value().getStringView(); + const auto result = + Http::HeaderUtility::getAllOfHeaderAsString(headers, location_spec->header_); + if (result.result().has_value()) { + auto value_str = result.result().value(); if (!location_spec->value_prefix_.empty()) { const auto pos = value_str.find(location_spec->value_prefix_); if (pos == absl::string_view::npos) { diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 65bc2b9a2896..7dfa78a4257b 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -27,14 +27,10 @@ bool isCorsPreflightRequest(const Http::RequestHeaderMap& headers) { !headers.getInlineValue(access_control_request_method_handle.handle()).empty(); } +// The prefix used in the response code detail sent from jwt authn filter. 
+constexpr absl::string_view kRcDetailJwtAuthnPrefix = "jwt_authn_access_denied"; } // namespace -struct RcDetailsValues { - // The jwt_authn filter rejected the request - const std::string JwtAuthnAccessDenied = "jwt_authn_access_denied"; -}; -using RcDetails = ConstSingleton; - Filter::Filter(FilterConfigSharedPtr config) : stats_(config->stats()), config_(std::move(config)) {} @@ -83,7 +79,7 @@ void Filter::setPayload(const ProtobufWkt::Struct& payload) { } void Filter::onComplete(const Status& status) { - ENVOY_LOG(debug, "Called Filter : check complete {}", + ENVOY_LOG(debug, "Jwt authentication completed with: {}", ::google::jwt_verify::getStatusString(status)); // This stream has been reset, abort the callback. if (state_ == Responded) { @@ -96,8 +92,10 @@ void Filter::onComplete(const Status& status) { Http::Code code = status == Status::JwtAudienceNotAllowed ? Http::Code::Forbidden : Http::Code::Unauthorized; // return failure reason as message body - decoder_callbacks_->sendLocalReply(code, ::google::jwt_verify::getStatusString(status), nullptr, - absl::nullopt, RcDetails::get().JwtAuthnAccessDenied); + decoder_callbacks_->sendLocalReply( + code, ::google::jwt_verify::getStatusString(status), nullptr, absl::nullopt, + absl::StrCat(kRcDetailJwtAuthnPrefix, "{", ::google::jwt_verify::getStatusString(status), + "}")); return; } stats_.allowed_.inc(); diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 16061fb90196..a1599dac5394 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -143,6 +143,22 @@ class RegexMatcherImpl : public BaseMatcherImpl { std::string regex_str_; }; +/** + * Perform a match against an HTTP CONNECT request. 
+ */ +class ConnectMatcherImpl : public BaseMatcherImpl { +public: + ConnectMatcherImpl(const RequirementRule& rule) : BaseMatcherImpl(rule) {} + + bool matches(const Http::RequestHeaderMap& headers) const override { + if (Http::HeaderUtility::isConnect(headers) && BaseMatcherImpl::matchRoute(headers)) { + ENVOY_LOG(debug, "CONNECT requirement matched."); + return true; + } + + return false; + } +}; } // namespace MatcherConstPtr Matcher::create(const RequirementRule& rule) { @@ -155,10 +171,7 @@ MatcherConstPtr Matcher::create(const RequirementRule& rule) { case RouteMatch::PathSpecifierCase::kSafeRegex: return std::make_unique(rule); case RouteMatch::PathSpecifierCase::kConnectMatcher: - // TODO: When CONNECT match support is implemented, remove the manual clean-up of CONNECT - // matching in the filter fuzzer implementation: - // //test/extensions/filters/http/common/fuzz/uber_per_filter.cc - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + return std::make_unique(rule); default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/extensions/filters/http/local_ratelimit/BUILD b/source/extensions/filters/http/local_ratelimit/BUILD new file mode 100644 index 000000000000..048d7d4ed4e0 --- /dev/null +++ b/source/extensions/filters/http/local_ratelimit/BUILD @@ -0,0 +1,46 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +# Local Ratelimit L7 HTTP filter +# Public docs: docs/root/configuration/http_filters/local_rate_limit_filter.rst + +envoy_extension_package() + +envoy_cc_library( + name = "local_ratelimit_lib", + srcs = ["local_ratelimit.cc"], + hdrs = ["local_ratelimit.h"], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/server:filter_config_interface", + "//include/envoy/stats:stats_macros", + "//source/common/common:utility_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/router:header_parser_lib", + "//source/common/runtime:runtime_lib", + "//source/extensions/filters/common/local_ratelimit:local_ratelimit_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "@envoy_api//envoy/extensions/filters/http/local_ratelimit/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "unknown", + deps = [ + ":local_ratelimit_lib", + "//include/envoy/http:filter_interface", + "//source/common/protobuf:utility_lib", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/local_ratelimit/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/local_ratelimit/config.cc b/source/extensions/filters/http/local_ratelimit/config.cc new file mode 100644 index 000000000000..529fd0dd2977 --- /dev/null +++ b/source/extensions/filters/http/local_ratelimit/config.cc @@ -0,0 +1,43 @@ +#include "extensions/filters/http/local_ratelimit/config.h" + +#include + +#include "envoy/registry/registry.h" + +#include "common/protobuf/utility.h" + +#include "extensions/filters/http/local_ratelimit/local_ratelimit.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace LocalRateLimitFilter { + +Http::FilterFactoryCb LocalRateLimitFilterConfig::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, + const std::string&, Server::Configuration::FactoryContext& context) { + 
FilterConfigSharedPtr filter_config = std::make_shared( + proto_config, context.dispatcher(), context.scope(), context.runtime()); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config)); + }; +} + +Router::RouteSpecificFilterConfigConstSharedPtr +LocalRateLimitFilterConfig::createRouteSpecificFilterConfigTyped( + const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, + Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) { + return std::make_shared(proto_config, context.dispatcher(), context.scope(), + context.runtime(), true); +} + +/** + * Static registration for the rate limit filter. @see RegisterFactory. + */ +REGISTER_FACTORY(LocalRateLimitFilterConfig, + Server::Configuration::NamedHttpFilterConfigFactory){"envoy.local_rate_limit"}; + +} // namespace LocalRateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/local_ratelimit/config.h b/source/extensions/filters/http/local_ratelimit/config.h new file mode 100644 index 000000000000..4ee849e368be --- /dev/null +++ b/source/extensions/filters/http/local_ratelimit/config.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.h" +#include "envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace LocalRateLimitFilter { + +/** + * Config registration for the local rate limit filter. @see NamedHttpFilterConfigFactory. + */ +class LocalRateLimitFilterConfig + : public Common::FactoryBase< + envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit> { +public: + LocalRateLimitFilterConfig() : FactoryBase("envoy.filters.http.local_ratelimit") {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; + Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped( + const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, + Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override; +}; + +} // namespace LocalRateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/local_ratelimit/local_ratelimit.cc b/source/extensions/filters/http/local_ratelimit/local_ratelimit.cc new file mode 100644 index 000000000000..3b13bfa374ac --- /dev/null +++ b/source/extensions/filters/http/local_ratelimit/local_ratelimit.cc @@ -0,0 +1,110 @@ +#include "extensions/filters/http/local_ratelimit/local_ratelimit.h" + +#include +#include + +#include "envoy/http/codes.h" + +#include "common/http/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace LocalRateLimitFilter { + +FilterConfig::FilterConfig( + const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& config, + Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::Loader& runtime, + const bool per_route) + : status_(toErrorCode(config.status().code())), + stats_(generateStats(config.stat_prefix(), scope)), + 
rate_limiter_(Filters::Common::LocalRateLimit::LocalRateLimiterImpl( + std::chrono::milliseconds( + PROTOBUF_GET_MS_OR_DEFAULT(config.token_bucket(), fill_interval, 0)), + config.token_bucket().max_tokens(), + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.token_bucket(), tokens_per_fill, 1), dispatcher)), + runtime_(runtime), + filter_enabled_( + config.has_filter_enabled() + ? absl::optional( + Envoy::Runtime::FractionalPercent(config.filter_enabled(), runtime_)) + : absl::nullopt), + filter_enforced_( + config.has_filter_enabled() + ? absl::optional( + Envoy::Runtime::FractionalPercent(config.filter_enforced(), runtime_)) + : absl::nullopt), + response_headers_parser_( + Envoy::Router::HeaderParser::configure(config.response_headers_to_add())) { + // Note: no token bucket is fine for the global config, which would be the case for enabling + // the filter globally but disabled and then applying limits at the virtual host or + // route level. At the virtual or route level, it makes no sense to have an no token + // bucket so we throw an error. If there's no token bucket configured globally or + // at the vhost/route level, no rate limiting is applied. + if (per_route && !config.has_token_bucket()) { + throw EnvoyException("local rate limit token bucket must be set for per filter configs"); + } +} + +bool FilterConfig::requestAllowed() const { return rate_limiter_.requestAllowed(); } + +LocalRateLimitStats FilterConfig::generateStats(const std::string& prefix, Stats::Scope& scope) { + const std::string final_prefix = prefix + ".http_local_rate_limit"; + return {ALL_LOCAL_RATE_LIMIT_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; +} + +bool FilterConfig::enabled() const { + return filter_enabled_.has_value() ? filter_enabled_->enabled() : false; +} + +bool FilterConfig::enforced() const { + return filter_enforced_.has_value() ? 
filter_enforced_->enabled() : false; +} + +Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap&, bool) { + const auto* config = getConfig(); + + if (!config->enabled()) { + return Http::FilterHeadersStatus::Continue; + } + + config->stats().enabled_.inc(); + + if (config->requestAllowed()) { + config->stats().ok_.inc(); + return Http::FilterHeadersStatus::Continue; + } + + config->stats().rate_limited_.inc(); + + if (!config->enforced()) { + return Http::FilterHeadersStatus::Continue; + } + + config->stats().enforced_.inc(); + + decoder_callbacks_->sendLocalReply( + config->status(), "local_rate_limited", + [this, config](Http::HeaderMap& headers) { + config->responseHeadersParser().evaluateHeaders(headers, decoder_callbacks_->streamInfo()); + }, + absl::nullopt, "local_rate_limited"); + decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited); + + return Http::FilterHeadersStatus::StopIteration; +} + +const FilterConfig* Filter::getConfig() const { + const auto* config = Http::Utility::resolveMostSpecificPerFilterConfig( + "envoy.filters.http.local_ratelimit", decoder_callbacks_->route()); + if (config) { + return config; + } + + return config_.get(); +} + +} // namespace LocalRateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/local_ratelimit/local_ratelimit.h b/source/extensions/filters/http/local_ratelimit/local_ratelimit.h new file mode 100644 index 000000000000..6549094d07c3 --- /dev/null +++ b/source/extensions/filters/http/local_ratelimit/local_ratelimit.h @@ -0,0 +1,107 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.h" +#include "envoy/http/filter.h" +#include "envoy/runtime/runtime.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/assert.h" +#include "common/http/header_map_impl.h" +#include "common/router/header_parser.h" +#include "common/runtime/runtime_protos.h" + +#include "extensions/filters/common/local_ratelimit/local_ratelimit_impl.h" +#include "extensions/filters/http/common/pass_through_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace LocalRateLimitFilter { + +/** + * All local rate limit stats. @see stats_macros.h + */ +#define ALL_LOCAL_RATE_LIMIT_STATS(COUNTER) \ + COUNTER(enabled) \ + COUNTER(enforced) \ + COUNTER(rate_limited) \ + COUNTER(ok) + +/** + * Struct definition for all local rate limit stats. @see stats_macros.h + */ +struct LocalRateLimitStats { + ALL_LOCAL_RATE_LIMIT_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Global configuration for the HTTP local rate limit filter. 
+ */ +class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig { +public: + FilterConfig(const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& config, + Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::Loader& runtime, + bool per_route = false); + ~FilterConfig() override = default; + Runtime::Loader& runtime() { return runtime_; } + bool requestAllowed() const; + bool enabled() const; + bool enforced() const; + LocalRateLimitStats& stats() const { return stats_; } + const Router::HeaderParser& responseHeadersParser() const { return *response_headers_parser_; } + Http::Code status() const { return status_; } + +private: + friend class FilterTest; + + static LocalRateLimitStats generateStats(const std::string& prefix, Stats::Scope& scope); + + static Http::Code toErrorCode(uint64_t status) { + const auto code = static_cast(status); + if (code >= Http::Code::BadRequest) { + return code; + } + return Http::Code::TooManyRequests; + } + + const Http::Code status_; + mutable LocalRateLimitStats stats_; + Filters::Common::LocalRateLimit::LocalRateLimiterImpl rate_limiter_; + Runtime::Loader& runtime_; + const absl::optional filter_enabled_; + const absl::optional filter_enforced_; + Router::HeaderParserPtr response_headers_parser_; +}; + +using FilterConfigSharedPtr = std::shared_ptr; + +/** + * HTTP local rate limit filter. Depending on the route configuration, this filter calls consults + * with local token bucket before allowing further filter iteration. + */ +class Filter : public Http::PassThroughFilter { +public: + Filter(FilterConfigSharedPtr config) : config_(config) {} + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) override; + +private: + friend class FilterTest; + + const FilterConfig* getConfig() const; + + FilterConfigSharedPtr config_; +}; + +} // namespace LocalRateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index 25b02828a28f..188b0c484752 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -43,6 +43,7 @@ envoy_cc_library( "//include/envoy/http:header_map_interface", "//include/envoy/stream_info:stream_info_interface", "//source/common/crypto:utility_lib", + "//source/common/http:header_utility_lib", "//source/common/http:utility_lib", "//source/extensions/common/crypto:utility_lib", "//source/extensions/filters/common/lua:lua_lib", diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 99d9618b13a0..5182d002cba5 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -135,7 +135,7 @@ Http::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter, } if (body != nullptr) { - message->body() = std::make_unique(body, body_size); + message->body().add(body, body_size); message->headers().setContentLength(body_size); } @@ -348,8 +348,9 @@ void StreamHandleWrapper::onSuccess(const Http::AsyncClient::Request&, }); // TODO(mattklein123): Avoid double copy here. 
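The change that follows switches the Lua wrappers to length-explicit string APIs (lua_pushlstring, luaL_checklstring); a tiny illustrative sketch of why, using plain std::string rather than the Lua C API: bodies can contain embedded NUL bytes, which NUL-terminated handling would truncate.

#include <iostream>
#include <string>

int main() {
  const char raw[] = {'a', '\0', 'b'};
  const std::string with_length(raw, sizeof(raw)); // keeps all 3 bytes
  const std::string c_string(raw);                 // stops at the embedded NUL
  std::cout << with_length.size() << " vs " << c_string.size() << "\n"; // 3 vs 1
}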
- if (response->body() != nullptr) { - lua_pushstring(coroutine_.luaState(), response->bodyAsString().c_str()); + if (response->body().length() > 0) { + lua_pushlstring(coroutine_.luaState(), response->bodyAsString().data(), + response->body().length()); } else { lua_pushnil(coroutine_.luaState()); } @@ -384,7 +385,7 @@ void StreamHandleWrapper::onFailure(const Http::AsyncClient::Request& request, new Http::ResponseMessageImpl(Http::createHeaderMap( {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::ServiceUnavailable))}}))); - response_message->body() = std::make_unique("upstream failure"); + response_message->body().add("upstream failure"); onSuccess(request, std::move(response_message)); } @@ -427,8 +428,9 @@ int StreamHandleWrapper::luaBody(lua_State* state) { if (body_wrapper_.get() != nullptr) { body_wrapper_.pushStack(); } else { - body_wrapper_.reset( - Filters::Common::Lua::BufferWrapper::create(state, *callbacks_.bufferedBody()), true); + body_wrapper_.reset(Filters::Common::Lua::BufferWrapper::create( + state, const_cast(*callbacks_.bufferedBody())), + true); } return 1; } @@ -618,9 +620,9 @@ int StreamHandleWrapper::luaImportPublicKey(lua_State* state) { } int StreamHandleWrapper::luaBase64Escape(lua_State* state) { - // Get input string. - absl::string_view input = luaL_checkstring(state, 2); - auto output = absl::Base64Escape(input); + size_t input_size; + const char* input = luaL_checklstring(state, 2, &input_size); + auto output = absl::Base64Escape(absl::string_view(input, input_size)); lua_pushlstring(state, output.data(), output.length()); return 1; @@ -761,8 +763,8 @@ void Filter::scriptLog(spdlog::level::level_enum level, const char* message) { void Filter::DecoderCallbacks::respond(Http::ResponseHeaderMapPtr&& headers, Buffer::Instance* body, lua_State*) { - callbacks_->streamInfo().setResponseCodeDetails(HttpResponseCodeDetails::get().LuaResponse); - callbacks_->encodeHeaders(std::move(headers), body == nullptr); + callbacks_->encodeHeaders(std::move(headers), body == nullptr, + HttpResponseCodeDetails::get().LuaResponse); if (body && !parent_.destroyed_) { callbacks_->encodeData(*body, true); } diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index f716519dbefb..cb31e695f8be 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -1,5 +1,6 @@ #include "extensions/filters/http/lua/wrappers.h" +#include "common/http/header_utility.h" #include "common/http/utility.h" #include "extensions/filters/common/lua/wrappers.h" @@ -42,10 +43,10 @@ int HeaderMapWrapper::luaAdd(lua_State* state) { int HeaderMapWrapper::luaGet(lua_State* state) { const char* key = luaL_checkstring(state, 2); - const Http::HeaderEntry* entry = headers_.get(Http::LowerCaseString(key)); - if (entry != nullptr) { - lua_pushlstring(state, entry->value().getStringView().data(), - entry->value().getStringView().length()); + const auto value = + Http::HeaderUtility::getAllOfHeaderAsString(headers_, Http::LowerCaseString(key)); + if (value.result().has_value()) { + lua_pushlstring(state, value.result().value().data(), value.result().value().length()); return 1; } else { return 0; diff --git a/source/extensions/filters/http/oauth2/BUILD b/source/extensions/filters/http/oauth2/BUILD index 7d3ba0b0b773..44d0718a995d 100644 --- a/source/extensions/filters/http/oauth2/BUILD +++ b/source/extensions/filters/http/oauth2/BUILD @@ -44,17 +44,16 @@ envoy_cc_library( srcs = 
["filter.cc"], hdrs = ["filter.h"], deps = [ + ":oauth_client", "//include/envoy/server:filter_config_interface", "//source/common/common:assert_lib", "//source/common/common:empty_string", "//source/common/config:datasource_lib", "//source/common/formatter:substitution_formatter_lib", "//source/common/http:rest_api_fetcher_lib", - "//source/common/http:url_utility_lib", "//source/common/protobuf:utility_lib", "//source/extensions/common/crypto:utility_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "//source/extensions/filters/http/oauth2:oauth_client", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", ], @@ -67,10 +66,10 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", status = "alpha", deps = [ + ":oauth_lib", "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/common:factory_base_lib", - "//source/extensions/filters/http/oauth2:oauth_lib", "@envoy_api//envoy/api/v2/auth:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", ], diff --git a/source/extensions/filters/http/oauth2/config.cc b/source/extensions/filters/http/oauth2/config.cc index e910db23477d..d51d798874e5 100644 --- a/source/extensions/filters/http/oauth2/config.cc +++ b/source/extensions/filters/http/oauth2/config.cc @@ -51,24 +51,32 @@ Http::FilterFactoryCb OAuth2Config::createFilterFactoryFromProtoTyped( const auto& token_secret = credentials.token_secret(); const auto& hmac_secret = credentials.hmac_secret(); - auto& secret_manager = context.clusterManager().clusterManagerFactory().secretManager(); + auto& cluster_manager = context.clusterManager(); + auto& secret_manager = cluster_manager.clusterManagerFactory().secretManager(); auto& transport_socket_factory = context.getTransportSocketFactoryContext(); auto secret_provider_token_secret = secretsProvider(token_secret, secret_manager, transport_socket_factory); + if (secret_provider_token_secret == nullptr) { + throw EnvoyException("invalid token secret configuration"); + } auto secret_provider_hmac_secret = secretsProvider(hmac_secret, secret_manager, transport_socket_factory); + if (secret_provider_hmac_secret == nullptr) { + throw EnvoyException("invalid HMAC secret configuration"); + } auto secret_reader = std::make_shared( secret_provider_token_secret, secret_provider_hmac_secret, context.api()); - auto config = std::make_shared(proto_config, context.clusterManager(), - secret_reader, context.scope(), stats_prefix); + auto config = std::make_shared(proto_config, cluster_manager, secret_reader, + context.scope(), stats_prefix); - return [&context, config](Http::FilterChainFactoryCallbacks& callbacks) -> void { - std::unique_ptr oauth_client = - std::make_unique(context.clusterManager(), config->oauthTokenEndpoint()); - callbacks.addStreamDecoderFilter( - std::make_shared(config, std::move(oauth_client), context.timeSource())); - }; + return + [&context, config, &cluster_manager](Http::FilterChainFactoryCallbacks& callbacks) -> void { + std::unique_ptr oauth_client = + std::make_unique(cluster_manager, config->oauthTokenEndpoint()); + callbacks.addStreamDecoderFilter( + std::make_shared(config, std::move(oauth_client), context.timeSource())); + }; } /* diff --git a/source/extensions/filters/http/oauth2/filter.cc b/source/extensions/filters/http/oauth2/filter.cc index a8584de0ddaa..1603b9b417e5 100644 --- 
a/source/extensions/filters/http/oauth2/filter.cc +++ b/source/extensions/filters/http/oauth2/filter.cc @@ -17,7 +17,6 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/message_impl.h" -#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/protobuf/utility.h" @@ -57,6 +56,11 @@ const std::string& queryParamsError() { CONSTRUCT_ON_FIRST_USE(std::string, "err const std::string& queryParamsCode() { CONSTRUCT_ON_FIRST_USE(std::string, "code"); } const std::string& queryParamsState() { CONSTRUCT_ON_FIRST_USE(std::string, "state"); } +constexpr absl::string_view REDIRECT_RACE = "oauth.race_redirect"; +constexpr absl::string_view REDIRECT_LOGGED_IN = "oauth.logged_in"; +constexpr absl::string_view REDIRECT_FOR_CREDENTIALS = "oauth.missing_credentials"; +constexpr absl::string_view SIGN_OUT = "oauth.sign_out"; + template std::vector headerMatchers(const T& matcher_protos) { std::vector matchers; @@ -219,7 +223,7 @@ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& he Http::createHeaderMap( {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))}, {Http::Headers::get().Location, state}})}; - decoder_callbacks_->encodeHeaders(std::move(response_headers), true); + decoder_callbacks_->encodeHeaders(std::move(response_headers), true, REDIRECT_RACE); } // Continue on with the filter stack. @@ -272,7 +276,7 @@ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& he fmt::format(AuthorizationEndpointFormat, config_->authorizationEndpoint(), config_->clientId(), escaped_redirect_uri, escaped_state); response_headers->setLocation(new_url); - decoder_callbacks_->encodeHeaders(std::move(response_headers), true); + decoder_callbacks_->encodeHeaders(std::move(response_headers), true, REDIRECT_FOR_CREDENTIALS); config_->stats().oauth_unauthorized_rq_.inc(); @@ -348,7 +352,7 @@ Http::FilterHeadersStatus OAuth2Filter::signOutUser(const Http::RequestHeaderMap response_headers->addReference(Http::Headers::get().SetCookie, SignoutCookieValue); response_headers->addReference(Http::Headers::get().SetCookie, SignoutBearerTokenValue); response_headers->setLocation(new_path); - decoder_callbacks_->encodeHeaders(std::move(response_headers), true); + decoder_callbacks_->encodeHeaders(std::move(response_headers), true, SIGN_OUT); return Http::FilterHeadersStatus::StopAllIterationAndBuffer; } @@ -419,7 +423,7 @@ void OAuth2Filter::finishFlow() { response_headers->setLocation(state_); - decoder_callbacks_->encodeHeaders(std::move(response_headers), true); + decoder_callbacks_->encodeHeaders(std::move(response_headers), true, REDIRECT_LOGGED_IN); config_->stats().oauth_success_.inc(); decoder_callbacks_->continueDecoding(); } diff --git a/source/extensions/filters/http/oauth2/oauth_client.cc b/source/extensions/filters/http/oauth2/oauth_client.cc index 35098e436262..a25672c81083 100644 --- a/source/extensions/filters/http/oauth2/oauth_client.cc +++ b/source/extensions/filters/http/oauth2/oauth_client.cc @@ -39,8 +39,6 @@ void OAuth2ClientImpl::asyncGetAccessToken(const std::string& auth_code, Http::RequestMessagePtr request = createPostRequest(); const std::string body = fmt::format(GetAccessTokenBodyFormatString, auth_code, encoded_client_id, encoded_secret, encoded_cb_url); - request->body() = std::make_unique(body); - ENVOY_LOG(debug, "Dispatching OAuth request for access token."); dispatchRequest(std::move(request)); diff --git 
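The switch that follows replaces the removed includeVirtualHostRateLimits() check; a standalone sketch of the three per-route options it implements (the helper name is illustrative, not Envoy code).

#include <iostream>

// Include always adds virtual-host descriptors, Ignore never does, and
// Override only falls back to them when the route has no rate limit policy of
// its own.
enum class VhRateLimitOptions { Override, Include, Ignore };

bool useVirtualHostRateLimits(VhRateLimitOptions option, bool route_has_rate_limits) {
  switch (option) {
  case VhRateLimitOptions::Include:
    return true;
  case VhRateLimitOptions::Ignore:
    return false;
  case VhRateLimitOptions::Override:
    return !route_has_rate_limits;
  }
  return false;
}

int main() {
  std::cout << useVirtualHostRateLimits(VhRateLimitOptions::Override, false) << "\n"; // 1
}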
a/source/extensions/filters/http/ratelimit/config.cc b/source/extensions/filters/http/ratelimit/config.cc index 1bcf930af390..9ff4038e9930 100644 --- a/source/extensions/filters/http/ratelimit/config.cc +++ b/source/extensions/filters/http/ratelimit/config.cc @@ -36,6 +36,13 @@ Http::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped( }; } +Router::RouteSpecificFilterConfigConstSharedPtr +RateLimitFilterConfig::createRouteSpecificFilterConfigTyped( + const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute& proto_config, + Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) { + return std::make_shared(proto_config); +} + /** * Static registration for the rate limit filter. @see RegisterFactory. */ diff --git a/source/extensions/filters/http/ratelimit/config.h b/source/extensions/filters/http/ratelimit/config.h index 0eaee53c5828..82d22786acc5 100644 --- a/source/extensions/filters/http/ratelimit/config.h +++ b/source/extensions/filters/http/ratelimit/config.h @@ -16,7 +16,9 @@ namespace RateLimitFilter { * Config registration for the rate limit filter. @see NamedHttpFilterConfigFactory. */ class RateLimitFilterConfig - : public Common::FactoryBase { + : public Common::FactoryBase< + envoy::extensions::filters::http::ratelimit::v3::RateLimit, + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute> { public: RateLimitFilterConfig() : FactoryBase(HttpFilterNames::get().RateLimit) {} @@ -24,6 +26,11 @@ class RateLimitFilterConfig Http::FilterFactoryCb createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::ratelimit::v3::RateLimit& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; + + Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped( + const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute& proto_config, + Server::Configuration::ServerFactoryContext& context, + ProtobufMessage::ValidationVisitor& validator) override; }; } // namespace RateLimitFilter diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index c2c2b36b9e3a..8430f47243a8 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -13,6 +13,7 @@ #include "common/router/config_impl.h" #include "extensions/filters/http/ratelimit/ratelimit_headers.h" +#include "extensions/filters/http/well_known_names.h" namespace Envoy { namespace Extensions { @@ -50,17 +51,30 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { // Get all applicable rate limit policy entries for the route. populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors, route_entry, headers); - // Get all applicable rate limit policy entries for the virtual host if the route opted to - // include the virtual host rate limits. 
- if (route_entry->includeVirtualHostRateLimits()) { + VhRateLimitOptions vh_rate_limit_option = getVirtualHostRateLimitOption(route); + + switch (vh_rate_limit_option) { + case VhRateLimitOptions::Ignore: + break; + case VhRateLimitOptions::Include: populateRateLimitDescriptors(route_entry->virtualHost().rateLimitPolicy(), descriptors, route_entry, headers); + break; + case VhRateLimitOptions::Override: + if (route_entry->rateLimitPolicy().empty()) { + populateRateLimitDescriptors(route_entry->virtualHost().rateLimitPolicy(), descriptors, + route_entry, headers); + } + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; } if (!descriptors.empty()) { state_ = State::Calling; initiating_call_ = true; - client_->limit(*this, config_->domain(), descriptors, callbacks_->activeSpan()); + client_->limit(*this, config_->domain(), descriptors, callbacks_->activeSpan(), + callbacks_->streamInfo()); initiating_call_ = false; } } @@ -156,11 +170,13 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, empty_stat_name, false}; httpContext().codeStats().chargeResponseStat(info); - if (response_headers_to_add_ == nullptr) { - response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); + if (config_->enableXEnvoyRateLimitedHeader()) { + if (response_headers_to_add_ == nullptr) { + response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); + } + response_headers_to_add_->setReferenceEnvoyRateLimited( + Http::Headers::get().EnvoyRateLimitedValues.True); } - response_headers_to_add_->setReferenceEnvoyRateLimited( - Http::Headers::get().EnvoyRateLimitedValues.True); break; } @@ -234,6 +250,32 @@ void Filter::appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add) { } } +VhRateLimitOptions Filter::getVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route) { + if (route->routeEntry()->includeVirtualHostRateLimits()) { + vh_rate_limits_ = VhRateLimitOptions::Include; + } else { + const auto* specific_per_route_config = + Http::Utility::resolveMostSpecificPerFilterConfig( + HttpFilterNames::get().RateLimit, route); + if (specific_per_route_config != nullptr) { + switch (specific_per_route_config->virtualHostRateLimits()) { + case envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::INCLUDE: + vh_rate_limits_ = VhRateLimitOptions::Include; + break; + case envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::IGNORE: + vh_rate_limits_ = VhRateLimitOptions::Ignore; + break; + case envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::OVERRIDE: + default: + vh_rate_limits_ = VhRateLimitOptions::Override; + } + } else { + vh_rate_limits_ = VhRateLimitOptions::Override; + } + } + return vh_rate_limits_; +} + } // namespace RateLimitFilter } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index b7b803343cbe..058eb793569a 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -30,6 +30,11 @@ namespace RateLimitFilter { */ enum class FilterRequestType { Internal, External, Both }; +/** + * Type of virtual host rate limit options + */ +enum class VhRateLimitOptions { Override, Include, Ignore }; + /** * Global configuration for the HTTP rate limit filter. 
*/ @@ -46,6 +51,7 @@ class FilterConfig { enable_x_ratelimit_headers_( config.enable_x_ratelimit_headers() == envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03), + disable_x_envoy_ratelimited_header_(config.disable_x_envoy_ratelimited_header()), rate_limited_grpc_status_( config.rate_limited_as_resource_exhausted() ? absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted) @@ -59,6 +65,7 @@ class FilterConfig { FilterRequestType requestType() const { return request_type_; } bool failureModeAllow() const { return !failure_mode_deny_; } bool enableXRateLimitHeaders() const { return enable_x_ratelimit_headers_; } + bool enableXEnvoyRateLimitedHeader() const { return !disable_x_envoy_ratelimited_header_; } const absl::optional rateLimitedGrpcStatus() const { return rate_limited_grpc_status_; } @@ -85,6 +92,7 @@ class FilterConfig { Runtime::Loader& runtime_; const bool failure_mode_deny_; const bool enable_x_ratelimit_headers_; + const bool disable_x_envoy_ratelimited_header_; const absl::optional rate_limited_grpc_status_; Http::Context& http_context_; Filters::Common::RateLimit::StatNames stat_names_; @@ -92,6 +100,22 @@ class FilterConfig { using FilterConfigSharedPtr = std::shared_ptr; +class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { +public: + FilterConfigPerRoute( + const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute& config) + : vh_rate_limits_(config.vh_rate_limits()) {} + + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::VhRateLimitsOptions + virtualHostRateLimits() const { + return vh_rate_limits_; + } + +private: + const envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::VhRateLimitsOptions + vh_rate_limits_; +}; + /** * HTTP rate limit filter. Depending on the route configuration, this filter calls the global * rate limiting service before allowing further filter iteration. 
@@ -134,6 +158,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req const Http::HeaderMap& headers) const; void populateResponseHeaders(Http::HeaderMap& response_headers); void appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add); + VhRateLimitOptions getVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route); Http::Context& httpContext() { return config_->httpContext(); } @@ -143,6 +168,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req Filters::Common::RateLimit::ClientPtr client_; Http::StreamDecoderFilterCallbacks* callbacks_{}; State state_{State::NotStarted}; + VhRateLimitOptions vh_rate_limits_; Upstream::ClusterInfoConstSharedPtr cluster_; bool initiating_call_{}; Http::ResponseHeaderMapPtr response_headers_to_add_; diff --git a/source/extensions/filters/http/rbac/rbac_filter.cc b/source/extensions/filters/http/rbac/rbac_filter.cc index 04f253c840ab..f17fcec7b866 100644 --- a/source/extensions/filters/http/rbac/rbac_filter.cc +++ b/source/extensions/filters/http/rbac/rbac_filter.cc @@ -14,12 +14,6 @@ namespace Extensions { namespace HttpFilters { namespace RBACFilter { -struct RcDetailsValues { - // The rbac filter rejected the request - const std::string RbacAccessDenied = "rbac_access_denied"; -}; -using RcDetails = ConstSingleton; - RoleBasedAccessControlFilterConfig::RoleBasedAccessControlFilterConfig( const envoy::extensions::filters::http::rbac::v3::RBAC& proto_config, const std::string& stats_prefix, Stats::Scope& scope) @@ -112,18 +106,18 @@ RoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, boo config_->engine(callbacks_->route(), Filters::Common::RBAC::EnforcementMode::Enforced); if (engine != nullptr) { std::string effective_policy_id; - - if (engine->handleAction(*callbacks_->connection(), headers, callbacks_->streamInfo(), - &effective_policy_id)) { - ENVOY_LOG(debug, "enforced allowed, matched policy {}", - effective_policy_id.empty() ? "none" : effective_policy_id); + bool allowed = engine->handleAction(*callbacks_->connection(), headers, + callbacks_->streamInfo(), &effective_policy_id); + const std::string log_policy_id = effective_policy_id.empty() ? "none" : effective_policy_id; + if (allowed) { + ENVOY_LOG(debug, "enforced allowed, matched policy {}", log_policy_id); config_->stats().allowed_.inc(); return Http::FilterHeadersStatus::Continue; } else { - ENVOY_LOG(debug, "enforced denied, matched policy {}", - effective_policy_id.empty() ? 
"none" : effective_policy_id); + ENVOY_LOG(debug, "enforced denied, matched policy {}", log_policy_id); callbacks_->sendLocalReply(Http::Code::Forbidden, "RBAC: access denied", nullptr, - absl::nullopt, RcDetails::get().RbacAccessDenied); + absl::nullopt, + Filters::Common::RBAC::responseDetail(log_policy_id)); config_->stats().denied_.inc(); return Http::FilterHeadersStatus::StopIteration; } diff --git a/source/extensions/filters/http/squash/BUILD b/source/extensions/filters/http/squash/BUILD index ea2bdcd1242b..e89a1c25d8b5 100644 --- a/source/extensions/filters/http/squash/BUILD +++ b/source/extensions/filters/http/squash/BUILD @@ -27,6 +27,7 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/http:message_lib", "//source/common/http:utility_lib", + "//source/common/json:json_loader_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/source/extensions/filters/http/squash/squash_filter.cc index 0c5d42fe1f6a..b58052124143 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/source/extensions/filters/http/squash/squash_filter.cc @@ -11,6 +11,7 @@ #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/http/utility.h" +#include "common/json/json_loader.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -137,7 +138,7 @@ void SquashFilter::onDestroy() { cleanup(); } Http::FilterHeadersStatus SquashFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { // Check for squash header - if (!headers.get(Http::Headers::get().XSquashDebug)) { + if (headers.get(Http::Headers::get().XSquashDebug).empty()) { return Http::FilterHeadersStatus::Continue; } @@ -148,7 +149,7 @@ Http::FilterHeadersStatus SquashFilter::decodeHeaders(Http::RequestHeaderMap& he request->headers().setReferencePath(POST_ATTACHMENT_PATH); request->headers().setReferenceHost(SERVER_AUTHORITY); request->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post); - request->body() = std::make_unique(config_->attachmentJson()); + request->body().add(config_->attachmentJson()); is_squashing_ = true; in_flight_request_ = @@ -308,9 +309,7 @@ void SquashFilter::cleanup() { } Json::ObjectSharedPtr SquashFilter::getJsonBody(Http::ResponseMessagePtr&& m) { - Buffer::InstancePtr& data = m->body(); - std::string jsonbody = data->toString(); - return Json::Factory::loadFromString(jsonbody); + return Json::Factory::loadFromString(m->bodyAsString()); } } // namespace Squash diff --git a/source/extensions/filters/http/wasm/BUILD b/source/extensions/filters/http/wasm/BUILD new file mode 100644 index 000000000000..81d0a69665e1 --- /dev/null +++ b/source/extensions/filters/http/wasm/BUILD @@ -0,0 +1,45 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +# Public docs: docs/root/configuration/http_filters/wasm_filter.rst + +envoy_cc_library( + name = "wasm_filter_lib", + srcs = ["wasm_filter.cc"], + hdrs = ["wasm_filter.h"], + visibility = ["//visibility:public"], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/server:filter_config_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/filters/http:well_known_names", + 
"@envoy_api//envoy/extensions/filters/http/wasm/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", + deps = [ + ":wasm_filter_lib", + "//include/envoy/registry", + "//source/common/common:empty_string", + "//source/common/config:datasource_lib", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/wasm/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/wasm/config.cc b/source/extensions/filters/http/wasm/config.cc new file mode 100644 index 000000000000..f46b7cf0692f --- /dev/null +++ b/source/extensions/filters/http/wasm/config.cc @@ -0,0 +1,39 @@ +#include "extensions/filters/http/wasm/config.h" + +#include "envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "common/common/empty_string.h" +#include "common/config/datasource.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/filters/http/wasm/wasm_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Wasm { + +Http::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config, const std::string&, + Server::Configuration::FactoryContext& context) { + auto filter_config = std::make_shared(proto_config, context); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + auto filter = filter_config->createFilter(); + if (!filter) { // Fail open + return; + } + callbacks.addStreamFilter(filter); + callbacks.addAccessLogHandler(filter); + }; +} + +/** + * Static registration for the Wasm filter. @see RegisterFactory. + */ +REGISTER_FACTORY(WasmFilterConfig, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace Wasm +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/wasm/config.h b/source/extensions/filters/http/wasm/config.h new file mode 100644 index 000000000000..319aee96f9ca --- /dev/null +++ b/source/extensions/filters/http/wasm/config.h @@ -0,0 +1,31 @@ +#pragma once + +#include "envoy/extensions/filters/http/wasm/v3/wasm.pb.h" +#include "envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Wasm { + +/** + * Config registration for the Wasm filter. @see NamedHttpFilterConfigFactory. 
+ */ +class WasmFilterConfig + : public Common::FactoryBase { +public: + WasmFilterConfig() : FactoryBase(HttpFilterNames::get().Wasm) {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config, const std::string&, + Server::Configuration::FactoryContext& context) override; +}; + +} // namespace Wasm +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/wasm/wasm_filter.cc b/source/extensions/filters/http/wasm/wasm_filter.cc new file mode 100644 index 000000000000..c62b06c4102d --- /dev/null +++ b/source/extensions/filters/http/wasm/wasm_filter.cc @@ -0,0 +1,52 @@ +#include "extensions/filters/http/wasm/wasm_filter.h" + +#include "envoy/http/codes.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/enum_to_int.h" +#include "common/http/header_map_impl.h" +#include "common/http/message_impl.h" +#include "common/http/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Wasm { + +FilterConfig::FilterConfig(const envoy::extensions::filters::http::wasm::v3::Wasm& config, + Server::Configuration::FactoryContext& context) + : tls_slot_(context.threadLocal().allocateSlot()) { + plugin_ = std::make_shared( + config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(), + config.config().vm_config().runtime(), + Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(), + context.direction(), context.localInfo(), &context.listenerMetadata()); + + auto plugin = plugin_; + auto callback = [plugin, this](const Common::Wasm::WasmHandleSharedPtr& base_wasm) { + // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. 
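A simplified sketch of the fail-open wiring used by the Wasm filter factory above: when the per-stream filter cannot be created, nothing is installed and the stream continues through the rest of the chain. All types here are illustrative stand-ins, not Envoy interfaces.

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

struct Filter {};
using FilterSharedPtr = std::shared_ptr<Filter>;

struct FilterChain {
  std::vector<FilterSharedPtr> filters;
  void addStreamFilter(FilterSharedPtr f) { filters.push_back(std::move(f)); }
};

// Returns a factory callback that installs the filter only if creation succeeds.
std::function<void(FilterChain&)> makeFactory(std::function<FilterSharedPtr()> create_filter) {
  return [create_filter](FilterChain& chain) {
    auto filter = create_filter();
    if (!filter) { // fail open: skip installation entirely
      return;
    }
    chain.addStreamFilter(filter);
  };
}

int main() {
  FilterChain chain;
  makeFactory([] { return FilterSharedPtr(); })(chain); // creation "failed"
  std::cout << "filters installed: " << chain.filters.size() << "\n"; // 0
}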
+ tls_slot_->set( + [base_wasm, + plugin](Event::Dispatcher& dispatcher) -> std::shared_ptr { + if (!base_wasm) { + return nullptr; + } + return std::static_pointer_cast( + Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher)); + }); + }; + + if (!Common::Wasm::createWasm( + config.config().vm_config(), plugin_, context.scope().createScope(""), + context.clusterManager(), context.initManager(), context.dispatcher(), context.api(), + context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { + throw Common::Wasm::WasmException( + fmt::format("Unable to create Wasm HTTP filter {}", plugin->name_)); + } +} + +} // namespace Wasm +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/wasm/wasm_filter.h b/source/extensions/filters/http/wasm/wasm_filter.h new file mode 100644 index 000000000000..36bfd1503b77 --- /dev/null +++ b/source/extensions/filters/http/wasm/wasm_filter.h @@ -0,0 +1,53 @@ +#pragma once + +#include + +#include "envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h" +#include "envoy/http/filter.h" +#include "envoy/server/filter_config.h" +#include "envoy/upstream/cluster_manager.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Wasm { + +using Envoy::Extensions::Common::Wasm::Context; +using Envoy::Extensions::Common::Wasm::Wasm; +using Envoy::Extensions::Common::Wasm::WasmHandle; + +class FilterConfig : Logger::Loggable { +public: + FilterConfig(const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config, + Server::Configuration::FactoryContext& context); + + std::shared_ptr createFilter() { + Wasm* wasm = nullptr; + if (tls_slot_->get()) { + wasm = tls_slot_->getTyped().wasm().get(); + } + if (plugin_->fail_open_ && (!wasm || wasm->isFailed())) { + return nullptr; + } + if (wasm && !root_context_id_) { + root_context_id_ = wasm->getRootContext(plugin_->root_id_)->id(); + } + return std::make_shared(wasm, root_context_id_, plugin_); + } + +private: + uint32_t root_context_id_{0}; + Envoy::Extensions::Common::Wasm::PluginSharedPtr plugin_; + ThreadLocal::SlotPtr tls_slot_; + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; +}; + +typedef std::shared_ptr FilterConfigSharedPtr; + +} // namespace Wasm +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index e43ba7859117..e869e3fc9bbd 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -16,6 +16,8 @@ class HttpFilterNameValues { const std::string Buffer = "envoy.filters.http.buffer"; // Cache filter const std::string Cache = "envoy.filters.http.cache"; + // CDN Loop filter + const std::string CdnLoop = "envoy.filters.http.cdn_loop"; // Compressor filter const std::string Compressor = "envoy.filters.http.compressor"; // CORS filter @@ -72,12 +74,14 @@ class HttpFilterNameValues { const std::string OriginalSrc = "envoy.filters.http.original_src"; // Dynamic forward proxy filter const std::string DynamicForwardProxy = "envoy.filters.http.dynamic_forward_proxy"; + // WebAssembly filter + const std::string Wasm = "envoy.filters.http.wasm"; // AWS request signing filter const std::string AwsRequestSigning = "envoy.filters.http.aws_request_signing"; // AWS Lambda 
filter const std::string AwsLambda = "envoy.filters.http.aws_lambda"; // OAuth filter - const std::string OAuth = "envoy.filters.http.oauth"; + const std::string OAuth = "envoy.filters.http.oauth2"; }; using HttpFilterNames = ConstSingleton; diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index cf10888a6b89..d4dcd0fcefca 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -15,6 +15,7 @@ #include "extensions/transport_sockets/well_known_names.h" +#include "absl/strings/str_join.h" #include "openssl/ssl.h" namespace Envoy { @@ -138,6 +139,7 @@ void Filter::onALPN(const unsigned char* data, unsigned int len) { } protocols.emplace_back(reinterpret_cast(CBS_data(&name)), CBS_len(&name)); } + ENVOY_LOG(trace, "tls:onALPN(), ALPN: {}", absl::StrJoin(protocols, ",")); cb_->socket().setRequestedApplicationProtocols(protocols); alpn_found_ = true; } diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index 4892e8107c61..3f9a342d6d45 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -14,6 +14,7 @@ #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/http/utility.h" +#include "common/json/json_loader.h" #include "common/network/utility.h" namespace Envoy { diff --git a/source/extensions/filters/network/client_ssl_auth/config.cc b/source/extensions/filters/network/client_ssl_auth/config.cc index 2b361023cb65..23a5d8dfa6c2 100644 --- a/source/extensions/filters/network/client_ssl_auth/config.cc +++ b/source/extensions/filters/network/client_ssl_auth/config.cc @@ -18,9 +18,9 @@ Network::FilterFactoryCb ClientSslAuthConfigFactory::createFilterFactoryFromProt ASSERT(!proto_config.auth_api_cluster().empty()); ASSERT(!proto_config.stat_prefix().empty()); - ClientSslAuthConfigSharedPtr filter_config( - ClientSslAuthConfig::create(proto_config, context.threadLocal(), context.clusterManager(), - context.dispatcher(), context.scope(), context.random())); + ClientSslAuthConfigSharedPtr filter_config(ClientSslAuthConfig::create( + proto_config, context.threadLocal(), context.clusterManager(), context.dispatcher(), + context.scope(), context.api().randomGenerator())); return [filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared(filter_config)); }; diff --git a/source/extensions/filters/network/dubbo_proxy/config.cc b/source/extensions/filters/network/dubbo_proxy/config.cc index 2ea2a87315ac..76260b0eeeb7 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.cc +++ b/source/extensions/filters/network/dubbo_proxy/config.cc @@ -25,7 +25,7 @@ Network::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromP return [filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared( - *filter_config, context.random(), context.dispatcher().timeSource())); + *filter_config, context.api().randomGenerator(), context.dispatcher().timeSource())); }; } diff --git a/source/extensions/filters/network/ext_authz/BUILD b/source/extensions/filters/network/ext_authz/BUILD index ebc6847e28f6..3ef4a4738a3d 100644 --- 
a/source/extensions/filters/network/ext_authz/BUILD +++ b/source/extensions/filters/network/ext_authz/BUILD @@ -23,6 +23,7 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//include/envoy/upstream:cluster_manager_interface", "//source/common/common:assert_lib", + "//source/common/common:matchers_lib", "//source/common/tracing:http_tracer_lib", "//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib", "//source/extensions/filters/common/ext_authz:ext_authz_interface", diff --git a/source/extensions/filters/network/ext_authz/config.cc b/source/extensions/filters/network/ext_authz/config.cc index 8bfdf1f81f7d..a8a4188b395e 100644 --- a/source/extensions/filters/network/ext_authz/config.cc +++ b/source/extensions/filters/network/ext_authz/config.cc @@ -35,7 +35,7 @@ Network::FilterFactoryCb ExtAuthzConfigFactory::createFilterFactoryFromProtoType auto client = std::make_unique( async_client_factory->create(), std::chrono::milliseconds(timeout_ms), - transport_api_version, false); + transport_api_version); filter_manager.addReadFilter(Network::ReadFilterSharedPtr{ std::make_shared(ext_authz_config, std::move(client))}); }; diff --git a/source/extensions/filters/network/ext_authz/ext_authz.cc b/source/extensions/filters/network/ext_authz/ext_authz.cc index 97feb62e0d22..0ea178650d84 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.cc +++ b/source/extensions/filters/network/ext_authz/ext_authz.cc @@ -30,12 +30,18 @@ void Filter::callCheck() { config_->stats().total_.inc(); calling_check_ = true; - client_->check(*this, check_request_, Tracing::NullSpan::instance(), - filter_callbacks_->connection().streamInfo()); + auto& connection = filter_callbacks_->connection(); + client_->check(*this, connection.dispatcher(), check_request_, Tracing::NullSpan::instance(), + connection.streamInfo()); calling_check_ = false; } Network::FilterStatus Filter::onData(Buffer::Instance&, bool /* end_stream */) { + if (!filterEnabled(filter_callbacks_->connection().streamInfo().dynamicMetadata())) { + config_->stats().disabled_.inc(); + return Network::FilterStatus::Continue; + } + if (status_ == Status::NotStarted) { // By waiting to invoke the check at onData() the call to authorization service will have // sufficient information to fill out the checkRequest_. 
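Note: the ext_authz network filter change above defers the authorization call to onData() and now skips it entirely when the configured metadata matcher does not match the connection's dynamic metadata, incrementing a new disabled counter instead. The following is a minimal standalone sketch of that gating logic only; the struct and class names are illustrative stand-ins, not the real Envoy types.

#include <functional>
#include <optional>
#include <utility>

// Hypothetical stand-ins, for illustration only.
struct Metadata {};                              // stands in for envoy::config::core::v3::Metadata
struct Stats { int disabled = 0; int total = 0; };

class ExtAuthzGate {
public:
  // The matcher is absent when filter_enabled_metadata is not configured.
  explicit ExtAuthzGate(std::optional<std::function<bool(const Metadata&)>> matcher)
      : matcher_(std::move(matcher)) {}

  // Mirrors Config::filterEnabledMetadata(): enabled by default when no matcher is set.
  bool enabled(const Metadata& md) const { return matcher_ ? (*matcher_)(md) : true; }

  // Mirrors the new onData() prologue: skip the check and count the connection as disabled.
  bool shouldCallCheck(const Metadata& md, Stats& stats) {
    if (!enabled(md)) {
      stats.disabled++;
      return false;  // data passes through without calling the authorization service
    }
    stats.total++;
    return true;
  }

private:
  std::optional<std::function<bool(const Metadata&)>> matcher_;
};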
@@ -72,6 +78,9 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { break; case Filters::Common::ExtAuthz::CheckStatus::Error: config_->stats().error_.inc(); + if (response->error_kind == Filters::Common::ExtAuthz::ErrorKind::Timedout) { + config_->stats().timeout_.inc(); + } break; case Filters::Common::ExtAuthz::CheckStatus::Denied: config_->stats().denied_.inc(); diff --git a/source/extensions/filters/network/ext_authz/ext_authz.h b/source/extensions/filters/network/ext_authz/ext_authz.h index 9fa49f0f3bb7..73fa9c0df5d8 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.h +++ b/source/extensions/filters/network/ext_authz/ext_authz.h @@ -14,6 +14,8 @@ #include "envoy/stats/stats_macros.h" #include "envoy/upstream/cluster_manager.h" +#include "common/common/matchers.h" + #include "extensions/filters/common/ext_authz/ext_authz.h" #include "extensions/filters/common/ext_authz/ext_authz_grpc_impl.h" @@ -29,9 +31,11 @@ namespace ExtAuthz { COUNTER(cx_closed) \ COUNTER(denied) \ COUNTER(error) \ + COUNTER(timeout) \ COUNTER(failure_mode_allowed) \ COUNTER(ok) \ COUNTER(total) \ + COUNTER(disabled) \ GAUGE(active, Accumulate) /** @@ -50,18 +54,26 @@ class Config { Stats::Scope& scope) : stats_(generateStats(config.stat_prefix(), scope)), failure_mode_allow_(config.failure_mode_allow()), - include_peer_certificate_(config.include_peer_certificate()) {} + include_peer_certificate_(config.include_peer_certificate()), + filter_enabled_metadata_( + config.has_filter_enabled_metadata() + ? absl::optional(config.filter_enabled_metadata()) + : absl::nullopt) {} const InstanceStats& stats() { return stats_; } bool failureModeAllow() const { return failure_mode_allow_; } void setFailModeAllow(bool value) { failure_mode_allow_ = value; } bool includePeerCertificate() const { return include_peer_certificate_; } + bool filterEnabledMetadata(const envoy::config::core::v3::Metadata& metadata) const { + return filter_enabled_metadata_.has_value() ? filter_enabled_metadata_->match(metadata) : true; + } private: static InstanceStats generateStats(const std::string& name, Stats::Scope& scope); const InstanceStats stats_; bool failure_mode_allow_; const bool include_peer_certificate_; + const absl::optional filter_enabled_metadata_; }; using ConfigSharedPtr = std::shared_ptr; @@ -107,6 +119,10 @@ class Filter : public Network::ReadFilter, enum class FilterReturn { Stop, Continue }; void callCheck(); + bool filterEnabled(const envoy::config::core::v3::Metadata& metadata) { + return config_->filterEnabledMetadata(metadata); + } + ConfigSharedPtr config_; Filters::Common::ExtAuthz::ClientPtr client_; Network::ReadFilterCallbacks* filter_callbacks_{}; diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 82a65448222f..93fff20c3ca9 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -167,9 +167,9 @@ HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped( // as these captured objects are also global singletons. 
return [singletons, filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( - *filter_config, context.drainDecision(), context.random(), context.httpContext(), - context.runtime(), context.localInfo(), context.clusterManager(), context.overloadManager(), - context.dispatcher().timeSource())}); + *filter_config, context.drainDecision(), context.api().randomGenerator(), + context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), + context.overloadManager(), context.dispatcher().timeSource())}); }; } @@ -270,7 +270,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( request_id_extension_ = Http::RequestIDExtensionFactory::fromProto(config.request_id_extension(), context_); } else { - request_id_extension_ = Http::RequestIDExtensionFactory::defaultInstance(context_.random()); + request_id_extension_ = + Http::RequestIDExtensionFactory::defaultInstance(context_.api().randomGenerator()); } // If scoped RDS is enabled, avoid creating a route config provider. Route config providers will @@ -581,13 +582,15 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, "envoy.reloadable_features.new_codec_behavior")) { return std::make_unique( connection, callbacks, - Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), + context_.api().randomGenerator(), http2_options_, maxRequestHeadersKb(), + maxRequestHeadersCount(), headersWithUnderscoresAction()); } else { return std::make_unique( connection, callbacks, - Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), + context_.api().randomGenerator(), http2_options_, maxRequestHeadersKb(), + maxRequestHeadersCount(), headersWithUnderscoresAction()); } } case CodecType::HTTP3: @@ -601,9 +604,9 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, .createQuicServerConnection(connection, callbacks)); case CodecType::AUTO: return Http::ConnectionManagerUtility::autoCreateCodec( - connection, data, callbacks, context_.scope(), http1_codec_stats_, http2_codec_stats_, - http1_settings_, http2_options_, maxRequestHeadersKb(), maxRequestHeadersCount(), - headersWithUnderscoresAction()); + connection, data, callbacks, context_.scope(), context_.api().randomGenerator(), + http1_codec_stats_, http2_codec_stats_, http1_settings_, http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); } NOT_REACHED_GCOVR_EXCL_LINE; } @@ -709,9 +712,9 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( // as these captured objects are also global singletons. 
return [singletons, filter_config, &context, &read_callbacks]() -> Http::ApiListenerPtr { auto conn_manager = std::make_unique( - *filter_config, context.drainDecision(), context.random(), context.httpContext(), - context.runtime(), context.localInfo(), context.clusterManager(), context.overloadManager(), - context.dispatcher().timeSource()); + *filter_config, context.drainDecision(), context.api().randomGenerator(), + context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), + context.overloadManager(), context.dispatcher().timeSource()); // This factory creates a new ConnectionManagerImpl in the absence of its usual environment as // an L4 filter, so this factory needs to take a few actions. diff --git a/source/extensions/filters/network/local_ratelimit/BUILD b/source/extensions/filters/network/local_ratelimit/BUILD index 13389742fa56..ad61ff36235e 100644 --- a/source/extensions/filters/network/local_ratelimit/BUILD +++ b/source/extensions/filters/network/local_ratelimit/BUILD @@ -22,9 +22,9 @@ envoy_cc_library( "//include/envoy/network:filter_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/stats:stats_macros", - "//source/common/common:thread_synchronizer_lib", "//source/common/protobuf:utility_lib", "//source/common/runtime:runtime_lib", + "//source/extensions/filters/common/local_ratelimit:local_ratelimit_lib", "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/local_ratelimit/local_ratelimit.cc b/source/extensions/filters/network/local_ratelimit/local_ratelimit.cc index 6c574188fed5..773daf175139 100644 --- a/source/extensions/filters/network/local_ratelimit/local_ratelimit.cc +++ b/source/extensions/filters/network/local_ratelimit/local_ratelimit.cc @@ -13,64 +13,21 @@ namespace LocalRateLimitFilter { Config::Config( const envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& proto_config, Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::Loader& runtime) - : fill_timer_(dispatcher.createTimer([this] { onFillTimer(); })), - max_tokens_(proto_config.token_bucket().max_tokens()), - tokens_per_fill_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config.token_bucket(), tokens_per_fill, 1)), - fill_interval_(PROTOBUF_GET_MS_REQUIRED(proto_config.token_bucket(), fill_interval)), + : rate_limiter_(Filters::Common::LocalRateLimit::LocalRateLimiterImpl( + std::chrono::milliseconds( + PROTOBUF_GET_MS_REQUIRED(proto_config.token_bucket(), fill_interval)), + proto_config.token_bucket().max_tokens(), + PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config.token_bucket(), tokens_per_fill, 1), + dispatcher)), enabled_(proto_config.runtime_enabled(), runtime), - stats_(generateStats(proto_config.stat_prefix(), scope)), tokens_(max_tokens_) { - if (fill_interval_ < std::chrono::milliseconds(50)) { - throw EnvoyException("local rate limit token bucket fill timer must be >= 50ms"); - } - fill_timer_->enableTimer(fill_interval_); -} + stats_(generateStats(proto_config.stat_prefix(), scope)) {} LocalRateLimitStats Config::generateStats(const std::string& prefix, Stats::Scope& scope) { const std::string final_prefix = "local_rate_limit." + prefix; return {ALL_LOCAL_RATE_LIMIT_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; } -void Config::onFillTimer() { - // Relaxed consistency is used for all operations because we don't care about ordering, just the - // final atomic correctness. 
- uint32_t expected_tokens = tokens_.load(std::memory_order_relaxed); - uint32_t new_tokens_value; - do { - // expected_tokens is either initialized above or reloaded during the CAS failure below. - new_tokens_value = std::min(max_tokens_, expected_tokens + tokens_per_fill_); - - // Testing hook. - synchronizer_.syncPoint("on_fill_timer_pre_cas"); - - // Loop while the weak CAS fails trying to update the tokens value. - } while ( - !tokens_.compare_exchange_weak(expected_tokens, new_tokens_value, std::memory_order_relaxed)); - - ENVOY_LOG(trace, "local_rate_limit: fill tokens={}", new_tokens_value); - fill_timer_->enableTimer(fill_interval_); -} - -bool Config::canCreateConnection() { - // Relaxed consistency is used for all operations because we don't care about ordering, just the - // final atomic correctness. - uint32_t expected_tokens = tokens_.load(std::memory_order_relaxed); - do { - // expected_tokens is either initialized above or reloaded during the CAS failure below. - if (expected_tokens == 0) { - return false; - } - - // Testing hook. - synchronizer_.syncPoint("can_create_connection_pre_cas"); - - // Loop while the weak CAS fails trying to subtract 1 from expected. - } while (!tokens_.compare_exchange_weak(expected_tokens, expected_tokens - 1, - std::memory_order_relaxed)); - - // We successfully decremented the counter by 1. - return true; -} +bool Config::canCreateConnection() { return rate_limiter_.requestAllowed(); } Network::FilterStatus Filter::onNewConnection() { if (!config_->enabled()) { diff --git a/source/extensions/filters/network/local_ratelimit/local_ratelimit.h b/source/extensions/filters/network/local_ratelimit/local_ratelimit.h index 30a48f876d97..e1cd52ac1bee 100644 --- a/source/extensions/filters/network/local_ratelimit/local_ratelimit.h +++ b/source/extensions/filters/network/local_ratelimit/local_ratelimit.h @@ -9,6 +9,8 @@ #include "common/common/thread_synchronizer.h" #include "common/runtime/runtime_protos.h" +#include "extensions/filters/common/local_ratelimit/local_ratelimit_impl.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -43,17 +45,9 @@ class Config : Logger::Loggable { static LocalRateLimitStats generateStats(const std::string& prefix, Stats::Scope& scope); void onFillTimer(); - // TODO(mattklein123): Determine if/how to merge this with token_bucket_impl.h/cc. This - // implementation is geared towards multi-threading as well assumes a high call rate (which is - // why a fixed periodic refresh timer is used). - const Event::TimerPtr fill_timer_; - const uint32_t max_tokens_; - const uint32_t tokens_per_fill_; - const std::chrono::milliseconds fill_interval_; + Filters::Common::LocalRateLimit::LocalRateLimiterImpl rate_limiter_; Runtime::FeatureFlag enabled_; LocalRateLimitStats stats_; - std::atomic tokens_; - Thread::ThreadSynchronizer synchronizer_; // Used for testing only. 
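For context, the local_ratelimit refactor above moves the token bucket out of the network filter and into the shared Filters::Common::LocalRateLimit::LocalRateLimiterImpl, so canCreateConnection() becomes a one-line delegation to requestAllowed(). The removed logic is a lock-free token bucket; the condensed standalone sketch below restates that pattern with the timer wiring and testing hooks omitted (names are illustrative).

#include <algorithm>
#include <atomic>
#include <cstdint>

// Condensed from the removed filter code: a multi-thread friendly token bucket
// refilled periodically by a dispatcher timer (the timer itself is omitted here).
class TokenBucket {
public:
  TokenBucket(uint32_t max_tokens, uint32_t tokens_per_fill)
      : max_tokens_(max_tokens), tokens_per_fill_(tokens_per_fill), tokens_(max_tokens) {}

  // Called from the fill timer: add tokens_per_fill_, capped at max_tokens_.
  void onFillTimer() {
    uint32_t expected = tokens_.load(std::memory_order_relaxed);
    uint32_t next;
    do {
      next = std::min(max_tokens_, expected + tokens_per_fill_);
      // Loop while the weak CAS fails; expected is reloaded on failure.
    } while (!tokens_.compare_exchange_weak(expected, next, std::memory_order_relaxed));
  }

  // Called per connection: take one token if any are left.
  bool requestAllowed() {
    uint32_t expected = tokens_.load(std::memory_order_relaxed);
    do {
      if (expected == 0) {
        return false;
      }
    } while (!tokens_.compare_exchange_weak(expected, expected - 1, std::memory_order_relaxed));
    return true;
  }

private:
  const uint32_t max_tokens_;
  const uint32_t tokens_per_fill_;
  std::atomic<uint32_t> tokens_;
};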
friend class LocalRateLimitTestBase; }; diff --git a/source/extensions/filters/network/mongo_proxy/config.cc b/source/extensions/filters/network/mongo_proxy/config.cc index 13ed4139bbb4..ecdc60644888 100644 --- a/source/extensions/filters/network/mongo_proxy/config.cc +++ b/source/extensions/filters/network/mongo_proxy/config.cc @@ -34,7 +34,13 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP fault_config = std::make_shared(proto_config.delay()); } - auto stats = std::make_shared(context.scope(), stat_prefix); + auto commands = std::vector{"delete", "insert", "update"}; + if (proto_config.commands_size() > 0) { + commands = + std::vector(proto_config.commands().begin(), proto_config.commands().end()); + } + + auto stats = std::make_shared(context.scope(), stat_prefix, commands); const bool emit_dynamic_metadata = proto_config.emit_dynamic_metadata(); return [stat_prefix, &context, access_log, fault_config, emit_dynamic_metadata, stats](Network::FilterManager& filter_manager) -> void { diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc index 6059b461f94c..aca9ffaf35fd 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc @@ -13,7 +13,8 @@ namespace Extensions { namespace NetworkFilters { namespace MongoProxy { -MongoStats::MongoStats(Stats::Scope& scope, absl::string_view prefix) +MongoStats::MongoStats(Stats::Scope& scope, absl::string_view prefix, + const std::vector& commands) : scope_(scope), stat_name_set_(scope.symbolTable().makeSet("Mongo")), prefix_(stat_name_set_->add(prefix)), callsite_(stat_name_set_->add("callsite")), cmd_(stat_name_set_->add("cmd")), collection_(stat_name_set_->add("collection")), @@ -25,10 +26,9 @@ MongoStats::MongoStats(Stats::Scope& scope, absl::string_view prefix) scatter_get_(stat_name_set_->add("scatter_get")), total_(stat_name_set_->add("total")), unknown_command_(stat_name_set_->add("unknown_command")) { - // TODO(jmarantz): is this the right set of mongo commands to use as builtins? - // Should we also have builtins for callsites or collections, or do those need - // to be dynamic? 
- stat_name_set_->rememberBuiltins({"insert", "query", "update", "delete"}); + for (const auto& cmd : commands) { + stat_name_set_->rememberBuiltin(cmd); + } } Stats::ElementVec MongoStats::addPrefix(const Stats::ElementVec& names) { diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.h b/source/extensions/filters/network/mongo_proxy/mongo_stats.h index b19561df6788..3df51affd0a4 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.h +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.h @@ -16,7 +16,8 @@ namespace MongoProxy { class MongoStats { public: - MongoStats(Stats::Scope& scope, absl::string_view prefix); + MongoStats(Stats::Scope& scope, absl::string_view prefix, + const std::vector& commands); void incCounter(const Stats::ElementVec& names); void recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index fa70d4ae9801..b8ee760c910b 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -59,10 +59,9 @@ ProxyFilter::ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope, const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config, const Network::DrainDecision& drain_decision, TimeSource& time_source, bool emit_dynamic_metadata, const MongoStatsSharedPtr& mongo_stats) - : stat_prefix_(stat_prefix), stats_(generateStats(stat_prefix, scope)), runtime_(runtime), - drain_decision_(drain_decision), access_log_(access_log), fault_config_(fault_config), - time_source_(time_source), emit_dynamic_metadata_(emit_dynamic_metadata), - mongo_stats_(mongo_stats) { + : stats_(generateStats(stat_prefix, scope)), runtime_(runtime), drain_decision_(drain_decision), + access_log_(access_log), fault_config_(fault_config), time_source_(time_source), + emit_dynamic_metadata_(emit_dynamic_metadata), mongo_stats_(mongo_stats) { if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().ConnectionLoggingEnabled, 100)) { // If we are not logging at the connection level, just release the shared pointer so that we diff --git a/source/extensions/filters/network/mongo_proxy/proxy.h b/source/extensions/filters/network/mongo_proxy/proxy.h index c54308f1ae38..773d4714aa7f 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.h +++ b/source/extensions/filters/network/mongo_proxy/proxy.h @@ -183,7 +183,6 @@ class ProxyFilter : public Network::Filter, void tryInjectDelay(); std::unique_ptr decoder_; - std::string stat_prefix_; MongoProxyStats stats_; Runtime::Loader& runtime_; const Network::DrainDecision& drain_decision_; diff --git a/source/extensions/filters/network/mongo_proxy/utility.cc b/source/extensions/filters/network/mongo_proxy/utility.cc index c51468f01e72..bb4537744288 100644 --- a/source/extensions/filters/network/mongo_proxy/utility.cc +++ b/source/extensions/filters/network/mongo_proxy/utility.cc @@ -22,6 +22,17 @@ QueryMessageInfo::QueryMessageInfo(const QueryMessage& query) if (command_ == "find") { command_ = ""; parseFindCommand(*command); + // command aliases + } else if (command_ == "collstats") { + command_ = "collStats"; + } else if (command_ == "dbstats") { + command_ = "dbStats"; + } else if (command_ == "findandmodify") { + command_ = "findAndModify"; + } else if (command_ == "getlasterror") { + command_ = "getLastError"; + } else if (command_ == "ismaster") { + command_ = "isMaster"; } 
return; diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc index e66701ee8784..d3fa62e55fc8 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc +++ b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc @@ -14,7 +14,7 @@ namespace NetworkFilters { namespace MySQLProxy { MySQLFilterConfig::MySQLFilterConfig(const std::string& stat_prefix, Stats::Scope& scope) - : scope_(scope), stat_prefix_(stat_prefix), stats_(generateStats(stat_prefix, scope)) {} + : scope_(scope), stats_(generateStats(stat_prefix, scope)) {} MySQLFilter::MySQLFilter(MySQLFilterConfigSharedPtr config) : config_(std::move(config)) {} diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.h b/source/extensions/filters/network/mysql_proxy/mysql_filter.h index 2d73ef9b5846..daabb165dde6 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_filter.h +++ b/source/extensions/filters/network/mysql_proxy/mysql_filter.h @@ -54,7 +54,6 @@ class MySQLFilterConfig { const MySQLProxyStats& stats() { return stats_; } Stats::Scope& scope_; - const std::string stat_prefix_; MySQLProxyStats stats_; private: diff --git a/source/extensions/filters/network/postgres_proxy/BUILD b/source/extensions/filters/network/postgres_proxy/BUILD index aa397da9b55f..420286527f04 100644 --- a/source/extensions/filters/network/postgres_proxy/BUILD +++ b/source/extensions/filters/network/postgres_proxy/BUILD @@ -19,10 +19,12 @@ envoy_cc_library( srcs = [ "postgres_decoder.cc", "postgres_filter.cc", + "postgres_message.cc", ], hdrs = [ "postgres_decoder.h", "postgres_filter.h", + "postgres_message.h", "postgres_session.h", ], repository = "@envoy", diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc index 0aae15ce995f..18a55b088e40 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc @@ -9,69 +9,91 @@ namespace Extensions { namespace NetworkFilters { namespace PostgresProxy { +#define BODY_FORMAT(...) \ + []() -> std::unique_ptr { return createMsgBodyReader<__VA_ARGS__>(); } +#define NO_BODY BODY_FORMAT() + void DecoderImpl::initialize() { // Special handler for first message of the transaction. - first_ = MsgProcessor{"Startup", {&DecoderImpl::onStartup}}; + first_ = + MessageProcessor{"Startup", BODY_FORMAT(Int32, Repeated), {&DecoderImpl::onStartup}}; // Frontend messages. FE_messages_.direction_ = "Frontend"; // Setup handlers for known messages. - absl::flat_hash_map& FE_known_msgs = FE_messages_.messages_; + absl::flat_hash_map& FE_known_msgs = FE_messages_.messages_; // Handler for known Frontend messages. 
- FE_known_msgs['B'] = MsgProcessor{"Bind", {}}; - FE_known_msgs['C'] = MsgProcessor{"Close", {}}; - FE_known_msgs['d'] = MsgProcessor{"CopyData", {}}; - FE_known_msgs['c'] = MsgProcessor{"CopyDone", {}}; - FE_known_msgs['f'] = MsgProcessor{"CopyFail", {}}; - FE_known_msgs['D'] = MsgProcessor{"Describe", {}}; - FE_known_msgs['E'] = MsgProcessor{"Execute", {}}; - FE_known_msgs['H'] = MsgProcessor{"Flush", {}}; - FE_known_msgs['F'] = MsgProcessor{"FunctionCall", {}}; + FE_known_msgs['B'] = MessageProcessor{ + "Bind", BODY_FORMAT(String, String, Array, Array, Array), {}}; + FE_known_msgs['C'] = MessageProcessor{"Close", BODY_FORMAT(Byte1, String), {}}; + FE_known_msgs['d'] = MessageProcessor{"CopyData", BODY_FORMAT(ByteN), {}}; + FE_known_msgs['c'] = MessageProcessor{"CopyDone", NO_BODY, {}}; + FE_known_msgs['f'] = MessageProcessor{"CopyFail", BODY_FORMAT(String), {}}; + FE_known_msgs['D'] = MessageProcessor{"Describe", BODY_FORMAT(Byte1, String), {}}; + FE_known_msgs['E'] = MessageProcessor{"Execute", BODY_FORMAT(String, Int32), {}}; + FE_known_msgs['H'] = MessageProcessor{"Flush", NO_BODY, {}}; + FE_known_msgs['F'] = MessageProcessor{ + "FunctionCall", BODY_FORMAT(Int32, Array, Array, Int16), {}}; FE_known_msgs['p'] = - MsgProcessor{"PasswordMessage/GSSResponse/SASLInitialResponse/SASLResponse", {}}; - FE_known_msgs['P'] = MsgProcessor{"Parse", {&DecoderImpl::onParse}}; - FE_known_msgs['Q'] = MsgProcessor{"Query", {&DecoderImpl::onQuery}}; - FE_known_msgs['S'] = MsgProcessor{"Sync", {}}; - FE_known_msgs['X'] = MsgProcessor{"Terminate", {&DecoderImpl::decodeFrontendTerminate}}; + MessageProcessor{"PasswordMessage/GSSResponse/SASLInitialResponse/SASLResponse", + BODY_FORMAT(Int32, ByteN), + {}}; + FE_known_msgs['P'] = + MessageProcessor{"Parse", BODY_FORMAT(String, String, Array), {&DecoderImpl::onParse}}; + FE_known_msgs['Q'] = MessageProcessor{"Query", BODY_FORMAT(String), {&DecoderImpl::onQuery}}; + FE_known_msgs['S'] = MessageProcessor{"Sync", NO_BODY, {}}; + FE_known_msgs['X'] = + MessageProcessor{"Terminate", NO_BODY, {&DecoderImpl::decodeFrontendTerminate}}; // Handler for unknown Frontend messages. - FE_messages_.unknown_ = MsgProcessor{"Other", {&DecoderImpl::incMessagesUnknown}}; + FE_messages_.unknown_ = + MessageProcessor{"Other", BODY_FORMAT(ByteN), {&DecoderImpl::incMessagesUnknown}}; // Backend messages. BE_messages_.direction_ = "Backend"; // Setup handlers for known messages. - absl::flat_hash_map& BE_known_msgs = BE_messages_.messages_; + absl::flat_hash_map& BE_known_msgs = BE_messages_.messages_; // Handler for known Backend messages. 
- BE_known_msgs['R'] = MsgProcessor{"Authentication", {&DecoderImpl::decodeAuthentication}}; - BE_known_msgs['K'] = MsgProcessor{"BackendKeyData", {}}; - BE_known_msgs['2'] = MsgProcessor{"BindComplete", {}}; - BE_known_msgs['3'] = MsgProcessor{"CloseComplete", {}}; - BE_known_msgs['C'] = MsgProcessor{"CommandComplete", {&DecoderImpl::decodeBackendStatements}}; - BE_known_msgs['d'] = MsgProcessor{"CopyData", {}}; - BE_known_msgs['c'] = MsgProcessor{"CopyDone", {}}; - BE_known_msgs['G'] = MsgProcessor{"CopyInResponse", {}}; - BE_known_msgs['H'] = MsgProcessor{"CopyOutResponse", {}}; - BE_known_msgs['D'] = MsgProcessor{"DataRow", {}}; - BE_known_msgs['I'] = MsgProcessor{"EmptyQueryResponse", {}}; - BE_known_msgs['E'] = MsgProcessor{"ErrorResponse", {&DecoderImpl::decodeBackendErrorResponse}}; - BE_known_msgs['V'] = MsgProcessor{"FunctionCallResponse", {}}; - BE_known_msgs['v'] = MsgProcessor{"NegotiateProtocolVersion", {}}; - BE_known_msgs['n'] = MsgProcessor{"NoData", {}}; - BE_known_msgs['N'] = MsgProcessor{"NoticeResponse", {&DecoderImpl::decodeBackendNoticeResponse}}; - BE_known_msgs['A'] = MsgProcessor{"NotificationResponse", {}}; - BE_known_msgs['t'] = MsgProcessor{"ParameterDescription", {}}; - BE_known_msgs['S'] = MsgProcessor{"ParameterStatus", {}}; - BE_known_msgs['1'] = MsgProcessor{"ParseComplete", {}}; - BE_known_msgs['s'] = MsgProcessor{"PortalSuspend", {}}; - BE_known_msgs['Z'] = MsgProcessor{"ReadyForQuery", {}}; - BE_known_msgs['T'] = MsgProcessor{"RowDescription", {}}; + BE_known_msgs['R'] = + MessageProcessor{"Authentication", BODY_FORMAT(ByteN), {&DecoderImpl::decodeAuthentication}}; + BE_known_msgs['K'] = MessageProcessor{"BackendKeyData", BODY_FORMAT(Int32, Int32), {}}; + BE_known_msgs['2'] = MessageProcessor{"BindComplete", NO_BODY, {}}; + BE_known_msgs['3'] = MessageProcessor{"CloseComplete", NO_BODY, {}}; + BE_known_msgs['C'] = MessageProcessor{ + "CommandComplete", BODY_FORMAT(String), {&DecoderImpl::decodeBackendStatements}}; + BE_known_msgs['d'] = MessageProcessor{"CopyData", BODY_FORMAT(ByteN), {}}; + BE_known_msgs['c'] = MessageProcessor{"CopyDone", NO_BODY, {}}; + BE_known_msgs['G'] = MessageProcessor{"CopyInResponse", BODY_FORMAT(Int8, Array), {}}; + BE_known_msgs['H'] = MessageProcessor{"CopyOutResponse", BODY_FORMAT(Int8, Array), {}}; + BE_known_msgs['W'] = MessageProcessor{"CopyBothResponse", BODY_FORMAT(Int8, Array), {}}; + BE_known_msgs['D'] = MessageProcessor{"DataRow", BODY_FORMAT(Array), {}}; + BE_known_msgs['I'] = MessageProcessor{"EmptyQueryResponse", NO_BODY, {}}; + BE_known_msgs['E'] = MessageProcessor{ + "ErrorResponse", BODY_FORMAT(Byte1, String), {&DecoderImpl::decodeBackendErrorResponse}}; + BE_known_msgs['V'] = MessageProcessor{"FunctionCallResponse", BODY_FORMAT(VarByteN), {}}; + BE_known_msgs['v'] = MessageProcessor{"NegotiateProtocolVersion", BODY_FORMAT(ByteN), {}}; + BE_known_msgs['n'] = MessageProcessor{"NoData", NO_BODY, {}}; + BE_known_msgs['N'] = MessageProcessor{ + "NoticeResponse", BODY_FORMAT(ByteN), {&DecoderImpl::decodeBackendNoticeResponse}}; + BE_known_msgs['A'] = + MessageProcessor{"NotificationResponse", BODY_FORMAT(Int32, String, String), {}}; + BE_known_msgs['t'] = MessageProcessor{"ParameterDescription", BODY_FORMAT(Array), {}}; + BE_known_msgs['S'] = MessageProcessor{"ParameterStatus", BODY_FORMAT(String, String), {}}; + BE_known_msgs['1'] = MessageProcessor{"ParseComplete", NO_BODY, {}}; + BE_known_msgs['s'] = MessageProcessor{"PortalSuspend", NO_BODY, {}}; + BE_known_msgs['Z'] = MessageProcessor{"ReadyForQuery", 
BODY_FORMAT(Byte1), {}}; + BE_known_msgs['T'] = MessageProcessor{ + "RowDescription", + BODY_FORMAT(Array>), + {}}; // Handler for unknown Backend messages. - BE_messages_.unknown_ = MsgProcessor{"Other", {&DecoderImpl::incMessagesUnknown}}; + BE_messages_.unknown_ = + MessageProcessor{"Other", BODY_FORMAT(ByteN), {&DecoderImpl::incMessagesUnknown}}; // Setup hash map for handling backend statements. BE_statements_["BEGIN"] = [this](DecoderImpl*) -> void { @@ -154,7 +176,7 @@ void DecoderImpl::initialize() { }; } -bool DecoderImpl::parseMessage(Buffer::Instance& data) { +bool DecoderImpl::parseHeader(Buffer::Instance& data) { ENVOY_LOG(trace, "postgres_proxy: parsing message, len {}", data.length()); // The minimum size of the message sufficient for parsing is 5 bytes. @@ -171,11 +193,10 @@ bool DecoderImpl::parseMessage(Buffer::Instance& data) { // The 1 byte message type and message length should be in the buffer // Check if the entire message has been read. std::string message; - - uint32_t length = data.peekBEInt(startup_ ? 0 : 1); - if (data.length() < (length + (startup_ ? 0 : 1))) { + message_len_ = data.peekBEInt(startup_ ? 0 : 1); + if (data.length() < (message_len_ + (startup_ ? 0 : 1))) { ENVOY_LOG(trace, "postgres_proxy: cannot parse message. Need {} bytes in buffer", - length + (startup_ ? 0 : 1)); + message_len_ + (startup_ ? 0 : 1)); // Not enough data in the buffer. return false; } @@ -197,15 +218,8 @@ bool DecoderImpl::parseMessage(Buffer::Instance& data) { } } - setMessageLength(length); - data.drain(startup_ ? 4 : 5); // Length plus optional 1st byte. - auto bytesToRead = length - 4; - message.assign(std::string(static_cast(data.linearize(bytesToRead)), bytesToRead)); - data.drain(bytesToRead); - setMessage(message); - ENVOY_LOG(trace, "postgres_proxy: msg parsed"); return true; } @@ -220,7 +234,7 @@ bool DecoderImpl::onData(Buffer::Instance& data, bool frontend) { ENVOY_LOG(trace, "postgres_proxy: decoding {} bytes", data.length()); - if (!parseMessage(data)) { + if (!parseHeader(data)) { return false; } @@ -229,7 +243,7 @@ bool DecoderImpl::onData(Buffer::Instance& data, bool frontend) { // Set processing to the handler of unknown messages. // If message is found, the processing will be updated. - std::reference_wrapper msg = msg_processor.unknown_; + std::reference_wrapper msg = msg_processor.unknown_; if (startup_) { msg = std::ref(first_); @@ -241,16 +255,32 @@ bool DecoderImpl::onData(Buffer::Instance& data, bool frontend) { } } - std::vector& actions = std::get<1>(msg.get()); - for (const auto& action : actions) { - action(this); + // message_len_ specifies total message length including 4 bytes long + // "length" field. The length of message body is total length minus size + // of "length" field (4 bytes). + uint32_t bytes_to_read = message_len_ - 4; + + std::vector& actions = std::get<2>(msg.get()); + if (!actions.empty()) { + // Linearize the message for processing. + message_.assign(std::string(static_cast(data.linearize(bytes_to_read)), bytes_to_read)); + + // Invoke actions associated with the type of received message. + for (const auto& action : actions) { + action(this); + } + + // Drop the linearized message. 
+ message_.erase(); } ENVOY_LOG(debug, "({}) command = {} ({})", msg_processor.direction_, command_, std::get<0>(msg.get())); - ENVOY_LOG(debug, "({}) length = {}", msg_processor.direction_, getMessageLength()); - ENVOY_LOG(debug, "({}) message = {}", msg_processor.direction_, getMessage()); + ENVOY_LOG(debug, "({}) length = {}", msg_processor.direction_, message_len_); + ENVOY_LOG(debug, "({}) message = {}", msg_processor.direction_, + genDebugMessage(msg, data, bytes_to_read)); + data.drain(bytes_to_read); ENVOY_LOG(trace, "postgres_proxy: {} bytes remaining in buffer", data.length()); return true; @@ -366,6 +396,19 @@ void DecoderImpl::onStartup() { } } +// Method generates displayable format of currently processed message. +const std::string DecoderImpl::genDebugMessage(const MessageProcessor& msg, Buffer::Instance& data, + uint32_t message_len) { + const MsgBodyReader& f = std::get<1>(msg); + std::string message = "Unrecognized"; + if (f != nullptr) { + const auto msgParser = f(); + msgParser->read(data, message_len); + message = msgParser->toString(); + } + return message; +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h index 24465b55731f..409cdbba659c 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h @@ -7,6 +7,7 @@ #include "common/common/logger.h" #include "extensions/common/sqlutils/sqlutils.h" +#include "extensions/filters/network/postgres_proxy/postgres_message.h" #include "extensions/filters/network/postgres_proxy/postgres_session.h" #include "absl/container/flat_hash_map.h" @@ -71,36 +72,39 @@ class DecoderImpl : public Decoder, Logger::Loggable { bool onData(Buffer::Instance& data, bool frontend) override; PostgresSession& getSession() override { return session_; } - void setMessage(std::string message) { message_ = message; } std::string getMessage() { return message_; } - void setMessageLength(uint32_t message_len) { message_len_ = message_len; } - uint32_t getMessageLength() { return message_len_; } - void setStartup(bool startup) { startup_ = startup; } void initialize(); bool encrypted() const { return encrypted_; } protected: - // Message action defines the Decoder's method which will be invoked + // MsgAction defines the Decoder's method which will be invoked // when a specific message has been decoded. using MsgAction = std::function; - // MsgProcessor has two fields: + // MsgBodyReader is a function which returns a pointer to a Message + // class which is able to read the Postgres message body. + // The Postgres message body structure depends on the message type. + using MsgBodyReader = std::function()>; + + // MessageProcessor has the following fields: // first - string with message description - // second - vector of Decoder's methods which are invoked when the message + // second - function which instantiates a Message object of specific type + // which is capable of parsing the message's body. + // third - vector of Decoder's methods which are invoked when the message // is processed. - using MsgProcessor = std::pair>; + using MessageProcessor = std::tuple>; // Frontend and Backend messages. using MsgGroup = struct { // String describing direction (Frontend or Backend). 
std::string direction_; // Hash map indexed by messages' 1st byte points to handlers used for processing messages. - absl::flat_hash_map messages_; + absl::flat_hash_map messages_; // Handler used for processing messages not found in hash map. - MsgProcessor unknown_; + MessageProcessor unknown_; }; // Hash map binding keyword found in a message to an @@ -117,7 +121,7 @@ class DecoderImpl : public Decoder, Logger::Loggable { MsgAction unknown_; }; - bool parseMessage(Buffer::Instance& data); + bool parseHeader(Buffer::Instance& data); void decode(Buffer::Instance& data); void decodeAuthentication(); void decodeBackendStatements(); @@ -133,6 +137,11 @@ class DecoderImpl : public Decoder, Logger::Loggable { void incSessionsEncrypted() { callbacks_->incSessionsEncrypted(); } void incSessionsUnencrypted() { callbacks_->incSessionsUnencrypted(); } + // Helper method generating currently processed message in + // displayable format. + const std::string genDebugMessage(const MessageProcessor& msg, Buffer::Instance& data, + uint32_t message_len); + DecoderCallbacks* callbacks_{}; PostgresSession session_{}; @@ -152,7 +161,7 @@ class DecoderImpl : public Decoder, Logger::Loggable { // Startup message message which does not start with 1 byte TYPE. // It starts with message length and must be therefore handled // differently. - MsgProcessor first_; + MessageProcessor first_; // hash map for dispatching backend transaction messages KeywordProcessor BE_statements_; diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc index f66754c05101..ee8526b92c9f 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc @@ -13,8 +13,7 @@ namespace PostgresProxy { PostgresFilterConfig::PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing, Stats::Scope& scope) - : stat_prefix_{stat_prefix}, - enable_sql_parsing_(enable_sql_parsing), scope_{scope}, stats_{generateStats(stat_prefix, + : enable_sql_parsing_(enable_sql_parsing), scope_{scope}, stats_{generateStats(stat_prefix, scope)} {} PostgresFilter::PostgresFilter(PostgresFilterConfigSharedPtr config) : config_{config} { diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.h b/source/extensions/filters/network/postgres_proxy/postgres_filter.h index 5571a0587c40..f3ef83a6abce 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_filter.h @@ -65,7 +65,6 @@ class PostgresFilterConfig { PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing, Stats::Scope& scope); - const std::string stat_prefix_; bool enable_sql_parsing_{true}; Stats::Scope& scope_; PostgresProxyStats stats_; diff --git a/source/extensions/filters/network/postgres_proxy/postgres_message.cc b/source/extensions/filters/network/postgres_proxy/postgres_message.cc new file mode 100644 index 000000000000..5f7c1f27d814 --- /dev/null +++ b/source/extensions/filters/network/postgres_proxy/postgres_message.cc @@ -0,0 +1,84 @@ +#include "extensions/filters/network/postgres_proxy/postgres_message.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace PostgresProxy { + +// String type methods. +bool String::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { + // First find the terminating zero. 
+ const char zero = 0; + const ssize_t index = data.search(&zero, 1, pos); + if (index == -1) { + return false; + } + + // Reserve that many bytes in the string. + const uint64_t size = index - pos; + value_.resize(size); + // Now copy from buffer to string. + data.copyOut(pos, index - pos, value_.data()); + pos += (size + 1); + left -= (size + 1); + + return true; +} + +std::string String::toString() const { return absl::StrCat("[", value_, "]"); } + +// ByteN type methods. +bool ByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { + if (left > (data.length() - pos)) { + return false; + } + value_.resize(left); + data.copyOut(pos, left, value_.data()); + pos += left; + left = 0; + return true; +} + +std::string ByteN::toString() const { + std::string out = "["; + absl::StrAppend(&out, absl::StrJoin(value_, " ")); + absl::StrAppend(&out, "]"); + return out; +} + +// VarByteN type methods. +bool VarByteN::read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { + if ((left < sizeof(int32_t)) || ((data.length() - pos) < sizeof(int32_t))) { + return false; + } + len_ = data.peekBEInt(pos); + pos += sizeof(int32_t); + left -= sizeof(int32_t); + if (len_ < 1) { + // There is no payload if length is not positive. + value_.clear(); + return true; + } + if ((left < static_cast(len_)) || + ((data.length() - pos) < static_cast(len_))) { + return false; + } + value_.resize(len_); + data.copyOut(pos, len_, value_.data()); + pos += len_; + left -= len_; + return true; +} + +std::string VarByteN::toString() const { + std::string out; + out = fmt::format("[({} bytes):", len_); + absl::StrAppend(&out, absl::StrJoin(value_, " ")); + absl::StrAppend(&out, "]"); + return out; +} + +} // namespace PostgresProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/postgres_proxy/postgres_message.h b/source/extensions/filters/network/postgres_proxy/postgres_message.h new file mode 100644 index 000000000000..03eb90a249e0 --- /dev/null +++ b/source/extensions/filters/network/postgres_proxy/postgres_message.h @@ -0,0 +1,276 @@ +#include "common/buffer/buffer_impl.h" + +#include "absl/strings/str_cat.h" +#include "absl/strings/str_join.h" +#include "fmt/printf.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace PostgresProxy { + +/** + * Postgres messages are described in official Postgres documentation: + * https://www.postgresql.org/docs/12/protocol-message-formats.html + * + * Most of messages start with 1-byte message identifier followed by 4-bytes length field. Few + * messages are defined without starting 1-byte character and are used during well-defined initial + * stage of connection process. + * + * Messages are composed from various fields: 8, 16, 32-bit integers, String, Arrays, etc. + * + * Structures defined below have the same naming as types used in official Postgres documentation. + * + * Each structure has the following methods: + * read - to read number of bytes from received buffer. The number of bytes depends on structure + * type. toString - method returns displayable representation of the structure value. + * + */ + +// Template for integer types. +// Size of integer types is fixed and depends on the type of integer. +template class Int { +public: + /** + * Read integer value from data buffer. + * @param data reference to a buffer containing data to read. + * @param pos offset in the buffer where data to read is located. 
Successful read will advance + * this parameter. + * @param left number of bytes to be read to reach the end of Postgres message. + * Successful read will adjust this parameter. + * @return boolean value indicating whether read was successful. If read returns + * false "pos" and "left" params are not updated. When read is not successful, + * the caller should not continue reading next values from the data buffer + * for the current message. + */ + bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { + if ((data.length() - pos) < sizeof(T)) { + return false; + } + value_ = data.peekBEInt(pos); + pos += sizeof(T); + left -= sizeof(T); + return true; + } + + std::string toString() const { return fmt::format("[{}]", value_); } + + T get() const { return value_; } + +private: + T value_{}; +}; + +using Int32 = Int; +using Int16 = Int; +using Int8 = Int; + +// 8-bits character value. +using Byte1 = Int; + +// String type requires byte with zero value to indicate end of string. +class String { +public: + /** + * See above for parameter and return value description. + */ + bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left); + std::string toString() const; + +private: + std::string value_; +}; + +// ByteN type is used as the last type in the Postgres message and contains +// sequence of bytes. The length must be deduced from message length. +class ByteN { +public: + /** + * See above for parameter and return value description. + */ + bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left); + std::string toString() const; + +private: + std::vector value_; +}; + +// VarByteN represents the structure consisting of 4 bytes of length +// indicating how many bytes follow. +// In Postgres documentation it is described as: +// - Int32 +// The number of bytes in the structure (this count does not include itself). Can be +// zero. As a special case, -1 indicates a NULL (no result). No value bytes follow in the NULL +// case. +// +// - ByteN +// The sequence of bytes representing the value. Bytes are present only when length has a positive +// value. +class VarByteN { +public: + /** + * See above for parameter and return value description. + */ + bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left); + std::string toString() const; + +private: + int32_t len_; + std::vector value_; +}; + +// Array contains one or more values of the same type. +template class Array { +public: + /** + * See above for parameter and return value description. + */ + bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { + // First read the 16 bits value which indicates how many + // elements there are in the array. + if (((data.length() - pos) < sizeof(uint16_t)) || (left < sizeof(uint16_t))) { + return false; + } + const uint16_t num = data.peekBEInt(pos); + pos += sizeof(uint16_t); + left -= sizeof(uint16_t); + if (num != 0) { + for (uint16_t i = 0; i < num; i++) { + auto item = std::make_unique(); + if (!item->read(data, pos, left)) { + return false; + } + value_.push_back(std::move(item)); + } + } + return true; + } + + std::string toString() const { + std::string out = fmt::format("[Array of {}:{{", value_.size()); + + // Iterate through all elements in the array. + // No delimiter is required between elements, as each + // element is wrapped in "[]" or "{}". 
+ for (const auto& i : value_) { + absl::StrAppend(&out, i->toString()); + } + absl::StrAppend(&out, "}]"); + + return out; + } + +private: + std::vector> value_; +}; + +// Repeated is a composite type used at the end of the message. +// It indicates to read the value of the same type until the end +// of the Postgres message. +template class Repeated { +public: + /** + * See above for parameter and return value description. + */ + bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { + if ((data.length() - pos) < left) { + return false; + } + // Read until nothing is left. + while (left != 0) { + auto item = std::make_unique(); + if (!item->read(data, pos, left)) { + return false; + } + value_.push_back(std::move(item)); + } + return true; + } + + std::string toString() const { + std::string out; + + // Iterate through all repeated elements. + // No delimiter is required between elements, as each + // element is wrapped in "[]" or "{}". + for (const auto& i : value_) { + absl::StrAppend(&out, i->toString()); + } + return out; + } + +private: + std::vector> value_; +}; + +// Interface to Postgres message class. +class Message { +public: + virtual ~Message() = default; + + // read method should read only as many bytes from data + // buffer as it is indicated in message's length field. + // "length" parameter indicates how many bytes were indicated in Postgres message's + // length field. "data" buffer may contain more bytes than "length". + virtual bool read(const Buffer::Instance& data, const uint64_t length) PURE; + + // toString method provides displayable representation of + // the Postgres message. + virtual std::string toString() const PURE; +}; + +// Sequence is tuple like structure, which binds together +// set of several fields of different types. +template class Sequence; + +template +class Sequence : public Message { + FirstField first_; + Sequence remaining_; + +public: + Sequence() = default; + std::string toString() const override { + return absl::StrCat(first_.toString(), remaining_.toString()); + } + + bool read(const Buffer::Instance& data, const uint64_t length) override { + uint64_t pos = 0; + uint64_t left = length; + return read(data, pos, left); + } + + /** + * Implementation of "read" method for variadic template. + * It reads data for the current type and invokes read operation + * for remaining types. + * See above for parameter and return value description for individual types. + */ + bool read(const Buffer::Instance& data, uint64_t& pos, uint64_t& left) { + bool result = first_.read(data, pos, left); + if (!result) { + return false; + } + return remaining_.read(data, pos, left); + } +}; + +// Terminal template definition for variadic Sequence template. +template <> class Sequence<> : public Message { +public: + Sequence<>() = default; + std::string toString() const override { return ""; } + bool read(const Buffer::Instance&, uint64_t&, uint64_t&) { return true; } + bool read(const Buffer::Instance&, const uint64_t) override { return true; } +}; + +// Helper function to create pointer to a Sequence structure and is used by Postgres +// decoder after learning the type of Postgres message. 
+template std::unique_ptr createMsgBodyReader() { + return std::make_unique>(); +} + +} // namespace PostgresProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/ratelimit/ratelimit.cc b/source/extensions/filters/network/ratelimit/ratelimit.cc index 430508ce3b61..00ed50a9f60c 100644 --- a/source/extensions/filters/network/ratelimit/ratelimit.cc +++ b/source/extensions/filters/network/ratelimit/ratelimit.cc @@ -49,7 +49,8 @@ Network::FilterStatus Filter::onNewConnection() { config_->stats().active_.inc(); config_->stats().total_.inc(); calling_limit_ = true; - client_->limit(*this, config_->domain(), config_->descriptors(), Tracing::NullSpan::instance()); + client_->limit(*this, config_->domain(), config_->descriptors(), Tracing::NullSpan::instance(), + filter_callbacks_->connection().streamInfo()); calling_limit_ = false; } diff --git a/source/extensions/filters/network/rbac/rbac_filter.cc b/source/extensions/filters/network/rbac/rbac_filter.cc index 03b6567f24d0..6e160cb24e7c 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.cc +++ b/source/extensions/filters/network/rbac/rbac_filter.cc @@ -39,26 +39,35 @@ Network::FilterStatus RoleBasedAccessControlFilter::onData(Buffer::Instance&, bo : "none", callbacks_->connection().streamInfo().dynamicMetadata().DebugString()); + std::string log_policy_id = "none"; // When the enforcement type is continuous always do the RBAC checks. If it is a one time check, // run the check once and skip it for subsequent onData calls. if (config_->enforcementType() == envoy::extensions::filters::network::rbac::v3::RBAC::CONTINUOUS) { - shadow_engine_result_ = checkEngine(Filters::Common::RBAC::EnforcementMode::Shadow); - engine_result_ = checkEngine(Filters::Common::RBAC::EnforcementMode::Enforced); + shadow_engine_result_ = + checkEngine(Filters::Common::RBAC::EnforcementMode::Shadow).engine_result_; + auto result = checkEngine(Filters::Common::RBAC::EnforcementMode::Enforced); + engine_result_ = result.engine_result_; + log_policy_id = result.connection_termination_details_; } else { if (shadow_engine_result_ == Unknown) { // TODO(quanlin): Pass the shadow engine results to other filters. 
- shadow_engine_result_ = checkEngine(Filters::Common::RBAC::EnforcementMode::Shadow); + shadow_engine_result_ = + checkEngine(Filters::Common::RBAC::EnforcementMode::Shadow).engine_result_; } if (engine_result_ == Unknown) { - engine_result_ = checkEngine(Filters::Common::RBAC::EnforcementMode::Enforced); + auto result = checkEngine(Filters::Common::RBAC::EnforcementMode::Enforced); + engine_result_ = result.engine_result_; + log_policy_id = result.connection_termination_details_; } } if (engine_result_ == Allow) { return Network::FilterStatus::Continue; } else if (engine_result_ == Deny) { + callbacks_->connection().streamInfo().setConnectionTerminationDetails( + Filters::Common::RBAC::responseDetail(log_policy_id)); callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); return Network::FilterStatus::StopIteration; } @@ -80,45 +89,41 @@ void RoleBasedAccessControlFilter::setDynamicMetadata(std::string shadow_engine_ callbacks_->connection().streamInfo().setDynamicMetadata(NetworkFilterNames::get().Rbac, metrics); } -EngineResult -RoleBasedAccessControlFilter::checkEngine(Filters::Common::RBAC::EnforcementMode mode) { +Result RoleBasedAccessControlFilter::checkEngine(Filters::Common::RBAC::EnforcementMode mode) { const auto engine = config_->engine(mode); + std::string effective_policy_id; if (engine != nullptr) { - std::string effective_policy_id; - // Check authorization decision and do Action operations - if (engine->handleAction(callbacks_->connection(), callbacks_->connection().streamInfo(), - &effective_policy_id)) { + bool allowed = engine->handleAction( + callbacks_->connection(), callbacks_->connection().streamInfo(), &effective_policy_id); + const std::string log_policy_id = effective_policy_id.empty() ? "none" : effective_policy_id; + if (allowed) { if (mode == Filters::Common::RBAC::EnforcementMode::Shadow) { - ENVOY_LOG(debug, "shadow allowed, matched policy {}", - effective_policy_id.empty() ? "none" : effective_policy_id); + ENVOY_LOG(debug, "shadow allowed, matched policy {}", log_policy_id); config_->stats().shadow_allowed_.inc(); setDynamicMetadata( Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultAllowed, effective_policy_id); } else if (mode == Filters::Common::RBAC::EnforcementMode::Enforced) { - ENVOY_LOG(debug, "enforced allowed, matched policy {}", - effective_policy_id.empty() ? "none" : effective_policy_id); + ENVOY_LOG(debug, "enforced allowed, matched policy {}", log_policy_id); config_->stats().allowed_.inc(); } - return Allow; + return Result{Allow, effective_policy_id}; } else { if (mode == Filters::Common::RBAC::EnforcementMode::Shadow) { - ENVOY_LOG(debug, "shadow denied, matched policy {}", - effective_policy_id.empty() ? "none" : effective_policy_id); + ENVOY_LOG(debug, "shadow denied, matched policy {}", log_policy_id); config_->stats().shadow_denied_.inc(); setDynamicMetadata( Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultDenied, effective_policy_id); } else if (mode == Filters::Common::RBAC::EnforcementMode::Enforced) { - ENVOY_LOG(debug, "enforced denied, matched policy {}", - effective_policy_id.empty() ? 
"none" : effective_policy_id); + ENVOY_LOG(debug, "enforced denied, matched policy {}", log_policy_id); config_->stats().denied_.inc(); } - return Deny; + return Result{Deny, log_policy_id}; } } - return None; + return Result{None, "none"}; } } // namespace RBACFilter diff --git a/source/extensions/filters/network/rbac/rbac_filter.h b/source/extensions/filters/network/rbac/rbac_filter.h index 19c9360e2134..f45a938956c3 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.h +++ b/source/extensions/filters/network/rbac/rbac_filter.h @@ -17,6 +17,11 @@ namespace RBACFilter { enum EngineResult { Unknown, None, Allow, Deny }; +struct Result { + EngineResult engine_result_; + std::string connection_termination_details_; +}; + /** * Configuration for the RBAC network filter. */ @@ -74,7 +79,7 @@ class RoleBasedAccessControlFilter : public Network::ReadFilter, EngineResult engine_result_{Unknown}; EngineResult shadow_engine_result_{Unknown}; - EngineResult checkEngine(Filters::Common::RBAC::EnforcementMode mode); + Result checkEngine(Filters::Common::RBAC::EnforcementMode mode); }; } // namespace RBACFilter diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index 208a1a5c1cab..d10e951e7b85 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -88,7 +88,7 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP std::make_unique(prefix_routes, std::move(upstreams), context.runtime()); auto fault_manager = std::make_unique( - context.random(), context.runtime(), proto_config.faults()); + context.api().randomGenerator(), context.runtime(), proto_config.faults()); std::shared_ptr splitter = std::make_shared( diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc index aaedee18a560..97924d6086c6 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc @@ -19,8 +19,8 @@ SniDynamicForwardProxyNetworkFilterConfigFactory::createFilterFactoryFromProtoTy const FilterConfig& proto_config, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), context.random(), - context.runtime(), context.scope()); + context.singletonManager(), context.dispatcher(), context.threadLocal(), + context.api().randomGenerator(), context.runtime(), context.scope()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); diff --git a/source/extensions/filters/network/thrift_proxy/config.cc b/source/extensions/filters/network/thrift_proxy/config.cc index ab8aa302eb1f..fc2edbb54cb1 100644 --- a/source/extensions/filters/network/thrift_proxy/config.cc +++ b/source/extensions/filters/network/thrift_proxy/config.cc @@ -105,7 +105,7 @@ Network::FilterFactoryCb ThriftProxyFilterConfigFactory::createFilterFactoryFrom return [filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared( - *filter_config, context.random(), context.dispatcher().timeSource())); + *filter_config, context.api().randomGenerator(), context.dispatcher().timeSource())); }; } diff --git 
a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc index e26a565f5856..45e025c04e34 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc @@ -45,7 +45,8 @@ void Filter::initiateCall(const ThriftProxy::MessageMetadata& metadata) { if (!descriptors.empty()) { state_ = State::Calling; initiating_call_ = true; - client_->limit(*this, config_->domain(), descriptors, Tracing::NullSpan::instance()); + client_->limit(*this, config_->domain(), descriptors, Tracing::NullSpan::instance(), + decoder_callbacks_->streamInfo()); initiating_call_ = false; } } diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc index f3910f0614bc..aa10f24d5b71 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc @@ -63,9 +63,11 @@ RouteConstSharedPtr RouteEntryImplBase::clusterEntry(uint64_t random_value, const auto& cluster_header = clusterHeader(); if (!cluster_header.get().empty()) { const auto& headers = metadata.headers(); - const auto* entry = headers.get(cluster_header); - if (entry != nullptr) { - return std::make_shared(*this, entry->value().getStringView()); + const auto entry = headers.get(cluster_header); + if (!entry.empty()) { + // This is an implicitly untrusted header, so per the API documentation only the first + // value is used. + return std::make_shared(*this, entry[0]->value().getStringView()); } return nullptr; diff --git a/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc index 694831013a42..01d4154a4d08 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc @@ -40,13 +40,14 @@ bool RequestHeadersAction::populateDescriptor(const RouteEntry&, RateLimit::Desc return true; } - const Http::HeaderEntry* header_value = metadata.headers().get(header_name_); - if (!header_value) { + const auto header_value = metadata.headers().get(header_name_); + if (header_value.empty()) { return false; } + // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially populate all values. 
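// Hedged illustration of the header-access pattern used in the nearby thrift_proxy router
// hunks: Http::HeaderMap::get() now returns a collection of matching entries instead of a
// single pointer, and because the header is implicitly untrusted only the first value is
// consumed. The types here are simplified stand-ins, not the Envoy API.
#include <string>
#include <vector>

struct FakeHeaderEntry {                                // stand-in for Http::HeaderEntry
  std::string value_;
  const std::string& value() const { return value_; }
};
using GetResult = std::vector<const FakeHeaderEntry*>;  // stand-in for HeaderMap::get() result

// Returns the first value of an implicitly untrusted header, or an empty string if absent.
std::string firstHeaderValueOrEmpty(const GetResult& entries) {
  if (entries.empty()) {
    return std::string();
  }
  return entries[0]->value();
}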
descriptor.entries_.push_back( - {descriptor_key_, std::string(header_value->value().getStringView())}); + {descriptor_key_, std::string(header_value[0]->value().getStringView())}); return true; } diff --git a/source/extensions/filters/network/wasm/BUILD b/source/extensions/filters/network/wasm/BUILD new file mode 100644 index 000000000000..f87909482665 --- /dev/null +++ b/source/extensions/filters/network/wasm/BUILD @@ -0,0 +1,43 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +# Public docs: docs/root/configuration/network_filters/wasm_filter.rst + +envoy_cc_library( + name = "wasm_filter_lib", + srcs = ["wasm_filter.cc"], + hdrs = ["wasm_filter.h"], + deps = [ + "//include/envoy/server:filter_config_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/filters/network:well_known_names", + "@envoy_api//envoy/extensions/filters/network/wasm/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", + deps = [ + ":wasm_filter_lib", + "//include/envoy/registry", + "//source/common/common:empty_string", + "//source/common/config:datasource_lib", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/network/wasm/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/network/wasm/config.cc b/source/extensions/filters/network/wasm/config.cc new file mode 100644 index 000000000000..05f8f1abb854 --- /dev/null +++ b/source/extensions/filters/network/wasm/config.cc @@ -0,0 +1,37 @@ +#include "extensions/filters/network/wasm/config.h" + +#include "envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "common/common/empty_string.h" +#include "common/config/datasource.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/filters/network/wasm/wasm_filter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Wasm { + +Network::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config, + Server::Configuration::FactoryContext& context) { + auto filter_config = std::make_shared(proto_config, context); + return [filter_config](Network::FilterManager& filter_manager) -> void { + auto filter = filter_config->createFilter(); + if (filter) { + filter_manager.addFilter(filter); + } // else fail open + }; +} + +/** + * Static registration for the Wasm filter. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(WasmFilterConfig, Server::Configuration::NamedNetworkFilterConfigFactory); + +} // namespace Wasm +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/wasm/config.h b/source/extensions/filters/network/wasm/config.h new file mode 100644 index 000000000000..12201c56b187 --- /dev/null +++ b/source/extensions/filters/network/wasm/config.h @@ -0,0 +1,31 @@ +#pragma once + +#include "envoy/extensions/filters/network/wasm/v3/wasm.pb.h" +#include "envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h" + +#include "extensions/filters/network/common/factory_base.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Wasm { + +/** + * Config registration for the Wasm filter. @see NamedNetworkFilterConfigFactory. + */ +class WasmFilterConfig + : public Common::FactoryBase { +public: + WasmFilterConfig() : FactoryBase(NetworkFilterNames::get().Wasm) {} + +private: + Network::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config, + Server::Configuration::FactoryContext& context) override; +}; + +} // namespace Wasm +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/wasm/wasm_filter.cc b/source/extensions/filters/network/wasm/wasm_filter.cc new file mode 100644 index 000000000000..9d253b675abd --- /dev/null +++ b/source/extensions/filters/network/wasm/wasm_filter.cc @@ -0,0 +1,47 @@ +#include "extensions/filters/network/wasm/wasm_filter.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/enum_to_int.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Wasm { + +FilterConfig::FilterConfig(const envoy::extensions::filters::network::wasm::v3::Wasm& config, + Server::Configuration::FactoryContext& context) + : tls_slot_(context.threadLocal().allocateSlot()) { + plugin_ = std::make_shared( + config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(), + config.config().vm_config().runtime(), + Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(), + context.direction(), context.localInfo(), &context.listenerMetadata()); + + auto plugin = plugin_; + auto callback = [plugin, this](Common::Wasm::WasmHandleSharedPtr base_wasm) { + // NB: the Slot set() call doesn't complete inline, so all arguments must outlive this call. 
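// Hedged sketch of the thread-local dispatch that tls_slot_->set() performs just below, using
// illustrative types only (BaseWasm/ThreadLocalWasm are not the Envoy classes): the base Wasm
// VM is created once, then each worker builds its own per-thread handle from it on demand, so
// no VM instance is shared across worker threads.
#include <functional>
#include <memory>
#include <string>

struct BaseWasm { std::string name; };                       // stand-in for WasmHandleSharedPtr
struct ThreadLocalWasm { std::shared_ptr<BaseWasm> base; };  // stand-in for the per-worker handle

// Mirrors the lambda handed to the slot: capture the shared base VM by value so it outlives
// the call, and return nullptr when no base VM exists (the fail-open case).
std::function<std::shared_ptr<ThreadLocalWasm>()>
makePerWorkerFactory(std::shared_ptr<BaseWasm> base_wasm) {
  return [base_wasm]() -> std::shared_ptr<ThreadLocalWasm> {
    if (!base_wasm) {
      return nullptr;
    }
    return std::make_shared<ThreadLocalWasm>(ThreadLocalWasm{base_wasm});
  };
}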
+ tls_slot_->set( + [base_wasm, + plugin](Event::Dispatcher& dispatcher) -> std::shared_ptr { + if (!base_wasm) { + return nullptr; + } + return std::static_pointer_cast( + Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, dispatcher)); + }); + }; + + if (!Common::Wasm::createWasm( + config.config().vm_config(), plugin_, context.scope().createScope(""), + context.clusterManager(), context.initManager(), context.dispatcher(), context.api(), + context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { + throw Common::Wasm::WasmException( + fmt::format("Unable to create Wasm network filter {}", plugin->name_)); + } +} + +} // namespace Wasm +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/wasm/wasm_filter.h b/source/extensions/filters/network/wasm/wasm_filter.h new file mode 100644 index 000000000000..51adbcd7ac0c --- /dev/null +++ b/source/extensions/filters/network/wasm/wasm_filter.h @@ -0,0 +1,56 @@ +#pragma once + +#include + +#include "envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h" +#include "envoy/network/filter.h" +#include "envoy/server/filter_config.h" +#include "envoy/upstream/cluster_manager.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Wasm { + +using Envoy::Extensions::Common::Wasm::Context; +using Envoy::Extensions::Common::Wasm::Wasm; +using Envoy::Extensions::Common::Wasm::WasmHandle; + +class FilterConfig : Logger::Loggable { +public: + FilterConfig(const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config, + Server::Configuration::FactoryContext& context); + + std::shared_ptr createFilter() { + Wasm* wasm = nullptr; + if (tls_slot_->get()) { + wasm = tls_slot_->getTyped().wasm().get(); + } + if (plugin_->fail_open_ && (!wasm || wasm->isFailed())) { + return nullptr; + } + if (wasm && !root_context_id_) { + root_context_id_ = wasm->getRootContext(plugin_->root_id_)->id(); + } + return std::make_shared(wasm, root_context_id_, plugin_); + } + Envoy::Extensions::Common::Wasm::Wasm* wasm() { + return tls_slot_->getTyped().wasm().get(); + } + +private: + uint32_t root_context_id_{0}; + Envoy::Extensions::Common::Wasm::PluginSharedPtr plugin_; + ThreadLocal::SlotPtr tls_slot_; + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; +}; + +typedef std::shared_ptr FilterConfigSharedPtr; + +} // namespace Wasm +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index 78564a5a990f..7f62193c7896 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -52,6 +52,8 @@ class NetworkFilterNameValues { const std::string SniDynamicForwardProxy = "envoy.filters.network.sni_dynamic_forward_proxy"; // ZooKeeper proxy filter const std::string ZooKeeperProxy = "envoy.filters.network.zookeeper_proxy"; + // WebAssembly filter + const std::string Wasm = "envoy.filters.network.wasm"; }; using NetworkFilterNames = ConstSingleton; diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index 0c8c1bc4ea55..131856c718a7 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ 
b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -22,7 +22,7 @@ DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config) : root_scope_(context.scope()), cluster_manager_(context.clusterManager()), api_(context.api()), stats_(generateStats(config.stat_prefix(), root_scope_)), - resolver_timeout_(DEFAULT_RESOLVER_TIMEOUT), random_(context.random()) { + resolver_timeout_(DEFAULT_RESOLVER_TIMEOUT), random_(context.api().randomGenerator()) { using envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig; const auto& server_config = config.server_config(); @@ -82,16 +82,10 @@ DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( } const std::chrono::seconds ttl = std::chrono::seconds(dns_service.ttl().seconds()); - // Generate the full name for the DNS service. + // Generate the full name for the DNS service. All input parameters are populated + // strings enforced by the message definition const std::string full_service_name = Utils::buildServiceName(dns_service.service_name(), proto, virtual_domain.name()); - if (full_service_name.empty()) { - ENVOY_LOG( - trace, - "Unable to construct the full service name using name [{}], protocol[{}], domain[{}]", - dns_service.service_name(), proto, virtual_domain.name()); - continue; - } DnsSrvRecordPtr service_record_ptr = std::make_unique(full_service_name, proto, ttl); diff --git a/source/extensions/filters/udp/dns_filter/dns_filter_constants.h b/source/extensions/filters/udp/dns_filter/dns_filter_constants.h index 7dca40663e09..bde5cca070ef 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter_constants.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter_constants.h @@ -19,6 +19,7 @@ constexpr uint16_t DNS_RESPONSE_CODE_FORMAT_ERROR = 1; constexpr uint16_t DNS_RESPONSE_CODE_NAME_ERROR = 3; constexpr uint16_t DNS_RESPONSE_CODE_NOT_IMPLEMENTED = 4; +constexpr size_t MIN_QUERY_NAME_LENGTH = 3; constexpr size_t MAX_LABEL_LENGTH = 63; constexpr size_t MAX_NAME_LENGTH = 255; diff --git a/source/extensions/filters/udp/dns_filter/dns_filter_utils.cc b/source/extensions/filters/udp/dns_filter/dns_filter_utils.cc index 5f5a5f4d5b6b..cb22c4483fb3 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter_utils.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter_utils.cc @@ -24,12 +24,12 @@ std::string getProtoName(const DnsTable::DnsServiceProtocol& protocol) { case 17: proto = "udp"; break; - default: { - struct protoent* pe = getprotobynumber(protocol.number()); - if (pe != nullptr) { - proto = std::string(pe->p_name); - } - } + default: + // For Envoy to resolve a protocol to a name "/etc/protocols" + // should exist. This isn't guaranteed. Since most services are + // tcp or udp, if we get a different value, return an empty string. 
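// Hedged sketch of the name that buildServiceName() is expected to produce here; the full
// function body is not visible in this diff, so the shape below follows the RFC 2782 SRV
// convention that the visible fragment ("prepend '_' when missing") appears to implement.
#include <string>

std::string buildSrvNameSketch(const std::string& name, const std::string& proto,
                               const std::string& domain) {
  std::string result;
  result += (!name.empty() && name[0] == '_') ? name : "_" + name;
  result += '.';
  result += (!proto.empty() && proto[0] == '_') ? proto : "_" + proto;
  result += '.';
  result += domain;
  return result;
}
// Example: buildSrvNameSketch("ldap", "tcp", "example.com") -> "_ldap._tcp.example.com".
// The empty-input guards are removed in these hunks because the proto message definition
// now guarantees the inputs are populated.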
+ proto = EMPTY_STRING; + break; } // end switch } return proto; @@ -61,10 +61,6 @@ absl::string_view getProtoFromName(const absl::string_view name) { std::string buildServiceName(const std::string& name, const std::string& proto, const std::string& domain) { - if (name.empty() || proto.empty() || domain.empty()) { - return EMPTY_STRING; - } - std::string result{}; if (name[0] != '_') { result += "_"; diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.cc b/source/extensions/filters/udp/dns_filter/dns_parser.cc index 0ba7d13f3079..763424a63324 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.cc +++ b/source/extensions/filters/udp/dns_filter/dns_parser.cc @@ -147,10 +147,11 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, size_t available_bytes = buffer->length(); uint64_t offset = 0; uint16_t data; + bool done = false; DnsQueryParseState state{DnsQueryParseState::Init}; header_ = {}; - while (state != DnsQueryParseState::Finish) { + do { // Ensure that we have enough data remaining in the buffer to parse the query if (available_bytes < field_size) { context->counters_.underflow_counter.inc(); @@ -159,12 +160,6 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, return false; } - if (offset > buffer->length()) { - ENVOY_LOG(debug, "Buffer read offset [{}] is beyond buffer length [{}].", offset, - buffer->length()); - return false; - } - // Each aggregate DNS header field is 2 bytes wide. data = buffer->peekBEInt(offset); offset += field_size; @@ -193,12 +188,10 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, break; case DnsQueryParseState::Authority2: header_.additional_rrs = data; - state = DnsQueryParseState::Finish; + done = true; break; - case DnsQueryParseState::Finish: - NOT_REACHED_GCOVR_EXCL_LINE; } - } + } while (!done); if (!header_.flags.qr && header_.answers) { ENVOY_LOG(debug, "Answer records present in query"); @@ -326,14 +319,16 @@ DnsAnswerRecordPtr DnsMessageParser::parseDnsARecord(DnsAnswerCtx& ctx) { break; } - if (ip_addr != nullptr) { - ENVOY_LOG(trace, "Parsed address [{}] from record type [{}]: offset {}", - ip_addr->ip()->addressAsString(), ctx.record_type_, ctx.offset_); - - return std::make_unique(ctx.record_name_, ctx.record_type_, ctx.record_class_, - std::chrono::seconds(ctx.ttl_), std::move(ip_addr)); + if (ip_addr == nullptr) { + ENVOY_LOG(debug, "No IP parsed from an A or AAAA record"); + return nullptr; } - return nullptr; + + ENVOY_LOG(trace, "Parsed address [{}] from record type [{}]: offset {}", + ip_addr->ip()->addressAsString(), ctx.record_type_, ctx.offset_); + + return std::make_unique(ctx.record_name_, ctx.record_type_, ctx.record_class_, + std::chrono::seconds(ctx.ttl_), std::move(ip_addr)); } DnsSrvRecordPtr DnsMessageParser::parseDnsSrvRecord(DnsAnswerCtx& ctx) { @@ -377,11 +372,6 @@ DnsSrvRecordPtr DnsMessageParser::parseDnsSrvRecord(DnsAnswerCtx& ctx) { DnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::InstancePtr& buffer, uint64_t& offset) { - if (offset >= buffer->length()) { - ENVOY_LOG(debug, "Invalid offset for parsing answer record"); - return nullptr; - } - uint64_t available_bytes = buffer->length() - offset; const std::string record_name = parseDnsNameRecord(buffer, available_bytes, offset); if (record_name.empty()) { @@ -461,7 +451,9 @@ DnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::Instance DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePtr& buffer, uint64_t& offset) { 
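// Hedged illustration of why MIN_QUERY_NAME_LENGTH is 3 in the query-record hunk that
// follows: the shortest legal DNS name on the wire is a single one-character label,
// encoded as [length byte][label byte][terminating zero byte].
#include <cstdint>
#include <vector>

std::vector<uint8_t> encodeShortestDnsName(char c) {
  return {0x01, static_cast<uint8_t>(c), 0x00};  // e.g. "a." -> 01 'a' 00, exactly three bytes
}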
uint64_t available_bytes = buffer->length() - offset; - if (available_bytes == 0) { + + // This is the minimum data length needed to parse a name [length, value, null byte] + if (available_bytes < MIN_QUERY_NAME_LENGTH) { ENVOY_LOG(debug, "No available data in buffer to parse a query record"); return nullptr; } @@ -472,6 +464,7 @@ DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePt return nullptr; } + // After reading the name we should have data for the record type and class if (available_bytes < 2 * sizeof(uint16_t)) { ENVOY_LOG(debug, "Insufficient data in buffer to read query record type and class. " @@ -656,6 +649,7 @@ void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, Buffer::OwnedImpl query_buffer{}; Buffer::OwnedImpl answer_buffer{}; + Buffer::OwnedImpl addl_rec_buffer{}; ENVOY_LOG(trace, "Building response for query ID [{}]", query_context->id_); @@ -673,6 +667,11 @@ void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, if (answers.empty()) { continue; } + + // Serialize the additional records in parallel with the answers to ensure consistent + // records + const auto& additional_rrs = query_context->additional_; + const size_t num_answers = answers.size(); // Randomize the starting index if we have more than 8 records @@ -686,10 +685,37 @@ void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, // // See Section 2.3.4 of https://tools.ietf.org/html/rfc1035 RELEASE_ASSERT(query->name_.size() < MAX_NAME_LENGTH, - "Unable to serialize invalid query name"); + "Query name is too large for serialization"); // Serialize answer records whose names and types match the query if (answer->first == query->name_ && answer->second->type_ == query->type_) { + // Ensure that we can serialize the answer and the corresponding SRV additional + // record together. + + // It is still possible that there may be more additional records than those referenced + // by the answers. However, each serialized answer will have an accompanying additional + // record for the host. + if (query->type_ == DNS_RECORD_TYPE_SRV) { + const DnsSrvRecord* srv_rec = dynamic_cast(answer->second.get()); + const auto& target = srv_rec->targets_.begin(); + const auto& rr = additional_rrs.find(target->first); + + if (rr != additional_rrs.end()) { + Buffer::OwnedImpl serialized_rr{}; + + // If serializing the additional record fails, skip serializing the answer record + if (!rr->second->serialize(serialized_rr)) { + ENVOY_LOG(debug, "Unable to serialize answer record for {}", query->name_); + continue; + } + total_buffer_size += serialized_rr.length(); + addl_rec_buffer.add(serialized_rr); + ++serialized_additional_rrs; + } + } + + // Now we serialize the answer record. 
We check the length of the serialized + // data to ensure we don't exceed the DNS response limit Buffer::OwnedImpl serialized_answer; if (!answer->second->serialize(serialized_answer)) { ENVOY_LOG(debug, "Unable to serialize answer record for {}", query->name_); @@ -705,29 +731,6 @@ void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, } } } - - // Serialize Additional Resource Records - const auto& additional_rrs = query_context->additional_; - if (!additional_rrs.empty()) { - const size_t num_rrs = additional_rrs.size(); - auto rr = additional_rrs.begin(); - while (serialized_additional_rrs < num_rrs) { - Buffer::OwnedImpl serialized_rr; - if (!rr->second->serialize(serialized_rr)) { - ENVOY_LOG(debug, "Unable to serialize answer record for {}", query->name_); - continue; - } - total_buffer_size += serialized_rr.length(); - if (total_buffer_size > MAX_DNS_RESPONSE_SIZE) { - break; - } - answer_buffer.add(serialized_rr); - if (++serialized_additional_rrs == MAX_RETURNED_RECORDS) { - break; - } - ++rr; - } - } } setResponseCode(query_context, serialized_queries, serialized_answers); @@ -749,6 +752,7 @@ void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, // write the queries and answers buffer.move(query_buffer); buffer.move(answer_buffer); + buffer.move(addl_rec_buffer); } } // namespace DnsFilter diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.h b/source/extensions/filters/udp/dns_filter/dns_parser.h index 8bbbc0a38a80..8dbd07afc02f 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.h +++ b/source/extensions/filters/udp/dns_filter/dns_parser.h @@ -169,12 +169,11 @@ class DnsMessageParser : public Logger::Loggable { public: enum class DnsQueryParseState { Init, - Flags, // 2 bytes - Questions, // 2 bytes - Answers, // 2 bytes - Authority, // 2 bytes - Authority2, // 2 bytes - Finish + Flags, // 2 bytes + Questions, // 2 bytes + Answers, // 2 bytes + Authority, // 2 bytes + Authority2 // 2 bytes }; // The flags have been verified with dig and this structure should not be modified. 
The flag diff --git a/source/extensions/health_checkers/redis/config.cc b/source/extensions/health_checkers/redis/config.cc index 4e9b4d2e8157..b36a2b6f0118 100644 --- a/source/extensions/health_checkers/redis/config.cc +++ b/source/extensions/health_checkers/redis/config.cc @@ -20,7 +20,7 @@ Upstream::HealthCheckerSharedPtr RedisHealthCheckerFactory::createCustomHealthCh return std::make_shared( context.cluster(), config, getRedisHealthCheckConfig(config, context.messageValidationVisitor()), context.dispatcher(), - context.runtime(), context.random(), context.eventLogger(), context.api(), + context.runtime(), context.eventLogger(), context.api(), NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_); }; diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index 7eb66b5f8b0c..045aa56b8d0e 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ -14,10 +14,11 @@ namespace RedisHealthChecker { RedisHealthChecker::RedisHealthChecker( const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, const envoy::config::health_checker::redis::v2::Redis& redis_config, - Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Random::RandomGenerator& random, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) - : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), + : HealthCheckerImplBase(cluster, config, dispatcher, runtime, api.randomGenerator(), + std::move(event_logger)), client_factory_(client_factory), key_(redis_config.key()), auth_username_( NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(cluster.info(), api)), diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 73088832f18c..455f7c0c503c 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -28,7 +28,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { RedisHealthChecker( const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, const envoy::config::health_checker::redis::v2::Redis& redis_config, - Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Random::RandomGenerator& random, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory); diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index 8338093b7998..29eb78d155e1 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -181,7 +181,7 @@ envoy_cc_library( ], tags = ["nofips"], deps = [ - ":envoy_quic_crypto_server_stream_lib", + ":envoy_quic_proof_source_lib", ":envoy_quic_stream_lib", ":envoy_quic_utils_lib", ":quic_filter_manager_connection_lib", @@ -277,7 +277,6 @@ envoy_cc_library( ":envoy_quic_server_connection_lib", ":envoy_quic_server_session_lib", "//include/envoy/network:listener_interface", - "//source/common/http:utility_lib", "//source/server:connection_handler_lib", "@com_googlesource_quiche//:quic_core_server_lib", 
"@com_googlesource_quiche//:quic_core_utils_lib", @@ -375,13 +374,9 @@ envoy_cc_extension( # QUICHE can't build against FIPS BoringSSL until the FIPS build # is on a new enough version to have QUIC support. Remove it from # the build until then. Re-enable as part of #7433. - # - # QUICHE doesn't compile on Windows right now. Disable until it - # is fixed. deps = select({ "//bazel:boringssl_fips": [], "//bazel:boringssl_disabled": [], - "//bazel:windows_x86_64": [], "//conditions:default": [ ":active_quic_listener_config_lib", ":codec_lib", @@ -409,10 +404,7 @@ envoy_cc_library( "//bazel:linux": ["udp_gso_batch_writer.cc"], "//conditions:default": [], }), - hdrs = select({ - "//bazel:linux": ["udp_gso_batch_writer.h"], - "//conditions:default": [], - }), + hdrs = ["udp_gso_batch_writer.h"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], visibility = [ @@ -445,14 +437,3 @@ envoy_cc_library( "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], ) - -envoy_cc_library( - name = "envoy_quic_crypto_server_stream_lib", - srcs = ["envoy_quic_crypto_server_stream.cc"], - hdrs = ["envoy_quic_crypto_server_stream.h"], - tags = ["nofips"], - deps = [ - ":envoy_quic_proof_source_lib", - "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", - ], -) diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index 2d756875ad4f..f4808adc52b0 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -8,6 +8,7 @@ #include +#include "common/runtime/runtime_features.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_dispatcher.h" @@ -18,26 +19,27 @@ namespace Envoy { namespace Quic { -ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, - Network::ConnectionHandler& parent, - Network::ListenerConfig& listener_config, - const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, - const envoy::config::core::v3::RuntimeFeatureFlag& enabled) - : ActiveQuicListener(dispatcher, parent, +ActiveQuicListener::ActiveQuicListener( + uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, + Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config, + const quic::QuicConfig& quic_config, Network::Socket::OptionsSharedPtr options, + bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled) + : ActiveQuicListener(worker_index, concurrency, dispatcher, parent, listener_config.listenSocketFactory().getListenSocket(), listener_config, - quic_config, std::move(options), enabled) {} - -ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, - Network::ConnectionHandler& parent, - Network::SocketSharedPtr listen_socket, - Network::ListenerConfig& listener_config, - const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, - const envoy::config::core::v3::RuntimeFeatureFlag& enabled) - : Server::ConnectionHandlerImpl::ActiveListenerImplBase(parent, &listener_config), + quic_config, std::move(options), kernel_worker_routing, enabled) {} + +ActiveQuicListener::ActiveQuicListener( + uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, + Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket, + 
Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, + Network::Socket::OptionsSharedPtr options, bool kernel_worker_routing, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled) + : Server::ActiveUdpListenerBase(worker_index, concurrency, parent, *listen_socket, + dispatcher.createUdpListener(listen_socket, *this), + &listener_config), dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()), - listen_socket_(*listen_socket), enabled_(enabled, Runtime::LoaderSingleton::get()) { + kernel_worker_routing_(kernel_worker_routing), + enabled_(enabled, Runtime::LoaderSingleton::get()) { if (options != nullptr) { const bool ok = Network::Socket::applyOptions( options, listen_socket_, envoy::config::core::v3::SocketOption::STATE_BOUND); @@ -49,7 +51,7 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, } listen_socket_.addOptions(options); } - udp_listener_ = dispatcher_.createUdpListener(std::move(listen_socket), *this); + quic::QuicRandom* const random = quic::QuicRandom::GetInstance(); random->RandBytes(random_seed_, sizeof(random_seed_)); crypto_config_ = std::make_unique( @@ -94,7 +96,11 @@ void ActiveQuicListener::onListenerShutdown() { udp_listener_.reset(); } -void ActiveQuicListener::onData(Network::UdpRecvData& data) { +void ActiveQuicListener::onDataWorker(Network::UdpRecvData&& data) { + if (!enabled_.enabled()) { + return; + } + quic::QuicSocketAddress peer_address( envoyIpAddressToQuicSocketAddress(data.addresses_.peer_->ip())); quic::QuicSocketAddress self_address( @@ -112,6 +118,12 @@ void ActiveQuicListener::onData(Network::UdpRecvData& data) { /*packet_headers=*/nullptr, /*headers_length=*/0, /*owns_header_buffer*/ false); quic_dispatcher_->ProcessPacket(self_address, peer_address, packet); + + if (quic_dispatcher_->HasChlosBuffered()) { + // If there are any buffered CHLOs, activate a read event for the next event loop to process + // them. + udp_listener_->activateRead(); + } } void ActiveQuicListener::onReadReady() { @@ -119,7 +131,17 @@ void ActiveQuicListener::onReadReady() { ENVOY_LOG(trace, "Quic listener {}: runtime disabled", config_->name()); return; } + + if (quic_dispatcher_->HasChlosBuffered()) { + event_loops_with_buffered_chlo_for_test_++; + } + quic_dispatcher_->ProcessBufferedChlos(kNumSessionsToCreatePerLoop); + + // If there were more buffered than the limit, schedule again for the next event loop. + if (quic_dispatcher_->HasChlosBuffered()) { + udp_listener_->activateRead(); + } } void ActiveQuicListener::onWriteReady(const Network::Socket& /*socket*/) { @@ -136,6 +158,49 @@ void ActiveQuicListener::shutdownListener() { quic_dispatcher_->StopAcceptingNewConnections(); } +uint32_t ActiveQuicListener::destination(const Network::UdpRecvData& data) const { + if (kernel_worker_routing_) { + // The kernel has already routed the packet correctly. Make it stay on the current worker. + return worker_index_; + } + + // This implementation is not as performant as it could be. It will result in most packets being + // delivered by the kernel to the wrong worker, and then redirected to the correct worker. + // + // This could possibly be improved by keeping a global table of connection IDs, so that a new + // connection will add its connection ID to the table on the current worker, and so packets should + // be delivered to the correct worker by the kernel unless the client changes address. 
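// Hedged, self-contained restatement of the connection-id based routing implemented in
// destination() just below; parameter names are illustrative and htonl() is the POSIX byte
// order helper. Four bytes of the QUIC connection id are read (offset 6 for long headers,
// offset 1 for short/gQUIC headers), normalised to network byte order, and reduced modulo
// the worker count; packets too short to carry an id stay on the current worker.
#include <arpa/inet.h>

#include <cstddef>
#include <cstdint>
#include <cstring>

uint32_t pickWorkerSketch(const uint8_t* packet, size_t length, uint32_t concurrency,
                          uint32_t current_worker) {
  if (length < 9) {
    return current_worker;
  }
  uint32_t connection_id_snippet = 0;
  if (packet[0] & 0x80) {
    // IETF QUIC long header: the connection id starts at the 7th byte; minimum length is 14.
    if (length < 14) {
      return current_worker;
    }
    std::memcpy(&connection_id_snippet, packet + 6, sizeof(connection_id_snippet));
  } else {
    // IETF QUIC short header, or gQUIC: the connection id starts at the 2nd byte.
    std::memcpy(&connection_id_snippet, packet + 1, sizeof(connection_id_snippet));
  }
  return htonl(connection_id_snippet) % concurrency;
}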
+ + // This is a re-implementation of the same algorithm written in BPF in + // ``ActiveQuicListenerFactory::createActiveUdpListener`` + const uint64_t packet_length = data.buffer_->length(); + if (packet_length < 9) { + return worker_index_; + } + + uint8_t first_octet; + data.buffer_->copyOut(0, sizeof(first_octet), &first_octet); + + uint32_t connection_id_snippet; + if (first_octet & 0x80) { + // IETF QUIC long header. + // The connection id starts from 7th byte. + // Minimum length of a long header packet is 14. + if (packet_length < 14) { + return worker_index_; + } + + data.buffer_->copyOut(6, sizeof(connection_id_snippet), &connection_id_snippet); + } else { + // IETF QUIC short header, or gQUIC. + // The connection id starts from 2nd byte. + data.buffer_->copyOut(1, sizeof(connection_id_snippet), &connection_id_snippet); + } + + connection_id_snippet = htonl(connection_id_snippet); + return connection_id_snippet % concurrency_; +} + ActiveQuicListenerFactory::ActiveQuicListenerFactory( const envoy::config::listener::v3::QuicProtocolOptions& config, uint32_t concurrency) : concurrency_(concurrency), enabled_(config.enabled()) { @@ -155,11 +220,12 @@ ActiveQuicListenerFactory::ActiveQuicListenerFactory( quic_config_.SetMaxUnidirectionalStreamsToSend(max_streams); } -Network::ConnectionHandler::ActiveListenerPtr -ActiveQuicListenerFactory::createActiveUdpListener(Network::ConnectionHandler& parent, - Event::Dispatcher& disptacher, - Network::ListenerConfig& config) { +Network::ConnectionHandler::ActiveUdpListenerPtr ActiveQuicListenerFactory::createActiveUdpListener( + uint32_t worker_index, Network::ConnectionHandler& parent, Event::Dispatcher& disptacher, + Network::ListenerConfig& config) { + bool kernel_worker_routing = false; std::unique_ptr options = std::make_unique(); + #if defined(SO_ATTACH_REUSEPORT_CBPF) && defined(__linux__) // This BPF filter reads the 1st word of QUIC connection id in the UDP payload and mods it by the // number of workers to get the socket index in the SO_REUSEPORT socket groups. QUIC packets @@ -194,32 +260,32 @@ ActiveQuicListenerFactory::createActiveUdpListener(Network::ConnectionHandler& p sock_fprog prog; // This option only needs to be applied once to any one of the sockets in SO_REUSEPORT socket // group. One of the listener will be created with this socket option. - absl::call_once(install_bpf_once_, [&]() { - if (concurrency_ > 1) { - prog.len = filter.size(); - prog.filter = filter.data(); - options->push_back(std::make_shared( - envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_ATTACH_REUSEPORT_CBPF, - absl::string_view(reinterpret_cast(&prog), sizeof(prog)))); - } - }); -#else - if (concurrency_ > 1) { -#ifdef __APPLE__ - // Not support multiple listeners in Mac OS unless someone cares. This is because SO_REUSEPORT - // doesn't behave as expected in Mac OS.(#8794) - ENVOY_LOG(error, "Because SO_REUSEPORT doesn't guarantee stable hashing from network 5 tuple " - "to socket in Mac OS. 
QUIC connection is not stable with concurrency > 1"); + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing")) { + absl::call_once(install_bpf_once_, [&]() { + if (concurrency_ > 1) { + prog.len = filter.size(); + prog.filter = filter.data(); + options->push_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_ATTACH_REUSEPORT_CBPF, + absl::string_view(reinterpret_cast(&prog), sizeof(prog)))); + } + }); + + kernel_worker_routing = true; + }; + #else - ENVOY_LOG(warn, "BPF filter is not supported on this platform. QUIC won't support connection " - "migration and NAT port rebinding."); -#endif + if (concurrency_ != 1) { + ENVOY_LOG(warn, "Efficient routing of QUIC packets to the correct worker is not supported or " + "not implemented by Envoy on this platform. QUIC performance may be degraded."); } #endif - return std::make_unique(disptacher, parent, config, quic_config_, - std::move(options), enabled_); -} + return std::make_unique(worker_index, concurrency_, disptacher, parent, + config, quic_config_, std::move(options), + kernel_worker_routing, enabled_); +} // namespace Quic } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index 08b7807dfc4f..878032406ea9 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -18,39 +18,42 @@ namespace Quic { // QUIC specific UdpListenerCallbacks implementation which delegates incoming // packets, write signals and listener errors to QuicDispatcher. -class ActiveQuicListener : public Network::UdpListenerCallbacks, - public Server::ConnectionHandlerImpl::ActiveListenerImplBase, +class ActiveQuicListener : public Envoy::Server::ActiveUdpListenerBase, Logger::Loggable { public: // TODO(bencebeky): Tune this value. 
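// Hedged sketch of the bounded-work pattern applied to buffered CHLOs in the
// active_quic_listener.cc hunks above: handle at most kNumSessionsToCreatePerLoop handshakes
// per event-loop iteration and re-arm a read event when a backlog remains, so a burst of new
// connections cannot monopolise the loop. The queue and the reschedule callback are
// illustrative stand-ins for the QUIC dispatcher and udp_listener_->activateRead().
#include <cstddef>
#include <deque>
#include <functional>

constexpr size_t kPerLoopSessionLimit = 16;

void drainBufferedHandshakes(std::deque<int>& pending, const std::function<void()>& reschedule) {
  size_t processed = 0;
  while (!pending.empty() && processed < kPerLoopSessionLimit) {
    pending.pop_front();  // stand-in for creating one session from a buffered CHLO
    ++processed;
  }
  if (!pending.empty()) {
    reschedule();  // the next event loop iteration picks up the remainder
  }
}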
static const size_t kNumSessionsToCreatePerLoop = 16; - ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, - Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, + ActiveQuicListener(uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, + Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config, + const quic::QuicConfig& quic_config, Network::Socket::OptionsSharedPtr options, + bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled); - ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, - Network::SocketSharedPtr listen_socket, + ActiveQuicListener(uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, + Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, + Network::Socket::OptionsSharedPtr options, bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled); ~ActiveQuicListener() override; void onListenerShutdown(); + uint64_t eventLoopsWithBufferedChlosForTest() const { + return event_loops_with_buffered_chlo_for_test_; + } // Network::UdpListenerCallbacks - void onData(Network::UdpRecvData& data) override; void onReadReady() override; void onWriteReady(const Network::Socket& socket) override; void onReceiveError(Api::IoError::IoErrorCode /*error_code*/) override { // No-op. Quic can't do anything upon listener error. } Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; } + void onDataWorker(Network::UdpRecvData&& data) override; + uint32_t destination(const Network::UdpRecvData& data) const override; // ActiveListenerImplBase - Network::Listener* listener() override { return udp_listener_.get(); } void pauseListening() override; void resumeListening() override; void shutdownListener() override; @@ -58,15 +61,18 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, private: friend class ActiveQuicListenerPeer; - Network::UdpListenerPtr udp_listener_; uint8_t random_seed_[16]; std::unique_ptr crypto_config_; Event::Dispatcher& dispatcher_; quic::QuicVersionManager version_manager_; std::unique_ptr quic_dispatcher_; - Network::Socket& listen_socket_; + const bool kernel_worker_routing_; Runtime::FeatureFlag enabled_; Network::UdpPacketWriter* udp_packet_writer_; + + // The number of runs of the event loop in which at least one CHLO was buffered. + // TODO(ggreenway): Consider making this a published stat, or some variation of this information. + uint64_t event_loops_with_buffered_chlo_for_test_{0}; }; using ActiveQuicListenerPtr = std::unique_ptr; @@ -79,9 +85,9 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory, uint32_t concurrency); // Network::ActiveUdpListenerFactory. 
- Network::ConnectionHandler::ActiveListenerPtr - createActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& disptacher, - Network::ListenerConfig& config) override; + Network::ConnectionHandler::ActiveUdpListenerPtr + createActiveUdpListener(uint32_t worker_index, Network::ConnectionHandler& parent, + Event::Dispatcher& disptacher, Network::ListenerConfig& config) override; bool isTransportConnectionless() const override { return false; } private: diff --git a/source/extensions/quic_listeners/quiche/codec_impl.cc b/source/extensions/quic_listeners/quiche/codec_impl.cc index ab9236b727c9..51fad9e4bc20 100644 --- a/source/extensions/quic_listeners/quiche/codec_impl.cc +++ b/source/extensions/quic_listeners/quiche/codec_impl.cc @@ -51,6 +51,14 @@ void QuicHttpServerConnectionImpl::onUnderlyingConnectionBelowWriteBufferLowWate runWatermarkCallbacksForEachStream(quic_server_session_.stream_map(), false); } +void QuicHttpServerConnectionImpl::shutdownNotice() { + if (quic::VersionUsesHttp3(quic_server_session_.transport_version())) { + quic_server_session_.SendHttp3Shutdown(); + } else { + ENVOY_CONN_LOG(debug, "Shutdown notice is not propagated to QUIC.", quic_server_session_); + } +} + void QuicHttpServerConnectionImpl::goAway() { if (quic::VersionUsesHttp3(quic_server_session_.transport_version())) { quic_server_session_.SendHttp3GoAway(); diff --git a/source/extensions/quic_listeners/quiche/codec_impl.h b/source/extensions/quic_listeners/quiche/codec_impl.h index 58098ecd9ce5..8f655523c3cf 100644 --- a/source/extensions/quic_listeners/quiche/codec_impl.h +++ b/source/extensions/quic_listeners/quiche/codec_impl.h @@ -47,10 +47,7 @@ class QuicHttpServerConnectionImpl : public QuicHttpConnectionImplBase, // Http::Connection void goAway() override; - void shutdownNotice() override { - // TODO(danzh): Add double-GOAWAY support in QUIC. - ENVOY_CONN_LOG(error, "Shutdown notice is not propagated to QUIC.", quic_server_session_); - } + void shutdownNotice() override; void onUnderlyingConnectionAboveWriteBufferHighWatermark() override; void onUnderlyingConnectionBelowWriteBufferLowWatermark() override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h b/source/extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h index c373ed42298f..218545058461 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h @@ -4,15 +4,18 @@ #include "extensions/quic_listeners/quiche/envoy_quic_alarm.h" +#if defined(__GNUC__) #pragma GCC diagnostic push - -// QUICHE allows unused parameters. 
#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + #include "quiche/quic/core/quic_alarm_factory.h" #include "quiche/quic/core/quic_arena_scoped_ptr.h" #include "quiche/quic/core/quic_one_block_arena.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif namespace Envoy { namespace Quic { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc index 830b28e28634..e79b08ad9921 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc @@ -80,7 +80,8 @@ void EnvoyQuicClientConnection::setUpConnectionSocket() { if (connectionSocket()->ioHandle().isOpen()) { file_event_ = connectionSocket()->ioHandle().createFileEvent( dispatcher_, [this](uint32_t events) -> void { onFileEvent(events); }, - Event::FileTriggerType::Edge, Event::FileReadyType::Read | Event::FileReadyType::Write); + Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read | Event::FileReadyType::Write); if (!Network::Socket::applyOptions(connectionSocket()->options(), *connectionSocket(), envoy::config::core::v3::SocketOption::STATE_LISTENING)) { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc index 45f4ff749de6..f049f4e83f34 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc @@ -99,7 +99,7 @@ EnvoyQuicClientSession::CreateIncomingStream(quic::PendingStream* /*pending*/) { bool EnvoyQuicClientSession::hasDataToWrite() { return HasDataToWrite(); } -void EnvoyQuicClientSession::OnOneRttKeysAvailable() { +void EnvoyQuicClientSession::OnTlsHandshakeComplete() { raiseConnectionEvent(Network::ConnectionEvent::Connected); } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h index 4c14a11cf985..c187295fc72e 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h @@ -1,15 +1,17 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" #pragma GCC diagnostic ignored "-Wtype-limits" +#endif #include "quiche/quic/core/http/quic_spdy_client_session.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_client_stream.h" #include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h" @@ -56,7 +58,7 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, void OnCanWrite() override; void OnGoAway(const quic::QuicGoAwayFrame& frame) override; void OnHttp3GoAway(uint64_t stream_id) override; - void OnOneRttKeysAvailable() override; + void OnTlsHandshakeComplete() override; // quic::QuicSpdyClientSessionBase void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc index 39a16309c271..866e35416b0b 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc @@ -1,17 +1,19 @@ #include "extensions/quic_listeners/quiche/envoy_quic_client_stream.h" +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/quic_session.h" #include "quiche/quic/core/http/quic_header_list.h" #include "quiche/spdy/core/spdy_header_block.h" #include "extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_client_session.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h index 761201c16f7c..79003e4621f4 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h @@ -1,13 +1,16 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif + #include "quiche/quic/core/http/quic_spdy_client_stream.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_stream.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_connection.h b/source/extensions/quic_listeners/quiche/envoy_quic_connection.h index 51aebb2dd08f..f4c8589d7118 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_connection.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_connection.h @@ -1,14 +1,16 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/quic_connection.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_connection_helper.h b/source/extensions/quic_listeners/quiche/envoy_quic_connection_helper.h index 6af08fdece7a..2c050e557358 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_connection_helper.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_connection_helper.h @@ -1,17 +1,19 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" #pragma GCC diagnostic ignored "-Wtype-limits" +#endif #include "quiche/quic/core/crypto/quic_random.h" #include "quiche/quic/core/quic_connection.h" #include "quiche/quic/core/quic_simple_buffer_allocator.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/platform/envoy_quic_clock.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc deleted file mode 100644 index fb52d075c374..000000000000 --- a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc +++ /dev/null @@ -1,48 +0,0 @@ -#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" - -namespace Envoy { -namespace Quic { - -void EnvoyQuicCryptoServerStream::EnvoyProcessClientHelloResultCallback::Run( - quic::QuicErrorCode error, const std::string& error_details, - std::unique_ptr message, - std::unique_ptr diversification_nonce, - std::unique_ptr proof_source_details) { - if (parent_ == nullptr) { - return; - } - - if (proof_source_details != nullptr) { - // Retain a copy of the proof source details after getting filter chain. - parent_->details_ = std::make_unique( - static_cast(*proof_source_details)); - } - parent_->done_cb_wrapper_ = nullptr; - parent_ = nullptr; - done_cb_->Run(error, error_details, std::move(message), std::move(diversification_nonce), - std::move(proof_source_details)); -} - -EnvoyQuicCryptoServerStream::~EnvoyQuicCryptoServerStream() { - if (done_cb_wrapper_ != nullptr) { - done_cb_wrapper_->cancel(); - } -} - -void EnvoyQuicCryptoServerStream::ProcessClientHello( - quic::QuicReferenceCountedPointer result, - std::unique_ptr proof_source_details, - std::unique_ptr done_cb) { - auto done_cb_wrapper = - std::make_unique(this, std::move(done_cb)); - ASSERT(done_cb_wrapper_ == nullptr); - done_cb_wrapper_ = done_cb_wrapper.get(); - // Old QUICHE code might call GetProof() earlier and pass in proof source instance here. But this - // is no longer the case, so proof_source_details should always be null. - ASSERT(proof_source_details == nullptr); - quic::QuicCryptoServerStream::ProcessClientHello(result, std::move(proof_source_details), - std::move(done_cb_wrapper)); -} - -} // namespace Quic -} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h deleted file mode 100644 index faaa6254bdf8..000000000000 --- a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h +++ /dev/null @@ -1,89 +0,0 @@ -#pragma once - -#pragma GCC diagnostic push -// QUICHE allows unused parameters. 
-#pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). -#pragma GCC diagnostic ignored "-Winvalid-offsetof" - -#include "quiche/quic/core/quic_crypto_server_stream.h" -#include "quiche/quic/core/tls_server_handshaker.h" - -#pragma GCC diagnostic pop - -#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" - -#include - -namespace Envoy { -namespace Quic { - -class EnvoyCryptoServerStream : protected Logger::Loggable { -public: - virtual ~EnvoyCryptoServerStream() = default; - virtual const EnvoyQuicProofSourceDetails* proofSourceDetails() const = 0; -}; - -// A dedicated stream to do QUIC crypto handshake. -class EnvoyQuicCryptoServerStream : public quic::QuicCryptoServerStream, - public EnvoyCryptoServerStream { -public: - // A wrapper to retain proof source details which has filter chain. - class EnvoyProcessClientHelloResultCallback : public quic::ProcessClientHelloResultCallback { - public: - EnvoyProcessClientHelloResultCallback( - EnvoyQuicCryptoServerStream* parent, - std::unique_ptr done_cb) - : parent_(parent), done_cb_(std::move(done_cb)) {} - - // quic::ProcessClientHelloResultCallback - void Run(quic::QuicErrorCode error, const std::string& error_details, - std::unique_ptr message, - std::unique_ptr diversification_nonce, - std::unique_ptr proof_source_details) override; - - void cancel() { parent_ = nullptr; } - - private: - EnvoyQuicCryptoServerStream* parent_; - std::unique_ptr done_cb_; - }; - - EnvoyQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, - quic::QuicCompressedCertsCache* compressed_certs_cache, - quic::QuicSession* session, - quic::QuicCryptoServerStreamBase::Helper* helper) - : quic::QuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} - - ~EnvoyQuicCryptoServerStream() override; - - // quic::QuicCryptoServerStream - // Override to retain ProofSource::Details. - void ProcessClientHello( - quic::QuicReferenceCountedPointer result, - std::unique_ptr proof_source_details, - std::unique_ptr done_cb) override; - // EnvoyCryptoServerStream - const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } - -private: - EnvoyProcessClientHelloResultCallback* done_cb_wrapper_{nullptr}; - std::unique_ptr details_; -}; - -// A dedicated stream to do TLS1.3 handshake. -class EnvoyQuicTlsServerHandshaker : public quic::TlsServerHandshaker, - public EnvoyCryptoServerStream { -public: - EnvoyQuicTlsServerHandshaker(quic::QuicSession* session, - const quic::QuicCryptoServerConfig& crypto_config) - : quic::TlsServerHandshaker(session, crypto_config) {} - - // EnvoyCryptoServerStream - const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { - return dynamic_cast(proof_source_details()); - } -}; - -} // namespace Quic -} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h index 5921342b84bf..589ff5327706 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h @@ -1,16 +1,18 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" #pragma GCC diagnostic ignored "-Wtype-limits" +#endif #include "quiche/quic/core/quic_dispatcher.h" #include "quiche/quic/core/quic_utils.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h index bb4b736c84c8..560742e44e57 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h @@ -1,14 +1,16 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/quic_packet_writer.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "envoy/network/udp_packet_writer_handler.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc index 220dc4cb1ccf..2c82c04d901d 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc @@ -1,12 +1,15 @@ #include "extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h" +#if defined(__GNUC__) #pragma GCC diagnostic push - -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + #include "quiche/quic/core/quic_data_writer.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h index 149cc50c7d63..b7d76981e519 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h @@ -6,17 +6,21 @@ #include "absl/strings/str_cat.h" +#if defined(__GNUC__) #pragma GCC diagnostic push - -// QUICHE allows unused parameters. 
#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + #include "quiche/quic/core/crypto/proof_source.h" #include "quiche/quic/core/quic_versions.h" #include "quiche/quic/core/crypto/crypto_protocol.h" #include "quiche/quic/platform/api/quic_reference_counted.h" #include "quiche/quic/platform/api/quic_socket_address.h" #include "quiche/common/platform/api/quiche_string_piece.h" + +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "openssl/ssl.h" #include "envoy/network/filter.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc index b7040d1279d7..08ce684a428c 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc @@ -35,7 +35,7 @@ quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( std::unique_ptr cert_view = quic::CertificateView::ParseSingleCertificate(certs[0]); ASSERT(cert_view != nullptr); - for (const absl::string_view config_san : cert_view->subject_alt_name_domains()) { + for (const absl::string_view& config_san : cert_view->subject_alt_name_domains()) { if (Extensions::TransportSockets::Tls::ContextImpl::dnsNameMatch(hostname, config_san)) { return quic::QUIC_SUCCESS; } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h index 02dac5facd42..f45736c7d19a 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h @@ -2,15 +2,17 @@ #include "absl/strings/str_cat.h" +#if defined(__GNUC__) #pragma GCC diagnostic push - -// QUICHE allows unused parameters. 
#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif #include "quiche/quic/core/crypto/proof_verifier.h" #include "quiche/quic/core/quic_versions.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "common/common/logger.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc index bc708dea4866..d827855e60f0 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc @@ -4,7 +4,7 @@ #include "common/common/assert.h" -#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" #include "extensions/quic_listeners/quiche/envoy_quic_server_stream.h" namespace Envoy { @@ -34,17 +34,7 @@ std::unique_ptr EnvoyQuicServerSession::CreateQuicCryptoServerStream( const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache) { - switch (connection()->version().handshake_protocol) { - case quic::PROTOCOL_QUIC_CRYPTO: - return std::make_unique(crypto_config, compressed_certs_cache, - this, stream_helper()); - case quic::PROTOCOL_TLS1_3: - return std::make_unique(this, *crypto_config); - case quic::PROTOCOL_UNSUPPORTED: - PANIC(fmt::format("Unknown handshake protocol: {}", - static_cast(connection()->version().handshake_protocol))); - } - return nullptr; + return CreateCryptoServerStream(crypto_config, compressed_certs_cache, this, stream_helper()); } quic::QuicSpdyStream* EnvoyQuicServerSession::CreateIncomingStream(quic::QuicStreamId id) { @@ -117,15 +107,15 @@ void EnvoyQuicServerSession::SetDefaultEncryptionLevel(quic::EncryptionLevel lev bool EnvoyQuicServerSession::hasDataToWrite() { return HasDataToWrite(); } -void EnvoyQuicServerSession::OnOneRttKeysAvailable() { - quic::QuicServerSessionBase::OnOneRttKeysAvailable(); +void EnvoyQuicServerSession::OnTlsHandshakeComplete() { + quic::QuicServerSessionBase::OnTlsHandshakeComplete(); maybeCreateNetworkFilters(); raiseConnectionEvent(Network::ConnectionEvent::Connected); } void EnvoyQuicServerSession::maybeCreateNetworkFilters() { - const EnvoyQuicProofSourceDetails* proof_source_details = - dynamic_cast(GetCryptoStream())->proofSourceDetails(); + auto proof_source_details = + dynamic_cast(GetCryptoStream()->ProofSourceDetails()); ASSERT(proof_source_details != nullptr, "ProofSource didn't provide ProofSource::Details. No filter chain will be installed."); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h index a50e6fbe8f44..54a88efdea75 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h @@ -1,21 +1,24 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" #pragma GCC diagnostic ignored "-Wtype-limits" +#endif #include "quiche/quic/core/http/quic_server_session_base.h" +#include "quiche/quic/core/quic_crypto_server_stream.h" +#include "quiche/quic/core/tls_server_handshaker.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include #include "extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h" #include "extensions/quic_listeners/quiche/envoy_quic_server_stream.h" -#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" namespace Envoy { namespace Quic { @@ -52,7 +55,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::ConnectionCloseSource source) override; void Initialize() override; void OnCanWrite() override; - void OnOneRttKeysAvailable() override; + void OnTlsHandshakeComplete() override; // quic::QuicSpdySession void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc index feda7c2f2a94..d5e5726bf369 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc @@ -5,17 +5,20 @@ #include +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/http/quic_header_list.h" #include "quiche/quic/core/quic_session.h" #include "quiche/spdy/core/spdy_header_block.h" #include "extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h" + +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_server_session.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h index 060cb237d669..b05a707751ff 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h @@ -1,13 +1,16 @@ #pragma once +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif + #include "quiche/quic/core/http/quic_spdy_server_stream_base.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_stream.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h index 5c321ab749f1..563e0960cbd9 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h @@ -8,16 +8,17 @@ #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" +#if defined(__GNUC__) #pragma GCC diagnostic push - -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/quic_types.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "quiche/quic/core/http/quic_header_list.h" #include "quiche/quic/core/quic_error_codes.h" @@ -51,7 +52,7 @@ template std::unique_ptr spdyHeaderBlockToEnvoyHeaders(const spdy::SpdyHeaderBlock& header_block) { auto headers = T::create(); for (auto entry : header_block) { - // TODO(danzh): Avoid temporary strings and addCopy() with std::string_view. + // TODO(danzh): Avoid temporary strings and addCopy() with string_view. std::string key(entry.first); std::string value(entry.second); headers->addCopy(Http::LowerCaseString(key), value); diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index e7f70f86cb26..f53e07b58a33 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -142,7 +142,6 @@ envoy_cc_library( "abseil_node_hash_map", "abseil_node_hash_set", "abseil_optional", - "googletest", ], tags = ["nofips"], visibility = ["//visibility:public"], @@ -188,7 +187,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/filesystem:directory_lib", "//source/common/filesystem:filesystem_lib", - "//source/common/http:url_utility_lib", + "//source/common/http:utility_lib", ], ) diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index 587e80054c0a..7e9e20a7c192 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -17,16 +17,23 @@ QUICHE_FLAG( bool, http2_reloadable_flag_http2_backend_alpn_failure_error_code, false, "If true, the GFE will return a new ResponseCodeDetails error when ALPN to the backend fails.") -QUICHE_FLAG(bool, http2_reloadable_flag_http2_ip_based_cwnd_exp, false, +QUICHE_FLAG(bool, http2_reloadable_flag_http2_ip_based_cwnd_exp, true, "If true, enable IP address based CWND bootstrapping experiment with different " "bandwidth models and priorities in HTTP2.") +QUICHE_FLAG( + bool, http2_reloadable_flag_http2_load_based_goaway_warning, false, + "If true, load-based connection closures will send a warning GOAWAY before the actual GOAWAY.") + QUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, false, "If true, check whether client meets security requirements during SSL handshake. If " "flag is true and client does not meet security requirements, do not negotiate HTTP/2 " "with client or terminate the session with SPDY_INADEQUATE_SECURITY if HTTP/2 is " "already negotiated. The spec contains both cipher and TLS version requirements.") +QUICHE_FLAG(bool, http2_reloadable_flag_http2_websocket_detection, false, + "If true, uses a HTTP/2-specific method of detecting websocket upgrade requests.") + QUICHE_FLAG(bool, http2_reloadable_flag_permissive_http2_switch, false, "If true, the GFE allows both HTTP/1.0 and HTTP/1.1 versions in HTTP/2 upgrade " "requests/responses.") @@ -39,14 +46,23 @@ QUICHE_FLAG(bool, quic_reloadable_flag_gclb_quic_allow_alia, true, "If gfe2_reloadable_flag_gclb_use_alia is also true, use Alia for GCLB QUIC " "handshakes. 
To be used as a big red button if there's a problem with Alia/QUIC.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_abort_qpack_on_stream_close, false, + "If true, abort async QPACK header decompression in QuicSpdyStream::OnClose().") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_ack_delay_alarm_granularity, false, "When true, ensure the ACK delay is never less than the alarm granularity when ACK " "decimation is enabled.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_silent_idle_timeout, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_missing_connected_checks, false, + "If true, add missing connected checks.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_silent_idle_timeout, true, "If true, when server is silently closing connections due to idle timeout, serialize " "the connection close packets which will be added to time wait list.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_stream_info_to_idle_close_detail, false, + "If true, include stream information in idle timeout connection close detail.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false, "If true, check backend response header for X-Response-Ttl. If it is provided, the " "stream TTL is set. A QUIC stream will be immediately canceled when tries to write " @@ -65,38 +81,33 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_fewer_startup_round_trips, fals "When true, the 1RTT and 2RTT connection options decrease the number of round trips in " "BBRv2 STARTUP without a 25% bandwidth increase to 1 or 2 round trips respectively.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_ignore_inflight_lo, false, - "When true, QUIC's BBRv2 ignores inflight_lo in PROBE_BW.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_improve_adjust_network_parameters, false, - "If true, improve Bbr2Sender::AdjustNetworkParameters by 1) do not inject a bandwidth " - "sample to the bandwidth filter, and 2) re-calculate pacing rate after cwnd updated..") - QUICHE_FLAG( bool, quic_reloadable_flag_quic_bbr2_limit_inflight_hi, false, "When true, the B2HI connection option limits reduction of inflight_hi to (1-Beta)*CWND.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false, - "When true and the BBR9 connection option is present, BBR only considers bandwidth " - "samples app-limited if they're not filling the pipe.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_use_post_inflight_to_detect_queuing, false, + "If true, QUIC BBRv2 will use inflight byte after congestion event to detect queuing " + "during PROBE_UP.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recovery, false, "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's CWND in " "CalculateCongestionWindow()") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_gfe_bandwidth, false, - "If true, bootstrap initial QUIC cwnd by GFE measured bandwidth models.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, "If true, bootstrap initial QUIC cwnd by SPDY priorities.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_check_encryption_level_in_fast_path, false, - "If true, when data is sending in fast path mode in the creator, making sure stream " - "data is sent in the right encryption level.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_cap_large_client_initial_rtt, true, + "If true, cap client suggested initial RTT to 1s if it is longer than 1s.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_coalesced_packet_of_higher_space2, false, 
- "If true, try to coalesce packet of higher space with retransmissions to mitigate RTT " - "inflations.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_clean_up_spdy_session_destructor, false, + "If true, QuicSpdySession's destructor won't need to do cleanup.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_in_on_can_write_with_blocked_writer, + false, + "If true, close connection if writer is still blocked while OnCanWrite is called.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_close_connection_on_serialization_failure, false, + "If true, close connection on packet serialization failures.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, "If true, set burst token to 2 in cwnd bootstrapping experiment.") @@ -104,6 +115,10 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false, "If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_copy_bbr_cwnd_to_bbr2, false, + "If true, when switching from BBR to BBRv2, BBRv2 will use BBR's cwnd as its initial cwnd.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_enable_5rto_blackhole_detection2, true, "If true, default-enable 5RTO blachole detection.") @@ -117,15 +132,9 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false, "If true, use BBRv2 as the default congestion controller. Takes precedence over " "--quic_default_to_bbr.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_determine_serialized_packet_fate_early, false, - "If true, determine a serialized packet's fate before the packet gets serialized.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_server_blackhole_detection, false, "If true, disable blackhole detection on server side.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_25, true, - "If true, disable QUIC version h3-25.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_27, false, "If true, disable QUIC version h3-27.") @@ -144,76 +153,64 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q050, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_t050, false, "If true, disable QUIC version h3-T050.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_dispatcher_legacy_version_encapsulation, false, - "When true, QuicDispatcher supports decapsulation of Legacy Version Encapsulation packets.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_t051, false, + "If true, disable QUIC version h3-T051.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_discard_initial_packet_with_key_dropped, false, + "If true, discard INITIAL packet if the key has been dropped.") QUICHE_FLAG( bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false, "In v44 and above, where STOP_WAITING is never sent, close the connection if it's received.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_do_not_close_stream_again_on_connection_close, false, - "If true, do not try to close stream again if stream fails to be closed upon connection close.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_do_not_use_stream_map, false, - "If true, QUIC subclasses will no longer directly access stream_map for its content.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_pad_chlo, 
true, - "When true, do not pad the QUIC_CRYPTO CHLO message itself. Note that the packet " - "containing the CHLO will still be padded.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_send_max_ack_delay_if_default, true, - "When true, QUIC_CRYPTO versions of QUIC will not send the max ACK delay unless it is " - "configured to a non-default value.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_experiment_at_gfe, false, "If ture, enable GFE-picked loss detection experiment.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_tuner, false, "If true, allow QUIC loss detection tuning to be enabled by connection option ELDT.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_overshooting_detection, false, - "If true, enable overshooting detection when the DTOS connection option is supplied.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t051, false, - "If true, enable QUIC version h3-T051.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_enable_mtu_discovery_at_server, false, + "If true, QUIC will default enable MTU discovery at server, with a target of 1450 bytes.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_extra_padding_bytes, false, - "If true, consider frame expansion when calculating extra padding bytes to meet " - "minimum plaintext packet size required for header protection.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_arm_pto_for_application_data, false, + "If true, do not arm PTO for application data until handshake confirmed.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_neuter_handshake_data, false, - "If true, fix a case where data is marked lost in HANDSHAKE level but HANDSHAKE key " - "gets decrypted later.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bytes_left_for_batch_write, false, + "If true, convert bytes_left_for_batch_write_ to unsigned int.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_packet_number_length, false, - "If true, take the largest acked packet into account when computing the sent packet " - "number length.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_fix_http3_goaway_stream_id, false, + "If true, send the lowest stream ID that can be retried by the client in a GOAWAY frame. 
If " + "false, send the highest received stream ID, which actually should not be retried.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_out_of_order_sending, false, + "If true, fix a potential out of order sending caused by handshake gets confirmed " + "while the coalescer is not empty.") + +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_fix_pto_pending_timer_count, false, + "If true, make sure there is pending timer credit when trying to PTO retransmit any packets.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_print_draft_version, false, - "When true, ParsedQuicVersionToString will print IETF drafts with format draft29 " - "instead of ff00001d.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_undecryptable_packets2, false, + "If true, remove processed undecryptable packets.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_get_stream_information_from_stream_map, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_get_stream_information_from_stream_map, true, "If true, gQUIC will only consult stream_map in QuicSession::GetNumActiveStreams().") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_http3_goaway_new_behavior, false, - "If true, server accepts GOAWAY (draft-28 behavior), client receiving GOAWAY with stream ID " - "that is not client-initiated bidirectional stream ID closes connection with H3_ID_ERROR " - "(draft-28 behavior). Also, receiving a GOAWAY with ID larger than previously received closes " - "connection with H3_ID_ERROR. If false, server receiving GOAWAY closes connection with " - "H3_FRAME_UNEXPECTED (draft-27 behavior), client receiving GOAWAY with stream ID that is not " - "client-initiated bidirectional stream ID closes connection with PROTOCOL_VIOLATION (draft-04 " - "behavior), larger ID than previously received does not trigger connection close.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_ip_based_cwnd_exp, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_give_sent_packet_to_debug_visitor_after_sent, false, + "If true, QUIC connection will pass sent packet information to the debug visitor after " + "a packet is recorded as sent in sent packet manager.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_http3_new_default_urgency_value, false, + "If true, QuicStream::kDefaultUrgency is 3, otherwise 1.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_ip_based_cwnd_exp, true, "If true, enable IP address based CWND bootstrapping experiment with different " "bandwidth models and priorities. 
") @@ -221,13 +218,27 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, "If true, QuicListener::OnSocketIsWritable will always return false, which means there " "will never be a fake EPOLLOUT event in the next epoll iteration.") +QUICHE_FLAG(bool, + quic_reloadable_flag_quic_neuter_initial_packet_in_coalescer_with_initial_key_discarded, + false, "If true, neuter initial packet in the coalescer when discarding initial keys.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false, "If true, transport connection stats doesn't report duplicated experiments for same " "connection.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_silent_close_for_idle_timeout, true, + "If true, always send connection close for idle timeout if NSLC is received.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_only_set_uaid_in_tcs_visitor, false, + "If true, QuicTransportConnectionStatsVisitor::PopulateTransportConnectionStats will " + "be the only place where TCS's uaid field is set.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_only_truncate_long_cids, true, "In IETF QUIC, only truncate long CIDs from the client's Initial, don't modify them.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_preferred_altsvc_version, false, + "When true, we will send a preferred QUIC version at the start of our Alt-Svc list.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false, "If true, QuicProxyDispatcher will write packed_client_address and packed_server_vip " "in TcpProxyHeaderProto.") @@ -241,14 +252,7 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_received_min_ack_delay, false QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_streams_waiting_for_acks, false, - "If true, QuicSession will no longer need streams_waiting_for_acks_.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_unused_ack_options, false, - "Remove ACK_DECIMATION_WITH_REORDERING mode and fast_ack_after_quiescence option in " - "QUIC received packet manager.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_zombie_streams, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_zombie_streams, true, "If true, QuicSession doesn't keep a separate zombie_streams. 
Instead, all streams are " "stored in stream_map_.") @@ -256,35 +260,32 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, fals "If true, require handshake confirmation for QUIC connections, functionally disabling " "0-rtt handshakes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_retransmit_handshake_data_early, false, - "If true, retransmit unacked handshake data before PTO expiry.") - QUICHE_FLAG( - bool, quic_reloadable_flag_quic_revert_mtu_after_two_ptos, false, - "If true, QUIC connection will revert to a previously validated MTU(if exists) after two PTOs.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_save_user_agent_in_quic_session, true, - "If true, save user agent into in QuicSession.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_early_data_header_to_backend, false, - "If true, for 0RTT IETF QUIC requests, GFE will append a Early-Data header and send it " - "to backend.") + bool, quic_reloadable_flag_quic_send_key_update_not_yet_supported, false, + "When true, QUIC+TLS versions will send the key_update_not_yet_supported transport parameter.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_path_response, false, - "If true, send PATH_RESPONSE upon receiving PATH_CHALLENGE regardless of perspective.") + "If true, send PATH_RESPONSE upon receiving PATH_CHALLENGE regardless of perspective. " + "--gfe2_reloadable_flag_quic_start_peer_migration_earlier has to be true before turn " + "on this flag.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, "When the STMP connection option is sent by the client, timestamps in the QUIC ACK " "frame are sent and processed.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, false, "If true, enable server push feature on QUIC.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_simplify_received_packet_manager_ack, false, - "Simplify the ACK code in quic_received_packet_manager.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_set_resumed_ssl_session_early, false, + "If true, set resumed_ssl_session if this is a 0-RTT connection.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_start_peer_migration_earlier, false, + "If true, while reading an IETF quic packet, start peer migration immediately when " + "detecting the existence of any non-probing frame instead of at the end of the packet.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_support_handshake_done_in_t050, true, - "If true, support HANDSHAKE_DONE frame in T050.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_sending_uses_ietf_error_code, false, + "If true, use IETF QUIC application error codes in STOP_SENDING frames. 
If false, use " + "QuicRstStreamErrorCodes.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, "A testonly reloadable flag that will always default to false.") @@ -296,12 +297,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false, "When true, set the initial congestion control window from connection options in " "QuicSentPacketManager rather than TcpCubicSenderBytes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_update_packet_size, false, - "If true, update packet size when the first frame gets queued.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_half_rtt_as_first_pto, false, - "If true, when TLPR copt is used, enable half RTT as first PTO timeout.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false, "If true, use header stage idle list for QUIC connections in GFE.") @@ -327,16 +322,16 @@ QUICHE_FLAG( bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, "If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs") -QUICHE_FLAG(bool, quic_restart_flag_quic_enable_tls_resumption_v4, false, +QUICHE_FLAG( + bool, quic_restart_flag_quic_disable_gws_cwnd_experiment, false, + "If true, X-Google-Gws-Initial-Cwnd-Mode related header sent by GWS becomes no-op for QUIC.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_tls_resumption_v4, true, "If true, enables support for TLS resumption in QUIC.") -QUICHE_FLAG(bool, quic_restart_flag_quic_enable_zero_rtt_for_tls_v2, false, +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_zero_rtt_for_tls_v2, true, "If true, support for IETF QUIC 0-rtt is enabled.") -QUICHE_FLAG(bool, quic_restart_flag_quic_google_transport_param_omit_old, true, - "When true, QUIC+TLS will not send nor parse the old-format Google-specific transport " - "parameters.") - QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, "If true, QUIC offload pacing when using USPS as egress method.") @@ -367,6 +362,9 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false, QUICHE_FLAG(bool, spdy_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, "If true, bootstrap initial QUIC cwnd by SPDY priorities.") +QUICHE_FLAG(bool, spdy_reloadable_flag_quic_clean_up_spdy_session_destructor, false, + "If true, QuicSpdySession's destructor won't need to do cleanup.") + QUICHE_FLAG( bool, spdy_reloadable_flag_spdy_discard_response_body_if_disallowed, false, "If true, SPDY will discard all response body bytes when response code indicates no response " diff --git a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc index 7b26dac94e26..bcbafb56639e 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc @@ -1,4 +1,3 @@ - // NOLINT(namespace-envoy) // This file is part of the QUICHE platform implementation, and is not to be @@ -9,7 +8,7 @@ #include -#include "common/http/url_utility.h" +#include "common/http/utility.h" #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc index c75f99c0bafb..903ee1332d04 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc +++ 
b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc @@ -16,7 +16,7 @@ QuicMemSliceImpl::QuicMemSliceImpl(QuicUniqueBufferPtr buffer, size_t length) : fragment_(std::make_unique( buffer.release(), length, [](const void* p, size_t, const Envoy::Buffer::BufferFragmentImpl*) { - delete static_cast(p); + delete[] static_cast(p); })) { single_slice_buffer_.addBufferFragment(*fragment_); ASSERT(this->length() == length); diff --git a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h index 54c8e87a259d..cf049ab5ac52 100644 --- a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h +++ b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h @@ -82,6 +82,7 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase { StreamInfo::StreamInfo& streamInfo() override { return stream_info_; } const StreamInfo::StreamInfo& streamInfo() const override { return stream_info_; } absl::string_view transportFailureReason() const override { return transport_failure_reason_; } + absl::optional lastRoundTripTime() const override { return {}; } // Network::FilterManagerConnection void rawWrite(Buffer::Instance& data, bool end_stream) override; diff --git a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h index 0516946a11f8..023b0ec5b43f 100644 --- a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h +++ b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h @@ -1,3 +1,4 @@ +#include #include #include "envoy/network/io_handle.h" @@ -28,6 +29,13 @@ class QuicIoHandleWrapper : public Network::IoHandle { } return io_handle_.readv(max_length, slices, num_slice); } + Api::IoCallUint64Result read(Buffer::Instance& buffer, uint64_t max_length) override { + if (closed_) { + return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF), + Network::IoSocketError::deleteIoError)); + } + return io_handle_.read(buffer, max_length); + } Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) override { if (closed_) { return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF), @@ -35,6 +43,13 @@ class QuicIoHandleWrapper : public Network::IoHandle { } return io_handle_.writev(slices, num_slice); } + Api::IoCallUint64Result write(Buffer::Instance& buffer) override { + if (closed_) { + return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF), + Network::IoSocketError::deleteIoError)); + } + return io_handle_.write(buffer); + } Api::IoCallUint64Result sendmsg(const Buffer::RawSlice* slices, uint64_t num_slice, int flags, const Envoy::Network::Address::Ip* self_ip, const Network::Address::Instance& peer_address) override { @@ -105,6 +120,7 @@ class QuicIoHandleWrapper : public Network::IoHandle { return io_handle_.createFileEvent(dispatcher, cb, trigger, events); } Api::SysCallIntResult shutdown(int how) override { return io_handle_.shutdown(how); } + absl::optional lastRoundTripTime() override { return {}; } private: Network::IoHandle& io_handle_; diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc index d4cbc87fc506..cd1b3a938447 100644 --- a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc +++ 
b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc @@ -51,9 +51,7 @@ Api::IoCallUint64Result convertQuicWriteResult(quic::WriteResult quic_result, si // Initialize QuicGsoBatchWriter, set io_handle_ and stats_ UdpGsoBatchWriter::UdpGsoBatchWriter(Network::IoHandle& io_handle, Stats::Scope& scope) - : quic::QuicGsoBatchWriter(std::make_unique(), - io_handle.fdDoNotUse()), - stats_(generateStats(scope)) {} + : quic::QuicGsoBatchWriter(io_handle.fdDoNotUse()), stats_(generateStats(scope)) {} // Do Nothing in the Destructor For now UdpGsoBatchWriter::~UdpGsoBatchWriter() = default; diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h index 477ad8bdcdc7..366dfccb2468 100644 --- a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h @@ -1,11 +1,13 @@ #pragma once +#if !defined(__linux__) +#define UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT 0 +#else +#define UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT 1 + #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" -// QUICHE allows ignored qualifiers #pragma GCC diagnostic ignored "-Wignored-qualifiers" // QUICHE doesn't mark override at QuicBatchWriterBase::SupportsReleaseTime() @@ -122,3 +124,5 @@ class UdpGsoBatchWriterFactory : public Network::UdpPacketWriterFactory { } // namespace Quic } // namespace Envoy + +#endif // defined(__linux__) diff --git a/source/extensions/retry/priority/previous_priorities/previous_priorities.cc b/source/extensions/retry/priority/previous_priorities/previous_priorities.cc index 96dc7c540b25..e01028e7fc3b 100644 --- a/source/extensions/retry/priority/previous_priorities/previous_priorities.cc +++ b/source/extensions/retry/priority/previous_priorities/previous_priorities.cc @@ -51,7 +51,7 @@ bool PreviousPrioritiesRetryPriority::adjustForAttemptedPriorities( // This allows us to fall back to the unmodified priority load when we run out of priorities // instead of failing to route requests. 
if (total_availability == 0) { - for (auto&& excluded_priority : excluded_priorities_) { + for (auto excluded_priority : excluded_priorities_) { excluded_priority = false; } attempted_hosts_.clear(); diff --git a/source/extensions/stat_sinks/wasm/BUILD b/source/extensions/stat_sinks/wasm/BUILD new file mode 100644 index 000000000000..70e156ac4acc --- /dev/null +++ b/source/extensions/stat_sinks/wasm/BUILD @@ -0,0 +1,39 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +# Stats sink for wasm + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "data_plane_agnostic", + status = "alpha", + deps = [ + ":wasm_stat_sink_lib", + "//include/envoy/registry", + "//include/envoy/server:factory_context_interface", + "//include/envoy/server:instance_interface", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/stat_sinks:well_known_names", + "//source/server:configuration_lib", + "@envoy_api//envoy/extensions/stat_sinks/wasm/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "wasm_stat_sink_lib", + hdrs = ["wasm_stat_sink_impl.h"], + deps = [ + "//include/envoy/stats:stats_interface", + "//source/extensions/common/wasm:wasm_lib", + ], +) diff --git a/source/extensions/stat_sinks/wasm/config.cc b/source/extensions/stat_sinks/wasm/config.cc new file mode 100644 index 000000000000..ba94937a3b3a --- /dev/null +++ b/source/extensions/stat_sinks/wasm/config.cc @@ -0,0 +1,71 @@ +#include "extensions/stat_sinks/wasm/config.h" + +#include + +#include "envoy/extensions/stat_sinks/wasm/v3/wasm.pb.validate.h" +#include "envoy/registry/registry.h" +#include "envoy/server/factory_context.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/stat_sinks/wasm/wasm_stat_sink_impl.h" +#include "extensions/stat_sinks/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace StatSinks { +namespace Wasm { + +Stats::SinkPtr +WasmSinkFactory::createStatsSink(const Protobuf::Message& proto_config, + Server::Configuration::ServerFactoryContext& context) { + const auto& config = + MessageUtil::downcastAndValidate( + proto_config, context.messageValidationContext().staticValidationVisitor()); + + auto wasm_sink = std::make_unique(config.config().root_id(), nullptr); + + auto plugin = std::make_shared( + config.config().name(), config.config().root_id(), config.config().vm_config().vm_id(), + config.config().vm_config().runtime(), + Common::Wasm::anyToBytes(config.config().configuration()), config.config().fail_open(), + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, context.localInfo(), nullptr); + + auto callback = [&wasm_sink, &context, plugin](Common::Wasm::WasmHandleSharedPtr base_wasm) { + if (!base_wasm) { + if (plugin->fail_open_) { + ENVOY_LOG(error, "Unable to create Wasm Stat Sink {}", plugin->name_); + } else { + ENVOY_LOG(critical, "Unable to create Wasm Stat Sink {}", plugin->name_); + } + return; + } + wasm_sink->setSingleton( + Common::Wasm::getOrCreateThreadLocalWasm(base_wasm, plugin, context.dispatcher())); + }; + + if (!Common::Wasm::createWasm( + config.config().vm_config(), plugin, context.scope().createScope(""), + context.clusterManager(), context.initManager(), context.dispatcher(), context.api(), + context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { + throw Common::Wasm::WasmException( + fmt::format("Unable to create Wasm 
Stat Sink {}", plugin->name_)); + } + + return wasm_sink; +} + +ProtobufTypes::MessagePtr WasmSinkFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +std::string WasmSinkFactory::name() const { return StatsSinkNames::get().Wasm; } + +/** + * Static registration for the wasm access log. @see RegisterFactory. + */ +REGISTER_FACTORY(WasmSinkFactory, Server::Configuration::StatsSinkFactory); + +} // namespace Wasm +} // namespace StatSinks +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/stat_sinks/wasm/config.h b/source/extensions/stat_sinks/wasm/config.h new file mode 100644 index 000000000000..4fbd3d72937a --- /dev/null +++ b/source/extensions/stat_sinks/wasm/config.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +#include "envoy/server/factory_context.h" +#include "envoy/server/instance.h" + +#include "common/config/datasource.h" + +#include "server/configuration_impl.h" + +namespace Envoy { +namespace Extensions { +namespace StatSinks { +namespace Wasm { + +/** + * Config registration for the Wasm statsd sink. @see StatSinkFactory. + */ +class WasmSinkFactory : Logger::Loggable, + public Server::Configuration::StatsSinkFactory { +public: + // StatsSinkFactory + Stats::SinkPtr createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + std::string name() const override; + +private: + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; +}; + +} // namespace Wasm +} // namespace StatSinks +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h b/source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h new file mode 100644 index 000000000000..5f2a9b6e13f9 --- /dev/null +++ b/source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h @@ -0,0 +1,41 @@ +#pragma once + +#include "envoy/stats/sink.h" + +#include "extensions/common/wasm/wasm.h" + +namespace Envoy { +namespace Extensions { +namespace StatSinks { +namespace Wasm { + +using Envoy::Extensions::Common::Wasm::WasmHandle; + +class WasmStatSink : public Stats::Sink { +public: + WasmStatSink(absl::string_view root_id, Common::Wasm::WasmHandleSharedPtr singleton) + : root_id_(root_id), singleton_(std::move(singleton)) {} + + void flush(Stats::MetricSnapshot& snapshot) override { + singleton_->wasm()->onStatsUpdate(root_id_, snapshot); + } + + void setSingleton(Common::Wasm::WasmHandleSharedPtr singleton) { + ASSERT(singleton != nullptr); + singleton_ = std::move(singleton); + } + + void onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) override { + (void)histogram; + (void)value; + } + +private: + std::string root_id_; + Common::Wasm::WasmHandleSharedPtr singleton_; +}; + +} // namespace Wasm +} // namespace StatSinks +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/stat_sinks/well_known_names.h b/source/extensions/stat_sinks/well_known_names.h index afb16a0a4baf..a1f5c7965d03 100644 --- a/source/extensions/stat_sinks/well_known_names.h +++ b/source/extensions/stat_sinks/well_known_names.h @@ -22,6 +22,8 @@ class StatsSinkNameValues { const std::string MetricsService = "envoy.stat_sinks.metrics_service"; // Hystrix sink const std::string Hystrix = "envoy.stat_sinks.hystrix"; + // WebAssembly sink + const std::string Wasm = "envoy.stat_sinks.wasm"; }; using StatsSinkNames = ConstSingleton; diff --git 
a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index cad01b83bb83..18bf1e828dcc 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -48,10 +48,11 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { // opentracing::HTTPHeadersReader opentracing::expected LookupKey(opentracing::string_view key) const override { - const Http::HeaderEntry* entry = request_headers_.get(Http::LowerCaseString{key}); - if (entry != nullptr) { - return opentracing::string_view{entry->value().getStringView().data(), - entry->value().getStringView().length()}; + const auto entry = request_headers_.get(Http::LowerCaseString{key}); + if (!entry.empty()) { + // This is an implicitly untrusted header, so only the first value is used. + return opentracing::string_view{entry[0]->value().getStringView().data(), + entry[0]->value().getStringView().length()}; } else { return opentracing::make_unexpected(opentracing::key_not_found_error); } diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.cc b/source/extensions/tracers/datadog/datadog_tracer_impl.cc index 3635e490d659..8ccb6fbc4595 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.cc +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.cc @@ -95,9 +95,7 @@ void TraceReporter::flushTraces() { message->headers().setReferenceKey(lower_case_headers_.at(h.first), h.second); } - Buffer::InstancePtr body(new Buffer::OwnedImpl()); - body->add(encoder_->payload()); - message->body() = std::move(body); + message->body().add(encoder_->payload()); ENVOY_LOG(debug, "submitting {} trace(s) to {} with payload size {}", pendingTraces, encoder_->path(), encoder_->payload().size()); @@ -138,7 +136,7 @@ void TraceReporter::onSuccess(const Http::AsyncClient::Request& request, } else { ENVOY_LOG(debug, "traces successfully submitted to datadog agent"); driver_.tracerStats().reports_sent_.inc(); - encoder_->handleResponse(http_response->body()->toString()); + encoder_->handleResponse(http_response->bodyAsString()); } } diff --git a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc index e66a3c355845..4c944ea3a025 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc +++ b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc @@ -21,17 +21,16 @@ namespace Extensions { namespace Tracers { namespace Lightstep { -static Buffer::InstancePtr serializeGrpcMessage(const lightstep::BufferChain& buffer_chain) { - Buffer::InstancePtr body(new Buffer::OwnedImpl()); +static void serializeGrpcMessage(const lightstep::BufferChain& buffer_chain, + Buffer::Instance& body) { auto size = buffer_chain.num_bytes(); Buffer::RawSlice iovec; - body->reserve(size, &iovec, 1); + body.reserve(size, &iovec, 1); ASSERT(iovec.len_ >= size); iovec.len_ = size; buffer_chain.CopyOut(static_cast(iovec.mem_), size); - body->commit(&iovec, 1); - Grpc::Common::prependGrpcFrameHeader(*body); - return body; + body.commit(&iovec, 1); + Grpc::Common::prependGrpcFrameHeader(body); } static std::vector @@ -127,7 +126,7 @@ void LightStepDriver::LightStepTransporter::Send(std::unique_ptr(timeout)); - message->body() = serializeGrpcMessage(*report); + serializeGrpcMessage(*report, message->body()); if (collector_cluster_.exists()) { active_report_ = std::move(report); diff --git 
a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc index 41bd08e03d44..50b5fdac8039 100644 --- a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc +++ b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc @@ -92,31 +92,34 @@ startSpanHelper(const std::string& name, bool traced, const Http::RequestHeaderM bool found = false; switch (incoming) { case OpenCensusConfig::TRACE_CONTEXT: { - const Http::HeaderEntry* header = request_headers.get(Constants::get().TRACEPARENT); - if (header != nullptr) { + const auto header = request_headers.get(Constants::get().TRACEPARENT); + if (!header.empty()) { found = true; + // This is an implicitly untrusted header, so only the first value is used. parent_ctx = ::opencensus::trace::propagation::FromTraceParentHeader( - header->value().getStringView()); + header[0]->value().getStringView()); } break; } case OpenCensusConfig::GRPC_TRACE_BIN: { - const Http::HeaderEntry* header = request_headers.get(Constants::get().GRPC_TRACE_BIN); - if (header != nullptr) { + const auto header = request_headers.get(Constants::get().GRPC_TRACE_BIN); + if (!header.empty()) { found = true; + // This is an implicitly untrusted header, so only the first value is used. parent_ctx = ::opencensus::trace::propagation::FromGrpcTraceBinHeader( - Base64::decodeWithoutPadding(header->value().getStringView())); + Base64::decodeWithoutPadding(header[0]->value().getStringView())); } break; } case OpenCensusConfig::CLOUD_TRACE_CONTEXT: { - const Http::HeaderEntry* header = request_headers.get(Constants::get().X_CLOUD_TRACE_CONTEXT); - if (header != nullptr) { + const auto header = request_headers.get(Constants::get().X_CLOUD_TRACE_CONTEXT); + if (!header.empty()) { found = true; + // This is an implicitly untrusted header, so only the first value is used. parent_ctx = ::opencensus::trace::propagation::FromCloudTraceContextHeader( - header->value().getStringView()); + header[0]->value().getStringView()); } break; } @@ -126,23 +129,27 @@ startSpanHelper(const std::string& name, bool traced, const Http::RequestHeaderM absl::string_view b3_span_id; absl::string_view b3_sampled; absl::string_view b3_flags; - const Http::HeaderEntry* h_b3_trace_id = request_headers.get(Constants::get().X_B3_TRACEID); - if (h_b3_trace_id != nullptr) { - b3_trace_id = h_b3_trace_id->value().getStringView(); + const auto h_b3_trace_id = request_headers.get(Constants::get().X_B3_TRACEID); + if (!h_b3_trace_id.empty()) { + // This is an implicitly untrusted header, so only the first value is used. + b3_trace_id = h_b3_trace_id[0]->value().getStringView(); } - const Http::HeaderEntry* h_b3_span_id = request_headers.get(Constants::get().X_B3_SPANID); - if (h_b3_span_id != nullptr) { - b3_span_id = h_b3_span_id->value().getStringView(); + const auto h_b3_span_id = request_headers.get(Constants::get().X_B3_SPANID); + if (!h_b3_span_id.empty()) { + // This is an implicitly untrusted header, so only the first value is used. + b3_span_id = h_b3_span_id[0]->value().getStringView(); } - const Http::HeaderEntry* h_b3_sampled = request_headers.get(Constants::get().X_B3_SAMPLED); - if (h_b3_sampled != nullptr) { - b3_sampled = h_b3_sampled->value().getStringView(); + const auto h_b3_sampled = request_headers.get(Constants::get().X_B3_SAMPLED); + if (!h_b3_sampled.empty()) { + // This is an implicitly untrusted header, so only the first value is used. 
+ b3_sampled = h_b3_sampled[0]->value().getStringView(); } - const Http::HeaderEntry* h_b3_flags = request_headers.get(Constants::get().X_B3_FLAGS); - if (h_b3_flags != nullptr) { - b3_flags = h_b3_flags->value().getStringView(); + const auto h_b3_flags = request_headers.get(Constants::get().X_B3_FLAGS); + if (!h_b3_flags.empty()) { + // This is an implicitly untrusted header, so only the first value is used. + b3_flags = h_b3_flags[0]->value().getStringView(); } - if (h_b3_trace_id != nullptr && h_b3_span_id != nullptr) { + if (!h_b3_trace_id.empty() && !h_b3_span_id.empty()) { found = true; parent_ctx = ::opencensus::trace::propagation::FromB3Headers(b3_trace_id, b3_span_id, b3_sampled, b3_flags); diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index 57f6c9e869f4..77e240c783a2 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -47,16 +47,16 @@ Driver::Driver(const XRayConfiguration& config, ENVOY_LOG(debug, "send X-Ray generated segments to daemon address on {}", daemon_endpoint); sampling_strategy_ = std::make_unique( - xray_config_.sampling_rules_, context.serverFactoryContext().random(), + xray_config_.sampling_rules_, context.serverFactoryContext().api().randomGenerator(), context.serverFactoryContext().timeSource()); tls_slot_ptr_->set([this, daemon_endpoint, &context](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { DaemonBrokerPtr broker = std::make_unique(daemon_endpoint); - TracerPtr tracer = std::make_unique(xray_config_.segment_name_, xray_config_.origin_, - xray_config_.aws_metadata_, std::move(broker), - context.serverFactoryContext().timeSource(), - context.serverFactoryContext().random()); + TracerPtr tracer = std::make_unique( + xray_config_.segment_name_, xray_config_.origin_, xray_config_.aws_metadata_, + std::move(broker), context.serverFactoryContext().timeSource(), + context.serverFactoryContext().api().randomGenerator()); return std::make_shared(std::move(tracer), *this); }); } @@ -77,11 +77,12 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, UNREFERENCED_PARAMETER(config); // TODO(marcomagdy) - how do we factor this into the logic above UNREFERENCED_PARAMETER(tracing_decision); - const auto* header = request_headers.get(Http::LowerCaseString(XRayTraceHeader)); + const auto header = request_headers.get(Http::LowerCaseString(XRayTraceHeader)); absl::optional should_trace; XRayHeader xray_header; - if (header) { - Http::LowerCaseString lowered_header_value{std::string(header->value().getStringView())}; + if (!header.empty()) { + // This is an implicitly untrusted header, so only the first value is used. + Http::LowerCaseString lowered_header_value{std::string(header[0]->value().getStringView())}; xray_header = parseXRayHeader(lowered_header_value); // if the sample_decision in the x-ray header is unknown then we try to make a decision based // on the sampling strategy @@ -107,7 +108,8 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, auto* tracer = tls_slot_ptr_->getTyped().tracer_.get(); if (should_trace.value()) { return tracer->startSpan(operation_name, start_time, - header ? absl::optional(xray_header) : absl::nullopt); + !header.empty() ? absl::optional(xray_header) + : absl::nullopt); } // instead of returning nullptr, we return a Span that is marked as not-sampled. 
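// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper, not part of this patch): the
// tracer hunks above all follow the same pattern, because HeaderMap::get()
// now returns every value recorded for a header name instead of a single
// HeaderEntry*. Call sites therefore check the result for emptiness and read
// only the first value, since these request headers are implicitly untrusted.
// The helper below simply factors that pattern out; the patch itself keeps
// the checks inline at each call site.
// ---------------------------------------------------------------------------
#include "envoy/http/header_map.h"

#include "absl/strings/string_view.h"
#include "absl/types/optional.h"

namespace {

// Returns the first value for `name`, or nullopt if the header is absent.
absl::optional<absl::string_view>
firstHeaderValue(const Envoy::Http::RequestHeaderMap& headers,
                 const Envoy::Http::LowerCaseString& name) {
  const auto result = headers.get(name);
  if (result.empty()) {
    return absl::nullopt;
  }
  // Only the first value is used; the header may be multi-valued but is
  // implicitly untrusted, matching the comments added throughout this patch.
  return result[0]->value().getStringView();
}

} // namespace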
diff --git a/source/extensions/tracers/zipkin/config.cc b/source/extensions/tracers/zipkin/config.cc index 36d1f38fae8e..f2171dee9b72 100644 --- a/source/extensions/tracers/zipkin/config.cc +++ b/source/extensions/tracers/zipkin/config.cc @@ -23,7 +23,8 @@ Tracing::HttpTracerSharedPtr ZipkinTracerFactory::createHttpTracerTyped( proto_config, context.serverFactoryContext().clusterManager(), context.serverFactoryContext().scope(), context.serverFactoryContext().threadLocal(), context.serverFactoryContext().runtime(), context.serverFactoryContext().localInfo(), - context.serverFactoryContext().random(), context.serverFactoryContext().timeSource()); + context.serverFactoryContext().api().randomGenerator(), + context.serverFactoryContext().timeSource()); return std::make_shared(std::move(zipkin_driver), context.serverFactoryContext().localInfo()); diff --git a/source/extensions/tracers/zipkin/span_context_extractor.cc b/source/extensions/tracers/zipkin/span_context_extractor.cc index 0f48986f4437..a9b67503ffd1 100644 --- a/source/extensions/tracers/zipkin/span_context_extractor.cc +++ b/source/extensions/tracers/zipkin/span_context_extractor.cc @@ -37,8 +37,9 @@ SpanContextExtractor::~SpanContextExtractor() = default; bool SpanContextExtractor::extractSampled(const Tracing::Decision tracing_decision) { bool sampled(false); auto b3_header_entry = request_headers_.get(ZipkinCoreConstants::get().B3); - if (b3_header_entry) { - absl::string_view b3 = b3_header_entry->value().getStringView(); + if (!b3_header_entry.empty()) { + // This is an implicitly untrusted header, so only the first value is used. + absl::string_view b3 = b3_header_entry[0]->value().getStringView(); int sampled_pos = 0; switch (b3.length()) { case 1: @@ -62,18 +63,19 @@ bool SpanContextExtractor::extractSampled(const Tracing::Decision tracing_decisi } auto x_b3_sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED); - if (!x_b3_sampled_entry) { + if (x_b3_sampled_entry.empty()) { return tracing_decision.traced; } // Checking if sampled flag has been specified. Also checking for 'true' value, as some old // zipkin tracers may still use that value, although should be 0 or 1. - absl::string_view xb3_sampled = x_b3_sampled_entry->value().getStringView(); + // This is an implicitly untrusted header, so only the first value is used. + absl::string_view xb3_sampled = x_b3_sampled_entry[0]->value().getStringView(); sampled = xb3_sampled == SAMPLED || xb3_sampled == "true"; return sampled; } std::pair SpanContextExtractor::extractSpanContext(bool is_sampled) { - if (request_headers_.get(ZipkinCoreConstants::get().B3)) { + if (!request_headers_.get(ZipkinCoreConstants::get().B3).empty()) { return extractSpanContextFromB3SingleFormat(is_sampled); } uint64_t trace_id(0); @@ -83,11 +85,12 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa auto b3_trace_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID); auto b3_span_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID); - if (b3_span_id_entry && b3_trace_id_entry) { + if (!b3_span_id_entry.empty() && !b3_trace_id_entry.empty()) { // Extract trace id - which can either be 128 or 64 bit. For 128 bit, // it needs to be divided into two 64 bit numbers (high and low). - const std::string tid(b3_trace_id_entry->value().getStringView()); - if (b3_trace_id_entry->value().size() == 32) { + // This is an implicitly untrusted header, so only the first value is used. 
+ const std::string tid(b3_trace_id_entry[0]->value().getStringView()); + if (b3_trace_id_entry[0]->value().size() == 32) { const std::string high_tid = tid.substr(0, 16); const std::string low_tid = tid.substr(16, 16); if (!StringUtil::atoull(high_tid.c_str(), trace_id_high, 16) || @@ -99,14 +102,16 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa throw ExtractorException(absl::StrCat("Invalid trace_id ", tid.c_str())); } - const std::string spid(b3_span_id_entry->value().getStringView()); + // This is an implicitly untrusted header, so only the first value is used. + const std::string spid(b3_span_id_entry[0]->value().getStringView()); if (!StringUtil::atoull(spid.c_str(), span_id, 16)) { throw ExtractorException(absl::StrCat("Invalid span id ", spid.c_str())); } auto b3_parent_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID); - if (b3_parent_id_entry && !b3_parent_id_entry->value().empty()) { - const std::string pspid(b3_parent_id_entry->value().getStringView()); + if (!b3_parent_id_entry.empty() && !b3_parent_id_entry[0]->value().empty()) { + // This is an implicitly untrusted header, so only the first value is used. + const std::string pspid(b3_parent_id_entry[0]->value().getStringView()); if (!StringUtil::atoull(pspid.c_str(), parent_id, 16)) { throw ExtractorException(absl::StrCat("Invalid parent span id ", pspid.c_str())); } @@ -121,8 +126,9 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa std::pair SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { auto b3_head_entry = request_headers_.get(ZipkinCoreConstants::get().B3); - ASSERT(b3_head_entry); - const std::string b3(b3_head_entry->value().getStringView()); + ASSERT(!b3_head_entry.empty()); + // This is an implicitly untrusted header, so only the first value is used. 
+ const std::string b3(b3_head_entry[0]->value().getStringView()); if (!b3.length()) { throw ExtractorException("Invalid input: empty"); } diff --git a/source/extensions/tracers/zipkin/tracer.cc b/source/extensions/tracers/zipkin/tracer.cc index f334246d4c51..90f1f1d72f49 100644 --- a/source/extensions/tracers/zipkin/tracer.cc +++ b/source/extensions/tracers/zipkin/tracer.cc @@ -66,6 +66,14 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span span_ptr->setName(span_name); + // Set the span's kind (client or server) + if (config.operationName() == Tracing::OperationName::Egress) { + annotation.setValue(CLIENT_SEND); + } else { + annotation.setValue(SERVER_RECV); + } + + // Set the span's id and parent id if (config.operationName() == Tracing::OperationName::Egress || !shared_span_context_) { // We need to create a new span that is a child of the previous span; no shared context @@ -76,9 +84,6 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span // Set the parent id to the id of the previous span span_ptr->setParentId(previous_context.id()); - // Set the CS annotation value - annotation.setValue(CLIENT_SEND); - // Set the timestamp globally for the span span_ptr->setTimestamp(timestamp_micro); } else if (config.operationName() == Tracing::OperationName::Ingress) { @@ -89,9 +94,6 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span if (previous_context.parentId()) { span_ptr->setParentId(previous_context.parentId()); } - - // Set the SR annotation value - annotation.setValue(SERVER_RECV); } else { return span_ptr; // return an empty span } diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 8cf176d1fabc..19255b047c64 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -186,9 +186,7 @@ void ReporterImpl::flushSpans() { ? Http::Headers::get().ContentTypeValues.Protobuf : Http::Headers::get().ContentTypeValues.Json); - Buffer::InstancePtr body = std::make_unique(); - body->add(request_body); - message->body() = std::move(body); + message->body().add(request_body); const uint64_t timeout = driver_.runtime().snapshot().getInteger("tracing.zipkin.request_timeout", 5000U); diff --git a/source/extensions/transport_sockets/alts/grpc_tsi.h b/source/extensions/transport_sockets/alts/grpc_tsi.h index d07cd8d57fb2..1acecac58d4d 100644 --- a/source/extensions/transport_sockets/alts/grpc_tsi.h +++ b/source/extensions/transport_sockets/alts/grpc_tsi.h @@ -4,7 +4,7 @@ // compile with -Werror, ignoring those compiler warning since we don't have // control on those source codes. This works with GCC and Clang. 
-#ifndef _MSC_VER +#if defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wold-style-cast" @@ -18,7 +18,7 @@ #include "src/core/tsi/transport_security_grpc.h" #include "src/core/tsi/transport_security_interface.h" -#ifndef _MSC_VER +#if defined(__GNUC__) #pragma GCC diagnostic pop #endif diff --git a/source/extensions/transport_sockets/proxy_protocol/BUILD b/source/extensions/transport_sockets/proxy_protocol/BUILD index 251721adfbb4..397626c3c6ae 100644 --- a/source/extensions/transport_sockets/proxy_protocol/BUILD +++ b/source/extensions/transport_sockets/proxy_protocol/BUILD @@ -1,6 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", + "envoy_cc_library", "envoy_extension_package", ) @@ -9,11 +10,25 @@ licenses(["notice"]) # Apache 2 envoy_extension_package() envoy_cc_extension( + name = "upstream_config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", # header generated in Envoy, so can't be faked + deps = [ + ":upstream_proxy_protocol", + "//include/envoy/network:transport_socket_interface", + "//include/envoy/registry", + "//include/envoy/server:transport_socket_config_interface", + "//source/common/config:utility_lib", + "//source/extensions/transport_sockets:well_known_names", + "@envoy_api//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( name = "upstream_proxy_protocol", srcs = ["proxy_protocol.cc"], hdrs = ["proxy_protocol.h"], - security_posture = "robust_to_untrusted_downstream", - undocumented = True, deps = [ "//include/envoy/network:connection_interface", "//include/envoy/network:transport_socket_interface", diff --git a/source/extensions/transport_sockets/proxy_protocol/config.cc b/source/extensions/transport_sockets/proxy_protocol/config.cc new file mode 100644 index 000000000000..e037263c8a4a --- /dev/null +++ b/source/extensions/transport_sockets/proxy_protocol/config.cc @@ -0,0 +1,46 @@ +#include "extensions/transport_sockets/proxy_protocol/config.h" + +#include "envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.pb.h" +#include "envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "common/config/utility.h" + +#include "extensions/transport_sockets/proxy_protocol/proxy_protocol.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { + +Network::TransportSocketFactoryPtr +UpstreamProxyProtocolSocketConfigFactory::createTransportSocketFactory( + const Protobuf::Message& message, + Server::Configuration::TransportSocketFactoryContext& context) { + const auto& outer_config = + MessageUtil::downcastAndValidate( + message, context.messageValidationVisitor()); + auto& inner_config_factory = Config::Utility::getAndCheckFactory< + Server::Configuration::UpstreamTransportSocketConfigFactory>(outer_config.transport_socket()); + ProtobufTypes::MessagePtr inner_factory_config = Config::Utility::translateToFactoryConfig( + outer_config.transport_socket(), context.messageValidationVisitor(), inner_config_factory); + auto inner_transport_factory = + inner_config_factory.createTransportSocketFactory(*inner_factory_config, context); + return std::make_unique(std::move(inner_transport_factory), + outer_config.config()); +} + +ProtobufTypes::MessagePtr UpstreamProxyProtocolSocketConfigFactory::createEmptyConfigProto() 
{ + return std::make_unique< + envoy::extensions::transport_sockets::proxy_protocol::v3::ProxyProtocolUpstreamTransport>(); + ; +} + +REGISTER_FACTORY(UpstreamProxyProtocolSocketConfigFactory, + Server::Configuration::UpstreamTransportSocketConfigFactory); + +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/proxy_protocol/config.h b/source/extensions/transport_sockets/proxy_protocol/config.h new file mode 100644 index 000000000000..9ab05d870b40 --- /dev/null +++ b/source/extensions/transport_sockets/proxy_protocol/config.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/server/transport_socket_config.h" + +#include "extensions/transport_sockets/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { + +/** + * Config registration for the proxy protocol wrapper for transport socket factory. + * @see TransportSocketConfigFactory. + */ +class UpstreamProxyProtocolSocketConfigFactory + : public Server::Configuration::UpstreamTransportSocketConfigFactory { +public: + std::string name() const override { return TransportSocketNames::get().UpstreamProxyProtocol; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + Network::TransportSocketFactoryPtr createTransportSocketFactory( + const Protobuf::Message& config, + Server::Configuration::TransportSocketFactoryContext& context) override; +}; + +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc index d1427b7aaa9d..3d4f716421e7 100644 --- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc @@ -10,6 +10,7 @@ #include "extensions/common/proxy_protocol/proxy_protocol_header.h" +using envoy::config::core::v3::ProxyProtocolConfig; using envoy::config::core::v3::ProxyProtocolConfig_Version; namespace Envoy { @@ -26,7 +27,6 @@ void UpstreamProxyProtocolSocket::setTransportSocketCallbacks( Network::TransportSocketCallbacks& callbacks) { transport_socket_->setTransportSocketCallbacks(callbacks); callbacks_ = &callbacks; - generateHeader(); } Network::IoResult UpstreamProxyProtocolSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { @@ -51,7 +51,7 @@ void UpstreamProxyProtocolSocket::generateHeader() { } void UpstreamProxyProtocolSocket::generateHeaderV1() { - // Default to local addresses + // Default to local addresses (used if no downstream connection exists e.g. 
health checks) auto src_addr = callbacks_->connection().localAddress(); auto dst_addr = callbacks_->connection().remoteAddress(); @@ -82,7 +82,7 @@ Network::IoResult UpstreamProxyProtocolSocket::writeHeader() { break; } - Api::IoCallUint64Result result = header_buffer_.write(callbacks_->ioHandle()); + Api::IoCallUint64Result result = callbacks_->ioHandle().write(header_buffer_); if (result.ok()) { ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); @@ -100,6 +100,29 @@ Network::IoResult UpstreamProxyProtocolSocket::writeHeader() { return {action, bytes_written, false}; } +void UpstreamProxyProtocolSocket::onConnected() { + generateHeader(); + transport_socket_->onConnected(); +} + +UpstreamProxyProtocolSocketFactory::UpstreamProxyProtocolSocketFactory( + Network::TransportSocketFactoryPtr transport_socket_factory, ProxyProtocolConfig config) + : transport_socket_factory_(std::move(transport_socket_factory)), config_(config) {} + +Network::TransportSocketPtr UpstreamProxyProtocolSocketFactory::createTransportSocket( + Network::TransportSocketOptionsSharedPtr options) const { + auto inner_socket = transport_socket_factory_->createTransportSocket(options); + if (inner_socket == nullptr) { + return nullptr; + } + return std::make_unique(std::move(inner_socket), options, + config_.version()); +} + +bool UpstreamProxyProtocolSocketFactory::implementsSecureTransport() const { + return transport_socket_factory_->implementsSecureTransport(); +} + } // namespace ProxyProtocol } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h index 3b0996e20882..4a191ebf539d 100644 --- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h @@ -9,6 +9,7 @@ #include "extensions/transport_sockets/common/passthrough.h" +using envoy::config::core::v3::ProxyProtocolConfig; using envoy::config::core::v3::ProxyProtocolConfig_Version; namespace Envoy { @@ -25,6 +26,7 @@ class UpstreamProxyProtocolSocket : public TransportSockets::PassthroughSocket, void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; + void onConnected() override; private: void generateHeader(); @@ -38,6 +40,21 @@ class UpstreamProxyProtocolSocket : public TransportSockets::PassthroughSocket, ProxyProtocolConfig_Version version_{ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1}; }; +class UpstreamProxyProtocolSocketFactory : public Network::TransportSocketFactory { +public: + UpstreamProxyProtocolSocketFactory(Network::TransportSocketFactoryPtr transport_socket_factory, + ProxyProtocolConfig config); + + // Network::TransportSocketFactory + Network::TransportSocketPtr + createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; + bool implementsSecureTransport() const override; + +private: + Network::TransportSocketFactoryPtr transport_socket_factory_; + ProxyProtocolConfig config_; +}; + } // namespace ProxyProtocol } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index aabca8fd0581..860b56203c30 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -161,6 
+161,7 @@ envoy_cc_library( "//source/common/runtime:runtime_features_lib", "//source/common/stats:symbol_table_lib", "//source/common/stats:utility_lib", + "//source/extensions/transport_sockets/tls/ocsp:ocsp_lib", "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", diff --git a/source/extensions/transport_sockets/tls/context_config_impl.cc b/source/extensions/transport_sockets/tls/context_config_impl.cc index 56fbece90c73..546489232e74 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.cc +++ b/source/extensions/transport_sockets/tls/context_config_impl.cc @@ -410,6 +410,7 @@ ServerContextConfigImpl::ServerContextConfigImpl( DEFAULT_CIPHER_SUITES, DEFAULT_CURVES, factory_context), require_client_certificate_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, require_client_certificate, false)), + ocsp_staple_policy_(ocspStaplePolicyFromProto(config.ocsp_staple_policy())), session_ticket_keys_provider_(getTlsSessionTicketKeysConfigProvider(factory_context, config)), disable_stateless_session_resumption_(getStatelessSessionResumptionDisabled(config)) { @@ -504,6 +505,21 @@ ServerContextConfigImpl::getSessionTicketKey(const std::string& key_data) { return dst_key; } +Ssl::ServerContextConfig::OcspStaplePolicy ServerContextConfigImpl::ocspStaplePolicyFromProto( + const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::OcspStaplePolicy& + policy) { + switch (policy) { + case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::LENIENT_STAPLING: + return Ssl::ServerContextConfig::OcspStaplePolicy::LenientStapling; + case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::STRICT_STAPLING: + return Ssl::ServerContextConfig::OcspStaplePolicy::StrictStapling; + case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::MUST_STAPLE: + return Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/context_config_impl.h b/source/extensions/transport_sockets/tls/context_config_impl.h index 9d8048c3b32a..44c5a8cc619d 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.h +++ b/source/extensions/transport_sockets/tls/context_config_impl.h @@ -140,6 +140,7 @@ class ServerContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::Ser // Ssl::ServerContextConfig bool requireClientCertificate() const override { return require_client_certificate_; } + OcspStaplePolicy ocspStaplePolicy() const override { return ocsp_staple_policy_; } const std::vector& sessionTicketKeys() const override { return session_ticket_keys_; } @@ -164,6 +165,7 @@ class ServerContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::Ser static const std::string DEFAULT_CURVES; const bool require_client_certificate_; + const OcspStaplePolicy ocsp_staple_policy_; std::vector session_ticket_keys_; const Secret::TlsSessionTicketKeysConfigProviderSharedPtr session_ticket_keys_provider_; Envoy::Common::CallbackHandle* stk_update_callback_handle_{}; @@ -172,6 +174,9 @@ class ServerContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::Ser std::vector getSessionTicketKeys( const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys& keys); ServerContextConfig::SessionTicketKey getSessionTicketKey(const 
std::string& key_data); + static OcspStaplePolicy ocspStaplePolicyFromProto( + const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::OcspStaplePolicy& + policy); absl::optional session_timeout_; const bool disable_stateless_session_resumption_; diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index f461bb3c5d7f..8be424e42d06 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -325,6 +325,15 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c absl::StrCat("Failed to load certificate chain from ", ctx.cert_chain_file_path_)); } + // The must staple extension means the certificate promises to carry + // with it an OCSP staple. https://tools.ietf.org/html/rfc7633#section-6 + constexpr absl::string_view tls_feature_ext = "1.3.6.1.5.5.7.1.24"; + constexpr absl::string_view must_staple_ext_value = "\x30\x3\x02\x01\x05"; + auto must_staple = Utility::getCertificateExtensionValue(*ctx.cert_chain_, tls_feature_ext); + if (must_staple == must_staple_ext_value) { + ctx.is_must_staple_ = true; + } + bssl::UniquePtr public_key(X509_get_pubkey(ctx.cert_chain_.get())); const int pkey_id = EVP_PKEY_id(public_key.get()); if (!cert_pkey_ids.insert(pkey_id).second) { @@ -798,11 +807,24 @@ size_t ContextImpl::daysUntilFirstCertExpires() const { return daysUntilExpiration; } +absl::optional ContextImpl::secondsUntilFirstOcspResponseExpires() const { + absl::optional secs_until_expiration; + for (auto& ctx : tls_contexts_) { + if (ctx.ocsp_response_) { + uint64_t next_expiration = ctx.ocsp_response_->secondsUntilExpiration(); + secs_until_expiration = std::min( + next_expiration, secs_until_expiration.value_or(std::numeric_limits::max())); + } + } + + return secs_until_expiration; +} + Envoy::Ssl::CertificateDetailsPtr ContextImpl::getCaCertInformation() const { if (ca_cert_ == nullptr) { return nullptr; } - return certificateDetails(ca_cert_.get(), getCaFileName()); + return certificateDetails(ca_cert_.get(), getCaFileName(), nullptr); } std::vector ContextImpl::getCertChainInformation() const { @@ -811,20 +833,28 @@ std::vector ContextImpl::getCertChainInformat if (ctx.cert_chain_ == nullptr) { continue; } - cert_details.emplace_back( - certificateDetails(ctx.cert_chain_.get(), ctx.getCertChainFileName())); + cert_details.emplace_back(certificateDetails(ctx.cert_chain_.get(), ctx.getCertChainFileName(), + ctx.ocsp_response_.get())); } return cert_details; } -Envoy::Ssl::CertificateDetailsPtr ContextImpl::certificateDetails(X509* cert, - const std::string& path) const { +Envoy::Ssl::CertificateDetailsPtr +ContextImpl::certificateDetails(X509* cert, const std::string& path, + const Ocsp::OcspResponseWrapper* ocsp_response) const { Envoy::Ssl::CertificateDetailsPtr certificate_details = std::make_unique(); certificate_details->set_path(path); certificate_details->set_serial_number(Utility::getSerialNumberFromCertificate(*cert)); certificate_details->set_days_until_expiration( Utility::getDaysUntilExpiration(cert, time_source_)); + if (ocsp_response) { + auto* ocsp_details = certificate_details->mutable_ocsp_details(); + ProtobufWkt::Timestamp* valid_from = ocsp_details->mutable_valid_from(); + TimestampUtil::systemClockToTimestamp(ocsp_response->getThisUpdate(), *valid_from); + ProtobufWkt::Timestamp* expiration = ocsp_details->mutable_expiration(); + 
TimestampUtil::systemClockToTimestamp(ocsp_response->getNextUpdate(), *expiration); + } ProtobufWkt::Timestamp* valid_from = certificate_details->mutable_valid_from(); TimestampUtil::systemClockToTimestamp(Utility::getValidFrom(*cert), *valid_from); ProtobufWkt::Timestamp* expiration_time = certificate_details->mutable_expiration_time(); @@ -1001,7 +1031,8 @@ ServerContextImpl::ServerContextImpl(Stats::Scope& scope, const Envoy::Ssl::ServerContextConfig& config, const std::vector& server_names, TimeSource& time_source) - : ContextImpl(scope, config, time_source), session_ticket_keys_(config.sessionTicketKeys()) { + : ContextImpl(scope, config, time_source), session_ticket_keys_(config.sessionTicketKeys()), + ocsp_staple_policy_(config.ocspStaplePolicy()) { if (config.tlsCertificates().empty() && !config.capabilities().provides_certificates) { throw EnvoyException("Server TlsCertificates must have a certificate specified"); } @@ -1024,7 +1055,9 @@ ServerContextImpl::ServerContextImpl(Stats::Scope& scope, }); } - for (auto& ctx : tls_contexts_) { + const auto tls_certificates = config.tlsCertificates(); + for (uint32_t i = 0; i < tls_certificates.size(); ++i) { + auto& ctx = tls_contexts_[i]; if (!config.capabilities().verifies_peer_certificates && config.certificateValidationContext() != nullptr && !config.certificateValidationContext()->caCert().empty()) { @@ -1068,6 +1101,24 @@ ServerContextImpl::ServerContextImpl(Stats::Scope& scope, int rc = SSL_CTX_set_session_id_context(ctx.ssl_ctx_.get(), session_id.data(), session_id.size()); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); + + auto& ocsp_resp_bytes = tls_certificates[i].get().ocspStaple(); + if (ocsp_resp_bytes.empty()) { + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.require_ocsp_response_for_must_staple_certs") && + ctx.is_must_staple_) { + throw EnvoyException("OCSP response is required for must-staple certificate"); + } + if (ocsp_staple_policy_ == Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple) { + throw EnvoyException("Required OCSP response is missing from TLS context"); + } + } else { + auto response = std::make_unique(ocsp_resp_bytes, time_source_); + if (!response->matchesCertificate(*ctx.cert_chain_)) { + throw EnvoyException("OCSP response does not match its TLS certificate"); + } + ctx.ocsp_response_ = std::move(response); + } } } @@ -1337,17 +1388,113 @@ bool ServerContextImpl::isClientEcdsaCapable(const SSL_CLIENT_HELLO* ssl_client_ return false; } +bool ServerContextImpl::isClientOcspCapable(const SSL_CLIENT_HELLO* ssl_client_hello) { + const uint8_t* status_request_data; + size_t status_request_len; + if (SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_status_request, + &status_request_data, &status_request_len)) { + return true; + } + + return false; +} + +OcspStapleAction ServerContextImpl::ocspStapleAction(const ContextImpl::TlsContext& ctx, + bool client_ocsp_capable) { + if (!client_ocsp_capable) { + return OcspStapleAction::ClientNotCapable; + } + + auto& response = ctx.ocsp_response_; + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.check_ocsp_policy")) { + // Expiration check is disabled. Proceed as if the policy is LenientStapling and the response + // is not expired. + return response ? OcspStapleAction::Staple : OcspStapleAction::NoStaple; + } + + auto policy = ocsp_staple_policy_; + if (ctx.is_must_staple_) { + // The certificate has the must-staple extension, so upgrade the policy to match. 
+ policy = Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple; + } + + const bool valid_response = response && !response->isExpired(); + + switch (policy) { + case Ssl::ServerContextConfig::OcspStaplePolicy::LenientStapling: + if (!valid_response) { + return OcspStapleAction::NoStaple; + } + return OcspStapleAction::Staple; + + case Ssl::ServerContextConfig::OcspStaplePolicy::StrictStapling: + if (valid_response) { + return OcspStapleAction::Staple; + } + if (response) { + // Expired response. + return OcspStapleAction::Fail; + } + return OcspStapleAction::NoStaple; + + case Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple: + if (!valid_response) { + return OcspStapleAction::Fail; + } + return OcspStapleAction::Staple; + + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + enum ssl_select_cert_result_t ServerContextImpl::selectTlsContext(const SSL_CLIENT_HELLO* ssl_client_hello) { const bool client_ecdsa_capable = isClientEcdsaCapable(ssl_client_hello); + const bool client_ocsp_capable = isClientOcspCapable(ssl_client_hello); + // Fallback on first certificate. const TlsContext* selected_ctx = &tls_contexts_[0]; + auto ocsp_staple_action = ocspStapleAction(*selected_ctx, client_ocsp_capable); for (const auto& ctx : tls_contexts_) { - if (client_ecdsa_capable == ctx.is_ecdsa_) { - selected_ctx = &ctx; - break; + if (client_ecdsa_capable != ctx.is_ecdsa_) { + continue; + } + + auto action = ocspStapleAction(ctx, client_ocsp_capable); + if (action == OcspStapleAction::Fail) { + continue; } + + selected_ctx = &ctx; + ocsp_staple_action = action; + break; + } + + if (client_ocsp_capable) { + stats_.ocsp_staple_requests_.inc(); } + + switch (ocsp_staple_action) { + case OcspStapleAction::Staple: { + // We avoid setting the OCSP response if the client didn't request it, but doing so is safe. + RELEASE_ASSERT(selected_ctx->ocsp_response_, + "OCSP response must be present under OcspStapleAction::Staple"); + auto& resp_bytes = selected_ctx->ocsp_response_->rawBytes(); + int rc = SSL_set_ocsp_response(ssl_client_hello->ssl, resp_bytes.data(), resp_bytes.size()); + RELEASE_ASSERT(rc != 0, ""); + stats_.ocsp_staple_responses_.inc(); + } break; + case OcspStapleAction::NoStaple: + stats_.ocsp_staple_omitted_.inc(); + break; + case OcspStapleAction::Fail: + stats_.ocsp_staple_failed_.inc(); + return ssl_select_cert_error; + case OcspStapleAction::ClientNotCapable: + break; + } + RELEASE_ASSERT(SSL_set_SSL_CTX(ssl_client_hello->ssl, selected_ctx->ssl_ctx_.get()) != nullptr, ""); return ssl_select_cert_success; diff --git a/source/extensions/transport_sockets/tls/context_impl.h b/source/extensions/transport_sockets/tls/context_impl.h index d40c4c88881c..772ab6fa2090 100644 --- a/source/extensions/transport_sockets/tls/context_impl.h +++ b/source/extensions/transport_sockets/tls/context_impl.h @@ -18,6 +18,7 @@ #include "common/stats/symbol_table_impl.h" #include "extensions/transport_sockets/tls/context_manager_impl.h" +#include "extensions/transport_sockets/tls/ocsp/ocsp.h" #include "absl/synchronization/mutex.h" #include "openssl/ssl.h" @@ -40,7 +41,11 @@ namespace Tls { COUNTER(fail_verify_no_cert) \ COUNTER(fail_verify_error) \ COUNTER(fail_verify_san) \ - COUNTER(fail_verify_cert_hash) + COUNTER(fail_verify_cert_hash) \ + COUNTER(ocsp_staple_failed) \ + COUNTER(ocsp_staple_omitted) \ + COUNTER(ocsp_staple_responses) \ + COUNTER(ocsp_staple_requests) /** * Wrapper struct for SSL stats. 
@see stats_macros.h @@ -98,6 +103,7 @@ class ContextImpl : public virtual Envoy::Ssl::Context { size_t daysUntilFirstCertExpires() const override; Envoy::Ssl::CertificateDetailsPtr getCaCertInformation() const override; std::vector getCertChainInformation() const override; + absl::optional secondsUntilFirstOcspResponseExpires() const override; std::vector getPrivateKeyMethodProviders(); @@ -158,7 +164,9 @@ class ContextImpl : public virtual Envoy::Ssl::Context { void incCounter(const Stats::StatName name, absl::string_view value, const Stats::StatName fallback) const; - Envoy::Ssl::CertificateDetailsPtr certificateDetails(X509* cert, const std::string& path) const; + Envoy::Ssl::CertificateDetailsPtr + certificateDetails(X509* cert, const std::string& path, + const Ocsp::OcspResponseWrapper* ocsp_response) const; struct TlsContext { // Each certificate specified for the context has its own SSL_CTX. SSL_CTXs @@ -168,7 +176,9 @@ class ContextImpl : public virtual Envoy::Ssl::Context { bssl::UniquePtr ssl_ctx_; bssl::UniquePtr cert_chain_; std::string cert_chain_file_path_; + Ocsp::OcspResponseWrapperPtr ocsp_response_; bool is_ecdsa_{}; + bool is_must_staple_{}; Ssl::PrivateKeyMethodProviderSharedPtr private_key_method_provider_{}; std::string getCertChainFileName() const { return cert_chain_file_path_; }; @@ -233,6 +243,8 @@ class ClientContextImpl : public ContextImpl, public Envoy::Ssl::ClientContext { bool session_keys_single_use_{false}; }; +enum class OcspStapleAction { Staple, NoStaple, Fail, ClientNotCapable }; + class ServerContextImpl : public ContextImpl, public Envoy::Ssl::ServerContext { public: ServerContextImpl(Stats::Scope& scope, const Envoy::Ssl::ServerContextConfig& config, @@ -246,13 +258,17 @@ class ServerContextImpl : public ContextImpl, public Envoy::Ssl::ServerContext { int sessionTicketProcess(SSL* ssl, uint8_t* key_name, uint8_t* iv, EVP_CIPHER_CTX* ctx, HMAC_CTX* hmac_ctx, int encrypt); bool isClientEcdsaCapable(const SSL_CLIENT_HELLO* ssl_client_hello); + bool isClientOcspCapable(const SSL_CLIENT_HELLO* ssl_client_hello); // Select the TLS certificate context in SSL_CTX_set_select_certificate_cb() callback with // ClientHello details. 
enum ssl_select_cert_result_t selectTlsContext(const SSL_CLIENT_HELLO* ssl_client_hello); + OcspStapleAction ocspStapleAction(const ServerContextImpl::TlsContext& ctx, + bool client_ocsp_capable); SessionContextID generateHashForSessionContextId(const std::vector<std::string>& server_names); const std::vector<Ssl::ServerContextConfig::SessionTicketKey> session_ticket_keys_; + const Ssl::ServerContextConfig::OcspStaplePolicy ocsp_staple_policy_; }; } // namespace Tls } // namespace TransportSockets } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/transport_sockets/tls/context_manager_impl.cc b/source/extensions/transport_sockets/tls/context_manager_impl.cc index c877acae40d9..c1a2f076033d 100644 --- a/source/extensions/transport_sockets/tls/context_manager_impl.cc +++ b/source/extensions/transport_sockets/tls/context_manager_impl.cc @@ -1,6 +1,8 @@ #include "extensions/transport_sockets/tls/context_manager_impl.h" +#include <algorithm> #include <functional> +#include <limits> #include "envoy/stats/scope.h" @@ -62,6 +64,21 @@ size_t ContextManagerImpl::daysUntilFirstCertExpires() const { return ret; } +absl::optional<uint64_t> ContextManagerImpl::secondsUntilFirstOcspResponseExpires() const { + absl::optional<uint64_t> ret; + for (const auto& ctx_weak_ptr : contexts_) { + Envoy::Ssl::ContextSharedPtr context = ctx_weak_ptr.lock(); + if (context) { + auto next_expiration = context->secondsUntilFirstOcspResponseExpires(); + if (next_expiration) { + ret = std::min(next_expiration.value(), + ret.value_or(std::numeric_limits<uint64_t>::max())); + } + } + } + return ret; +} + void ContextManagerImpl::iterateContexts(std::function<void(const Envoy::Ssl::Context&)> callback) { for (const auto& ctx_weak_ptr : contexts_) { Envoy::Ssl::ContextSharedPtr context = ctx_weak_ptr.lock(); diff --git a/source/extensions/transport_sockets/tls/context_manager_impl.h b/source/extensions/transport_sockets/tls/context_manager_impl.h index d08e12e97410..3badb61ff8db 100644 --- a/source/extensions/transport_sockets/tls/context_manager_impl.h +++ b/source/extensions/transport_sockets/tls/context_manager_impl.h @@ -35,6 +35,7 @@ class ContextManagerImpl final : public Envoy::Ssl::ContextManager { createSslServerContext(Stats::Scope& scope, const Envoy::Ssl::ServerContextConfig& config, const std::vector<std::string>& server_names) override; size_t daysUntilFirstCertExpires() const override; + absl::optional<uint64_t> secondsUntilFirstOcspResponseExpires() const override; void iterateContexts(std::function<void(const Envoy::Ssl::Context&)> callback) override; Ssl::PrivateKeyMethodManager& privateKeyMethodManager() override { return private_key_method_manager_; diff --git a/source/extensions/transport_sockets/tls/ocsp/asn1_utility.cc b/source/extensions/transport_sockets/tls/ocsp/asn1_utility.cc index 82cf430a7fd2..3da51ed7e9f3 100644 --- a/source/extensions/transport_sockets/tls/ocsp/asn1_utility.cc +++ b/source/extensions/transport_sockets/tls/ocsp/asn1_utility.cc @@ -34,7 +34,7 @@ ParsingResult<absl::optional<CBS>> Asn1Utility::getOptional(CBS& cbs, unsigned t return "Failed to parse ASN.1 element tag"; } - return is_present ? 
absl::optional(data) : absl::nullopt; } ParsingResult Asn1Utility::parseOid(CBS& cbs) { diff --git a/source/extensions/transport_sockets/tls/ocsp/ocsp.cc b/source/extensions/transport_sockets/tls/ocsp/ocsp.cc index 11f7d0aa2c90..32410365916f 100644 --- a/source/extensions/transport_sockets/tls/ocsp/ocsp.cc +++ b/source/extensions/transport_sockets/tls/ocsp/ocsp.cc @@ -94,6 +94,10 @@ OcspResponseWrapper::OcspResponseWrapper(std::vector der_response, Time : raw_bytes_(std::move(der_response)), response_(readDerEncodedOcspResponse(raw_bytes_)), time_source_(time_source) { + if (response_->status_ != OcspResponseStatus::Successful) { + throw EnvoyException("OCSP response was unsuccessful"); + } + if (response_->response_ == nullptr) { throw EnvoyException("OCSP response has no body"); } @@ -116,7 +120,7 @@ OcspResponseWrapper::OcspResponseWrapper(std::vector der_response, Time // Though different issuers could produce certificates with the same serial // number, this is check is to prevent operator error and a collision in this // case is unlikely. -bool OcspResponseWrapper::matchesCertificate(X509& cert) { +bool OcspResponseWrapper::matchesCertificate(X509& cert) const { std::string cert_serial_number = CertUtility::getSerialNumberFromCertificate(cert); std::string resp_cert_serial_number = response_->response_->getCertSerialNumber(); return resp_cert_serial_number == cert_serial_number; @@ -127,6 +131,28 @@ bool OcspResponseWrapper::isExpired() { return next_update == absl::nullopt || next_update < time_source_.systemTime(); } +uint64_t OcspResponseWrapper::secondsUntilExpiration() const { + auto& next_update = response_->response_->getNextUpdate(); + auto now = time_source_.systemTime(); + if (!next_update || next_update.value() <= now) { + return 0; + } + return std::chrono::duration_cast(next_update.value() - now).count(); +} + +Envoy::SystemTime OcspResponseWrapper::getThisUpdate() const { + return response_->response_->getThisUpdate(); +} + +Envoy::SystemTime OcspResponseWrapper::getNextUpdate() const { + auto& next_update = response_->response_->getNextUpdate(); + if (next_update) { + return *next_update; + } + + return time_source_.systemTime(); +} + std::unique_ptr Asn1OcspUtility::parseOcspResponse(CBS& cbs) { // OCSPResponse ::= SEQUENCE { // responseStatus OCSPResponseStatus, diff --git a/source/extensions/transport_sockets/tls/ocsp/ocsp.h b/source/extensions/transport_sockets/tls/ocsp/ocsp.h index 35c274ee1579..24e806cfac5b 100644 --- a/source/extensions/transport_sockets/tls/ocsp/ocsp.h +++ b/source/extensions/transport_sockets/tls/ocsp/ocsp.h @@ -186,19 +186,19 @@ class OcspResponseWrapper { * @return std::vector& a reference to the underlying bytestring representation * of the OCSP response */ - const std::vector& rawBytes() { return raw_bytes_; } + const std::vector& rawBytes() const { return raw_bytes_; } /** * @return OcspResponseStatus whether the OCSP response was successfully created * or a status indicating an error in the OCSP process */ - OcspResponseStatus getResponseStatus() { return response_->status_; } + OcspResponseStatus getResponseStatus() const { return response_->status_; } /** * @param cert a X509& SSL certificate * @returns bool whether this OCSP response contains the revocation status of `cert` */ - bool matchesCertificate(X509& cert); + bool matchesCertificate(X509& cert) const; /** * Determines whether the OCSP response can no longer be considered valid. 
@@ -210,6 +210,25 @@ class OcspResponseWrapper { */ bool isExpired(); + /** + * @returns the seconds until this OCSP response expires. + */ + uint64_t secondsUntilExpiration() const; + + /** + * @return The beginning of the validity window for this response. + */ + Envoy::SystemTime getThisUpdate() const; + + /** + * The time at which this response is considered to expire. If + * the underlying response does not have a value, then the current + * time is returned. + * + * @return The end of the validity window for this response. + */ + Envoy::SystemTime getNextUpdate() const; + private: const std::vector raw_bytes_; const std::unique_ptr response_; diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index ba73cc5d6ac6..c14cb502bed1 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -68,6 +68,7 @@ class SslSocket : public Network::TransportSocket, Network::Connection& connection() const override; void onSuccess(SSL* ssl) override; void onFailure() override; + Network::TransportSocketCallbacks* transportSocketCallbacks() override { return callbacks_; } SSL* rawSslForTest() const { return rawSsl(); } diff --git a/source/extensions/watchdog/profile_action/BUILD b/source/extensions/watchdog/profile_action/BUILD index 6add86fd8479..afe779924b43 100644 --- a/source/extensions/watchdog/profile_action/BUILD +++ b/source/extensions/watchdog/profile_action/BUILD @@ -24,6 +24,7 @@ envoy_cc_library( "//include/envoy/thread:thread_interface", "//source/common/profiler:profiler_lib", "//source/common/protobuf:utility_lib", + "//source/common/stats:symbol_table_lib", "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", ], ) diff --git a/source/extensions/watchdog/profile_action/profile_action.cc b/source/extensions/watchdog/profile_action/profile_action.cc index 7b268d1911ff..2002414faa8c 100644 --- a/source/extensions/watchdog/profile_action/profile_action.cc +++ b/source/extensions/watchdog/profile_action/profile_action.cc @@ -6,6 +6,7 @@ #include "common/profiler/profiler.h" #include "common/protobuf/utility.h" +#include "common/stats/symbol_table_impl.h" #include "absl/strings/str_format.h" @@ -32,31 +33,44 @@ ProfileAction::ProfileAction( duration_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, profile_duration, 5000))), max_profiles_(config.max_profiles() == 0 ? 
DefaultMaxProfiles : config.max_profiles()), - running_profile_(false), profiles_started_(0), context_(context), - timer_cb_(context_.dispatcher_.createTimer([this] { + profiles_attempted_(context.stats_.counterFromStatName( + Stats::StatNameManagedStorage( + absl::StrCat(context.guarddog_name_, ".profile_action.attempted"), + context.stats_.symbolTable()) + .statName())), + profiles_successfully_captured_(context.stats_.counterFromStatName( + Stats::StatNameManagedStorage( + absl::StrCat(context.guarddog_name_, ".profile_action.successfully_captured"), + context.stats_.symbolTable()) + .statName())), + context_(context), timer_cb_(context_.dispatcher_.createTimer([this] { if (Profiler::Cpu::profilerEnabled()) { Profiler::Cpu::stopProfiler(); running_profile_ = false; } else { ENVOY_LOG_MISC(error, "Profile Action's stop() was scheduled, but profiler isn't running!"); + return; } if (!context_.api_.fileSystem().fileExists(profile_filename_)) { ENVOY_LOG_MISC(error, "Profile file {} wasn't created!", profile_filename_); + } else { + profiles_successfully_captured_.inc(); } })) {} void ProfileAction::run( envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/, - const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_ltt_pairs, + const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs, MonotonicTime /*now*/) { if (running_profile_) { return; } + profiles_attempted_.inc(); // Check if there's a tid that justifies profiling - if (thread_ltt_pairs.empty()) { + if (thread_last_checkin_pairs.empty()) { ENVOY_LOG_MISC(warn, "Profile Action: No tids were provided."); return; } diff --git a/source/extensions/watchdog/profile_action/profile_action.h b/source/extensions/watchdog/profile_action/profile_action.h index 24dea5b592d2..144f6b9861ff 100644 --- a/source/extensions/watchdog/profile_action/profile_action.h +++ b/source/extensions/watchdog/profile_action/profile_action.h @@ -22,16 +22,18 @@ class ProfileAction : public Server::Configuration::GuardDogAction { Server::Configuration::GuardDogActionFactoryContext& context); void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, - const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_ltt_pairs, + const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs, MonotonicTime now) override; private: const std::string path_; const std::chrono::milliseconds duration_; const uint64_t max_profiles_; - bool running_profile_; + bool running_profile_ = false; std::string profile_filename_; - uint64_t profiles_started_; + Stats::Counter& profiles_attempted_; + Stats::Counter& profiles_successfully_captured_; + uint64_t profiles_started_ = 0; Server::Configuration::GuardDogActionFactoryContext& context_; Event::TimerPtr timer_cb_; }; diff --git a/source/server/BUILD b/source/server/BUILD index dc82ca7ef502..852cc2bda604 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -127,7 +127,9 @@ envoy_cc_library( "//source/common/event:libevent_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_lib", + "//source/common/watchdog:abort_action_config", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", ], ) @@ -328,6 +330,7 @@ envoy_cc_library( "//source/common/network:connection_balancer_lib", "//source/common/network:filter_matcher_lib", "//source/common/network:listen_socket_lib", + "//source/common/network:listener_lib", "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", @@ -440,7 +443,6 @@ envoy_cc_library( 
"//source/common/runtime:runtime_lib", "//source/common/secret:secret_manager_impl_lib", "//source/common/singleton:manager_impl_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:thread_local_store_lib", "//source/common/upstream:cluster_manager_lib", "//source/common/upstream:health_discovery_service_lib", diff --git a/source/server/active_raw_udp_listener_config.cc b/source/server/active_raw_udp_listener_config.cc index f34abe2fcb0e..a3aa7b71918f 100644 --- a/source/server/active_raw_udp_listener_config.cc +++ b/source/server/active_raw_udp_listener_config.cc @@ -11,11 +11,16 @@ namespace Envoy { namespace Server { -Network::ConnectionHandler::ActiveListenerPtr -ActiveRawUdpListenerFactory::createActiveUdpListener(Network::ConnectionHandler& parent, +ActiveRawUdpListenerFactory::ActiveRawUdpListenerFactory(uint32_t concurrency) + : concurrency_(concurrency) {} + +Network::ConnectionHandler::ActiveUdpListenerPtr +ActiveRawUdpListenerFactory::createActiveUdpListener(uint32_t worker_index, + Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, Network::ListenerConfig& config) { - return std::make_unique(parent, dispatcher, config); + return std::make_unique(worker_index, concurrency_, parent, dispatcher, + config); } ProtobufTypes::MessagePtr ActiveRawUdpListenerConfigFactory::createEmptyConfigProto() { @@ -24,8 +29,8 @@ ProtobufTypes::MessagePtr ActiveRawUdpListenerConfigFactory::createEmptyConfigPr Network::ActiveUdpListenerFactoryPtr ActiveRawUdpListenerConfigFactory::createActiveUdpListenerFactory( - const Protobuf::Message& /*message*/, uint32_t /*concurrency*/) { - return std::make_unique(); + const Protobuf::Message& /*message*/, uint32_t concurrency) { + return std::make_unique(concurrency); } std::string ActiveRawUdpListenerConfigFactory::name() const { diff --git a/source/server/active_raw_udp_listener_config.h b/source/server/active_raw_udp_listener_config.h index da0216301de6..ce718dc8295d 100644 --- a/source/server/active_raw_udp_listener_config.h +++ b/source/server/active_raw_udp_listener_config.h @@ -9,11 +9,16 @@ namespace Server { class ActiveRawUdpListenerFactory : public Network::ActiveUdpListenerFactory { public: - Network::ConnectionHandler::ActiveListenerPtr - createActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& disptacher, - Network::ListenerConfig& config) override; + ActiveRawUdpListenerFactory(uint32_t concurrency); + + Network::ConnectionHandler::ActiveUdpListenerPtr + createActiveUdpListener(uint32_t worker_index, Network::ConnectionHandler& parent, + Event::Dispatcher& disptacher, Network::ListenerConfig& config) override; bool isTransportConnectionless() const override { return true; } + +private: + const uint32_t concurrency_; }; // This class uses a protobuf config to create a UDP listener factory which diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index c92b56c5c032..5b0d7d7a9f1d 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -146,8 +146,8 @@ void AdminImpl::startHttpListener(const std::string& access_log_path, } AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) - : server_(server), - request_id_extension_(Http::RequestIDExtensionFactory::defaultInstance(server_.random())), + : server_(server), request_id_extension_(Http::RequestIDExtensionFactory::defaultInstance( + server_.api().randomGenerator())), profile_path_(profile_path), stats_(Http::ConnectionManagerImpl::generateStats("http.admin.", 
server_.stats())), null_overload_manager_(server_.threadLocal()), @@ -227,8 +227,8 @@ Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { return Http::ConnectionManagerUtility::autoCreateCodec( - connection, data, callbacks, server_.stats(), http1_codec_stats_, http2_codec_stats_, - Http::Http1Settings(), + connection, data, callbacks, server_.stats(), server_.api().randomGenerator(), + http1_codec_stats_, http2_codec_stats_, Http::Http1Settings(), ::Envoy::Http2::Utility::initializeAndValidateOptions( envoy::config::core::v3::Http2ProtocolOptions()), maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); @@ -239,8 +239,8 @@ bool AdminImpl::createNetworkFilterChain(Network::Connection& connection, // Pass in the null overload manager so that the admin interface is accessible even when Envoy is // overloaded. connection.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( - *this, server_.drainManager(), server_.random(), server_.httpContext(), server_.runtime(), - server_.localInfo(), server_.clusterManager(), null_overload_manager_, + *this, server_.drainManager(), server_.api().randomGenerator(), server_.httpContext(), + server_.runtime(), server_.localInfo(), server_.clusterManager(), null_overload_manager_, server_.timeSource())}); return true; } diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 8ed948da85b1..945885ac3909 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -345,6 +345,9 @@ class AdminImpl : public Admin, Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { NOT_REACHED_GCOVR_EXCL_LINE; } + Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override { + NOT_REACHED_GCOVR_EXCL_LINE; + } envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; } diff --git a/source/server/admin/admin_filter.cc b/source/server/admin/admin_filter.cc index d2b70fa36a6d..c9227f2c725d 100644 --- a/source/server/admin/admin_filter.cc +++ b/source/server/admin/admin_filter.cc @@ -70,10 +70,9 @@ void AdminFilter::onComplete() { RELEASE_ASSERT(request_headers_, ""); Http::Code code = admin_server_callback_func_(path, *header_map, response, *this); Utility::populateFallbackResponseHeaders(code, *header_map); - decoder_callbacks_->streamInfo().setResponseCodeDetails( - StreamInfo::ResponseCodeDetails::get().AdminFilterResponse); decoder_callbacks_->encodeHeaders(std::move(header_map), - end_stream_on_complete_ && response.length() == 0); + end_stream_on_complete_ && response.length() == 0, + StreamInfo::ResponseCodeDetails::get().AdminFilterResponse); if (response.length() > 0) { decoder_callbacks_->encodeData(response, end_stream_on_complete_); diff --git a/source/server/admin/clusters_handler.cc b/source/server/admin/clusters_handler.cc index 78482d4a8f84..e0e17350b7c9 100644 --- a/source/server/admin/clusters_handler.cc +++ b/source/server/admin/clusters_handler.cc @@ -12,6 +12,35 @@ namespace Envoy { namespace Server { +namespace { + +void addCircuitBreakerSettingsAsText(const std::string& cluster_name, + const std::string& priority_str, + Upstream::ResourceManager& resource_manager, + Buffer::Instance& response) { + response.add(fmt::format("{}::{}_priority::max_connections::{}\n", cluster_name, priority_str, + resource_manager.connections().max())); + 
response.add(fmt::format("{}::{}_priority::max_pending_requests::{}\n", cluster_name, + priority_str, resource_manager.pendingRequests().max())); + response.add(fmt::format("{}::{}_priority::max_requests::{}\n", cluster_name, priority_str, + resource_manager.requests().max())); + response.add(fmt::format("{}::{}_priority::max_retries::{}\n", cluster_name, priority_str, + resource_manager.retries().max())); +} + +void addCircuitBreakerSettingsAsJson(const envoy::config::core::v3::RoutingPriority& priority, + Upstream::ResourceManager& resource_manager, + envoy::admin::v3::ClusterStatus& cluster_status) { + auto& thresholds = *cluster_status.mutable_circuit_breakers()->add_thresholds(); + thresholds.set_priority(priority); + thresholds.mutable_max_connections()->set_value(resource_manager.connections().max()); + thresholds.mutable_max_pending_requests()->set_value(resource_manager.pendingRequests().max()); + thresholds.mutable_max_requests()->set_value(resource_manager.requests().max()); + thresholds.mutable_max_retries()->set_value(resource_manager.retries().max()); +} + +} // namespace + ClustersHandler::ClustersHandler(Server::Instance& server) : HandlerContextBase(server) {} Http::Code ClustersHandler::handlerClusters(absl::string_view url, @@ -78,6 +107,13 @@ void ClustersHandler::writeClustersAsJson(Buffer::Instance& response) { envoy::admin::v3::ClusterStatus& cluster_status = *clusters.add_cluster_statuses(); cluster_status.set_name(cluster_info->name()); + addCircuitBreakerSettingsAsJson( + envoy::config::core::v3::RoutingPriority::DEFAULT, + cluster.info()->resourceManager(Upstream::ResourcePriority::Default), cluster_status); + addCircuitBreakerSettingsAsJson( + envoy::config::core::v3::RoutingPriority::HIGH, + cluster.info()->resourceManager(Upstream::ResourcePriority::High), cluster_status); + const Upstream::Outlier::Detector* outlier_detector = cluster.outlierDetector(); if (outlier_detector != nullptr && outlier_detector->successRateEjectionThreshold( @@ -153,11 +189,12 @@ void ClustersHandler::writeClustersAsText(Buffer::Instance& response) { const std::string& cluster_name = cluster.info()->name(); addOutlierInfo(cluster_name, cluster.outlierDetector(), response); - addCircuitSettings(cluster_name, "default", - cluster.info()->resourceManager(Upstream::ResourcePriority::Default), - response); - addCircuitSettings(cluster_name, "high", - cluster.info()->resourceManager(Upstream::ResourcePriority::High), response); + addCircuitBreakerSettingsAsText( + cluster_name, "default", + cluster.info()->resourceManager(Upstream::ResourcePriority::Default), response); + addCircuitBreakerSettingsAsText( + cluster_name, "high", cluster.info()->resourceManager(Upstream::ResourcePriority::High), + response); response.add( fmt::format("{}::added_via_api::{}\n", cluster_name, cluster.info()->addedViaApi())); @@ -230,19 +267,5 @@ void ClustersHandler::addOutlierInfo(const std::string& cluster_name, } } -void ClustersHandler::addCircuitSettings(const std::string& cluster_name, - const std::string& priority_str, - Upstream::ResourceManager& resource_manager, - Buffer::Instance& response) { - response.add(fmt::format("{}::{}_priority::max_connections::{}\n", cluster_name, priority_str, - resource_manager.connections().max())); - response.add(fmt::format("{}::{}_priority::max_pending_requests::{}\n", cluster_name, - priority_str, resource_manager.pendingRequests().max())); - response.add(fmt::format("{}::{}_priority::max_requests::{}\n", cluster_name, priority_str, - 
resource_manager.requests().max())); - response.add(fmt::format("{}::{}_priority::max_retries::{}\n", cluster_name, priority_str, - resource_manager.retries().max())); -} - } // namespace Server } // namespace Envoy diff --git a/source/server/admin/clusters_handler.h b/source/server/admin/clusters_handler.h index 5cf1b25a884c..efb00e3b2cca 100644 --- a/source/server/admin/clusters_handler.h +++ b/source/server/admin/clusters_handler.h @@ -23,8 +23,6 @@ class ClustersHandler : public HandlerContextBase { AdminStream&); private: - void addCircuitSettings(const std::string& cluster_name, const std::string& priority_str, - Upstream::ResourceManager& resource_manager, Buffer::Instance& response); void addOutlierInfo(const std::string& cluster_name, const Upstream::Outlier::Detector* outlier_detector, Buffer::Instance& response); diff --git a/source/server/admin/server_info_handler.cc b/source/server/admin/server_info_handler.cc index d668dac83992..86669949d288 100644 --- a/source/server/admin/server_info_handler.cc +++ b/source/server/admin/server_info_handler.cc @@ -88,6 +88,7 @@ Http::Code ServerInfoHandler::handlerServerInfo(absl::string_view, Http::Respons envoy::admin::v3::CommandLineOptions* command_line_options = server_info.mutable_command_line_options(); *command_line_options = *server_.options().toCommandLineOptions(); + server_info.mutable_node()->MergeFrom(server_.localInfo().node()); response.add(MessageUtil::getJsonStringFromMessage(server_info, true, true)); headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); return Http::Code::OK; diff --git a/source/server/admin/utils.cc b/source/server/admin/utils.cc index eaa5a6689a02..4bc16f2cc515 100644 --- a/source/server/admin/utils.cc +++ b/source/server/admin/utils.cc @@ -28,7 +28,7 @@ void populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& h header_map.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextUtf8); } // Default to 'no-cache' if unset, but not 'no-store' which may break the back button. 
- if (header_map.get(Http::CustomHeaders::get().CacheControl) == nullptr) { + if (header_map.get(Http::CustomHeaders::get().CacheControl).empty()) { header_map.setReference(Http::CustomHeaders::get().CacheControl, Http::CustomHeaders::get().CacheControlValues.NoCacheMaxAge0); } diff --git a/source/server/api_listener_impl.h b/source/server/api_listener_impl.h index 3bd552a31308..4731ea90ca54 100644 --- a/source/server/api_listener_impl.h +++ b/source/server/api_listener_impl.h @@ -133,6 +133,7 @@ class ApiListenerImplBase : public ApiListener, const StreamInfo::StreamInfo& streamInfo() const override { return stream_info_; } void setDelayedCloseTimeout(std::chrono::milliseconds) override {} absl::string_view transportFailureReason() const override { return EMPTY_STRING; } + absl::optional lastRoundTripTime() const override { return {}; }; SyntheticReadCallbacks& parent_; StreamInfo::StreamInfoImpl stream_info_; diff --git a/source/server/config_validation/api.cc b/source/server/config_validation/api.cc index 8e683bb5e78f..fb6152b9053d 100644 --- a/source/server/config_validation/api.cc +++ b/source/server/config_validation/api.cc @@ -8,8 +8,10 @@ namespace Envoy { namespace Api { ValidationImpl::ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, - Event::TimeSystem& time_system, Filesystem::Instance& file_system) - : Impl(thread_factory, stats_store, time_system, file_system), time_system_(time_system) {} + Event::TimeSystem& time_system, Filesystem::Instance& file_system, + Random::RandomGenerator& random_generator) + : Impl(thread_factory, stats_store, time_system, file_system, random_generator), + time_system_(time_system) {} Event::DispatcherPtr ValidationImpl::allocateDispatcher(const std::string& name) { return Event::DispatcherPtr{new Event::ValidationDispatcher(name, *this, time_system_)}; diff --git a/source/server/config_validation/api.h b/source/server/config_validation/api.h index f5cbd292e625..21c0dca05820 100644 --- a/source/server/config_validation/api.h +++ b/source/server/config_validation/api.h @@ -16,7 +16,8 @@ namespace Api { class ValidationImpl : public Impl { public: ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, - Event::TimeSystem& time_system, Filesystem::Instance& file_system); + Event::TimeSystem& time_system, Filesystem::Instance& file_system, + Random::RandomGenerator& random_generator); Event::DispatcherPtr allocateDispatcher(const std::string& name) override; Event::DispatcherPtr allocateDispatcher(const std::string& name, diff --git a/source/server/config_validation/cluster_manager.cc b/source/server/config_validation/cluster_manager.cc index d5f4489c918c..38e1413da9d9 100644 --- a/source/server/config_validation/cluster_manager.cc +++ b/source/server/config_validation/cluster_manager.cc @@ -11,9 +11,8 @@ namespace Upstream { ClusterManagerPtr ValidationClusterManagerFactory::clusterManagerFromProto( const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { return std::make_unique( - bootstrap, *this, stats_, tls_, runtime_, random_, local_info_, log_manager_, - main_thread_dispatcher_, admin_, validation_context_, api_, http_context_, grpc_context_, - time_system_); + bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, main_thread_dispatcher_, + admin_, validation_context_, api_, http_context_, grpc_context_, time_system_); } CdsApiPtr @@ -28,11 +27,11 @@ ValidationClusterManagerFactory::createCds(const envoy::config::core::v3::Config 
ValidationClusterManager::ValidationClusterManager( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, - AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, - Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, + const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, + Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, + ProtobufMessage::ValidationContext& validation_context, Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context, Event::TimeSystem& time_system) - : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, random, local_info, log_manager, + : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, local_info, log_manager, main_thread_dispatcher, admin, validation_context, api, http_context, grpc_context), async_client_(api, time_system) {} diff --git a/source/server/config_validation/cluster_manager.h b/source/server/config_validation/cluster_manager.h index 6ce2c46941fe..38bda5ad2f8d 100644 --- a/source/server/config_validation/cluster_manager.h +++ b/source/server/config_validation/cluster_manager.h @@ -23,17 +23,17 @@ class ValidationClusterManagerFactory : public ProdClusterManagerFactory { explicit ValidationClusterManagerFactory( Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, - ThreadLocal::Instance& tls, Random::RandomGenerator& random, - Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, - Event::Dispatcher& main_thread_dispatcher, const LocalInfo::LocalInfo& local_info, - Secret::SecretManager& secret_manager, ProtobufMessage::ValidationContext& validation_context, - Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context, + ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, + Ssl::ContextManager& ssl_context_manager, Event::Dispatcher& main_thread_dispatcher, + const LocalInfo::LocalInfo& local_info, Secret::SecretManager& secret_manager, + ProtobufMessage::ValidationContext& validation_context, Api::Api& api, + Http::Context& http_context, Grpc::Context& grpc_context, AccessLog::AccessLogManager& log_manager, Singleton::Manager& singleton_manager, Event::TimeSystem& time_system) - : ProdClusterManagerFactory(admin, runtime, stats, tls, random, dns_resolver, - ssl_context_manager, main_thread_dispatcher, local_info, - secret_manager, validation_context, api, http_context, - grpc_context, log_manager, singleton_manager), + : ProdClusterManagerFactory(admin, runtime, stats, tls, dns_resolver, ssl_context_manager, + main_thread_dispatcher, local_info, secret_manager, + validation_context, api, http_context, grpc_context, log_manager, + singleton_manager), grpc_context_(grpc_context), time_system_(time_system) {} ClusterManagerPtr @@ -57,7 +57,7 @@ class ValidationClusterManager : public ClusterManagerImpl { ValidationClusterManager(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& 
validation_context, Api::Api& api, diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 4df691f6b96e..877e7293f293 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -44,8 +44,8 @@ ValidationInstance::ValidationInstance( : options_(options), validation_context_(options_.allowUnknownStaticFields(), !options.rejectUnknownDynamicFields(), !options.ignoreUnknownDynamicFields()), - stats_store_(store), - api_(new Api::ValidationImpl(thread_factory, store, time_system, file_system)), + stats_store_(store), api_(new Api::ValidationImpl(thread_factory, store, time_system, + file_system, random_generator_)), dispatcher_(api_->allocateDispatcher("main_thread")), singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory())), access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock, @@ -99,8 +99,8 @@ void ValidationInstance::initialize(const Options& options, secret_manager_ = std::make_unique(admin().getConfigTracker()); ssl_context_manager_ = createContextManager("ssl_context_manager", api_->timeSource()); cluster_manager_factory_ = std::make_unique( - admin(), runtime(), stats(), threadLocal(), random(), dnsResolver(), sslContextManager(), - dispatcher(), localInfo(), *secret_manager_, messageValidationContext(), *api_, http_context_, + admin(), runtime(), stats(), threadLocal(), dnsResolver(), sslContextManager(), dispatcher(), + localInfo(), *secret_manager_, messageValidationContext(), *api_, http_context_, grpc_context_, accessLogManager(), singletonManager(), time_system_); config_.initialize(bootstrap, *this, *cluster_manager_factory_); runtime().initialize(clusterManager()); diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index c9108ffaec91..4909e5270f20 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -85,7 +85,6 @@ class ValidationInstance final : Logger::Loggable, ServerLifecycleNotifier& lifecycleNotifier() override { return *this; } ListenerManager& listenerManager() override { return *listener_manager_; } Secret::SecretManager& secretManager() override { return *secret_manager_; } - Random::RandomGenerator& random() override { return random_generator_; } Runtime::Loader& runtime() override { return Runtime::LoaderSingleton::get(); } void shutdown() override; bool isShutdown() override { return false; } @@ -120,9 +119,10 @@ class ValidationInstance final : Logger::Loggable, } // Server::ListenerComponentFactory - LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { - return std::make_unique(lds_config, clusterManager(), initManager(), stats(), - listenerManager(), + LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator* lds_resources_locator) override { + return std::make_unique(lds_config, lds_resources_locator, clusterManager(), + initManager(), stats(), listenerManager(), messageValidationContext().dynamicValidationVisitor()); } std::vector createNetworkFilterFactoryList( @@ -155,7 +155,7 @@ class ValidationInstance final : Logger::Loggable, uint64_t nextListenerTag() override { return 0; } // Server::WorkerFactory - WorkerPtr createWorker(OverloadManager&, const std::string&) override { + WorkerPtr createWorker(uint32_t, OverloadManager&, const std::string&) override { // Returned workers are not currently used so we can return 
nothing here safely vs. a // validation mock. return nullptr; diff --git a/source/server/configuration_impl.cc b/source/server/configuration_impl.cc index 2a06ada13e05..c0b3524adcff 100644 --- a/source/server/configuration_impl.cc +++ b/source/server/configuration_impl.cc @@ -6,6 +6,7 @@ #include #include +#include "envoy/common/exception.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/metrics/v3/stats.pb.h" #include "envoy/config/trace/v3/http_tracer.pb.h" @@ -131,8 +132,19 @@ void MainImpl::initializeStatsSinks(const envoy::config::bootstrap::v3::Bootstra void MainImpl::initializeWatchdogs(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, Instance& server) { - // TODO(kbaichoo): modify this to handle additional watchdogs - watchdog_ = std::make_unique(bootstrap.watchdog(), server); + if (bootstrap.has_watchdog() && bootstrap.has_watchdogs()) { + throw EnvoyException("Only one of watchdog or watchdogs should be set!"); + } + + if (bootstrap.has_watchdog()) { + main_thread_watchdog_ = std::make_unique(bootstrap.watchdog(), server); + worker_watchdog_ = std::make_unique(bootstrap.watchdog(), server); + } else { + main_thread_watchdog_ = + std::make_unique(bootstrap.watchdogs().main_thread_watchdog(), server); + worker_watchdog_ = + std::make_unique(bootstrap.watchdogs().worker_watchdog(), server); + } } WatchdogImpl::WatchdogImpl(const envoy::config::bootstrap::v3::Watchdog& watchdog, @@ -152,7 +164,7 @@ WatchdogImpl::WatchdogImpl(const envoy::config::bootstrap::v3::Watchdog& watchdo // We shouldn't have overflow issues due to the range of Duration. // This won't be entirely uniform, depending on how large max_skew // is relation to uint64. - kill_timeout += (server.random().random() % max_kill_timeout_jitter) + 1; + kill_timeout += (server.api().randomGenerator().random() % max_kill_timeout_jitter) + 1; } kill_timeout_ = std::chrono::milliseconds(kill_timeout); diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index 57ef7851cd23..dc7a893ab629 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -18,7 +18,6 @@ #include "envoy/server/instance.h" #include "common/common/logger.h" -#include "common/json/json_loader.h" #include "common/network/resolver_impl.h" #include "common/network/utility.h" @@ -101,7 +100,8 @@ class MainImpl : Logger::Loggable, public Main { Upstream::ClusterManager* clusterManager() override { return cluster_manager_.get(); } std::list& statsSinks() override { return stats_sinks_; } std::chrono::milliseconds statsFlushInterval() const override { return stats_flush_interval_; } - const Watchdog& watchdogConfig() const override { return *watchdog_; } + const Watchdog& mainThreadWatchdogConfig() const override { return *main_thread_watchdog_; } + const Watchdog& workerWatchdogConfig() const override { return *worker_watchdog_; } private: /** @@ -111,13 +111,17 @@ class MainImpl : Logger::Loggable, public Main { void initializeStatsSinks(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, Instance& server); + /** + * Initialize watchdog(s). Call before accessing any watchdog configuration. 
+ */ void initializeWatchdogs(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, Instance& server); std::unique_ptr cluster_manager_; std::list stats_sinks_; std::chrono::milliseconds stats_flush_interval_; - std::unique_ptr watchdog_; + std::unique_ptr main_thread_watchdog_; + std::unique_ptr worker_watchdog_; }; class WatchdogImpl : public Watchdog { diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index fd8232b484a8..d28f892b22ae 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -26,9 +26,10 @@ void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_in } } // namespace -ConnectionHandlerImpl::ConnectionHandlerImpl(Event::Dispatcher& dispatcher) - : dispatcher_(dispatcher), per_handler_stat_prefix_(dispatcher.name() + "."), - disable_listeners_(false) {} +ConnectionHandlerImpl::ConnectionHandlerImpl(Event::Dispatcher& dispatcher, + absl::optional worker_index) + : worker_index_(worker_index), dispatcher_(dispatcher), + per_handler_stat_prefix_(dispatcher.name() + "."), disable_listeners_(false) {} void ConnectionHandlerImpl::incNumConnections() { ++num_handler_connections_; } @@ -44,23 +45,30 @@ void ConnectionHandlerImpl::addListener(absl::optional overridden_list if (overridden_listener.has_value()) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == overridden_listener) { - listener.second.tcp_listener_->get().updateListenerConfig(config); + listener.second.tcpListener()->get().updateListenerConfig(config); return; } } NOT_REACHED_GCOVR_EXCL_LINE; } auto tcp_listener = std::make_unique(*this, config); - details.tcp_listener_ = *tcp_listener; + details.typed_listener_ = *tcp_listener; details.listener_ = std::move(tcp_listener); } else { ASSERT(config.udpListenerFactory() != nullptr, "UDP listener factory is not initialized."); - details.listener_ = - config.udpListenerFactory()->createActiveUdpListener(*this, dispatcher_, config); + ASSERT(worker_index_.has_value()); + ConnectionHandler::ActiveUdpListenerPtr udp_listener = + config.udpListenerFactory()->createActiveUdpListener(*worker_index_, *this, dispatcher_, + config); + details.typed_listener_ = *udp_listener; + details.listener_ = std::move(udp_listener); } if (disable_listeners_) { details.listener_->pauseListening(); } + if (auto* listener = details.listener_->listener(); listener != nullptr) { + listener->setRejectFraction(listener_reject_fraction_); + } listeners_.emplace_back(config.listenSocketFactory().localAddress(), std::move(details)); } @@ -74,12 +82,39 @@ void ConnectionHandlerImpl::removeListeners(uint64_t listener_tag) { } } +ConnectionHandlerImpl::ActiveListenerDetailsOptRef +ConnectionHandlerImpl::findActiveListenerByTag(uint64_t listener_tag) { + // TODO(mattklein123): We should probably use a hash table here to lookup the tag + // instead of iterating through the listener list. + for (auto& listener : listeners_) { + if (listener.second.listener_->listener() != nullptr && + listener.second.listener_->listenerTag() == listener_tag) { + return listener.second; + } + } + + return absl::nullopt; +} + +Network::UdpListenerCallbacksOptRef +ConnectionHandlerImpl::getUdpListenerCallbacks(uint64_t listener_tag) { + auto listener = findActiveListenerByTag(listener_tag); + if (listener.has_value()) { + // If the tag matches this must be a UDP listener. 
+ auto udp_listener = listener->get().udpListener(); + ASSERT(udp_listener.has_value()); + return udp_listener; + } + + return absl::nullopt; +} + void ConnectionHandlerImpl::removeFilterChains( uint64_t listener_tag, const std::list& filter_chains, std::function completion) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == listener_tag) { - listener.second.tcp_listener_->get().deferredRemoveFilterChains(filter_chains); + listener.second.tcpListener()->get().deferredRemoveFilterChains(filter_chains); // Completion is deferred because the above removeFilterChains() may defer delete connection. Event::DeferredTaskUtil::deferredRun(dispatcher_, std::move(completion)); return; @@ -116,6 +151,13 @@ void ConnectionHandlerImpl::enableListeners() { } } +void ConnectionHandlerImpl::setListenerRejectFraction(float reject_fraction) { + listener_reject_fraction_ = reject_fraction; + for (auto& listener : listeners_) { + listener.second.listener_->listener()->setRejectFraction(reject_fraction); + } +} + void ConnectionHandlerImpl::ActiveTcpListener::removeConnection(ActiveTcpConnection& connection) { ENVOY_CONN_LOG(debug, "adding to cleanup list", *connection.connection_); ActiveConnections& active_connections = connection.active_connections_; @@ -165,6 +207,7 @@ ConnectionHandlerImpl::ActiveTcpListener::ActiveTcpListener(ConnectionHandlerImp void ConnectionHandlerImpl::ActiveTcpListener::updateListenerConfig( Network::ListenerConfig& config) { ENVOY_LOG(trace, "replacing listener ", config_->listenerTag(), " by ", config.listenerTag()); + ASSERT(&config_->connectionBalancer() == &config.connectionBalancer()); config_ = &config; } @@ -204,15 +247,14 @@ ConnectionHandlerImpl::findActiveTcpListenerByAddress(const Network::Address::In // We do not return stopped listeners. auto listener_it = std::find_if( listeners_.begin(), listeners_.end(), - [&address]( - const std::pair& p) { - return p.second.tcp_listener_.has_value() && p.second.listener_->listener() != nullptr && + [&address](std::pair& p) { + return p.second.tcpListener().has_value() && p.second.listener_->listener() != nullptr && p.first->type() == Network::Address::Type::Ip && *(p.first) == address; }); // If there is exact address match, return the corresponding listener. if (listener_it != listeners_.end()) { - return listener_it->second.tcp_listener_; + return listener_it->second.tcpListener(); } // Otherwise, we need to look for the wild card match, i.e., 0.0.0.0:[address_port]. @@ -222,11 +264,16 @@ ConnectionHandlerImpl::findActiveTcpListenerByAddress(const Network::Address::In listeners_.begin(), listeners_.end(), [&address]( const std::pair& p) { - return p.second.tcp_listener_.has_value() && p.second.listener_->listener() != nullptr && + return absl::holds_alternative>( + p.second.typed_listener_) && + p.second.listener_->listener() != nullptr && p.first->type() == Network::Address::Type::Ip && p.first->ip()->port() == address.ip()->port() && p.first->ip()->isAnyAddress(); }); - return (listener_it != listeners_.end()) ? listener_it->second.tcp_listener_ : absl::nullopt; + return (listener_it != listeners_.end()) + ? 
ActiveTcpListenerOptRef(absl::get>( + listener_it->second.typed_listener_)) + : absl::nullopt; } void ConnectionHandlerImpl::ActiveTcpSocket::onTimeout() { @@ -354,6 +401,17 @@ void ConnectionHandlerImpl::ActiveTcpListener::onAccept(Network::ConnectionSocke onAcceptWorker(std::move(socket), config_->handOffRestoredDestinationConnections(), false); } +void ConnectionHandlerImpl::ActiveTcpListener::onReject(RejectCause cause) { + switch (cause) { + case RejectCause::GlobalCxLimit: + stats_.downstream_global_cx_overflow_.inc(); + break; + case RejectCause::OverloadAction: + stats_.downstream_cx_overload_reject_.inc(); + break; + } +} + void ConnectionHandlerImpl::ActiveTcpListener::onAcceptWorker( Network::ConnectionSocketPtr&& socket, bool hand_off_restored_destination_connections, bool rebalanced) { @@ -386,8 +444,24 @@ void ConnectionHandlerImpl::ActiveTcpListener::onAcceptWorker( } } +void ConnectionHandlerImpl::ActiveTcpListener::pauseListening() { + if (listener_ != nullptr) { + listener_->disable(); + } +} + +void ConnectionHandlerImpl::ActiveTcpListener::resumeListening() { + if (listener_ != nullptr) { + listener_->enable(); + } +} + void ConnectionHandlerImpl::ActiveTcpListener::newConnection( Network::ConnectionSocketPtr&& socket, std::unique_ptr stream_info) { + // Refresh local address in case it was restored by a listener filter like the original_dst + // filter. + stream_info->setDownstreamLocalAddress(socket->localAddress()); + // Find matching filter chain. const auto filter_chain = config_->filterChainManager().findFilterChain(*socket); if (filter_chain == nullptr) { @@ -476,21 +550,18 @@ void ConnectionHandlerImpl::ActiveTcpListener::post(Network::ConnectionSocketPtr parent_.dispatcher_.post( [socket_to_rebalance, tag = config_->listenerTag(), &parent = parent_]() { - // TODO(mattklein123): We should probably use a hash table here to lookup the tag instead of - // iterating through the listener list. - for (const auto& listener : parent.listeners_) { - if (listener.second.listener_->listener() != nullptr && - listener.second.listener_->listenerTag() == tag) { - // If the tag matches this must be a TCP listener. - ASSERT(listener.second.tcp_listener_.has_value()); - listener.second.tcp_listener_.value().get().onAcceptWorker( - std::move(socket_to_rebalance->socket), - listener.second.tcp_listener_.value() - .get() - .config_->handOffRestoredDestinationConnections(), - true); - return; - } + auto listener = parent.findActiveListenerByTag(tag); + if (listener.has_value()) { + // If the tag matches this must be a TCP listener. + ASSERT(absl::holds_alternative>( + listener->get().typed_listener_)); + auto& tcp_listener = + absl::get>(listener->get().typed_listener_) + .get(); + tcp_listener.onAcceptWorker(std::move(socket_to_rebalance->socket), + tcp_listener.config_->handOffRestoredDestinationConnections(), + true); + return; } }); } @@ -519,6 +590,7 @@ ConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection( listener.stats_.downstream_cx_active_.inc(); listener.per_worker_stats_.downstream_cx_total_.inc(); listener.per_worker_stats_.downstream_cx_active_.inc(); + stream_info_->setConnectionID(connection_->id()); // Active connections on the handler (not listener). 
The per listener connections have already // been incremented at this point either via the connection balancer or in the socket accept @@ -541,33 +613,102 @@ ConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() { listener.parent_.decNumConnections(); } -ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, +ConnectionHandlerImpl::ActiveTcpListenerOptRef +ConnectionHandlerImpl::ActiveListenerDetails::tcpListener() { + auto* val = absl::get_if>(&typed_listener_); + return (val != nullptr) ? absl::make_optional(*val) : absl::nullopt; +} + +ConnectionHandlerImpl::UdpListenerCallbacksOptRef +ConnectionHandlerImpl::ActiveListenerDetails::udpListener() { + auto* val = absl::get_if>(&typed_listener_); + return (val != nullptr) ? absl::make_optional(*val) : absl::nullopt; +} + +ActiveUdpListenerBase::ActiveUdpListenerBase(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, + Network::Socket& listen_socket, + Network::UdpListenerPtr&& listener, + Network::ListenerConfig* config) + : ConnectionHandlerImpl::ActiveListenerImplBase(parent, config), worker_index_(worker_index), + concurrency_(concurrency), parent_(parent), listen_socket_(listen_socket), + udp_listener_(std::move(listener)) { + ASSERT(worker_index_ < concurrency_); + config_->udpListenerWorkerRouter()->get().registerWorkerForListener(*this); +} + +ActiveUdpListenerBase::~ActiveUdpListenerBase() { + config_->udpListenerWorkerRouter()->get().unregisterWorkerForListener(*this); +} + +void ActiveUdpListenerBase::post(Network::UdpRecvData&& data) { + ASSERT(!udp_listener_->dispatcher().isThreadSafe(), + "Shouldn't be post'ing if thread safe; use onWorkerData() instead."); + + // It is not possible to capture a unique_ptr because the post() API copies the lambda, so we must + // bundle the socket inside a shared_ptr that can be captured. + // TODO(mattklein123): It may be possible to change the post() API such that the lambda is only + // moved, but this is non-trivial and needs investigation. + auto data_to_post = std::make_shared(); + *data_to_post = std::move(data); + + udp_listener_->dispatcher().post( + [data_to_post, tag = config_->listenerTag(), &parent = parent_]() { + Network::UdpListenerCallbacksOptRef listener = parent.getUdpListenerCallbacks(tag); + if (listener.has_value()) { + listener->get().onDataWorker(std::move(*data_to_post)); + } + }); +} + +void ActiveUdpListenerBase::onData(Network::UdpRecvData&& data) { + uint32_t dest = worker_index_; + + // For concurrency == 1, the packet will always go to the current worker. 
+ if (concurrency_ > 1) { + dest = destination(data); + ASSERT(dest < concurrency_); + } + + if (dest == worker_index_) { + onDataWorker(std::move(data)); + } else { + config_->udpListenerWorkerRouter()->get().deliver(dest, std::move(data)); + } +} + +ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, Network::ListenerConfig& config) - : ActiveRawUdpListener(parent, config.listenSocketFactory().getListenSocket(), dispatcher, - config) {} + : ActiveRawUdpListener(worker_index, concurrency, parent, + config.listenSocketFactory().getListenSocket(), dispatcher, config) {} -ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, +ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, Network::ListenerConfig& config) - : ActiveRawUdpListener(parent, *listen_socket_ptr, listen_socket_ptr, dispatcher, config) {} + : ActiveRawUdpListener(worker_index, concurrency, parent, *listen_socket_ptr, listen_socket_ptr, + dispatcher, config) {} -ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, +ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Network::Socket& listen_socket, Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, Network::ListenerConfig& config) - : ActiveRawUdpListener(parent, listen_socket, - dispatcher.createUdpListener(std::move(listen_socket_ptr), *this), - config) {} + : ActiveRawUdpListener(worker_index, concurrency, parent, listen_socket, + dispatcher.createUdpListener(listen_socket_ptr, *this), config) {} -ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, +ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Network::Socket& listen_socket, Network::UdpListenerPtr&& listener, Network::ListenerConfig& config) - : ConnectionHandlerImpl::ActiveListenerImplBase(parent, &config), - udp_listener_(std::move(listener)), read_filter_(nullptr), listen_socket_(listen_socket) { + : ActiveUdpListenerBase(worker_index, concurrency, parent, listen_socket, std::move(listener), + &config), + read_filter_(nullptr) { // Create the filter chain on creating a new udp listener config_->filterChainFactory().createUdpListenerFilterChain(*this, *this); @@ -583,7 +724,7 @@ ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, listen_socket_.ioHandle(), config.listenerScope()); } -void ActiveRawUdpListener::onData(Network::UdpRecvData& data) { read_filter_->onData(data); } +void ActiveRawUdpListener::onDataWorker(Network::UdpRecvData&& data) { read_filter_->onData(data); } void ActiveRawUdpListener::onReadReady() {} diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 3a6a252e3084..66d317e8f10e 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -30,6 +30,7 @@ namespace Server { COUNTER(downstream_cx_destroy) \ COUNTER(downstream_cx_overflow) \ COUNTER(downstream_cx_total) \ + COUNTER(downstream_cx_overload_reject) \ COUNTER(downstream_global_cx_overflow) \ COUNTER(downstream_pre_cx_timeout) \ COUNTER(no_filter_chain_match) \ @@ -55,6 +56,8 @@ struct PerHandlerListenerStats { 
ALL_PER_HANDLER_LISTENER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; +class ActiveUdpListenerBase; + /** * Server side connection handler. This is used both by workers as well as the * main thread for non-threaded listeners. @@ -63,7 +66,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable, Logger::Loggable { public: - ConnectionHandlerImpl(Event::Dispatcher& dispatcher); + ConnectionHandlerImpl(Event::Dispatcher& dispatcher, absl::optional worker_index); // Network::ConnectionHandler uint64_t numConnections() const override { return num_handler_connections_; } @@ -72,6 +75,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, void addListener(absl::optional overridden_listener, Network::ListenerConfig& config) override; void removeListeners(uint64_t listener_tag) override; + Network::UdpListenerCallbacksOptRef getUdpListenerCallbacks(uint64_t listener_tag) override; void removeFilterChains(uint64_t listener_tag, const std::list& filter_chains, std::function completion) override; @@ -79,12 +83,13 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, void stopListeners() override; void disableListeners() override; void enableListeners() override; + void setListenerRejectFraction(float reject_fraction) override; const std::string& statPrefix() const override { return per_handler_stat_prefix_; } /** * Wrapper for an active listener owned by this handler. */ - class ActiveListenerImplBase : public Network::ConnectionHandler::ActiveListener { + class ActiveListenerImplBase : public virtual Network::ConnectionHandler::ActiveListener { public: ActiveListenerImplBase(Network::ConnectionHandler& parent, Network::ListenerConfig* config); @@ -130,12 +135,12 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, // Network::TcpListenerCallbacks void onAccept(Network::ConnectionSocketPtr&& socket) override; - void onReject() override { stats_.downstream_global_cx_overflow_.inc(); } + void onReject(RejectCause) override; // ActiveListenerImplBase Network::Listener* listener() override { return listener_.get(); } - void pauseListening() override { listener_->disable(); } - void resumeListening() override { listener_->enable(); } + void pauseListening() override; + void resumeListening() override; void shutdownListener() override { listener_.reset(); } // Network::BalancedConnectionHandler @@ -331,51 +336,97 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, }; using ActiveTcpListenerOptRef = absl::optional>; + using UdpListenerCallbacksOptRef = + absl::optional>; struct ActiveListenerDetails { // Strong pointer to the listener, whether TCP, UDP, QUIC, etc. Network::ConnectionHandler::ActiveListenerPtr listener_; - // Reference to the listener IFF this is a TCP listener. Null otherwise. - ActiveTcpListenerOptRef tcp_listener_; + + absl::variant, + std::reference_wrapper> + typed_listener_; + + // Helpers for accessing the data in the variant for cleaner code. + ActiveTcpListenerOptRef tcpListener(); + UdpListenerCallbacksOptRef udpListener(); }; + using ActiveListenerDetailsOptRef = absl::optional>; ActiveTcpListenerOptRef findActiveTcpListenerByAddress(const Network::Address::Instance& address); + ActiveListenerDetailsOptRef findActiveListenerByTag(uint64_t listener_tag); + // This has a value on worker threads, and no value on the main thread. 
+ const absl::optional worker_index_; Event::Dispatcher& dispatcher_; const std::string per_handler_stat_prefix_; std::list> listeners_; std::atomic num_handler_connections_{}; bool disable_listeners_; + float listener_reject_fraction_{0}; +}; + +class ActiveUdpListenerBase : public ConnectionHandlerImpl::ActiveListenerImplBase, + public Network::ConnectionHandler::ActiveUdpListener { +public: + ActiveUdpListenerBase(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Network::Socket& listen_socket, + Network::UdpListenerPtr&& listener, Network::ListenerConfig* config); + ~ActiveUdpListenerBase() override; + + // Network::UdpListenerCallbacks + void onData(Network::UdpRecvData&& data) final; + uint32_t workerIndex() const final { return worker_index_; } + void post(Network::UdpRecvData&& data) final; + + // ActiveListenerImplBase + Network::Listener* listener() override { return udp_listener_.get(); } + +protected: + uint32_t destination(const Network::UdpRecvData& /*data*/) const override { + // By default, route to the current worker. + return worker_index_; + } + + const uint32_t worker_index_; + const uint32_t concurrency_; + Network::ConnectionHandler& parent_; + Network::Socket& listen_socket_; + Network::UdpListenerPtr udp_listener_; }; /** * Wrapper for an active udp listener owned by this handler. */ -class ActiveRawUdpListener : public Network::UdpListenerCallbacks, - public ConnectionHandlerImpl::ActiveListenerImplBase, +class ActiveRawUdpListener : public ActiveUdpListenerBase, public Network::UdpListenerFilterManager, public Network::UdpReadFilterCallbacks { public: - ActiveRawUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, + ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, Network::ListenerConfig& config); - ActiveRawUdpListener(Network::ConnectionHandler& parent, + ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, Network::ListenerConfig& config); - ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::Socket& listen_socket, + ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Network::Socket& listen_socket, Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, Network::ListenerConfig& config); - ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::Socket& listen_socket, + ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, + Network::ConnectionHandler& parent, Network::Socket& listen_socket, Network::UdpListenerPtr&& listener, Network::ListenerConfig& config); // Network::UdpListenerCallbacks - void onData(Network::UdpRecvData& data) override; void onReadReady() override; void onWriteReady(const Network::Socket& socket) override; void onReceiveError(Api::IoError::IoErrorCode error_code) override; Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; } + // Network::UdpWorker + void onDataWorker(Network::UdpRecvData&& data) override; + // ActiveListenerImplBase - Network::Listener* listener() override { return udp_listener_.get(); } void pauseListening() override { udp_listener_->disable(); } void resumeListening() override { udp_listener_->enable(); } void shutdownListener() override { @@ -394,10 +445,8 @@ class ActiveRawUdpListener : public 
Network::UdpListenerCallbacks, Network::UdpListener& udpListener() override; private: - Network::UdpListenerPtr udp_listener_; Network::UdpListenerReadFilterPtr read_filter_; Network::UdpPacketWriterPtr udp_packet_writer_; - Network::Socket& listen_socket_; }; } // namespace Server diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index d9b1e1dcfa8f..132108b95eaf 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -49,7 +49,7 @@ bool DrainManagerImpl::drainClose() const { ASSERT(server_.options().drainTime() >= remaining_time); const auto elapsed_time = server_.options().drainTime() - remaining_time; return static_cast(elapsed_time.count()) > - (server_.random().random() % server_.options().drainTime().count()); + (server_.api().randomGenerator().random() % server_.options().drainTime().count()); } void DrainManagerImpl::startDrainSequence(std::function drain_complete_cb) { diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index a342e686f935..0e006561ce09 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -88,10 +88,6 @@ const LocalInfo::LocalInfo& PerFilterChainFactoryContextImpl::localInfo() const return parent_context_.localInfo(); } -Envoy::Random::RandomGenerator& PerFilterChainFactoryContextImpl::random() { - return parent_context_.random(); -} - Envoy::Runtime::Loader& PerFilterChainFactoryContextImpl::runtime() { return parent_context_.runtime(); } @@ -145,8 +141,9 @@ bool FilterChainManagerImpl::isWildcardServerName(const std::string& name) { return absl::StartsWith(name, "*."); } -void FilterChainManagerImpl::addFilterChain( +void FilterChainManagerImpl::addFilterChains( absl::Span filter_chain_span, + const envoy::config::listener::v3::FilterChain* default_filter_chain, FilterChainFactoryBuilder& filter_chain_factory_builder, FilterChainFactoryContextCreator& context_creator) { Cleanup cleanup([this]() { origin_ = absl::nullopt; }); @@ -187,8 +184,7 @@ void FilterChainManagerImpl::addFilterChain( // Reject partial wildcards, we don't match on them. for (const auto& server_name : filter_chain_match.server_names()) { - if (server_name.find('*') != std::string::npos && - !FilterChainManagerImpl::isWildcardServerName(server_name)) { + if (server_name.find('*') != std::string::npos && !isWildcardServerName(server_name)) { throw EnvoyException( fmt::format("error adding listener '{}': partial wildcards are not supported in " "\"server_names\"", @@ -212,13 +208,49 @@ void FilterChainManagerImpl::addFilterChain( filter_chain_match.server_names(), filter_chain_match.transport_protocol(), filter_chain_match.application_protocols(), filter_chain_match.source_type(), source_ips, filter_chain_match.source_ports(), filter_chain_impl); + fc_contexts_[*filter_chain] = filter_chain_impl; } convertIPsToTries(); + copyOrRebuildDefaultFilterChain(default_filter_chain, filter_chain_factory_builder, + context_creator); ENVOY_LOG(debug, "new fc_contexts has {} filter chains, including {} newly built", fc_contexts_.size(), new_filter_chain_size); } +void FilterChainManagerImpl::copyOrRebuildDefaultFilterChain( + const envoy::config::listener::v3::FilterChain* default_filter_chain, + FilterChainFactoryBuilder& filter_chain_factory_builder, + FilterChainFactoryContextCreator& context_creator) { + // Default filter chain is built exactly once. 
+ ASSERT(!default_filter_chain_message_.has_value()); + + // Save the default filter chain message. This message could be used in the next listener update. + if (default_filter_chain == nullptr) { + return; + } + default_filter_chain_message_ = absl::make_optional(*default_filter_chain); + + // The origin filter chain manager could be null if the current manager is the ancestor. + const auto* origin = getOriginFilterChainManager(); + if (origin == nullptr) { + default_filter_chain_ = + filter_chain_factory_builder.buildFilterChain(*default_filter_chain, context_creator); + return; + } + + // Copy from the original filter chain manager, or build a new filter chain if the default filter chain + // is not equivalent to the one in the original filter chain manager. + MessageUtil eq; + if (origin->default_filter_chain_message_.has_value() && + eq(origin->default_filter_chain_message_.value(), *default_filter_chain)) { + default_filter_chain_ = origin->default_filter_chain_; + } else { + default_filter_chain_ = + filter_chain_factory_builder.buildFilterChain(*default_filter_chain, context_creator); + } +} + void FilterChainManagerImpl::addFilterChainForDestinationPorts( DestinationPortsMap& destination_ports_map, uint16_t destination_port, const std::vector& destination_ips, @@ -385,21 +417,30 @@ const Network::FilterChain* FilterChainManagerImpl::findFilterChain(const Network::ConnectionSocket& socket) const { const auto& address = socket.localAddress(); + const Network::FilterChain* best_match_filter_chain = nullptr; // Match on destination port (only for IP addresses). if (address->type() == Network::Address::Type::Ip) { const auto port_match = destination_ports_map_.find(address->ip()->port()); if (port_match != destination_ports_map_.end()) { - return findFilterChainForDestinationIP(*port_match->second.second, socket); + best_match_filter_chain = findFilterChainForDestinationIP(*port_match->second.second, socket); + if (best_match_filter_chain != nullptr) { + return best_match_filter_chain; + } else { + // There is an entry for the specific port but none of the filter chains matches. Instead of + // matching the catch-all port 0, the fallback filter chain is returned. + return default_filter_chain_.get(); + } } } - - // Match on catch-all port 0. + // Match on catch-all port 0 if there is no specific port sub tree. const auto port_match = destination_ports_map_.find(0); if (port_match != destination_ports_map_.end()) { - return findFilterChainForDestinationIP(*port_match->second.second, socket); + best_match_filter_chain = findFilterChainForDestinationIP(*port_match->second.second, socket); } - - return nullptr; + return best_match_filter_chain != nullptr + ? best_match_filter_chain + // Neither exact port nor catch-all port matches. Use fallback filter chain.
+ : default_filter_chain_.get(); } const Network::FilterChain* FilterChainManagerImpl::findFilterChainForDestinationIP( @@ -635,7 +676,6 @@ bool FactoryContextImpl::healthCheckFailed() { return server_.healthCheckFailed( Http::Context& FactoryContextImpl::httpContext() { return server_.httpContext(); } Init::Manager& FactoryContextImpl::initManager() { return server_.initManager(); } const LocalInfo::LocalInfo& FactoryContextImpl::localInfo() const { return server_.localInfo(); } -Envoy::Random::RandomGenerator& FactoryContextImpl::random() { return server_.random(); } Envoy::Runtime::Loader& FactoryContextImpl::runtime() { return server_.runtime(); } Stats::Scope& FactoryContextImpl::scope() { return global_scope_; } Singleton::Manager& FactoryContextImpl::singletonManager() { return server_.singletonManager(); } diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 59af0bb78ac5..3bcf01d2ec2e 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -58,7 +58,6 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -131,7 +130,6 @@ class FactoryContextImpl : public Configuration::FactoryContext { Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -188,17 +186,36 @@ class FilterChainManagerImpl : public Network::FilterChainManager, // Add all filter chains into this manager. During the lifetime of FilterChainManagerImpl this // should be called at most once. - void addFilterChain( + void addFilterChains( absl::Span filter_chain_span, - FilterChainFactoryBuilder& b, FilterChainFactoryContextCreator& context_creator); + const envoy::config::listener::v3::FilterChain* default_filter_chain, + FilterChainFactoryBuilder& filter_chain_factory_builder, + FilterChainFactoryContextCreator& context_creator); + static bool isWildcardServerName(const std::string& name); // Return the current view of filter chains, keyed by filter chain message. Used by the owning // listener to calculate the intersection of filter chains with another listener. const FcContextMap& filterChainsByMessage() const { return fc_contexts_; } + const absl::optional& + defaultFilterChainMessage() const { + return default_filter_chain_message_; + } + const Network::DrainableFilterChainSharedPtr& defaultFilterChain() const { + return default_filter_chain_; + } private: void convertIPsToTries(); + + // Build default filter chain from filter chain message. Skip the build but copy from original + // filter chain manager if the default filter chain message duplicates the message in origin + // filter chain manager. Called by addFilterChains(). 
+ void copyOrRebuildDefaultFilterChain( + const envoy::config::listener::v3::FilterChain* default_filter_chain, + FilterChainFactoryBuilder& filter_chain_factory_builder, + FilterChainFactoryContextCreator& context_creator); + using SourcePortsMap = absl::flat_hash_map; using SourcePortsMapSharedPtr = std::shared_ptr; using SourceIPsMap = absl::flat_hash_map; @@ -295,9 +312,15 @@ class FilterChainManagerImpl : public Network::FilterChainManager, // detect the filter chains in the intersection of existing listener and new listener. FcContextMap fc_contexts_; + absl::optional default_filter_chain_message_; + // The optional fallback filter chain if destination_ports_map_ does not find a matched filter + // chain. + Network::DrainableFilterChainSharedPtr default_filter_chain_; + // Mapping of FilterChain's configured destination ports, IPs, server names, transport protocols // and application protocols, using structures defined above. DestinationPortsMap destination_ports_map_; + const Network::Address::InstanceConstSharedPtr address_; // This is the reference to a factory context which all the generations of listener share. Configuration::FactoryContext& parent_context_; diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index a1e11c24b82e..64b04228c1b8 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -13,6 +13,7 @@ #include "envoy/server/guarddog.h" #include "envoy/server/guarddog_config.h" #include "envoy/stats/scope.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" #include "common/common/assert.h" #include "common/common/fmt.h" @@ -30,7 +31,8 @@ namespace Envoy { namespace Server { GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config, - Api::Api& api, std::unique_ptr&& test_interlock) + Api::Api& api, absl::string_view name, + std::unique_ptr&& test_interlock) : test_interlock_hook_(std::move(test_interlock)), stats_scope_(stats_scope), time_source_(api.timeSource()), miss_timeout_(config.missTimeout()), megamiss_timeout_(config.megaMissTimeout()), kill_timeout_(config.killTimeout()), @@ -46,21 +48,40 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio min_of_nonfatal}); }()), watchdog_miss_counter_(stats_scope.counterFromStatName( - Stats::StatNameManagedStorage("server.watchdog_miss", stats_scope.symbolTable()) + Stats::StatNameManagedStorage(absl::StrCat(name, ".watchdog_miss"), + stats_scope.symbolTable()) .statName())), watchdog_megamiss_counter_(stats_scope.counterFromStatName( - Stats::StatNameManagedStorage("server.watchdog_mega_miss", stats_scope.symbolTable()) + Stats::StatNameManagedStorage(absl::StrCat(name, ".watchdog_mega_miss"), + stats_scope.symbolTable()) .statName())), - dispatcher_(api.allocateDispatcher("guarddog_thread")), + dispatcher_(api.allocateDispatcher(absl::StrCat(name, "_guarddog_thread"))), loop_timer_(dispatcher_->createTimer([this]() { step(); })), events_to_actions_([&](const Server::Configuration::Watchdog& config) -> EventToActionsMap { EventToActionsMap map; // We should be able to share the dispatcher since guard dog's lifetime // should eclipse those of actions. - Configuration::GuardDogActionFactoryContext context = {api, *dispatcher_}; + Configuration::GuardDogActionFactoryContext context = {api, *dispatcher_, stats_scope, + name}; + + auto actions = config.actions(); + + // Add default abort_action if kill and/or multi-kill is enabled. 
+ if (config.killTimeout().count() > 0) { + envoy::watchdog::v3alpha::AbortActionConfig abort_config; + WatchDogAction* abort_action_config = actions.Add(); + abort_action_config->set_event(WatchDogAction::KILL); + abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); + } + + if (config.multiKillTimeout().count() > 0) { + envoy::watchdog::v3alpha::AbortActionConfig abort_config; + WatchDogAction* abort_action_config = actions.Add(); + abort_action_config->set_event(WatchDogAction::MULTIKILL); + abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); + } - const auto& actions = config.actions(); for (const auto& action : actions) { // Get factory and add the created cb auto& factory = Config::Utility::getAndCheckFactory( @@ -75,8 +96,8 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio } GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config, - Api::Api& api) - : GuardDogImpl(stats_scope, config, api, std::make_unique()) {} + Api::Api& api, absl::string_view name) + : GuardDogImpl(stats_scope, config, api, name, std::make_unique()) {} GuardDogImpl::~GuardDogImpl() { stop(); } @@ -102,10 +123,10 @@ void GuardDogImpl::step() { static_cast(ceil(multi_kill_fraction_ * watched_dogs_.size()))); for (auto& watched_dog : watched_dogs_) { - const auto ltt = watched_dog->dog_->lastTouchTime(); + const auto last_checkin = watched_dog->dog_->lastTouchTime(); const auto tid = watched_dog->dog_->threadId(); - const auto delta = now - ltt; - if (watched_dog->last_alert_time_ && watched_dog->last_alert_time_.value() < ltt) { + const auto delta = now - last_checkin; + if (watched_dog->last_alert_time_ && watched_dog->last_alert_time_.value() < last_checkin) { watched_dog->miss_alerted_ = false; watched_dog->megamiss_alerted_ = false; } @@ -113,35 +134,28 @@ void GuardDogImpl::step() { if (!watched_dog->miss_alerted_) { watchdog_miss_counter_.inc(); watched_dog->miss_counter_.inc(); - watched_dog->last_alert_time_ = ltt; + watched_dog->last_alert_time_ = last_checkin; watched_dog->miss_alerted_ = true; - miss_threads.emplace_back(tid, ltt); + miss_threads.emplace_back(tid, last_checkin); } } if (delta > megamiss_timeout_) { if (!watched_dog->megamiss_alerted_) { watchdog_megamiss_counter_.inc(); watched_dog->megamiss_counter_.inc(); - watched_dog->last_alert_time_ = ltt; + watched_dog->last_alert_time_ = last_checkin; watched_dog->megamiss_alerted_ = true; - mega_miss_threads.emplace_back(tid, ltt); + mega_miss_threads.emplace_back(tid, last_checkin); } } if (killEnabled() && delta > kill_timeout_) { - invokeGuardDogActions(WatchDogAction::KILL, {{tid, ltt}}, now); - - PANIC(fmt::format("GuardDog: one thread ({}) stuck for more than watchdog_kill_timeout", - watched_dog->dog_->threadId().debugString())); + invokeGuardDogActions(WatchDogAction::KILL, {{tid, last_checkin}}, now); } if (multikillEnabled() && delta > multi_kill_timeout_) { - multi_kill_threads.emplace_back(tid, ltt); + multi_kill_threads.emplace_back(tid, last_checkin); if (multi_kill_threads.size() >= required_for_multi_kill) { invokeGuardDogActions(WatchDogAction::MULTIKILL, multi_kill_threads, now); - - PANIC(fmt::format("GuardDog: At least {} threads ({},...) 
stuck for more than " - "watchdog_multikill_timeout", - multi_kill_threads.size(), tid.debugString())); } } } @@ -217,11 +231,12 @@ void GuardDogImpl::stop() { void GuardDogImpl::invokeGuardDogActions( WatchDogAction::WatchdogEvent event, - std::vector> thread_ltt_pairs, MonotonicTime now) { + std::vector> thread_last_checkin_pairs, + MonotonicTime now) { const auto& registered_actions = events_to_actions_.find(event); if (registered_actions != events_to_actions_.end()) { for (auto& action : registered_actions->second) { - action->run(event, thread_ltt_pairs, now); + action->run(event, thread_last_checkin_pairs, now); } } } diff --git a/source/server/guarddog_impl.h b/source/server/guarddog_impl.h index 829ccb5f42d8..3d8503ec9530 100644 --- a/source/server/guarddog_impl.h +++ b/source/server/guarddog_impl.h @@ -67,9 +67,10 @@ class GuardDogImpl : public GuardDog { * See the configuration documentation for details on the timeout settings. */ GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config, - Api::Api& api, std::unique_ptr&& test_interlock); + Api::Api& api, absl::string_view name, + std::unique_ptr&& test_interlock); GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config, - Api::Api& api); + Api::Api& api, absl::string_view name); ~GuardDogImpl() override; /** @@ -105,10 +106,10 @@ class GuardDogImpl : public GuardDog { using WatchDogAction = envoy::config::bootstrap::v3::Watchdog::WatchdogAction; // Helper function to invoke all the GuardDogActions registered for an Event. - void - invokeGuardDogActions(WatchDogAction::WatchdogEvent event, - std::vector> thread_ltt_pairs, - MonotonicTime now); + void invokeGuardDogActions( + WatchDogAction::WatchdogEvent event, + std::vector> thread_last_checkin_pairs, + MonotonicTime now); struct WatchedDog { WatchedDog(Stats::Scope& stats_scope, const std::string& thread_name, diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index c9e0aa7e7d02..56db57578985 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -95,10 +95,11 @@ void initializeMutex(pthread_mutex_t& mutex) { // performed the multiplication in OptionsImpl which produced incorrect server info output. // TODO(zuercher): ideally, the base_id would be separated from the restart_epoch in // the socket names to entirely prevent collisions between consecutive base ids. 
-HotRestartImpl::HotRestartImpl(uint32_t base_id, uint32_t restart_epoch) +HotRestartImpl::HotRestartImpl(uint32_t base_id, uint32_t restart_epoch, + const std::string& socket_path, mode_t socket_mode) : base_id_(base_id), scaled_base_id_(base_id * 10), - as_child_(HotRestartingChild(scaled_base_id_, restart_epoch)), - as_parent_(HotRestartingParent(scaled_base_id_, restart_epoch)), + as_child_(HotRestartingChild(scaled_base_id_, restart_epoch, socket_path, socket_mode)), + as_parent_(HotRestartingParent(scaled_base_id_, restart_epoch, socket_path, socket_mode)), shmem_(attachSharedMemory(scaled_base_id_, restart_epoch)), log_lock_(shmem_->log_lock_), access_log_lock_(shmem_->access_log_lock_) { // If our parent ever goes away just terminate us so that we don't have to rely on ops/launching diff --git a/source/server/hot_restart_impl.h b/source/server/hot_restart_impl.h index 9b91e892d104..309b6e766484 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -98,7 +98,8 @@ class ProcessSharedMutex : public Thread::BasicLockable { */ class HotRestartImpl : public HotRestart { public: - HotRestartImpl(uint32_t base_id, uint32_t restart_epoch); + HotRestartImpl(uint32_t base_id, uint32_t restart_epoch, const std::string& socket_path, + mode_t socket_mode); // Server::HotRestart void drainParentListeners() override; diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc index 724dd9e8b31a..5cf2f8378a6d 100644 --- a/source/server/hot_restarting_base.cc +++ b/source/server/hot_restarting_base.cc @@ -2,6 +2,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" +#include "common/network/address_impl.h" #include "common/stats/utility.h" namespace Envoy { @@ -24,28 +25,30 @@ void HotRestartingBase::initDomainSocketAddress(sockaddr_un* address) { address->sun_family = AF_UNIX; } -sockaddr_un HotRestartingBase::createDomainSocketAddress(uint64_t id, const std::string& role) { +sockaddr_un HotRestartingBase::createDomainSocketAddress(uint64_t id, const std::string& role, + const std::string& socket_path, + mode_t socket_mode) { // Right now we only allow a maximum of 3 concurrent envoy processes to be running. When the third // starts up it will kill the oldest parent. static constexpr uint64_t MaxConcurrentProcesses = 3; id = id % MaxConcurrentProcesses; - - // This creates an anonymous domain socket name (where the first byte of the name of \0). sockaddr_un address; initDomainSocketAddress(&address); - StringUtil::strlcpy(&address.sun_path[1], - fmt::format("envoy_domain_socket_{}_{}", role, base_id_ + id).c_str(), - sizeof(address.sun_path) - 1); - address.sun_path[0] = 0; + Network::Address::PipeInstance addr(fmt::format(socket_path + "_{}_{}", role, base_id_ + id), + socket_mode, nullptr); + memcpy(&address, addr.sockAddr(), addr.sockAddrLen()); + fchmod(my_domain_socket_, socket_mode); return address; } -void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role) { +void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role, + const std::string& socket_path, mode_t socket_mode) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); // This actually creates the socket and binds it. We use the socket in datagram mode so we can // easily read single messages. 
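Aside: the socket_path/socket_mode plumbing above lets the hot restart domain socket live either in the abstract namespace (the historical anonymous name) or at a real filesystem path with explicit permissions. A rough sketch of how such an address can be filled in, independent of Envoy's Network::Address helpers; the leading-'@' convention for "abstract namespace" matches the options handling later in this patch:

#include <sys/socket.h>
#include <sys/un.h>
#include <cstring>
#include <string>

// Build a sockaddr_un for either an abstract-namespace name (leading '@')
// or a regular filesystem path. Purely illustrative; the patch itself goes
// through Network::Address::PipeInstance instead.
sockaddr_un makeDomainSocketAddress(const std::string& path) {
  sockaddr_un address;
  std::memset(&address, 0, sizeof(address));
  address.sun_family = AF_UNIX;
  if (!path.empty() && path[0] == '@') {
    // Abstract namespace: first byte of sun_path is '\0' and no file is
    // created, so a file mode is meaningless (the options code forces 0).
    address.sun_path[0] = '\0';
    std::strncpy(&address.sun_path[1], path.c_str() + 1, sizeof(address.sun_path) - 2);
  } else {
    // Filesystem path: a socket file appears on bind(); its permissions can
    // then be adjusted, e.g. with fchmod() on the bound fd as the patch does.
    std::strncpy(address.sun_path, path.c_str(), sizeof(address.sun_path) - 1);
  }
  return address;
}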
my_domain_socket_ = socket(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0); - sockaddr_un address = createDomainSocketAddress(id, role); + sockaddr_un address = createDomainSocketAddress(id, role, socket_path, socket_mode); + unlink(address.sun_path); Api::SysCallIntResult result = os_sys_calls.bind(my_domain_socket_, reinterpret_cast(&address), sizeof(address)); if (result.rc_ != 0) { diff --git a/source/server/hot_restarting_base.h b/source/server/hot_restarting_base.h index 0e2b5abc4817..bbcbf3b2d990 100644 --- a/source/server/hot_restarting_base.h +++ b/source/server/hot_restarting_base.h @@ -1,7 +1,9 @@ #pragma once #include +#include #include +#include #include #include @@ -28,8 +30,10 @@ class HotRestartingBase { ~HotRestartingBase(); void initDomainSocketAddress(sockaddr_un* address); - sockaddr_un createDomainSocketAddress(uint64_t id, const std::string& role); - void bindDomainSocket(uint64_t id, const std::string& role); + sockaddr_un createDomainSocketAddress(uint64_t id, const std::string& role, + const std::string& socket_path, mode_t socket_mode); + void bindDomainSocket(uint64_t id, const std::string& role, const std::string& socket_path, + mode_t socket_mode); int myDomainSocket() const { return my_domain_socket_; } // Protocol description: diff --git a/source/server/hot_restarting_child.cc b/source/server/hot_restarting_child.cc index 25cb46bcf0fd..cd673303475b 100644 --- a/source/server/hot_restarting_child.cc +++ b/source/server/hot_restarting_child.cc @@ -7,13 +7,15 @@ namespace Server { using HotRestartMessage = envoy::HotRestartMessage; -HotRestartingChild::HotRestartingChild(int base_id, int restart_epoch) +HotRestartingChild::HotRestartingChild(int base_id, int restart_epoch, + const std::string& socket_path, mode_t socket_mode) : HotRestartingBase(base_id), restart_epoch_(restart_epoch) { initDomainSocketAddress(&parent_address_); if (restart_epoch_ != 0) { - parent_address_ = createDomainSocketAddress(restart_epoch_ + -1, "parent"); + parent_address_ = + createDomainSocketAddress(restart_epoch_ + -1, "parent", socket_path, socket_mode); } - bindDomainSocket(restart_epoch_, "child"); + bindDomainSocket(restart_epoch_, "child", socket_path, socket_mode); } int HotRestartingChild::duplicateParentListenSocket(const std::string& address) { diff --git a/source/server/hot_restarting_child.h b/source/server/hot_restarting_child.h index 0fe656d06d10..d7369b4a7ab0 100644 --- a/source/server/hot_restarting_child.h +++ b/source/server/hot_restarting_child.h @@ -12,7 +12,8 @@ namespace Server { */ class HotRestartingChild : HotRestartingBase, Logger::Loggable { public: - HotRestartingChild(int base_id, int restart_epoch); + HotRestartingChild(int base_id, int restart_epoch, const std::string& socket_path, + mode_t socket_mode); int duplicateParentListenSocket(const std::string& address); std::unique_ptr getParentStats(); diff --git a/source/server/hot_restarting_parent.cc b/source/server/hot_restarting_parent.cc index 472c772d3b2e..874c19b11a5a 100644 --- a/source/server/hot_restarting_parent.cc +++ b/source/server/hot_restarting_parent.cc @@ -15,10 +15,11 @@ namespace Server { using HotRestartMessage = envoy::HotRestartMessage; -HotRestartingParent::HotRestartingParent(int base_id, int restart_epoch) +HotRestartingParent::HotRestartingParent(int base_id, int restart_epoch, + const std::string& socket_path, mode_t socket_mode) : HotRestartingBase(base_id), restart_epoch_(restart_epoch) { - child_address_ = createDomainSocketAddress(restart_epoch_ + 1, "child"); - 
bindDomainSocket(restart_epoch_, "parent"); + child_address_ = createDomainSocketAddress(restart_epoch_ + 1, "child", socket_path, socket_mode); + bindDomainSocket(restart_epoch_, "parent", socket_path, socket_mode); } void HotRestartingParent::initialize(Event::Dispatcher& dispatcher, Server::Instance& server) { diff --git a/source/server/hot_restarting_parent.h b/source/server/hot_restarting_parent.h index d43d3636fe5a..da538b2b94dd 100644 --- a/source/server/hot_restarting_parent.h +++ b/source/server/hot_restarting_parent.h @@ -14,7 +14,8 @@ namespace Server { */ class HotRestartingParent : HotRestartingBase, Logger::Loggable { public: - HotRestartingParent(int base_id, int restart_epoch); + HotRestartingParent(int base_id, int restart_epoch, const std::string& socket_path, + mode_t socket_mode); void initialize(Event::Dispatcher& dispatcher, Server::Instance& server); void shutdown(); diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 4a1a65ed125b..d56c1338ec11 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -20,6 +20,7 @@ namespace Envoy { namespace Server { LdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator* lds_resources_locator, Upstream::ClusterManager& cm, Init::Manager& init_manager, Stats::Scope& scope, ListenerManager& lm, ProtobufMessage::ValidationVisitor& validation_visitor) @@ -28,8 +29,14 @@ LdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), init_target_("LDS", [this]() { subscription_->start({}); }) { const auto resource_name = getResourceName(); - subscription_ = cm.subscriptionFactory().subscriptionFromConfigSource( - lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); + if (lds_resources_locator == nullptr) { + subscription_ = cm.subscriptionFactory().subscriptionFromConfigSource( + lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); + } else { + subscription_ = cm.subscriptionFactory().collectionSubscriptionFromUrl( + *lds_resources_locator, lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, + resource_decoder_); + } init_manager.add(init_target_); } @@ -46,8 +53,8 @@ void LdsApiImpl::onConfigUpdate(const std::vector& a bool any_applied = false; listener_manager_.beginListenerUpdate(); - // We do all listener removals before adding the new listeners. This allows adding a new listener - // with the same address as a listener that is to be removed. Do not change the order. + // We do all listener removals before adding the new listeners. This allows adding a new + // listener with the same address as a listener that is to be removed. Do not change the order. for (const auto& removed_listener : removed_resources) { if (listener_manager_.removeListener(removed_listener)) { ENVOY_LOG(info, "lds: remove listener '{}'", removed_listener); @@ -64,7 +71,8 @@ void LdsApiImpl::onConfigUpdate(const std::vector& a listener = dynamic_cast(resource.get().resource()); if (!listener_names.insert(listener.name()).second) { - // NOTE: at this point, the first of these duplicates has already been successfully applied. + // NOTE: at this point, the first of these duplicates has already been successfully + // applied. 
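Aside: the LdsApiImpl constructor change above branches on whether an xDS resource locator was supplied: a plain ConfigSource keeps the existing subscription path, while a locator routes through the collection subscription API. A schematic version of that selection, with invented ConfigSource, ResourceLocator, and factory types standing in for the real ones:

#include <memory>

// Hypothetical stand-ins for the real config and subscription types.
struct ConfigSource {};
struct ResourceLocator {};
struct Subscription {};

struct SubscriptionFactory {
  std::unique_ptr<Subscription> fromConfigSource(const ConfigSource&) {
    return std::make_unique<Subscription>();
  }
  std::unique_ptr<Subscription> fromCollectionUrl(const ResourceLocator&, const ConfigSource&) {
    return std::make_unique<Subscription>();
  }
};

// Prefer the collection (resource locator) path when a locator is present,
// otherwise fall back to the classic ConfigSource-driven subscription.
std::unique_ptr<Subscription> makeLdsSubscription(SubscriptionFactory& factory,
                                                  const ConfigSource& config,
                                                  const ResourceLocator* locator) {
  if (locator == nullptr) {
    return factory.fromConfigSource(config);
  }
  return factory.fromCollectionUrl(*locator, config);
}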
throw EnvoyException(fmt::format("duplicate listener {} found", listener.name())); } if (listener_manager_.addOrUpdateListener(listener, resource.get().version(), true)) { diff --git a/source/server/lds_api.h b/source/server/lds_api.h index 0ace5e7b937c..333ba48551dd 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -26,9 +26,10 @@ class LdsApiImpl : public LdsApi, Envoy::Config::SubscriptionBase, Logger::Loggable { public: - LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, Upstream::ClusterManager& cm, - Init::Manager& init_manager, Stats::Scope& scope, ListenerManager& lm, - ProtobufMessage::ValidationVisitor& validation_visitor); + LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator* lds_resources_locator, + Upstream::ClusterManager& cm, Init::Manager& init_manager, Stats::Scope& scope, + ListenerManager& lm, ProtobufMessage::ValidationVisitor& validation_visitor); // Server::LdsApi std::string versionInfo() const override { return system_version_info_; } diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 32e3a3efa0c4..f2dcd9d1c30f 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -19,6 +19,7 @@ #include "common/network/resolver_impl.h" #include "common/network/socket_option_factory.h" #include "common/network/socket_option_impl.h" +#include "common/network/udp_listener_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" #include "common/runtime/runtime_features.h" @@ -171,9 +172,6 @@ Http::Context& ListenerFactoryContextBaseImpl::httpContext() { return server_.ht const LocalInfo::LocalInfo& ListenerFactoryContextBaseImpl::localInfo() const { return server_.localInfo(); } -Envoy::Random::RandomGenerator& ListenerFactoryContextBaseImpl::random() { - return server_.random(); -} Envoy::Runtime::Loader& ListenerFactoryContextBaseImpl::runtime() { return server_.runtime(); } Stats::Scope& ListenerFactoryContextBaseImpl::scope() { return *global_scope_; } Singleton::Manager& ListenerFactoryContextBaseImpl::singletonManager() { @@ -325,6 +323,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, listener_filters_timeout_( PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)), continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()), + connection_balancer_(origin.connection_balancer_), listener_factory_context_(std::make_shared( origin.listener_factory_context_->listener_factory_context_base_, this, *this)), filter_chain_manager_(address_, origin.listener_factory_context_->parentFactoryContext(), @@ -376,6 +375,9 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(udp_config, validation_visitor_, config_factory); udp_listener_factory_ = config_factory.createActiveUdpListenerFactory(*message, concurrency); + + udp_listener_worker_router_ = + std::make_unique(concurrency); } } @@ -476,24 +478,27 @@ void ListenerImpl::buildFilterChains() { Server::Configuration::TransportSocketFactoryContextImpl transport_factory_context( parent_.server_.admin(), parent_.server_.sslContextManager(), listenerScope(), parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(), - parent_.server_.random(), parent_.server_.stats(), parent_.server_.singletonManager(), - parent_.server_.threadLocal(), validation_visitor_, parent_.server_.api()); + 
parent_.server_.stats(), parent_.server_.singletonManager(), parent_.server_.threadLocal(), + validation_visitor_, parent_.server_.api()); transport_factory_context.setInitManager(*dynamic_init_manager_); - // The init manager is a little messy. Will refactor when filter chain manager could accept - // network filter chain update. - // TODO(lambdai): create builder from filter_chain_manager to obtain the init manager ListenerFilterChainFactoryBuilder builder(*this, transport_factory_context); - filter_chain_manager_.addFilterChain(config_.filter_chains(), builder, filter_chain_manager_); + filter_chain_manager_.addFilterChains( + config_.filter_chains(), + config_.has_default_filter_chain() ? &config_.default_filter_chain() : nullptr, builder, + filter_chain_manager_); } void ListenerImpl::buildSocketOptions() { // TCP specific setup. - if (config_.has_connection_balance_config()) { - // Currently exact balance is the only supported type and there are no options. - ASSERT(config_.connection_balance_config().has_exact_balance()); - connection_balancer_ = std::make_unique(); - } else { - connection_balancer_ = std::make_unique(); + if (connection_balancer_ == nullptr) { + // Not in place listener update. + if (config_.has_connection_balance_config()) { + // Currently exact balance is the only supported type and there are no options. + ASSERT(config_.connection_balance_config().has_exact_balance()); + connection_balancer_ = std::make_shared(); + } else { + connection_balancer_ = std::make_shared(); + } } if (config_.has_tcp_fast_open_queue_length()) { @@ -575,9 +580,6 @@ Http::Context& PerListenerFactoryContextImpl::httpContext() { const LocalInfo::LocalInfo& PerListenerFactoryContextImpl::localInfo() const { return listener_factory_context_base_->localInfo(); } -Envoy::Random::RandomGenerator& PerListenerFactoryContextImpl::random() { - return listener_factory_context_base_->random(); -} Envoy::Runtime::Loader& PerListenerFactoryContextImpl::runtime() { return listener_factory_context_base_->runtime(); } @@ -741,6 +743,15 @@ void ListenerImpl::diffFilterChain(const ListenerImpl& another_listener, callback(*message_and_filter_chain.second); } } + // Filter chain manager maintains an optional default filter chain besides the filter chains + // indexed by message. 
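Aside: both the default-filter-chain comparison that follows this comment and the filterChainOnlyChange() update further down rely on protobuf message equality with selected fields ignored, so that two listener configs can be treated as "the same except for filter chains". A generic form of that check, written against raw google::protobuf types rather than Envoy's Protobuf aliases, with the field name as a plain parameter:

#include <string>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/message.h>
#include <google/protobuf/util/message_differencer.h>

// Returns true if `lhs` and `rhs` are equal once the named field is ignored.
// This mirrors the listener code's handling of "filter_chains" and
// "default_filter_chain"; any top-level field name works here.
bool equalIgnoringField(const google::protobuf::Message& lhs,
                        const google::protobuf::Message& rhs,
                        const std::string& field_name) {
  google::protobuf::util::MessageDifferencer differencer;
  differencer.set_message_field_comparison(
      google::protobuf::util::MessageDifferencer::EQUIVALENT);
  differencer.set_repeated_field_comparison(
      google::protobuf::util::MessageDifferencer::AS_SET);
  const auto* field = lhs.GetDescriptor()->FindFieldByName(field_name);
  if (field != nullptr) {
    differencer.IgnoreField(field);
  }
  return differencer.Compare(lhs, rhs);
}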
+ if (auto eq = MessageUtil(); + filter_chain_manager_.defaultFilterChainMessage().has_value() && + (!another_listener.filter_chain_manager_.defaultFilterChainMessage().has_value() || + !eq(*another_listener.filter_chain_manager_.defaultFilterChainMessage(), + *filter_chain_manager_.defaultFilterChainMessage()))) { + callback(*filter_chain_manager_.defaultFilterChain()); + } } bool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs, @@ -750,6 +761,8 @@ bool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v differencer.set_repeated_field_comparison(Protobuf::util::MessageDifferencer::AS_SET); differencer.IgnoreField( envoy::config::listener::v3::Listener::GetDescriptor()->FindFieldByName("filter_chains")); + differencer.IgnoreField(envoy::config::listener::v3::Listener::GetDescriptor()->FindFieldByName( + "default_filter_chain")); return differencer.Compare(lhs, rhs); } diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 99c7a333b29f..b0fb4f5ec772 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -106,7 +106,6 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -172,7 +171,6 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -305,6 +303,11 @@ class ListenerImpl final : public Network::ListenerConfig, Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_)); } + Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override { + return udp_listener_worker_router_ + ? 
Network::UdpListenerWorkerRouterOptRef(*udp_listener_worker_router_) + : absl::nullopt; + } Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; } ResourceLimit& openConnections() override { return *open_connections_; } @@ -393,7 +396,8 @@ class ListenerImpl final : public Network::ListenerConfig, const bool continue_on_listener_filters_timeout_; Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; Network::UdpPacketWriterFactoryPtr udp_writer_factory_; - Network::ConnectionBalancerPtr connection_balancer_; + Network::UdpListenerWorkerRouterPtr udp_listener_worker_router_; + Network::ConnectionBalancerSharedPtr connection_balancer_; std::shared_ptr listener_factory_context_; FilterChainManagerImpl filter_chain_manager_; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index ea735ffb41d5..d2d81a57a914 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -261,7 +261,7 @@ ListenerManagerImpl::ListenerManagerImpl(Instance& server, enable_dispatcher_stats_(enable_dispatcher_stats) { for (uint32_t i = 0; i < server.options().concurrency(); i++) { workers_.emplace_back( - worker_factory.createWorker(server.overloadManager(), absl::StrCat("worker_", i))); + worker_factory.createWorker(i, server.overloadManager(), absl::StrCat("worker_", i))); } } @@ -334,7 +334,11 @@ ListenerManagerStats ListenerManagerImpl::generateStats(Stats::Scope& scope) { bool ListenerManagerImpl::addOrUpdateListener(const envoy::config::listener::v3::Listener& config, const std::string& version_info, bool added_via_api) { - + RELEASE_ASSERT( + !config.address().has_envoy_internal_address(), + fmt::format("listener {} has envoy internal address {}. Internal address cannot be used by " + "listener yet", + config.name(), config.address().envoy_internal_address().DebugString())); // TODO(junr03): currently only one ApiListener can be installed via bootstrap to avoid having to // build a collection of listeners, and to have to be able to warm and drain the listeners. In the // future allow multiple ApiListeners, and allow them to be created via LDS as well as bootstrap. 
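Aside: udpListenerWorkerRouter() above returns an optional reference because the router only exists for UDP listeners; TCP listeners report "no router" instead of handing out a dangling pointer. The OptRef idiom boils down to roughly this, using absl types directly and an invented Router type:

#include <functional>
#include <memory>
#include "absl/types/optional.h"

struct Router { int concurrency = 1; };
using RouterOptRef = absl::optional<std::reference_wrapper<Router>>;

class Listener {
public:
  explicit Listener(bool is_udp) {
    if (is_udp) {
      router_ = std::make_unique<Router>();
    }
  }
  // Hand out a reference only when the router was actually created.
  RouterOptRef router() { return router_ ? RouterOptRef(*router_) : absl::nullopt; }

private:
  std::unique_ptr<Router> router_;
};

// Usage: callers must check has_value() before dereferencing, e.g.
//   if (auto r = listener.router(); r.has_value()) { r->get().concurrency = 4; }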
@@ -355,7 +359,7 @@ bool ListenerManagerImpl::addOrUpdateListener(const envoy::config::listener::v3: if (!config.name().empty()) { name = config.name(); } else { - name = server_.random().uuid(); + name = server_.api().randomGenerator().uuid(); } auto it = error_state_tracker_.find(name); diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index ab839373829e..af4e333c1adb 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -67,10 +67,12 @@ class ProdListenerComponentFactory : public ListenerComponentFactory, createListenerFilterMatcher(const envoy::config::listener::v3::ListenerFilter& listener_filter); // Server::ListenerComponentFactory - LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { + LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator* lds_resources_locator) override { return std::make_unique( - lds_config, server_.clusterManager(), server_.initManager(), server_.stats(), - server_.listenerManager(), server_.messageValidationContext().dynamicValidationVisitor()); + lds_config, lds_resources_locator, server_.clusterManager(), server_.initManager(), + server_.stats(), server_.listenerManager(), + server_.messageValidationContext().dynamicValidationVisitor()); } std::vector createNetworkFilterFactoryList( const Protobuf::RepeatedPtrField& filters, @@ -182,9 +184,10 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable> listeners(ListenerState state = ListenerState::ACTIVE) override; @@ -299,7 +302,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable draining_listeners_; std::list draining_filter_chains_manager_; - std::list workers_; + std::vector workers_; bool workers_started_{}; absl::optional stop_listeners_type_; Stats::ScopePtr scope_; diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index e287cc1e1a6c..b7c031e914a2 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -160,6 +160,12 @@ OptionsImpl::OptionsImpl(std::vector args, "Comma-separated list of extensions to disable", false, "", "string", cmd); + TCLAP::ValueArg socket_path("", "socket-path", "Path to hot restart socket file", + false, "@envoy_domain_socket", "string", cmd); + + TCLAP::ValueArg socket_mode("", "socket-mode", "Socket file permission", false, + "600", "string", cmd); + cmd.setExceptionHandling(false); try { cmd.parse(args); @@ -181,6 +187,11 @@ OptionsImpl::OptionsImpl(std::vector args, hot_restart_disabled_ = disable_hot_restart.getValue(); mutex_tracing_enabled_ = enable_mutex_tracing.getValue(); fake_symbol_table_enabled_ = use_fake_symbol_table.getValue(); + if (fake_symbol_table_enabled_) { + ENVOY_LOG(warn, "Fake symbol tables have been removed. 
Please remove references to " + "--use-fake-symbol-table"); + } + cpuset_threads_ = cpuset_threads.getValue(); if (log_level.isSet()) { @@ -263,6 +274,18 @@ OptionsImpl::OptionsImpl(std::vector args, file_flush_interval_msec_ = std::chrono::milliseconds(file_flush_interval_msec.getValue()); drain_time_ = std::chrono::seconds(drain_time_s.getValue()); parent_shutdown_time_ = std::chrono::seconds(parent_shutdown_time_s.getValue()); + socket_path_ = socket_path.getValue(); + + if (socket_path_.at(0) == '@') { + socket_mode_ = 0; + } else { + uint64_t socket_mode_helper; + if (!StringUtil::atoull(socket_mode.getValue().c_str(), socket_mode_helper, 8)) { + throw MalformedArgvException( + fmt::format("error: invalid socket-mode '{}'", socket_mode.getValue())); + } + socket_mode_ = socket_mode_helper; + } if (drain_strategy.getValue() == "immediate") { drain_strategy_ = Server::DrainStrategy::Immediate; @@ -393,6 +416,8 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { for (const auto& e : disabledExtensions()) { command_line_options->add_disabled_extensions(e); } + command_line_options->set_socket_path(socketPath()); + command_line_options->set_socket_mode(socketMode()); return command_line_options; } @@ -406,7 +431,8 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), parent_shutdown_time_(900), drain_strategy_(Server::DrainStrategy::Gradual), mode_(Server::Mode::Serve), hot_restart_disabled_(false), signal_handling_enabled_(true), - mutex_tracing_enabled_(false), cpuset_threads_(false), fake_symbol_table_enabled_(false) {} + mutex_tracing_enabled_(false), cpuset_threads_(false), fake_symbol_table_enabled_(false), + socket_path_("@envoy_domain_socket"), socket_mode_(0) {} void OptionsImpl::disableExtensions(const std::vector& names) { for (const auto& name : names) { diff --git a/source/server/options_impl.h b/source/server/options_impl.h index ad88e71f6c10..cacf88685aa9 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -104,6 +104,10 @@ class OptionsImpl : public Server::Options, protected Logger::Loggableindex()]; } - return it->second; + return always_inactive_; } - void setState(const std::string& action, OverloadActionState state) { - actions_.insert_or_assign(action, state); + void setState(NamedOverloadActionSymbolTable::Symbol action, OverloadActionState state) { + actions_[action.index()] = state; } private: - absl::node_hash_map actions_; + static const OverloadActionState always_inactive_; + const NamedOverloadActionSymbolTable& action_symbol_table_; + std::vector actions_; }; +const OverloadActionState ThreadLocalOverloadStateImpl::always_inactive_{0.0}; + Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) { Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", a, ".", b), scope.symbolTable()); @@ -106,6 +113,37 @@ Stats::Gauge& makeGauge(Stats::Scope& scope, absl::string_view a, absl::string_v } // namespace +NamedOverloadActionSymbolTable::Symbol +NamedOverloadActionSymbolTable::get(absl::string_view string) { + if (auto it = table_.find(string); it != table_.end()) { + return Symbol(it->second); + } + + size_t index = table_.size(); + + names_.emplace_back(string); + table_.emplace(std::make_pair(string, index)); + + return Symbol(index); +} + +absl::optional +NamedOverloadActionSymbolTable::lookup(absl::string_view string) const { + if (auto it = 
table_.find(string); it != table_.end()) { + return Symbol(it->second); + } + return absl::nullopt; +} + +const absl::string_view NamedOverloadActionSymbolTable::name(Symbol symbol) const { + return names_.at(symbol.index()); +} + +bool operator==(const NamedOverloadActionSymbolTable::Symbol& lhs, + const NamedOverloadActionSymbolTable::Symbol& rhs) { + return lhs.index() == rhs.index(); +} + OverloadAction::OverloadAction(const envoy::config::overload::v3::OverloadAction& config, Stats::Scope& stats_scope) : state_(OverloadActionState::inactive()), @@ -191,13 +229,14 @@ OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::S for (const auto& action : config.actions()) { const auto& name = action.name(); + const auto symbol = action_symbol_table_.get(name); ENVOY_LOG(debug, "Adding overload action {}", name); // TODO: use in place construction once https://github.com/abseil/abseil-cpp/issues/388 is // addressed // We cannot currently use in place construction as the OverloadAction constructor may throw, // causing an inconsistent internal state of the actions_ map, which on destruction results in // an invalid free. - auto result = actions_.try_emplace(name, OverloadAction(action, stats_scope)); + auto result = actions_.try_emplace(symbol, OverloadAction(action, stats_scope)); if (!result.second) { throw EnvoyException(absl::StrCat("Duplicate overload action ", name)); } @@ -210,7 +249,7 @@ OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::S fmt::format("Unknown trigger resource {} for overload action {}", resource, name)); } - resource_to_actions_.insert(std::make_pair(resource, name)); + resource_to_actions_.insert(std::make_pair(resource, symbol)); } } } @@ -219,8 +258,8 @@ void OverloadManagerImpl::start() { ASSERT(!started_); started_ = true; - tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { - return std::make_shared(); + tls_->set([this](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(action_symbol_table_); }); if (resources_.empty()) { @@ -259,13 +298,14 @@ bool OverloadManagerImpl::registerForAction(const std::string& action, Event::Dispatcher& dispatcher, OverloadActionCb callback) { ASSERT(!started_); + const auto symbol = action_symbol_table_.get(action); - if (actions_.find(action) == actions_.end()) { + if (actions_.find(symbol) == actions_.end()) { ENVOY_LOG(debug, "No overload action is configured for {}.", action); return false; } - action_to_callbacks_.emplace(std::piecewise_construct, std::forward_as_tuple(action), + action_to_callbacks_.emplace(std::piecewise_construct, std::forward_as_tuple(symbol), std::forward_as_tuple(dispatcher, callback)); return true; } @@ -279,7 +319,7 @@ void OverloadManagerImpl::updateResourcePressure(const std::string& resource, do auto [start, end] = resource_to_actions_.equal_range(resource); std::for_each(start, end, [&](ResourceToActionMap::value_type& entry) { - const std::string& action = entry.second; + const NamedOverloadActionSymbolTable::Symbol action = entry.second; auto action_it = actions_.find(action); ASSERT(action_it != actions_.end()); const OverloadActionState old_state = action_it->second.getState(); @@ -287,7 +327,7 @@ void OverloadManagerImpl::updateResourcePressure(const std::string& resource, do const auto state = action_it->second.getState(); if (old_state.isSaturated() != state.isSaturated()) { - ENVOY_LOG(debug, "Overload action {} became {}", action, + ENVOY_LOG(debug, "Overload 
action {} became {}", action_symbol_table_.name(action), (state.isSaturated() ? "saturated" : "scaling")); } @@ -320,14 +360,18 @@ void OverloadManagerImpl::updateResourcePressure(const std::string& resource, do void OverloadManagerImpl::flushResourceUpdates() { if (!state_updates_to_flush_.empty()) { - auto shared_updates = std::make_shared>(); + auto shared_updates = std::make_shared< + absl::flat_hash_map>(); std::swap(*shared_updates, state_updates_to_flush_); - tls_->runOnAllThreads([this, updates = std::move(shared_updates)] { - for (const auto& [action, state] : *updates) { - tls_->getTyped().setState(action, state); - } - }); + tls_->runOnAllThreads( + [updates = std::move(shared_updates)](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + for (const auto& [action, state] : *updates) { + object->asType().setState(action, state); + } + return object; + }); } for (const auto& [cb, state] : callbacks_to_flush_) { diff --git a/source/server/overload_manager_impl.h b/source/server/overload_manager_impl.h index 05151c499816..ba5fa3a21e03 100644 --- a/source/server/overload_manager_impl.h +++ b/source/server/overload_manager_impl.h @@ -52,6 +52,53 @@ class OverloadAction { Stats::Gauge& scale_percent_gauge_; }; +// Simple table that converts strings into Symbol instances. Symbols are guaranteed to start at 0 +// and be indexed sequentially. +class NamedOverloadActionSymbolTable { +public: + class Symbol { + public: + // Allow copy construction everywhere. + Symbol(const Symbol&) = default; + + // Returns the index of the symbol in the table. + size_t index() const { return index_; } + + // Support use as a map key. + bool operator==(const Symbol other) { return other.index_ == index_; } + + // Support absl::Hash. + template + friend H AbslHashValue(H h, const Symbol& s) { // NOLINT(readability-identifier-naming) + return H::combine(std::move(h), s.index_); + } + + private: + friend class NamedOverloadActionSymbolTable; + // Only the symbol table class can create Symbol instances from indices. + explicit Symbol(size_t index) : index_(index) {} + + size_t index_; + }; + + // Finds an existing or adds a new entry for the given name. + Symbol get(absl::string_view name); + + // Returns the symbol for the name if there is one, otherwise nullopt. + absl::optional lookup(absl::string_view string) const; + + // Translates a symbol back into a name. + const absl::string_view name(Symbol symbol) const; + + // Returns the number of symbols in the table. All symbols are guaranteed to have an index less + // than size(). 
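Aside: the symbol table declared in this header interns action names into small sequential indices so the per-thread overload state can use vector indexing instead of hashing strings on the hot path. A compact version of the same idea, independent of the Envoy class:

#include <cstddef>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"

// Interns strings into dense indices 0..size()-1, so per-thread state can be
// a plain std::vector indexed by symbol instead of a map keyed by string.
class SymbolTable {
public:
  size_t get(absl::string_view name) {
    if (auto it = table_.find(name); it != table_.end()) {
      return it->second;
    }
    const size_t index = table_.size();
    names_.push_back(std::string(name));
    table_.emplace(names_.back(), index);
    return index;
  }
  absl::optional<size_t> lookup(absl::string_view name) const {
    if (auto it = table_.find(name); it != table_.end()) {
      return it->second;
    }
    return absl::nullopt;
  }
  absl::string_view name(size_t index) const { return names_.at(index); }
  size_t size() const { return table_.size(); }

private:
  absl::flat_hash_map<std::string, size_t> table_;
  std::vector<std::string> names_;
};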
+ size_t size() const { return table_.size(); } + +private: + absl::flat_hash_map table_; + std::vector names_; +}; + class OverloadManagerImpl : Logger::Loggable, public OverloadManager { public: OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::Scope& stats_scope, @@ -109,20 +156,25 @@ class OverloadManagerImpl : Logger::Loggable, public OverloadM bool started_; Event::Dispatcher& dispatcher_; ThreadLocal::SlotPtr tls_; + NamedOverloadActionSymbolTable action_symbol_table_; const std::chrono::milliseconds refresh_interval_; Event::TimerPtr timer_; absl::node_hash_map resources_; - absl::node_hash_map actions_; + absl::node_hash_map actions_; - absl::flat_hash_map state_updates_to_flush_; + absl::flat_hash_map + state_updates_to_flush_; absl::flat_hash_map callbacks_to_flush_; FlushEpochId flush_epoch_ = 0; uint64_t flush_awaiting_updates_ = 0; - using ResourceToActionMap = std::unordered_multimap; + using ResourceToActionMap = + std::unordered_multimap; ResourceToActionMap resource_to_actions_; - using ActionToCallbackMap = std::unordered_multimap; + using ActionToCallbackMap = + std::unordered_multimap>; ActionToCallbackMap action_to_callbacks_; }; diff --git a/source/server/server.cc b/source/server/server.cc index 35d49f4e5e68..b0a16884d982 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -67,14 +67,14 @@ InstanceImpl::InstanceImpl( options.ignoreUnknownDynamicFields()), time_source_(time_system), restarter_(restarter), start_time_(time(nullptr)), original_start_time_(start_time_), stats_store_(store), thread_local_(tls), - api_(new Api::Impl(thread_factory, store, time_system, file_system, + random_generator_(std::move(random_generator)), + api_(new Api::Impl(thread_factory, store, time_system, file_system, *random_generator_, process_context ? ProcessContextOptRef(std::ref(*process_context)) : absl::nullopt)), dispatcher_(api_->allocateDispatcher("main_thread")), singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory())), - handler_(new ConnectionHandlerImpl(*dispatcher_)), - random_generator_(std::move(random_generator)), listener_component_factory_(*this), - worker_factory_(thread_local_, *api_, hooks), + handler_(new ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), + listener_component_factory_(*this), worker_factory_(thread_local_, *api_, hooks), access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock, store), terminated_(false), @@ -216,6 +216,13 @@ void InstanceImpl::updateServerStats() { parent_stats.parent_connections_); server_stats_->days_until_first_cert_expiring_.set( sslContextManager().daysUntilFirstCertExpires()); + + auto secs_until_ocsp_response_expires = + sslContextManager().secondsUntilFirstOcspResponseExpires(); + if (secs_until_ocsp_response_expires) { + server_stats_->seconds_until_first_ocsp_response_expiring_.set( + secs_until_ocsp_response_expires.value()); + } server_stats_->state_.set( enumToInt(Utility::serverState(initManager().state(), healthCheckFailed()))); server_stats_->stats_recent_lookups_.set( @@ -235,9 +242,9 @@ bool InstanceImpl::healthCheckFailed() { return !live_.load(); } namespace { // Loads a bootstrap object, potentially at a specific version (upgrading if necessary). 
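Aside: the loadBootstrap() rename and hunks just below dispatch on an optional bootstrap API version: absent means parse directly as v3, version 2 means parse the v2 message and upgrade it (now with an explicit deprecation warning). A stripped-down sketch of that control flow, with placeholder message types and a hypothetical upgrade hook in place of Config::VersionConverter:

#include <cstdint>
#include <functional>
#include <stdexcept>
#include <string>
#include "absl/types/optional.h"

struct BootstrapV2 {};
struct BootstrapV3 {};

// Placeholder for the real version-upgrade machinery.
void upgradeV2ToV3(const BootstrapV2&, BootstrapV3&) {}

void loadBootstrapSketch(absl::optional<uint32_t> version, BootstrapV3& bootstrap,
                         const std::function<void(BootstrapV2&)>& load_v2,
                         const std::function<void(BootstrapV3&)>& load_v3) {
  if (!version.has_value()) {
    load_v3(bootstrap); // Default: parse directly as the v3 API.
  } else if (*version == 2) {
    BootstrapV2 v2;
    load_v2(v2); // Parse as v2, then upgrade into the v3 message.
    upgradeV2ToV3(v2, bootstrap);
  } else {
    throw std::runtime_error("Unknown bootstrap version " + std::to_string(*version));
  }
}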
-void loadBootsrap(absl::optional bootstrap_version, - envoy::config::bootstrap::v3::Bootstrap& bootstrap, - std::function load_function) { +void loadBootstrap(absl::optional bootstrap_version, + envoy::config::bootstrap::v3::Bootstrap& bootstrap, + std::function load_function) { if (!bootstrap_version.has_value()) { load_function(bootstrap, true); @@ -247,6 +254,7 @@ void loadBootsrap(absl::optional bootstrap_version, envoy::config::bootstrap::v2::Bootstrap bootstrap_v2; load_function(bootstrap_v2, false); Config::VersionConverter::upgrade(bootstrap_v2, bootstrap); + MessageUtil::onVersionUpgradeWarn("v2 bootstrap"); } else { throw EnvoyException(fmt::format("Unknown bootstrap version {}.", *bootstrap_version)); } @@ -268,7 +276,7 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& } if (!config_path.empty()) { - loadBootsrap( + loadBootstrap( options.bootstrapVersion(), bootstrap, [&config_path, &validation_visitor, &api](Protobuf::Message& message, bool do_boosting) { MessageUtil::loadFromFile(config_path, message, validation_visitor, api, do_boosting); @@ -276,10 +284,11 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& } if (!config_yaml.empty()) { envoy::config::bootstrap::v3::Bootstrap bootstrap_override; - loadBootsrap(options.bootstrapVersion(), bootstrap_override, - [&config_yaml, &validation_visitor](Protobuf::Message& message, bool do_boosting) { - MessageUtil::loadFromYaml(config_yaml, message, validation_visitor, do_boosting); - }); + loadBootstrap( + options.bootstrapVersion(), bootstrap_override, + [&config_yaml, &validation_visitor](Protobuf::Message& message, bool do_boosting) { + MessageUtil::loadFromYaml(config_yaml, message, validation_visitor, do_boosting); + }); // TODO(snowp): The fact that we do a merge here doesn't seem to be covered under test. bootstrap.MergeFrom(bootstrap_override); } @@ -478,8 +487,8 @@ void InstanceImpl::initialize(const Options& options, dns_resolver_ = dispatcher_->createDnsResolver({}, use_tcp_for_dns_lookups); cluster_manager_factory_ = std::make_unique( - *admin_, Runtime::LoaderSingleton::get(), stats_store_, thread_local_, *random_generator_, - dns_resolver_, *ssl_context_manager_, *dispatcher_, *local_info_, *secret_manager_, + *admin_, Runtime::LoaderSingleton::get(), stats_store_, thread_local_, dns_resolver_, + *ssl_context_manager_, *dispatcher_, *local_info_, *secret_manager_, messageValidationContext(), *api_, http_context_, grpc_context_, access_log_manager_, *singleton_manager_); @@ -491,8 +500,12 @@ void InstanceImpl::initialize(const Options& options, // Instruct the listener manager to create the LDS provider if needed. This must be done later // because various items do not yet exist when the listener manager is created. - if (bootstrap_.dynamic_resources().has_lds_config()) { - listener_manager_->createLdsApi(bootstrap_.dynamic_resources().lds_config()); + if (bootstrap_.dynamic_resources().has_lds_config() || + bootstrap_.dynamic_resources().has_lds_resources_locator()) { + listener_manager_->createLdsApi(bootstrap_.dynamic_resources().lds_config(), + bootstrap_.dynamic_resources().has_lds_resources_locator() + ? &bootstrap_.dynamic_resources().lds_resources_locator() + : nullptr); } // We have to defer RTDS initialization until after the cluster manager is @@ -513,8 +526,10 @@ void InstanceImpl::initialize(const Options& options, // GuardDog (deadlock detection) object and thread setup before workers are // started and before our own run() loop runs. 
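Aside: the change right after this comment splits watchdog supervision into two guard dogs, one built from mainThreadWatchdogConfig() for the main dispatch loop and one from workerWatchdogConfig() that is handed to the workers, and the run() hunk further below registers the main thread against the former. The lifecycle amounts to roughly the following, with invented minimal types rather than Envoy's GuardDog interfaces:

#include <memory>
#include <string>

// Invented stand-ins for the guard dog objects used around the dispatch loop.
struct WatchDog {
  void startWatchdog() {} // begin periodic touches from the owning thread
};
struct GuardDog {
  explicit GuardDog(std::string name) : name_(std::move(name)) {}
  std::shared_ptr<WatchDog> createWatchDog(const std::string& /*thread_name*/) {
    return std::make_shared<WatchDog>();
  }
  void stopWatching(const std::shared_ptr<WatchDog>&) {}
  std::string name_;
};

void runMainLoop(GuardDog& main_thread_guard_dog) {
  auto watchdog = main_thread_guard_dog.createWatchDog("main_thread");
  watchdog->startWatchdog();
  // ... the dispatcher runs here; the watchdog is touched on each iteration ...
  main_thread_guard_dog.stopWatching(watchdog);
}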
- guard_dog_ = - std::make_unique(stats_store_, config_.watchdogConfig(), *api_); + main_thread_guard_dog_ = std::make_unique( + stats_store_, config_.mainThreadWatchdogConfig(), *api_, "main_thread"); + worker_guard_dog_ = std::make_unique( + stats_store_, config_.workerWatchdogConfig(), *api_, "workers"); } void InstanceImpl::onClusterManagerPrimaryInitializationComplete() { @@ -536,7 +551,7 @@ void InstanceImpl::onRuntimeReady() { stats_store_, false) ->create(), hds_config.transport_api_version(), *dispatcher_, Runtime::LoaderSingleton::get(), - stats_store_, *ssl_context_manager_, *random_generator_, info_factory_, access_log_manager_, + stats_store_, *ssl_context_manager_, info_factory_, access_log_manager_, *config_.clusterManager(), *local_info_, *admin_, *singleton_manager_, thread_local_, messageValidationContext().dynamicValidationVisitor(), *api_); } @@ -552,7 +567,7 @@ void InstanceImpl::onRuntimeReady() { } void InstanceImpl::startWorkers() { - listener_manager_->startWorkers(*guard_dog_); + listener_manager_->startWorkers(*worker_guard_dog_); initialization_timer_->complete(); // Update server stats as soon as initialization is done. updateServerStats(); @@ -568,8 +583,8 @@ Runtime::LoaderPtr InstanceUtil::createRuntime(Instance& server, ENVOY_LOG(info, "runtime: {}", MessageUtil::getYamlStringFromMessage(config.runtime())); return std::make_unique( server.dispatcher(), server.threadLocal(), config.runtime(), server.localInfo(), - server.stats(), server.random(), server.messageValidationContext().dynamicValidationVisitor(), - server.api()); + server.stats(), server.api().randomGenerator(), + server.messageValidationContext().dynamicValidationVisitor(), server.api()); } void InstanceImpl::loadServerFlags(const absl::optional& flags_path) { @@ -662,13 +677,13 @@ void InstanceImpl::run() { // Run the main dispatch loop waiting to exit. ENVOY_LOG(info, "starting main dispatch loop"); - auto watchdog = - guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "main_thread"); + auto watchdog = main_thread_guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), + "main_thread"); watchdog->startWatchdog(*dispatcher_); dispatcher_->post([this] { notifyCallbacksForStage(Stage::Startup); }); dispatcher_->run(Event::Dispatcher::RunType::Block); ENVOY_LOG(info, "main dispatch loop exited"); - guard_dog_->stopWatching(watchdog); + main_thread_guard_dog_->stopWatching(watchdog); watchdog.reset(); terminate(); diff --git a/source/server/server.h b/source/server/server.h index 22d2a8dd2c9d..f9ce2954d581 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -59,6 +59,7 @@ namespace Server { COUNTER(static_unknown_fields) \ GAUGE(concurrency, NeverImport) \ GAUGE(days_until_first_cert_expiring, Accumulate) \ + GAUGE(seconds_until_first_ocsp_response_expiring, Accumulate) \ GAUGE(hot_restart_epoch, NeverImport) \ /* hot_restart_generation is an Accumulate gauge; we omit it here for testing dynamics. 
*/ \ GAUGE(live, NeverImport) \ @@ -162,7 +163,6 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, ProtobufMessage::ValidationContext& messageValidationContext() override { return server_.messageValidationContext(); } - Envoy::Random::RandomGenerator& random() override { return server_.random(); } Envoy::Runtime::Loader& runtime() override { return server_.runtime(); } Stats::Scope& scope() override { return *server_scope_; } Singleton::Manager& singletonManager() override { return server_.singletonManager(); } @@ -238,7 +238,6 @@ class InstanceImpl final : Logger::Loggable, Secret::SecretManager& secretManager() override { return *secret_manager_; } Envoy::MutexTracer* mutexTracer() override { return mutex_tracer_; } OverloadManager& overloadManager() override { return *overload_manager_; } - Random::RandomGenerator& random() override { return *random_generator_; } Runtime::Loader& runtime() override; void shutdown() override; bool isShutdown() final { return shutdown_; } @@ -324,12 +323,12 @@ class InstanceImpl final : Logger::Loggable, Assert::ActionRegistrationPtr assert_action_registration_; Assert::ActionRegistrationPtr envoy_bug_action_registration_; ThreadLocal::Instance& thread_local_; + Random::RandomGeneratorPtr random_generator_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; std::unique_ptr admin_; Singleton::ManagerPtr singleton_manager_; Network::ConnectionHandlerPtr handler_; - Random::RandomGeneratorPtr random_generator_; std::unique_ptr runtime_singleton_; std::unique_ptr ssl_context_manager_; ProdListenerComponentFactory listener_component_factory_; @@ -343,7 +342,8 @@ class InstanceImpl final : Logger::Loggable, DrainManagerPtr drain_manager_; AccessLog::AccessLogManagerImpl access_log_manager_; std::unique_ptr cluster_manager_factory_; - std::unique_ptr guard_dog_; + std::unique_ptr main_thread_guard_dog_; + std::unique_ptr worker_guard_dog_; bool terminated_; std::unique_ptr file_logger_; envoy::config::bootstrap::v3::Bootstrap bootstrap_; diff --git a/source/server/ssl_context_manager.cc b/source/server/ssl_context_manager.cc index 4573cdf6de2f..56462a36c377 100644 --- a/source/server/ssl_context_manager.cc +++ b/source/server/ssl_context_manager.cc @@ -26,6 +26,9 @@ class SslContextManagerNoTlsStub final : public Envoy::Ssl::ContextManager { } size_t daysUntilFirstCertExpires() const override { return std::numeric_limits::max(); } + absl::optional secondsUntilFirstOcspResponseExpires() const override { + return absl::nullopt; + } void iterateContexts(std::function /* callback */) override{}; diff --git a/source/server/transport_socket_config_impl.h b/source/server/transport_socket_config_impl.h index 560b9cf61aed..4ad45487e73d 100644 --- a/source/server/transport_socket_config_impl.h +++ b/source/server/transport_socket_config_impl.h @@ -12,16 +12,18 @@ namespace Configuration { */ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { public: - TransportSocketFactoryContextImpl( - Server::Admin& admin, Ssl::ContextManager& context_manager, Stats::Scope& stats_scope, - Upstream::ClusterManager& cm, const LocalInfo::LocalInfo& local_info, - Event::Dispatcher& dispatcher, Envoy::Random::RandomGenerator& random, Stats::Store& stats, - Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) + TransportSocketFactoryContextImpl(Server::Admin& admin, Ssl::ContextManager& context_manager, + Stats::Scope& stats_scope, 
Upstream::ClusterManager& cm, + const LocalInfo::LocalInfo& local_info, + Event::Dispatcher& dispatcher, Stats::Store& stats, + Singleton::Manager& singleton_manager, + ThreadLocal::SlotAllocator& tls, + ProtobufMessage::ValidationVisitor& validation_visitor, + Api::Api& api) : admin_(admin), context_manager_(context_manager), stats_scope_(stats_scope), - cluster_manager_(cm), local_info_(local_info), dispatcher_(dispatcher), random_(random), - stats_(stats), singleton_manager_(singleton_manager), tls_(tls), - validation_visitor_(validation_visitor), api_(api) {} + cluster_manager_(cm), local_info_(local_info), dispatcher_(dispatcher), stats_(stats), + singleton_manager_(singleton_manager), tls_(tls), validation_visitor_(validation_visitor), + api_(api) {} /** * Pass an init manager to register dynamic secret provider. @@ -39,7 +41,6 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } - Envoy::Random::RandomGenerator& random() override { return random_; } Stats::Store& stats() override { return stats_; } Init::Manager& initManager() override { ASSERT(init_manager_ != nullptr); @@ -59,7 +60,6 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Envoy::Random::RandomGenerator& random_; Stats::Store& stats_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index b5bbe8d91cbb..b659ffec6e06 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -14,13 +14,12 @@ namespace Envoy { namespace Server { -WorkerPtr ProdWorkerFactory::createWorker(OverloadManager& overload_manager, +WorkerPtr ProdWorkerFactory::createWorker(uint32_t index, OverloadManager& overload_manager, const std::string& worker_name) { Event::DispatcherPtr dispatcher(api_.allocateDispatcher(worker_name)); - return WorkerPtr{ - new WorkerImpl(tls_, hooks_, std::move(dispatcher), - Network::ConnectionHandlerPtr{new ConnectionHandlerImpl(*dispatcher)}, - overload_manager, api_)}; + return std::make_unique(tls_, hooks_, std::move(dispatcher), + std::make_unique(*dispatcher, index), + overload_manager, api_); } WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, @@ -32,6 +31,9 @@ WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, overload_manager.registerForAction( OverloadActionNames::get().StopAcceptingConnections, *dispatcher_, [this](OverloadActionState state) { stopAcceptingConnectionsCb(state); }); + overload_manager.registerForAction( + OverloadActionNames::get().RejectIncomingConnections, *dispatcher_, + [this](OverloadActionState state) { rejectIncomingConnectionsCb(state); }); } void WorkerImpl::addListener(absl::optional overridden_listener, @@ -150,5 +152,9 @@ void WorkerImpl::stopAcceptingConnectionsCb(OverloadActionState state) { } } +void WorkerImpl::rejectIncomingConnectionsCb(OverloadActionState state) { + handler_->setListenerRejectFraction(static_cast(state.value())); +} + } // namespace Server } // namespace Envoy diff --git a/source/server/worker_impl.h b/source/server/worker_impl.h index 4161c1abcc0a..22513b594e5d 100644 --- 
a/source/server/worker_impl.h +++ b/source/server/worker_impl.h @@ -23,7 +23,7 @@ class ProdWorkerFactory : public WorkerFactory, Logger::Loggable { private: void threadRoutine(GuardDog& guard_dog); void stopAcceptingConnectionsCb(OverloadActionState state); + void rejectIncomingConnectionsCb(OverloadActionState state); ThreadLocal::Instance& tls_; ListenerHooks& hooks_; diff --git a/test/benchmark/BUILD b/test/benchmark/BUILD index afcb2602898d..7d14fd5f4cb7 100644 --- a/test/benchmark/BUILD +++ b/test/benchmark/BUILD @@ -18,6 +18,9 @@ envoy_cc_test_library( ], deps = [ "//source/common/common:minimal_logger_lib", + "//source/common/common:thread_lib", "//test/test_common:environment_lib", + "//test/test_common:printers_lib", + "//test/test_common:test_runtime_lib", ], ) diff --git a/test/benchmark/main.cc b/test/benchmark/main.cc index 3c79ff36b2e0..ee248c84cd85 100644 --- a/test/benchmark/main.cc +++ b/test/benchmark/main.cc @@ -3,8 +3,10 @@ #include "test/benchmark/main.h" #include "common/common/logger.h" +#include "common/common/thread.h" #include "test/test_common/environment.h" +#include "test/test_common/test_runtime.h" #include "benchmark/benchmark.h" #include "tclap/CmdLine.h" @@ -21,10 +23,20 @@ static bool skip_expensive_benchmarks = false; int main(int argc, char** argv) { TestEnvironment::initializeTestMain(argv[0]); + // Suppressing non-error messages in benchmark tests. This hides warning + // messages that appear when using a runtime feature when there isn't an initialized + // runtime, and may have non-negligible impact on performance. + // TODO(adisuissa): This should be configurable, similarly to unit tests. + const spdlog::level::level_enum default_log_level = spdlog::level::err; + Envoy::Logger::Registry::setLogLevel(default_log_level); + // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) TCLAP::CmdLine cmd("envoy-benchmark-test", ' ', "0.1"); TCLAP::SwitchArg skip_switch("s", "skip_expensive_benchmarks", "skip or minimize expensive benchmarks", cmd, false); + TCLAP::MultiArg runtime_features( + "r", "runtime_feature", "runtime feature settings each of the form: :", + false, "string", cmd); cmd.setExceptionHandling(false); try { @@ -35,8 +47,36 @@ int main(int argc, char** argv) { return 0; } + // Reduce logs so benchmark output is readable. + Thread::MutexBasicLockable lock; + Logger::Context logging_context{spdlog::level::warn, Logger::Context::getFancyLogFormat(), lock, + false}; + skip_expensive_benchmarks = skip_switch.getValue(); + // Initialize scoped_runtime if a runtime_feature argument is present. This + // allows benchmarks to use their own scoped_runtime in case no runtime flag is + // passed as an argument. + std::unique_ptr scoped_runtime = nullptr; + const auto& runtime_features_args = runtime_features.getValue(); + for (const absl::string_view runtime_feature_arg : runtime_features_args) { + if (scoped_runtime == nullptr) { + scoped_runtime = std::make_unique(); + } + // Make sure the argument contains a single ":" character. 
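Aside: the benchmark main() above accepts repeated --runtime_feature flags of the form name:value and merges them into a scoped runtime; the validation around this comment reduces to a split-and-check. A self-contained version of that parsing step (the function name and output map are illustrative, not part of the patch):

#include <iostream>
#include <map>
#include <string>
#include <vector>
#include "absl/strings/str_split.h"

// Parse "name:value" pairs; returns false (and reports the bad flag) when a
// pair does not contain exactly one ':' separator, matching the patch's check.
bool parseRuntimeOverrides(const std::vector<std::string>& args,
                           std::map<std::string, std::string>& out) {
  for (const std::string& arg : args) {
    const std::vector<std::string> parts = absl::StrSplit(arg, ':');
    if (parts.size() != 2) {
      std::cerr << "runtime flag \"" << arg << "\" must be <name>:<value>\n";
      return false;
    }
    out[parts[0]] = parts[1];
  }
  return true;
}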
+ const std::vector runtime_feature_split = absl::StrSplit(runtime_feature_arg, ':'); + if (runtime_feature_split.size() != 2) { + ENVOY_LOG_MISC(critical, + "Given runtime flag \"{}\" should have a single ':' separating the flag name " + "and its value.", + runtime_feature_arg); + return 1; + } + const auto feature_name = runtime_feature_split[0]; + const auto feature_val = runtime_feature_split[1]; + Runtime::LoaderSingleton::getExisting()->mergeValues({{feature_name, feature_val}}); + } + ::benchmark::Initialize(&argc, argv); if (skip_expensive_benchmarks) { diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index a2f02c0e2941..0b6ce4734f9d 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -283,13 +283,13 @@ name: accesslog InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value is taken from random generator. - EXPECT_CALL(context_.random_, random()).WillOnce(Return(42)); + EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(42)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("access_log.test_key", 0, 42, 100)) .WillOnce(Return(true)); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); - EXPECT_CALL(context_.random_, random()).WillOnce(Return(43)); + EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(43)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("access_log.test_key", 0, 43, 100)) .WillOnce(Return(false)); EXPECT_CALL(*file_, write(_)).Times(0); @@ -326,13 +326,13 @@ name: accesslog InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value is taken from random generator. - EXPECT_CALL(context_.random_, random()).WillOnce(Return(42)); + EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(42)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("access_log.test_key", 5, 42, 10000)) .WillOnce(Return(true)); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); - EXPECT_CALL(context_.random_, random()).WillOnce(Return(43)); + EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(43)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("access_log.test_key", 5, 43, 10000)) .WillOnce(Return(false)); EXPECT_CALL(*file_, write(_)).Times(0); @@ -370,13 +370,13 @@ name: accesslog // Value should not be taken from x-request-id. 
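Aside: the EXPECT_CALL changes above only move where the random value comes from (context_.api_.random_ instead of context_.random_); the sampling decision the runtime filter makes is unchanged and, simplified, compares the random value modulo the denominator against the configured numerator. In isolation, that check looks like this (a simplification, not the runtime snapshot's exact implementation):

#include <cstdint>
#include <random>

// Percentage-style sampling: log when (random % denominator) < numerator,
// e.g. numerator=5, denominator=10000 samples roughly 0.05% of requests.
bool shouldSample(uint64_t random_value, uint64_t numerator, uint64_t denominator) {
  return denominator != 0 && (random_value % denominator) < numerator;
}

int main() {
  std::mt19937_64 rng(42);
  int sampled = 0;
  for (int i = 0; i < 1000000; ++i) {
    sampled += shouldSample(rng(), 5, 10000) ? 1 : 0;
  }
  // Expect on the order of 500 samples out of a million draws.
  return sampled > 0 ? 0 : 1;
}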
request_headers_.addCopy("x-request-id", "000000ff-0000-0000-0000-000000000000"); - EXPECT_CALL(context_.random_, random()).WillOnce(Return(42)); + EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(42)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("access_log.test_key", 5, 42, 1000000)) .WillOnce(Return(true)); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); - EXPECT_CALL(context_.random_, random()).WillOnce(Return(43)); + EXPECT_CALL(context_.api_.random_, random()).WillOnce(Return(43)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("access_log.test_key", 5, 43, 1000000)) .WillOnce(Return(false)); EXPECT_CALL(*file_, write(_)).Times(0); @@ -949,12 +949,13 @@ name: accesslog - UMSDR - RFCF - NFCF + - DT typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /dev/null )EOF"; - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x200000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x400000, "A flag has been added. Fix this code."); const std::vector all_response_flags = { @@ -979,7 +980,8 @@ name: accesslog StreamInfo::ResponseFlag::DownstreamProtocolError, StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached, StreamInfo::ResponseFlag::ResponseFromCacheFilter, - StreamInfo::ResponseFlag::NoFilterConfigFound}; + StreamInfo::ResponseFlag::NoFilterConfigFound, + StreamInfo::ResponseFlag::DurationTimeout}; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); @@ -1011,7 +1013,7 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\"]]): name: " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\" \"DT\"]]): name: " "\"accesslog\"\nfilter {\n " " " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " @@ -1039,7 +1041,7 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\"]]): name: " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\" \"DT\"]]): name: " "\"accesslog\"\nfilter {\n " " " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 5ab1bd85c4ae..4128ceea866d 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -144,15 +144,6 @@ class StringBuffer : public Buffer::Instance { src.size_ -= length; } - Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override { - FUZZ_ASSERT(start_ + size_ + max_length <= data_.size()); - Buffer::RawSlice slice{mutableEnd(), max_length}; - Api::IoCallUint64Result result = io_handle.readv(max_length, &slice, 1); - FUZZ_ASSERT(result.ok() && result.rc_ > 0); - size_ += result.rc_; - return result; - } - uint64_t reserve(uint64_t length, Buffer::RawSlice* iovecs, uint64_t num_iovecs) override { FUZZ_ASSERT(num_iovecs > 0); FUZZ_ASSERT(start_ + size_ + length <= data_.size()); @@ -172,15 +163,6 @@ class StringBuffer : public 
Buffer::Instance { std::string toString() const override { return std::string(data_.data() + start_, size_); } - Api::IoCallUint64Result write(Network::IoHandle& io_handle) override { - const Buffer::RawSlice slice{const_cast(start()), size_}; - Api::IoCallUint64Result result = io_handle.writev(&slice, 1); - FUZZ_ASSERT(result.ok()); - start_ += result.rc_; - size_ -= result.rc_; - return result; - } - absl::string_view asStringView() const { return {start(), size_}; } char* mutableStart() { return data_.data() + start_; } @@ -355,7 +337,7 @@ uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, Buff std::string data(max_length, insert_value); const ssize_t rc = ::write(pipe_fds[1], data.data(), max_length); FUZZ_ASSERT(rc > 0); - Api::IoCallUint64Result result = target_buffer.read(io_handle, max_length); + Api::IoCallUint64Result result = io_handle.read(target_buffer, max_length); FUZZ_ASSERT(result.rc_ == static_cast(rc)); FUZZ_ASSERT(::close(pipe_fds[1]) == 0); break; @@ -370,7 +352,7 @@ uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, Buff do { const bool empty = target_buffer.length() == 0; const std::string previous_data = target_buffer.toString(); - const auto result = target_buffer.write(io_handle); + const auto result = io_handle.write(target_buffer); FUZZ_ASSERT(result.ok()); rc = result.rc_; ENVOY_LOG_MISC(trace, "Write rc: {} errno: {}", rc, diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index ce7ec99e3847..dc15d80b4b5d 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -269,45 +269,45 @@ TEST_F(OwnedImplTest, Write) { Network::IoSocketHandleImpl io_handle; buffer.add("example"); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{7, 0})); - Api::IoCallUint64Result result = buffer.write(io_handle); + Api::IoCallUint64Result result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); EXPECT_EQ(7, result.rc_); EXPECT_EQ(0, buffer.length()); buffer.add("example"); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{6, 0})); - result = buffer.write(io_handle); + result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); EXPECT_EQ(6, result.rc_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0})); - result = buffer.write(io_handle); + result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0})); - result = buffer.write(io_handle); + result = io_handle.write(buffer); EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)) .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); - result = buffer.write(io_handle); + result = io_handle.write(buffer); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{1, 0})); - result = buffer.write(io_handle); + result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); EXPECT_EQ(1, result.rc_); EXPECT_EQ(0, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).Times(0); - result = buffer.write(io_handle); + result = io_handle.write(buffer); 
EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); } @@ -319,14 +319,14 @@ TEST_F(OwnedImplTest, Read) { Buffer::OwnedImpl buffer; Network::IoSocketHandleImpl io_handle; EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0})); - Api::IoCallUint64Result result = buffer.read(io_handle, 100); + Api::IoCallUint64Result result = io_handle.read(buffer, 100); EXPECT_TRUE(result.ok()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0})); - result = buffer.read(io_handle, 100); + result = io_handle.read(buffer, 100); EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); @@ -334,14 +334,14 @@ TEST_F(OwnedImplTest, Read) { EXPECT_CALL(os_sys_calls, readv(_, _, _)) .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); - result = buffer.read(io_handle, 100); + result = io_handle.read(buffer, 100); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); EXPECT_CALL(os_sys_calls, readv(_, _, _)).Times(0); - result = buffer.read(io_handle, 0); + result = io_handle.read(buffer, 0); EXPECT_EQ(0, result.rc_); EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); @@ -1161,7 +1161,7 @@ TEST_F(OwnedImplTest, ReserveZeroCommit) { const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), max_length).rc_; ASSERT_GT(rc, 0); const uint32_t previous_length = buf.length(); - Api::IoCallUint64Result result = buf.read(io_handle, max_length); + Api::IoCallUint64Result result = io_handle.read(buf, max_length); ASSERT_EQ(result.rc_, static_cast(rc)); ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length, 0)); @@ -1189,7 +1189,7 @@ TEST_F(OwnedImplTest, ReadReserveAndCommit) { std::string data = "e"; const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), data.size()).rc_; ASSERT_GT(rc, 0); - Api::IoCallUint64Result result = buf.read(io_handle, read_length); + Api::IoCallUint64Result result = io_handle.read(buf, read_length); ASSERT_EQ(result.rc_, static_cast(rc)); ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); EXPECT_EQ("bbbbbe", buf.toString()); diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index 45350e3136b9..1ce8149f3dac 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -253,7 +253,7 @@ TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { int bytes_written_total = 0; Network::IoSocketHandleImpl io_handle1(pipe_fds[1]); while (bytes_written_total < 20) { - Api::IoCallUint64Result result = buffer_.write(io_handle1); + Api::IoCallUint64Result result = io_handle1.write(buffer_); if (!result.ok()) { ASSERT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); } else { @@ -267,7 +267,7 @@ TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { int bytes_read_total = 0; Network::IoSocketHandleImpl io_handle2(pipe_fds[0]); while (bytes_read_total < 20) { - Api::IoCallUint64Result result = buffer_.read(io_handle2, 20); + Api::IoCallUint64Result result = io_handle2.read(buffer_, 20); bytes_read_total += result.rc_; } EXPECT_EQ(2, 
times_high_watermark_called_); diff --git a/test/common/common/log_macros_test.cc b/test/common/common/log_macros_test.cc index 5625a5f57056..39ebeff2d47f 100644 --- a/test/common/common/log_macros_test.cc +++ b/test/common/common/log_macros_test.cc @@ -50,33 +50,33 @@ TEST(Logger, All) { ENVOY_LOG_MISC(info, "fake message"); } -TEST(Logger, evaluateParams) { +TEST(Logger, EvaluateParams) { uint32_t i = 1; // Set logger's level to low level. // Log message with higher severity and make sure that params were evaluated. - GET_MISC_LOGGER().set_level(spdlog::level::info); + LogLevelSetter save_levels(spdlog::level::info); ENVOY_LOG_MISC(warn, "test message '{}'", i++); EXPECT_THAT(i, testing::Eq(2)); } -TEST(Logger, doNotEvaluateParams) { +TEST(Logger, DoNotEvaluateParams) { uint32_t i = 1; // Set logger's logging level high and log a message with lower severity // params should not be evaluated. - GET_MISC_LOGGER().set_level(spdlog::level::critical); + LogLevelSetter save_levels(spdlog::level::critical); ENVOY_LOG_MISC(error, "test message '{}'", i++); EXPECT_THAT(i, testing::Eq(1)); } -TEST(Logger, logAsStatement) { +TEST(Logger, LogAsStatement) { // Just log as part of if ... statement uint32_t i = 1, j = 1; // Set logger's logging level to high - GET_MISC_LOGGER().set_level(spdlog::level::critical); + LogLevelSetter save_levels(spdlog::level::critical); // Make sure that if statement inside of LOGGER macro does not catch trailing // else .... @@ -99,7 +99,7 @@ TEST(Logger, logAsStatement) { EXPECT_THAT(j, testing::Eq(1)); } -TEST(Logger, checkLoggerLevel) { +TEST(Logger, CheckLoggerLevel) { class LogTestClass : public Logger::Loggable { public: void setLevel(const spdlog::level::level_enum level) { ENVOY_LOGGER().set_level(level); } @@ -142,69 +142,124 @@ void spamCall(std::function&& call_to_spam, const uint32_t num_threads) } } -TEST(Logger, SparseLogMacros) { - class SparseLogMacrosTestHelper : public Logger::Loggable { - public: - SparseLogMacrosTestHelper() { ENVOY_LOGGER().set_level(spdlog::level::info); } - void logSomething() { ENVOY_LOG_ONCE(error, "foo1 '{}'", evaluations()++); } - void logSomethingElse() { ENVOY_LOG_ONCE(error, "foo2 '{}'", evaluations()++); } - void logSomethingBelowLogLevelOnce() { ENVOY_LOG_ONCE(debug, "foo3 '{}'", evaluations()++); } - void logSomethingThrice() { ENVOY_LOG_FIRST_N(error, 3, "foo4 '{}'", evaluations()++); } - void logEverySeventh() { ENVOY_LOG_EVERY_NTH(error, 7, "foo5 '{}'", evaluations()++); } - void logEveryPow2() { ENVOY_LOG_EVERY_POW_2(error, "foo6 '{}'", evaluations()++); } - void logEverySecond() { ENVOY_LOG_PERIODIC(error, 1s, "foo7 '{}'", evaluations()++); } - std::atomic& evaluations() { MUTABLE_CONSTRUCT_ON_FIRST_USE(std::atomic); }; - }; +class SparseLogMacrosTest : public testing::TestWithParam, + public Logger::Loggable { +public: + SparseLogMacrosTest() : use_misc_macros_(GetParam()) { evaluations() = 0; } + + void logSomething() { + if (use_misc_macros_) { + ENVOY_LOG_ONCE_MISC(error, "foo1 '{}'", evaluations()++); + } else { + ENVOY_LOG_ONCE(error, "foo1 '{}'", evaluations()++); + } + } + + void logSomethingElse() { + if (use_misc_macros_) { + ENVOY_LOG_ONCE_MISC(error, "foo2 '{}'", evaluations()++); + } else { + ENVOY_LOG_ONCE(error, "foo2 '{}'", evaluations()++); + } + } + + void logSomethingBelowLogLevelOnce() { + if (use_misc_macros_) { + ENVOY_LOG_ONCE_MISC(debug, "foo3 '{}'", evaluations()++); + } else { + ENVOY_LOG_ONCE(debug, "foo3 '{}'", evaluations()++); + } + } + + void logSomethingThrice() { + if 
(use_misc_macros_) { + ENVOY_LOG_FIRST_N_MISC(error, 3, "foo4 '{}'", evaluations()++); + } else { + ENVOY_LOG_FIRST_N(error, 3, "foo4 '{}'", evaluations()++); + } + } + + void logEverySeventh() { + if (use_misc_macros_) { + ENVOY_LOG_EVERY_NTH_MISC(error, 7, "foo5 '{}'", evaluations()++); + } else { + ENVOY_LOG_EVERY_NTH(error, 7, "foo5 '{}'", evaluations()++); + } + } + + void logEveryPow2() { + if (use_misc_macros_) { + ENVOY_LOG_EVERY_POW_2_MISC(error, "foo6 '{}'", evaluations()++); + } else { + ENVOY_LOG_EVERY_POW_2(error, "foo6 '{}'", evaluations()++); + } + } + + void logEverySecond() { + if (use_misc_macros_) { + ENVOY_LOG_PERIODIC_MISC(error, 1s, "foo7 '{}'", evaluations()++); + } else { + ENVOY_LOG_PERIODIC(error, 1s, "foo7 '{}'", evaluations()++); + } + } + std::atomic& evaluations() { MUTABLE_CONSTRUCT_ON_FIRST_USE(std::atomic); }; + + const bool use_misc_macros_; + LogLevelSetter save_levels_{spdlog::level::info}; +}; + +INSTANTIATE_TEST_SUITE_P(MiscOrNot, SparseLogMacrosTest, testing::Values(false, true)); + +TEST_P(SparseLogMacrosTest, All) { constexpr uint32_t kNumThreads = 100; - SparseLogMacrosTestHelper helper; spamCall( - [&helper]() { - helper.logSomething(); - helper.logSomething(); + [this]() { + logSomething(); + logSomething(); }, kNumThreads); - EXPECT_EQ(1, helper.evaluations()); + EXPECT_EQ(1, evaluations()); spamCall( - [&helper]() { - helper.logSomethingElse(); - helper.logSomethingElse(); + [this]() { + logSomethingElse(); + logSomethingElse(); }, kNumThreads); // Two distinct log lines ought to result in two evaluations, and no more. - EXPECT_EQ(2, helper.evaluations()); + EXPECT_EQ(2, evaluations()); - spamCall([&helper]() { helper.logSomethingThrice(); }, kNumThreads); + spamCall([this]() { logSomethingThrice(); }, kNumThreads); // Single log line should be emitted 3 times. - EXPECT_EQ(5, helper.evaluations()); + EXPECT_EQ(5, evaluations()); - spamCall([&helper]() { helper.logEverySeventh(); }, kNumThreads); + spamCall([this]() { logEverySeventh(); }, kNumThreads); // (100 threads / log every 7th) + 1s = 15 more evaluations upon logging very 7th. - EXPECT_EQ(20, helper.evaluations()); + EXPECT_EQ(20, evaluations()); - helper.logEveryPow2(); + logEveryPow2(); // First call ought to propagate. - EXPECT_EQ(21, helper.evaluations()); + EXPECT_EQ(21, evaluations()); - spamCall([&helper]() { helper.logEveryPow2(); }, kNumThreads); + spamCall([this]() { logEveryPow2(); }, kNumThreads); // 64 is the highest power of two that fits when kNumThreads == 100. // We should log on 2, 4, 8, 16, 32, 64, which means we can expect to add 6 more evaluations. - EXPECT_EQ(27, helper.evaluations()); + EXPECT_EQ(27, evaluations()); - spamCall([&helper]() { helper.logEverySecond(); }, kNumThreads); + spamCall([this]() { logEverySecond(); }, kNumThreads); // First call ought to evaluate. - EXPECT_EQ(28, helper.evaluations()); + EXPECT_EQ(28, evaluations()); // We expect one log entry / second. Therefore each spamCall ought to result in one // more evaluation. This depends on real time and not sim time, hopefully 1 second // is enough to not introduce flakes in practice. 
std::this_thread::sleep_for(1s); // NOLINT - spamCall([&helper]() { helper.logEverySecond(); }, kNumThreads); - EXPECT_EQ(29, helper.evaluations()); + spamCall([this]() { logEverySecond(); }, kNumThreads); + EXPECT_EQ(29, evaluations()); - spamCall([&helper]() { helper.logSomethingBelowLogLevelOnce(); }, kNumThreads); + spamCall([this]() { logSomethingBelowLogLevelOnce(); }, kNumThreads); // We shouldn't observe additional argument evaluations for log lines below the configured // log level. - EXPECT_EQ(29, helper.evaluations()); + EXPECT_EQ(29, evaluations()); } TEST(RegistryTest, LoggerWithName) { diff --git a/test/common/common/random_generator_test.cc b/test/common/common/random_generator_test.cc index b2098f987b62..71bf25624740 100644 --- a/test/common/common/random_generator_test.cc +++ b/test/common/common/random_generator_test.cc @@ -67,6 +67,24 @@ TEST(UUID, SanityCheckOfUniqueness) { EXPECT_EQ(num_of_uuids, uuids.size()); } +TEST(Random, Bernoilli) { + Random::RandomGeneratorImpl random; + + EXPECT_FALSE(random.bernoulli(0)); + EXPECT_FALSE(random.bernoulli(-1)); + EXPECT_TRUE(random.bernoulli(1)); + EXPECT_TRUE(random.bernoulli(2)); + + int true_count = 0; + static const auto num_rolls = 100000; + for (size_t i = 0; i < num_rolls; ++i) { + if (random.bernoulli(0.4)) { + ++true_count; + } + } + EXPECT_NEAR(static_cast(true_count) / num_rolls, 0.4, 0.01); +} + } // namespace } // namespace Random } // namespace Envoy diff --git a/test/common/config/BUILD b/test/common/config/BUILD index e870e01a733a..f53ca9aacb69 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -88,6 +88,7 @@ envoy_cc_test( "//test/mocks/filesystem:filesystem_mocks", "//test/test_common:logging_lib", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], ) @@ -131,6 +132,7 @@ envoy_cc_test( "//test/test_common:logging_lib", "//test/test_common:resources_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", @@ -156,6 +158,7 @@ envoy_cc_test( "//test/test_common:logging_lib", "//test/test_common:resources_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", @@ -275,6 +278,7 @@ envoy_cc_test( srcs = ["subscription_factory_impl_test.cc"], deps = [ "//source/common/config:subscription_factory_lib", + "//source/common/config:udpa_resource_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", "//test/mocks/filesystem:filesystem_mocks", diff --git a/test/common/config/api_type_oracle_test.cc b/test/common/config/api_type_oracle_test.cc index 327d4dc32e54..a2454953c3bb 100644 --- a/test/common/config/api_type_oracle_test.cc +++ b/test/common/config/api_type_oracle_test.cc @@ -27,6 +27,9 @@ TEST(ApiTypeOracleTest, All) { EXPECT_EQ(envoy::config::filter::http::ip_tagging::v2::IPTagging::descriptor()->full_name(), ApiTypeOracle::getEarlierVersionMessageTypeName(v3_config.GetDescriptor()->full_name()) .value()); + EXPECT_EQ("envoy.config.filter.http.ip_tagging.v2.IPTagging", + TypeUtil::typeUrlToDescriptorFullName( + "type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging")); } } // namespace diff --git a/test/common/config/datasource_test.cc 
b/test/common/config/datasource_test.cc index d6715bbb19d2..843f6e6ec1a4 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -244,7 +244,7 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccessIncorrectSha256) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { Http::ResponseMessagePtr response(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response->body() = std::make_unique(body); + response->body().add(body); callbacks.onSuccess(request_, std::move(response)); return nullptr; @@ -289,7 +289,7 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccess) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { Http::ResponseMessagePtr response(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response->body() = std::make_unique(body); + response->body().add(body); callbacks.onSuccess(request_, std::move(response)); return nullptr; @@ -371,7 +371,7 @@ TEST_F(AsyncDataSourceTest, DatasourceReleasedBeforeFetchingData) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { Http::ResponseMessagePtr response(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response->body() = std::make_unique(body); + response->body().add(body); callbacks.onSuccess(request_, std::move(response)); return nullptr; @@ -446,7 +446,7 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceWithRetry) { Http::ResponseMessagePtr response( new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response->body() = std::make_unique(body); + response->body().add(body); callbacks.onSuccess(request_, std::move(response)); return nullptr; diff --git a/test/common/config/dummy_config.proto b/test/common/config/dummy_config.proto index ae32e1477e04..1bf5df3d3202 100644 --- a/test/common/config/dummy_config.proto +++ b/test/common/config/dummy_config.proto @@ -1,4 +1,4 @@ -// Provides protos for testing source/common/config/config_provider_impl.{h,cc}. +// Provides protos for testing. syntax = "proto3"; diff --git a/test/common/config/filesystem_subscription_impl_test.cc b/test/common/config/filesystem_subscription_impl_test.cc index cee04cea212a..6d97fda9981d 100644 --- a/test/common/config/filesystem_subscription_impl_test.cc +++ b/test/common/config/filesystem_subscription_impl_test.cc @@ -1,4 +1,6 @@ #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/listener/v3/listener.pb.validate.h" #include "test/common/config/filesystem_subscription_test_harness.h" #include "test/mocks/event/mocks.h" @@ -79,6 +81,221 @@ TEST_F(FilesystemSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, "0")); } +// TODO(htuch): Add generic test harness support for collection subscriptions so that we can test +// gRPC/HTTP transports similar to below. 
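[Editor's note] The test harness that follows simulates an xDS config delivery by rewriting the watched file and then invoking the captured MovedTo watcher callback by hand. Purely as an illustrative aside (it is not part of this patch, and the helper name and paths are invented for the example), the sketch below shows the write-temp-then-rename pattern that kind of update relies on, using only the C++ standard library:

#include <cstdio>
#include <fstream>
#include <stdexcept>
#include <string>

// Atomically replace `path` with `contents`: write a sibling temp file first, then
// rename it over the target, so a file watcher only ever observes a complete file.
void replaceFileAtomically(const std::string& path, const std::string& contents) {
  const std::string tmp_path = path + ".tmp";
  {
    std::ofstream out(tmp_path, std::ios::binary | std::ios::trunc);
    out << contents;
    if (!out) {
      throw std::runtime_error("failed to write " + tmp_path);
    }
  }
  // std::rename maps to rename(2), which is atomic on POSIX when both paths are on
  // the same filesystem; the watcher then sees a single MovedTo-style event.
  if (std::rename(tmp_path.c_str(), path.c_str()) != 0) {
    throw std::runtime_error("failed to rename " + tmp_path + " -> " + path);
  }
}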
+class FilesystemCollectionSubscriptionImplTest : public testing::Test, + Event::TestUsingSimulatedTime { +public: + FilesystemCollectionSubscriptionImplTest() + : path_(TestEnvironment::temporaryPath("lds.yaml")), + stats_(Utility::generateStats(stats_store_)), + api_(Api::createApiForTest(stats_store_, simTime())), dispatcher_(setupDispatcher()), + subscription_(*dispatcher_, path_, callbacks_, resource_decoder_, stats_, + ProtobufMessage::getStrictValidationVisitor(), *api_) {} + ~FilesystemCollectionSubscriptionImplTest() override { TestEnvironment::removePath(path_); } + + Event::DispatcherPtr setupDispatcher() { + auto dispatcher = std::make_unique(); + EXPECT_CALL(*dispatcher, createFilesystemWatcher_()).WillOnce(InvokeWithoutArgs([this] { + Filesystem::MockWatcher* mock_watcher = new Filesystem::MockWatcher(); + EXPECT_CALL(*mock_watcher, addWatch(path_, Filesystem::Watcher::Events::MovedTo, _)) + .WillOnce(Invoke([this](absl::string_view, uint32_t, + Filesystem::Watcher::OnChangedCb cb) { on_changed_cb_ = cb; })); + return mock_watcher; + })); + return dispatcher; + } + + void updateFile(const std::string& yaml) { + // Write YAML contents to file, rename to path_ and invoke on change callback + const std::string temp_path = TestEnvironment::writeStringToFileForTest("lds.yaml.tmp", yaml); + TestEnvironment::renameFile(temp_path, path_); + on_changed_cb_(Filesystem::Watcher::Events::MovedTo); + } + + AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, + uint64_t version, absl::string_view version_text) { + if (attempt != stats_.update_attempt_.value()) { + return testing::AssertionFailure() << "update_attempt: expected " << attempt << ", got " + << stats_.update_attempt_.value(); + } + if (success != stats_.update_success_.value()) { + return testing::AssertionFailure() << "update_success: expected " << success << ", got " + << stats_.update_success_.value(); + } + if (rejected != stats_.update_rejected_.value()) { + return testing::AssertionFailure() << "update_rejected: expected " << rejected << ", got " + << stats_.update_rejected_.value(); + } + // The first attempt always fail. + if (1 + failure != stats_.update_failure_.value()) { + return testing::AssertionFailure() << "update_failure: expected " << 1 + failure << ", got " + << stats_.update_failure_.value(); + } + if (version != stats_.version_.value()) { + return testing::AssertionFailure() + << "version: expected " << version << ", got " << stats_.version_.value(); + } + if (version_text != stats_.version_text_.value()) { + return testing::AssertionFailure() + << "version_text: expected " << version << ", got " << stats_.version_text_.value(); + } + return testing::AssertionSuccess(); + } + + const std::string path_; + Stats::IsolatedStoreImpl stats_store_; + SubscriptionStats stats_; + Api::ApiPtr api_; + Event::DispatcherPtr dispatcher_; + Filesystem::Watcher::OnChangedCb on_changed_cb_; + NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"name"}; + FilesystemCollectionSubscriptionImpl subscription_; +}; + +// Validate that an initial collection load succeeds, followed by a successful update, for inline +// entries. +TEST_F(FilesystemCollectionSubscriptionImplTest, InlineEntrySuccess) { + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("name"); + subscription_.start({}); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, "")); + // Initial config load. 
+ const auto inline_entry = + TestUtility::parseYaml(R"EOF( +name: foo +version: resource.1 +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: foo + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + )EOF"); + const std::string resource = fmt::format(R"EOF( +version: system.1 +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.ListenerCollection + entries: + - inline_entry: {} + )EOF", + MessageUtil::getJsonStringFromMessage(inline_entry)); + DecodedResourcesWrapper decoded_resources; + decoded_resources.pushBack(std::make_unique(resource_decoder, inline_entry)); + EXPECT_CALL(callbacks_, + onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), "system.1")); + updateFile(resource); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 1471442407191366964, "system.1")); + // Update. + const auto inline_entry_2 = + TestUtility::parseYaml(R"EOF( +name: foo +version: resource.2 +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: foo + address: + socket_address: + protocol: TCP + address: 0.0.0.1 + port_value: 10001 + )EOF"); + const std::string resource_2 = fmt::format(R"EOF( +version: system.2 +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.ListenerCollection + entries: + - inline_entry: {} + )EOF", + MessageUtil::getJsonStringFromMessage(inline_entry_2)); + { + DecodedResourcesWrapper decoded_resources_2; + decoded_resources_2.pushBack( + std::make_unique(resource_decoder, inline_entry_2)); + EXPECT_CALL(callbacks_, + onConfigUpdate(DecodedResourcesEq(decoded_resources_2.refvec_), "system.2")); + updateFile(resource_2); + } + EXPECT_TRUE(statsAre(3, 2, 0, 0, 17889017004055064037ULL, "system.2")); +} + +// Validate handling of invalid resource wrappers +TEST_F(FilesystemCollectionSubscriptionImplTest, BadEnvelope) { + subscription_.start({}); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, "")); + EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, _)); + // Unknown collection type. + updateFile("{}"); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, "")); + const std::string resource = R"EOF( +version: system.1 +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.Listener + )EOF"; + EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, _)); + // Invalid collection type structure. + updateFile(resource); + EXPECT_TRUE(statsAre(3, 0, 0, 2, 0, "")); +} + +// Validate handling of unknown fields. +TEST_F(FilesystemCollectionSubscriptionImplTest, UnknownFields) { + subscription_.start({}); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, "")); + const std::string resource = R"EOF( +version: system.1 +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.ListenerCollection + entries: + - inline_entry: + name: foo + version: resource.1 + resource: + "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: foo + unknown_bar: baz + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + )EOF"; + EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, _)); + updateFile(resource); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, "")); +} + +// Validate handling of rejected config. 
+TEST_F(FilesystemCollectionSubscriptionImplTest, ConfigRejection) { + subscription_.start({}); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, "")); + const std::string resource = R"EOF( +version: system.1 +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.ListenerCollection + entries: + - inline_entry: + name: foo + version: resource.1 + resource: + "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: foo + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + )EOF"; + EXPECT_CALL(callbacks_, onConfigUpdate(_, _)).WillOnce(Throw(EnvoyException("blah"))); + EXPECT_CALL(callbacks_, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, _)); + updateFile(resource); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, "")); +} + } // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 5a8bd21840db..8c869aa44b1f 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -24,6 +24,7 @@ #include "test/test_common/logging.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/test_time.h" #include "test/test_common/utility.h" @@ -722,6 +723,90 @@ TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyNodeName) { "ads: node 'id' and 'cluster' are required. Set it either in 'node' config or via " "--service-node and --service-cluster options."); } + +// Send discovery request with v2 resource type_url, receive discovery response with v3 resource +// type_url. +TEST_F(GrpcMuxImplTest, WatchV2ResourceV3) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade", "true"}}); + setup(); + + InSequence s; + const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment; + const std::string& v3_type_url = + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); + auto foo_sub = grpc_mux_->addWatch(v2_type_url, {}, callbacks_, resource_decoder); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(v2_type_url, {}, "", true); + grpc_mux_->start(); + + { + auto response = std::make_unique(); + response->set_type_url(v3_type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->PackFrom(load_assignment); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); + expectSendMessage(v2_type_url, {}, "1"); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); + } +} + +// Send discovery request with v3 resource type_url, receive discovery response with v2 resource +// type_url. 
+TEST_F(GrpcMuxImplTest, WatchV3ResourceV2) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade", "true"}}); + setup(); + + InSequence s; + const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment; + const std::string& v3_type_url = + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); + auto foo_sub = grpc_mux_->addWatch(v3_type_url, {}, callbacks_, resource_decoder); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(v3_type_url, {}, "", true); + grpc_mux_->start(); + + { + + auto response = std::make_unique(); + response->set_type_url(v2_type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment)); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); + expectSendMessage(v3_type_url, {}, "1"); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); + } +} + } // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/http_subscription_impl_test.cc b/test/common/config/http_subscription_impl_test.cc index abda847f03c4..33e54fb33162 100644 --- a/test/common/config/http_subscription_impl_test.cc +++ b/test/common/config/http_subscription_impl_test.cc @@ -32,7 +32,7 @@ TEST_F(HttpSubscriptionImplTest, BadJsonRecovery) { Http::ResponseHeaderMapPtr response_headers{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}}; Http::ResponseMessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))}; - message->body() = std::make_unique(";!@#badjso n"); + message->body().add(";!@#badjso n"); EXPECT_CALL(random_gen_, random()).WillOnce(Return(0)); EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(callbacks_, diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index 499a3982b344..9e64b0d944e2 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -139,7 +139,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::ResponseHeaderMapPtr response_headers{ new Http::TestResponseHeaderMapImpl{{":status", response_code}}}; Http::ResponseMessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))}; - message->body() = std::make_unique(response_json); + message->body().add(response_json); const auto decoded_resources = TestUtility::decodeResources( response_pb, "cluster_name"); diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index 3357e04b8a99..2bcf1ecd75de 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -21,6 +21,7 @@ #include "test/test_common/logging.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include 
"test/test_common/test_time.h" #include "test/test_common/utility.h" @@ -90,7 +91,8 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { std::make_unique(); unexpected_response->set_type_url(type_url); unexpected_response->set_system_version_info("0"); - EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "0")).Times(0); + // empty response should call onConfigUpdate on wildcard watch + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "0")); grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_); } { @@ -165,14 +167,104 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { response->add_resources(); response->mutable_resources()->at(0).set_name("not-found"); response->mutable_resources()->at(0).add_aliases("prefix/domain1.test"); +} - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); +// Watch v2 resource type_url, receive discovery response with v3 resource type_url. +TEST_F(NewGrpcMuxImplTest, V3ResourceResponseV2ResourceWatch) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade", "true"}}); + setup(); - const auto& subscriptions = grpc_mux_->subscriptions(); - auto sub = subscriptions.find(type_url); + // Watch for v2 resource type_url. + const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment; + const std::string& v3_type_url = + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + auto watch = grpc_mux_->addWatch(v2_type_url, {}, callbacks_, resource_decoder_); - EXPECT_TRUE(sub != subscriptions.end()); - watch->update({}); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + // Cluster is not watched, v3 resource is rejected. + grpc_mux_->start(); + { + auto unexpected_response = + std::make_unique(); + envoy::config::cluster::v3::Cluster cluster; + unexpected_response->set_type_url(Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)); + unexpected_response->set_system_version_info("0"); + unexpected_response->add_resources()->mutable_resource()->PackFrom(cluster); + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "0")).Times(0); + grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_); + } + // Cluster is not watched, v2 resource is rejected. + { + auto unexpected_response = + std::make_unique(); + envoy::config::cluster::v3::Cluster cluster; + unexpected_response->set_type_url(Config::TypeUrl::get().Cluster); + unexpected_response->set_system_version_info("0"); + unexpected_response->add_resources()->mutable_resource()->PackFrom(API_DOWNGRADE(cluster)); + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "0")).Times(0); + grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_); + } + // ClusterLoadAssignment v2 is watched, v3 resource will be accepted. + { + auto response = std::make_unique(); + response->set_system_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->mutable_resource()->PackFrom(load_assignment); + // Send response that contains resource with v3 type url. 
+ response->set_type_url(v3_type_url); + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "1")) + .WillOnce(Invoke([&load_assignment](const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, + const std::string&) { + EXPECT_EQ(1, added_resources.size()); + EXPECT_TRUE( + TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment)); + })); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } +} + +// Watch v3 resource type_url, receive discovery response with v2 resource type_url. +TEST_F(NewGrpcMuxImplTest, V2ResourceResponseV3ResourceWatch) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.enable_type_url_downgrade_and_upgrade", "true"}}); + setup(); + + // Watch for v3 resource type_url. + const std::string& v3_type_url = + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const std::string& v2_type_url = Config::TypeUrl::get().ClusterLoadAssignment; + auto watch = grpc_mux_->addWatch(v3_type_url, {}, callbacks_, resource_decoder_); + + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + + grpc_mux_->start(); + // ClusterLoadAssignment v3 is watched, v2 resource will be accepted. + { + auto response = std::make_unique(); + response->set_system_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->mutable_resource()->PackFrom(API_DOWNGRADE(load_assignment)); + // Send response that contains resource with v3 type url. + response->set_type_url(v2_type_url); + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "1")) + .WillOnce(Invoke([&load_assignment](const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, + const std::string&) { + EXPECT_EQ(1, added_resources.size()); + EXPECT_TRUE( + TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment)); + })); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } } } // namespace diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index 35724d3f2bb4..65a7e1b7bd12 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -9,6 +9,7 @@ #include "envoy/stats/scope.h" #include "common/config/subscription_factory_impl.h" +#include "common/config/udpa_resource.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" @@ -37,9 +38,9 @@ namespace { class SubscriptionFactoryTest : public testing::Test { public: SubscriptionFactoryTest() - : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_)), - subscription_factory_(local_info_, dispatcher_, cm_, random_, validation_visitor_, *api_, - runtime_) {} + : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_, random_)), + subscription_factory_(local_info_, dispatcher_, cm_, validation_visitor_, *api_, runtime_) { + } SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config) { @@ -48,6 +49,13 @@ class SubscriptionFactoryTest : public testing::Test { resource_decoder_); } + SubscriptionPtr collectionSubscriptionFromUrl(const std::string& udpa_url) { + const auto resource_locator = UdpaResourceIdentifier::decodeUrl(udpa_url); + return subscription_factory_.collectionSubscriptionFromUrl( + resource_locator, {}, 
Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, + callbacks_, resource_decoder_); + } + Upstream::MockClusterManager cm_; Event::MockDispatcher dispatcher_; Random::MockRandomGenerator random_; @@ -197,6 +205,23 @@ TEST_F(SubscriptionFactoryTest, FilesystemSubscriptionNonExistentFile) { "'/blahblah' does not exist") } +TEST_F(SubscriptionFactoryTest, FilesystemCollectionSubscription) { + std::string test_path = TestEnvironment::temporaryDirectory(); + auto* watcher = new Filesystem::MockWatcher(); + EXPECT_CALL(dispatcher_, createFilesystemWatcher_()).WillOnce(Return(watcher)); + EXPECT_CALL(*watcher, addWatch(test_path, _, _)); + EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)); + // Unix paths start with /, Windows with c:/. + const std::string file_path = test_path[0] == '/' ? test_path.substr(1) : test_path; + collectionSubscriptionFromUrl(fmt::format("file:///{}", file_path))->start({}); +} + +TEST_F(SubscriptionFactoryTest, FilesystemCollectionSubscriptionNonExistentFile){ + EXPECT_THROW_WITH_MESSAGE(collectionSubscriptionFromUrl("file:///blahblah")->start({}), + EnvoyException, + "envoy::api::v2::Path must refer to an existing path in the system: " + "'/blahblah' does not exist")} + TEST_F(SubscriptionFactoryTest, LegacySubscription) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); @@ -301,7 +326,7 @@ TEST_F(SubscriptionFactoryTest, LogWarningOnDeprecatedApi) { NiceMock snapshot; EXPECT_CALL(runtime_, snapshot()).WillRepeatedly(ReturnRef(snapshot)); EXPECT_CALL(snapshot, runtimeFeatureEnabled(_)).WillOnce(Return(true)); - EXPECT_CALL(snapshot, countDeprecatedFeatureUse()); + EXPECT_CALL(runtime_, countDeprecatedFeatureUse()); Upstream::ClusterManager::ClusterSet primary_clusters; primary_clusters.insert("static_cluster"); diff --git a/test/common/config/watch_map_test.cc b/test/common/config/watch_map_test.cc index ff26dee1e0d3..5a0f3c5fe0b6 100644 --- a/test/common/config/watch_map_test.cc +++ b/test/common/config/watch_map_test.cc @@ -128,6 +128,12 @@ TEST(WatchMapTest, Basic) { WatchMap watch_map(false); Watch* watch = watch_map.addWatch(callbacks, resource_decoder); + { + // nothing is interested, so become wildcard watch + // should callback with empty resource + expectDeltaAndSotwUpdate(callbacks, {}, {}, "version1"); + doDeltaAndSotwUpdate(watch_map, {}, {}, "version1"); + } { // The watch is interested in Alice and Bob... std::set update_to({"alice", "bob"}); diff --git a/test/common/conn_pool/conn_pool_base_test.cc b/test/common/conn_pool/conn_pool_base_test.cc index 282e66c18612..bf2b1946967c 100644 --- a/test/common/conn_pool/conn_pool_base_test.cc +++ b/test/common/conn_pool/conn_pool_base_test.cc @@ -11,6 +11,7 @@ namespace Envoy { namespace ConnectionPool { +using testing::AnyNumber; using testing::InvokeWithoutArgs; using testing::Return; @@ -75,7 +76,7 @@ class ConnPoolImplBaseTest : public testing::Test { TEST_F(ConnPoolImplBaseTest, BasicPrefetch) { // Create more than one connection per new stream. - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); // On new stream, create 2 connections. EXPECT_CALL(pool_, instantiateActiveClient).Times(2); @@ -89,7 +90,7 @@ TEST_F(ConnPoolImplBaseTest, PrefetchOnDisconnect) { testing::InSequence s; // Create more than one connection per new stream. 
- ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); // On new stream, create 2 connections. EXPECT_CALL(pool_, instantiateActiveClient).Times(2); @@ -109,7 +110,7 @@ TEST_F(ConnPoolImplBaseTest, PrefetchOnDisconnect) { TEST_F(ConnPoolImplBaseTest, NoPrefetchIfUnhealthy) { // Create more than one connection per new stream. - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); host_->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC); EXPECT_EQ(host_->health(), Upstream::Host::Health::Unhealthy); @@ -124,7 +125,7 @@ TEST_F(ConnPoolImplBaseTest, NoPrefetchIfUnhealthy) { TEST_F(ConnPoolImplBaseTest, NoPrefetchIfDegraded) { // Create more than one connection per new stream. - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); EXPECT_EQ(host_->health(), Upstream::Host::Health::Healthy); host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH); @@ -138,5 +139,33 @@ TEST_F(ConnPoolImplBaseTest, NoPrefetchIfDegraded) { pool_.destructAllConnections(); } +TEST_F(ConnPoolImplBaseTest, ExplicitPrefetch) { + // Create more than one connection per new stream. + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + EXPECT_CALL(pool_, instantiateActiveClient).Times(AnyNumber()); + + // With global prefetch off, we won't prefetch. + EXPECT_FALSE(pool_.maybePrefetch(0)); + // With prefetch ratio of 1.1, we'll prefetch two connections. + // Currently, no number of subsequent calls to prefetch will increase that. + EXPECT_TRUE(pool_.maybePrefetch(1.1)); + EXPECT_TRUE(pool_.maybePrefetch(1.1)); + EXPECT_FALSE(pool_.maybePrefetch(1.1)); + + // With a higher prefetch ratio, more connections may be prefetched. + EXPECT_TRUE(pool_.maybePrefetch(3)); + + pool_.destructAllConnections(); +} + +TEST_F(ConnPoolImplBaseTest, ExplicitPrefetchNotHealthy) { + // Create more than one connection per new stream. + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + + // Prefetch won't occur if the host is not healthy. 
+ host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH); + EXPECT_FALSE(pool_.maybePrefetch(1)); +} + } // namespace ConnectionPool } // namespace Envoy diff --git a/test/common/event/BUILD b/test/common/event/BUILD index b6032fe71825..036d0f424359 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -41,3 +41,13 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_test( + name = "scaled_range_timer_manager_test", + srcs = ["scaled_range_timer_manager_test.cc"], + deps = [ + "//source/common/event:scaled_range_timer_manager", + "//test/mocks/event:wrapped_dispatcher", + "//test/test_common:simulated_time_system_lib", + ], +) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index eaeee86bdd2c..c6c6a7a96272 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -173,6 +173,47 @@ TEST_F(SchedulableCallbackImplTest, ScheduleChainingAndCancellation) { dispatcher_->run(Dispatcher::RunType::Block); } +TEST_F(SchedulableCallbackImplTest, RescheduleNext) { + DispatcherImpl* dispatcher_impl = static_cast(dispatcher_.get()); + ReadyWatcher prepare_watcher; + evwatch_prepare_new(&dispatcher_impl->base(), onWatcherReady, &prepare_watcher); + + ReadyWatcher watcher0; + createCallback([&]() { + watcher0.ready(); + // Callback 1 was scheduled from the previous iteration, expect it to fire in the current + // iteration despite the attempt to reschedule. + callbacks_[1]->scheduleCallbackNextIteration(); + // Callback 2 expected to execute next iteration because current called before next. + callbacks_[2]->scheduleCallbackCurrentIteration(); + callbacks_[2]->scheduleCallbackNextIteration(); + // Callback 3 expected to execute next iteration because next was called before current. + callbacks_[3]->scheduleCallbackNextIteration(); + callbacks_[3]->scheduleCallbackCurrentIteration(); + }); + + ReadyWatcher watcher1; + createCallback([&]() { watcher1.ready(); }); + ReadyWatcher watcher2; + createCallback([&]() { watcher2.ready(); }); + ReadyWatcher watcher3; + createCallback([&]() { watcher3.ready(); }); + + // Schedule callbacks 0 and 1 outside the loop, both will run in the same iteration of the event + // loop. + callbacks_[0]->scheduleCallbackCurrentIteration(); + callbacks_[1]->scheduleCallbackNextIteration(); + + InSequence s; + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(watcher0, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(watcher3, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + class TestDeferredDeletable : public DeferredDeletable { public: TestDeferredDeletable(std::function on_destroy) : on_destroy_(on_destroy) {} @@ -1017,8 +1058,10 @@ class TimerImplTimingTest : public testing::Test { Dispatcher& dispatcher, Event::Timer& timer) { const auto start = time_system.monotonicTime(); EXPECT_TRUE(timer.enabled()); - while (true) { - dispatcher.run(Dispatcher::RunType::NonBlock); + dispatcher.run(Dispatcher::RunType::NonBlock); + while (timer.enabled()) { + time_system.advanceTimeAndRun(std::chrono::microseconds(1), dispatcher, + Dispatcher::RunType::NonBlock); #ifdef WIN32 // The event loop runs for a single iteration in NonBlock mode on Windows. 
A few iterations // are required to ensure that next iteration callbacks have a chance to run before time @@ -1026,11 +1069,6 @@ class TimerImplTimingTest : public testing::Test { dispatcher.run(Dispatcher::RunType::NonBlock); dispatcher.run(Dispatcher::RunType::NonBlock); #endif - if (timer.enabled()) { - time_system.advanceTimeAsync(std::chrono::microseconds(1)); - } else { - break; - } } return time_system.monotonicTime() - start; } diff --git a/test/common/event/scaled_range_timer_manager_test.cc b/test/common/event/scaled_range_timer_manager_test.cc new file mode 100644 index 000000000000..ff9bcab08772 --- /dev/null +++ b/test/common/event/scaled_range_timer_manager_test.cc @@ -0,0 +1,578 @@ +#include + +#include "envoy/event/timer.h" + +#include "common/event/dispatcher_impl.h" +#include "common/event/scaled_range_timer_manager.h" + +#include "test/mocks/common.h" +#include "test/mocks/event/wrapped_dispatcher.h" +#include "test/test_common/simulated_time_system.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Event { +namespace { + +using testing::ElementsAre; +using testing::InSequence; +using testing::MockFunction; + +class ScopeTrackingDispatcher : public WrappedDispatcher { +public: + ScopeTrackingDispatcher(DispatcherPtr dispatcher) + : WrappedDispatcher(*dispatcher), dispatcher_(std::move(dispatcher)) {} + + const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) override { + scope_ = object; + return impl_.setTrackedObject(object); + } + + const ScopeTrackedObject* scope_{nullptr}; + + Dispatcher* impl() const { return dispatcher_.get(); } + +private: + DispatcherPtr dispatcher_; +}; + +class ScaledRangeTimerManagerTest : public testing::Test, public TestUsingSimulatedTime { +public: + ScaledRangeTimerManagerTest() + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + + Api::ApiPtr api_; + ScopeTrackingDispatcher dispatcher_; +}; + +struct TrackedTimer { + explicit TrackedTimer(ScaledRangeTimerManager& manager, TimeSystem& time_system) + : timer(manager.createTimer([trigger_times = trigger_times.get(), &time_system] { + trigger_times->push_back(time_system.monotonicTime()); + })) {} + std::unique_ptr> trigger_times{ + std::make_unique>()}; + RangeTimerPtr timer; +}; + +TEST_F(ScaledRangeTimerManagerTest, CreateAndDestroy) { + ScaledRangeTimerManager manager(dispatcher_); +} + +TEST_F(ScaledRangeTimerManagerTest, CreateAndDestroyTimer) { + ScaledRangeTimerManager manager(dispatcher_); + + { + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + } +} + +TEST_F(ScaledRangeTimerManagerTest, CreateSingleScaledTimer) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + + timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10)); + EXPECT_TRUE(timer->enabled()); + + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + EXPECT_TRUE(timer->enabled()); + + EXPECT_CALL(callback, Call()); + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + EXPECT_FALSE(timer->enabled()); +} + +TEST_F(ScaledRangeTimerManagerTest, EnableAndDisableTimer) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + + timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(30)); + EXPECT_TRUE(timer->enabled()); + + 
timer->disableTimer(); + EXPECT_FALSE(timer->enabled()); + + // Provide some additional guarantee of safety by running the dispatcher for a little bit. This + // should be a no-op, and if not (because a timer was fired), that's a problem that will be caught + // by the strict mock callback. + simTime().advanceTimeAndRun(std::chrono::seconds(10), dispatcher_, Dispatcher::RunType::Block); +} + +TEST_F(ScaledRangeTimerManagerTest, DisableWhileDisabled) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + + EXPECT_FALSE(timer->enabled()); + timer->disableTimer(); + + EXPECT_FALSE(timer->enabled()); +} + +TEST_F(ScaledRangeTimerManagerTest, DisableWhileWaitingForMin) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + timer->enableTimer(std::chrono::seconds(10), std::chrono::seconds(100)); + EXPECT_TRUE(timer->enabled()); + + timer->disableTimer(); + EXPECT_FALSE(timer->enabled()); +} + +TEST_F(ScaledRangeTimerManagerTest, DisableWhileScalingMax) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + + timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(100)); + + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + + EXPECT_TRUE(timer->enabled()); + + timer->disableTimer(); + EXPECT_FALSE(timer->enabled()); + + // Run the dispatcher to make sure nothing happens when it's not supposed to. + simTime().advanceTimeAndRun(std::chrono::seconds(100), dispatcher_, Dispatcher::RunType::Block); +} + +TEST_F(ScaledRangeTimerManagerTest, DisableWithZeroMinTime) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + + timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(100)); + + EXPECT_TRUE(timer->enabled()); + + timer->disableTimer(); + EXPECT_FALSE(timer->enabled()); + + // Run the dispatcher to make sure nothing happens when it's not supposed to. + simTime().advanceTimeAndRun(std::chrono::seconds(100), dispatcher_, Dispatcher::RunType::Block); +} + +TEST_F(ScaledRangeTimerManagerTest, TriggerWithZeroMinTime) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + + timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(10)); + + simTime().advanceTimeAndRun(std::chrono::seconds(9), dispatcher_, Dispatcher::RunType::Block); + EXPECT_CALL(callback, Call); + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); +} + +TEST_F(ScaledRangeTimerManagerTest, DisableFrontScalingMaxTimer) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback1, callback2; + auto timer1 = manager.createTimer(callback1.AsStdFunction()); + auto timer2 = manager.createTimer(callback2.AsStdFunction()); + + // These timers have the same max-min. 
+ timer1->enableTimer(std::chrono::seconds(5), std::chrono::seconds(30)); + timer2->enableTimer(std::chrono::seconds(10), std::chrono::seconds(35)); + + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + + timer1->disableTimer(); + EXPECT_FALSE(timer1->enabled()); + ASSERT_TRUE(timer2->enabled()); + + // Check that timer2 doesn't trigger when timer1 was originally going to, at start+30. + simTime().advanceTimeAndRun(std::chrono::seconds(20), dispatcher_, Dispatcher::RunType::Block); + + // Advancing to timer2's max should trigger it. + EXPECT_CALL(callback2, Call); + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); +} + +TEST_F(ScaledRangeTimerManagerTest, DisableLaterScalingMaxTimer) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback1, callback2; + auto timer1 = manager.createTimer(callback1.AsStdFunction()); + auto timer2 = manager.createTimer(callback2.AsStdFunction()); + + // These timers have the same max-min. + timer1->enableTimer(std::chrono::seconds(5), std::chrono::seconds(30)); + timer2->enableTimer(std::chrono::seconds(10), std::chrono::seconds(35)); + + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + + timer2->disableTimer(); + EXPECT_FALSE(timer2->enabled()); + ASSERT_TRUE(timer1->enabled()); + + // After the original windows for both timers have long expired, only the enabled one should fire. + EXPECT_CALL(callback1, Call); + simTime().advanceTimeAndRun(std::chrono::seconds(100), dispatcher_, Dispatcher::RunType::Block); +} + +class ScaledRangeTimerManagerTestWithScope : public ScaledRangeTimerManagerTest, + public testing::WithParamInterface { +public: + ScopeTrackedObject* getScope() { return GetParam() ? 
&scope_ : nullptr; } + MockScopedTrackedObject scope_; +}; + +TEST_P(ScaledRangeTimerManagerTestWithScope, ReRegisterOnCallback) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + + EXPECT_EQ(dispatcher_.scope_, nullptr); + { + InSequence s; + EXPECT_CALL(callback, Call).WillOnce([&] { + EXPECT_EQ(dispatcher_.scope_, getScope()); + timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2), getScope()); + }); + EXPECT_CALL(callback, Call).WillOnce([&] { EXPECT_EQ(dispatcher_.scope_, getScope()); }); + } + + timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2), getScope()); + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + EXPECT_EQ(dispatcher_.scope_, nullptr); + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + EXPECT_EQ(dispatcher_.scope_, nullptr); + + EXPECT_TRUE(timer->enabled()); + + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + EXPECT_FALSE(timer->enabled()); +}; + +TEST_P(ScaledRangeTimerManagerTestWithScope, ScheduleWithScalingFactorZero) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + manager.setScaleFactor(0); + + EXPECT_CALL(callback, Call).WillOnce([&] { EXPECT_EQ(dispatcher_.scope_, getScope()); }); + + timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(1), getScope()); + simTime().advanceTimeAndRun(std::chrono::milliseconds(1), dispatcher_, + Dispatcher::RunType::Block); +} + +INSTANTIATE_TEST_SUITE_P(WithAndWithoutScope, ScaledRangeTimerManagerTestWithScope, + testing::Bool()); + +TEST_F(ScaledRangeTimerManagerTest, SingleTimerTriggeredNoScaling) { + ScaledRangeTimerManager manager(dispatcher_); + bool triggered = false; + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + EXPECT_CALL(callback, Call()).WillOnce([&] { triggered = true; }); + + timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(9)); + + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + EXPECT_FALSE(triggered); + + simTime().advanceTimeAndRun(std::chrono::seconds(4) - std::chrono::milliseconds(1), dispatcher_, + Dispatcher::RunType::Block); + EXPECT_FALSE(triggered); + + simTime().advanceTimeAndRun(std::chrono::milliseconds(1), dispatcher_, + Dispatcher::RunType::Block); + EXPECT_TRUE(triggered); +} + +TEST_F(ScaledRangeTimerManagerTest, SingleTimerSameMinMax) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback; + auto timer = manager.createTimer(callback.AsStdFunction()); + EXPECT_CALL(callback, Call()); + + timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(1)); + + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + EXPECT_FALSE(timer->enabled()); +} + +TEST_F(ScaledRangeTimerManagerTest, MultipleTimersNoScaling) { + ScaledRangeTimerManager manager(dispatcher_); + std::vector timers; + timers.reserve(3); + + const MonotonicTime start = simTime().monotonicTime(); + for (int i = 0; i < 3; ++i) { + timers.emplace_back(manager, simTime()); + } + + timers[0].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(3)); + 
timers[1].timer->enableTimer(std::chrono::seconds(2), std::chrono::seconds(6)); + timers[2].timer->enableTimer(std::chrono::seconds(0), std::chrono::seconds(9)); + + for (int i = 0; i < 10; ++i) { + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + } + + EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(3))); + EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(6))); + EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(9))); +} + +TEST_F(ScaledRangeTimerManagerTest, MultipleTimersWithScaling) { + ScaledRangeTimerManager manager(dispatcher_); + std::vector timers; + timers.reserve(3); + + for (int i = 0; i < 3; ++i) { + timers.emplace_back(manager, simTime()); + } + + const MonotonicTime start = simTime().monotonicTime(); + + timers[0].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(3)); + timers[1].timer->enableTimer(std::chrono::seconds(2), std::chrono::seconds(6)); + timers[2].timer->enableTimer(std::chrono::seconds(6), std::chrono::seconds(10)); + + manager.setScaleFactor(0.5); + + // Advance time to start = 1 second, so timers[0] hits its min. + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + // Advance time to start = 2, which should make timers[0] hit its scaled max. + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + // At 4x speed, timers[1] will fire in only 1 second. + manager.setScaleFactor(0.25); + + // Advance time to start = 3, which should make timers[1] hit its scaled max. + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + // Advance time to start = 6, which is the minimum required for timers[2] to fire. + simTime().advanceTimeAndRun(std::chrono::seconds(3), dispatcher_, Dispatcher::RunType::Block); + + manager.setScaleFactor(0); + // With a scale factor of 0, timers[2] should be ready to be fired immediately. 
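// For reference, the expected trigger times in this test are consistent with each timer firing at
//   enable_time + min + scale_factor * (max - min),
// assuming the manager scales only the (max - min) portion. A rough sketch of that computation,
// with illustrative names that are not part of the manager's API:
//
//   auto scaled_deadline = [](MonotonicTime enabled_at, std::chrono::milliseconds min,
//                             std::chrono::milliseconds max, double scale_factor) {
//     return enabled_at + min +
//            std::chrono::duration_cast<std::chrono::milliseconds>((max - min) * scale_factor);
//   };
//
// For example, timers[1] (min 2s, max 6s) at scale factor 0.25 gives start + 2s + 0.25 * 4s,
// i.e. start + 3s, which matches the trigger time checked below.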
+ dispatcher_.run(Dispatcher::RunType::Block); + + EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(2))); + EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(3))); + EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(6))); +} + +TEST_F(ScaledRangeTimerManagerTest, MultipleTimersSameTimes) { + ScaledRangeTimerManager manager(dispatcher_); + std::vector timers; + timers.reserve(3); + + const MonotonicTime start = simTime().monotonicTime(); + + for (int i = 0; i < 3; ++i) { + timers.emplace_back(manager, simTime()); + timers[i].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2)); + } + + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(2))); + EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(2))); + EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(2))); +} + +TEST_F(ScaledRangeTimerManagerTest, MultipleTimersSameTimesFastClock) { + ScaledRangeTimerManager manager(dispatcher_); + std::vector timers; + timers.reserve(3); + + const MonotonicTime start = simTime().monotonicTime(); + + for (int i = 0; i < 3; ++i) { + timers.emplace_back(manager, simTime()); + timers[i].timer->enableTimer(std::chrono::seconds(1), std::chrono::seconds(2)); + } + + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + // The clock runs fast here before the dispatcher gets to the timer callbacks. + simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block); + + EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(3))); + EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(3))); + EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(3))); +} + +TEST_F(ScaledRangeTimerManagerTest, ScheduledWithScalingFactorZero) { + ScaledRangeTimerManager manager(dispatcher_); + manager.setScaleFactor(0); + + TrackedTimer timer(manager, simTime()); + + // The timer should fire at start = 4 since the scaling factor is 0. + const MonotonicTime start = simTime().monotonicTime(); + timer.timer->enableTimer(std::chrono::seconds(4), std::chrono::seconds(10)); + + for (int i = 0; i < 10; ++i) { + simTime().advanceTimeAndRun(std::chrono::seconds(4), dispatcher_, Dispatcher::RunType::Block); + } + + EXPECT_THAT(*timer.trigger_times, ElementsAre(start + std::chrono::seconds(4))); +} + +TEST_F(ScaledRangeTimerManagerTest, ScheduledWithMaxBeforeMin) { + // When max < min, the timer behaves the same as if max == min. This ensures that min is always + // respected, and max is respected as much as possible. 
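// (Equivalently, the effective maximum is presumably clamped to std::max(min, max), so the
//  enableTimer(4s, 3s) call below behaves like enableTimer(4s, 4s) and fires at start + 4s
//  regardless of scaling.)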
+ ScaledRangeTimerManager manager(dispatcher_); + + TrackedTimer timer(manager, simTime()); + + const MonotonicTime start = simTime().monotonicTime(); + timer.timer->enableTimer(std::chrono::seconds(4), std::chrono::seconds(3)); + + for (int i = 0; i < 10; ++i) { + simTime().advanceTimeAndRun(std::chrono::seconds(4), dispatcher_, Dispatcher::RunType::Block); + } + + EXPECT_THAT(*timer.trigger_times, ElementsAre(start + std::chrono::seconds(4))); +} + +TEST_F(ScaledRangeTimerManagerTest, MultipleTimersTriggeredInTheSameEventLoopIteration) { + ScaledRangeTimerManager manager(dispatcher_); + + MockFunction callback1, callback2, callback3; + auto timer1 = manager.createTimer(callback1.AsStdFunction()); + auto timer2 = manager.createTimer(callback2.AsStdFunction()); + auto timer3 = manager.createTimer(callback3.AsStdFunction()); + + timer1->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10)); + timer2->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10)); + timer3->enableTimer(std::chrono::seconds(5), std::chrono::seconds(10)); + + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + + DispatcherImpl* dispatcher_impl = static_cast(dispatcher_.impl()); + ASSERT(dispatcher_impl != nullptr); + + ReadyWatcher prepare_watcher; + evwatch_prepare_new( + &dispatcher_impl->base(), + +[](evwatch*, const evwatch_prepare_cb_info*, void* arg) { + // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new. + auto watcher = static_cast(arg); + watcher->ready(); + }, + &prepare_watcher); + + ReadyWatcher schedulable_watcher; + SchedulableCallbackPtr schedulable_callback = + dispatcher_.createSchedulableCallback([&] { schedulable_watcher.ready(); }); + + testing::Expectation first_prepare = EXPECT_CALL(prepare_watcher, ready()); + testing::ExpectationSet after_first_prepare; + after_first_prepare += + EXPECT_CALL(schedulable_watcher, ready()).After(first_prepare).WillOnce([&] { + schedulable_callback->scheduleCallbackNextIteration(); + }); + after_first_prepare += EXPECT_CALL(callback1, Call).After(first_prepare); + after_first_prepare += EXPECT_CALL(callback2, Call).After(first_prepare); + after_first_prepare += EXPECT_CALL(callback3, Call).After(first_prepare); + testing::Expectation second_prepare = + EXPECT_CALL(prepare_watcher, ready()).After(after_first_prepare).WillOnce([&] { + schedulable_callback->scheduleCallbackNextIteration(); + }); + EXPECT_CALL(schedulable_watcher, ready()).After(second_prepare); + + // Running outside the event loop, this should schedule a run on the next event loop iteration. + schedulable_callback->scheduleCallbackNextIteration(); + + simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + dispatcher_.run(Dispatcher::RunType::Block); +} + +TEST_F(ScaledRangeTimerManagerTest, MultipleTimersWithChangeInScalingFactor) { + ScaledRangeTimerManager manager(dispatcher_); + const MonotonicTime start = simTime().monotonicTime(); + + std::vector timers; + timers.reserve(4); + for (int i = 0; i < 4; i++) { + timers.emplace_back(manager, simTime()); + } + + timers[0].timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(15)); + timers[1].timer->enableTimer(std::chrono::seconds(12), std::chrono::seconds(14)); + + manager.setScaleFactor(0.1); + + timers[2].timer->enableTimer(std::chrono::seconds(7), std::chrono::seconds(21)); + timers[3].timer->enableTimer(std::chrono::seconds(10), std::chrono::seconds(16)); + + // Advance to timer 0's min. 
+ simTime().advanceTimeAndRun(std::chrono::seconds(5), dispatcher_, Dispatcher::RunType::Block); + + manager.setScaleFactor(0.5); + + // Now that the scale factor is 0.5, fire times are 0: start+10, 1: start+13, 2: start+14, 3: + // start+13. Advance to timer 2's min. + simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block); + + // Advance to time start+9. + simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block); + + manager.setScaleFactor(0.1); + // Now that the scale factor is reduced, fire times are 0: start+6, 1: start+12.2, + // 2: start+8.4, 3: start+10.6. Timers 0 and 2 should fire immediately since their + // trigger times are in the past. + dispatcher_.run(Dispatcher::RunType::Block); + EXPECT_THAT(*timers[0].trigger_times, ElementsAre(start + std::chrono::seconds(9))); + EXPECT_THAT(*timers[2].trigger_times, ElementsAre(start + std::chrono::seconds(9))); + + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + + // The time is now start+10. Re-enable timer 0. + ASSERT_FALSE(timers[0].timer->enabled()); + timers[0].timer->enableTimer(std::chrono::seconds(5), std::chrono::seconds(13)); + + // Fire times are now 0: start+19, 1: start+13, 2: none, 3: start+13. + manager.setScaleFactor(0.5); + + // Advance to timer 1's min. + simTime().advanceTimeAndRun(std::chrono::seconds(2), dispatcher_, Dispatcher::RunType::Block); + + // Advance again to start+13, which should trigger both timers 1 and 3. + simTime().advanceTimeAndRun(std::chrono::seconds(1), dispatcher_, Dispatcher::RunType::Block); + EXPECT_THAT(*timers[1].trigger_times, ElementsAre(start + std::chrono::seconds(13))); + EXPECT_THAT(*timers[3].trigger_times, ElementsAre(start + std::chrono::seconds(13))); + + simTime().advanceTimeAndRun(std::chrono::seconds(3), dispatcher_, Dispatcher::RunType::Block); + + // The time is now start+16. Setting the scale factor to 0 should make timer 0 fire immediately. 
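// (With a factor of 0 the scaled portion vanishes, so timer 0's deadline collapses to its
//  re-enable time plus its 5s min, i.e. start+15, which is already in the past at start+16;
//  it therefore fires on the next dispatcher run and records the current time, start+16.)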
+ manager.setScaleFactor(0); + dispatcher_.run(Dispatcher::RunType::Block); + EXPECT_THAT(*timers[0].trigger_times, + ElementsAre(start + std::chrono::seconds(9), start + std::chrono::seconds(16))); +} + +} // namespace +} // namespace Event +} // namespace Envoy diff --git a/test/common/filesystem/filesystem_impl_test.cc b/test/common/filesystem/filesystem_impl_test.cc index 7870c285e19d..127451d3929e 100644 --- a/test/common/filesystem/filesystem_impl_test.cc +++ b/test/common/filesystem/filesystem_impl_test.cc @@ -19,7 +19,7 @@ static constexpr FlagSet DefaultFlags{ class FileSystemImplTest : public testing::Test { protected: - int getFd(File* file) { + filesystem_os_id_t getFd(File* file) { #ifdef WIN32 auto file_impl = dynamic_cast(file); #else @@ -63,11 +63,7 @@ TEST_F(FileSystemImplTest, DirectoryExists) { } TEST_F(FileSystemImplTest, FileSize) { -#ifdef WIN32 - EXPECT_EQ(0, file_system_.fileSize("NUL")); -#else - EXPECT_EQ(0, file_system_.fileSize("/dev/null")); -#endif + EXPECT_EQ(0, file_system_.fileSize(std::string(Platform::null_device_path))); EXPECT_EQ(-1, file_system_.fileSize("/dev/blahblahblah")); const std::string data = "test string\ntest"; const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", data); @@ -235,15 +231,27 @@ TEST_F(FileSystemImplTest, Open) { EXPECT_TRUE(file->isOpen()); } +TEST_F(FileSystemImplTest, OpenReadOnly) { + const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); + ::unlink(new_file_path.c_str()); + static constexpr FlagSet ReadOnlyFlags{1 << Filesystem::File::Operation::Read | + 1 << Filesystem::File::Operation::Create | + 1 << Filesystem::File::Operation::Append}; + FilePtr file = file_system_.createFile(new_file_path); + const Api::IoCallBoolResult result = file->open(ReadOnlyFlags); + EXPECT_TRUE(result.rc_); + EXPECT_TRUE(file->isOpen()); +} + TEST_F(FileSystemImplTest, OpenTwice) { const std::string new_file_path = TestEnvironment::temporaryPath("envoy_this_not_exist"); ::unlink(new_file_path.c_str()); FilePtr file = file_system_.createFile(new_file_path); - EXPECT_EQ(getFd(file.get()), -1); + EXPECT_EQ(getFd(file.get()), INVALID_HANDLE); const Api::IoCallBoolResult result1 = file->open(DefaultFlags); - const int initial_fd = getFd(file.get()); + const filesystem_os_id_t initial_fd = getFd(file.get()); EXPECT_TRUE(result1.rc_); EXPECT_TRUE(file->isOpen()); @@ -319,8 +327,7 @@ TEST_F(FileSystemImplTest, WriteAfterClose) { EXPECT_TRUE(bool_result2.rc_); const Api::IoCallSizeResult size_result = file->write(" new data"); EXPECT_EQ(-1, size_result.rc_); - EXPECT_EQ(IoFileError::IoErrorCode::UnknownError, size_result.err_->getErrorCode()); - EXPECT_EQ("Bad file descriptor", size_result.err_->getErrorDetails()); + EXPECT_EQ(IoFileError::IoErrorCode::BadFd, size_result.err_->getErrorCode()); } TEST_F(FileSystemImplTest, NonExistingFileAndReadOnly) { @@ -345,12 +352,30 @@ TEST_F(FileSystemImplTest, ExistingReadOnlyFileAndWrite) { std::string data(" new data"); const Api::IoCallSizeResult result = file->write(data); EXPECT_TRUE(result.rc_ < 0); - EXPECT_EQ(result.err_->getErrorDetails(), "Bad file descriptor"); +#ifdef WIN32 + EXPECT_EQ(IoFileError::IoErrorCode::Permission, result.err_->getErrorCode()); +#else + EXPECT_EQ(IoFileError::IoErrorCode::BadFd, result.err_->getErrorCode()); +#endif } auto contents = TestEnvironment::readFileToStringForTest(file_path); EXPECT_EQ("existing file", contents); } +TEST_F(FileSystemImplTest, TestIoFileError) { + IoFileError 
error1(HANDLE_ERROR_PERM); + EXPECT_EQ(IoFileError::IoErrorCode::Permission, error1.getErrorCode()); + EXPECT_EQ(errorDetails(HANDLE_ERROR_PERM), error1.getErrorDetails()); + + IoFileError error2(HANDLE_ERROR_INVALID); + EXPECT_EQ(IoFileError::IoErrorCode::BadFd, error2.getErrorCode()); + EXPECT_EQ(errorDetails(HANDLE_ERROR_INVALID), error2.getErrorDetails()); + + int not_known_error = 42; + IoFileError error3(not_known_error); + EXPECT_EQ(IoFileError::IoErrorCode::UnknownError, error3.getErrorCode()); +} + } // namespace Filesystem } // namespace Envoy diff --git a/test/common/formatter/BUILD b/test/common/formatter/BUILD index 8e873cfe24d9..c0203c1f1f00 100644 --- a/test/common/formatter/BUILD +++ b/test/common/formatter/BUILD @@ -40,6 +40,7 @@ envoy_cc_test( "//source/common/common:utility_lib", "//source/common/formatter:substitution_formatter_lib", "//source/common/http:header_map_lib", + "//source/common/json:json_loader_lib", "//source/common/network:address_lib", "//source/common/router:string_accessor_lib", "//test/mocks/api:api_mocks", diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index 8c611136842e..6e16c79e6992 100644 --- a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -5,9 +5,11 @@ #include "envoy/config/core/v3/base.pb.h" +#include "common/common/logger.h" #include "common/common/utility.h" #include "common/formatter/substitution_formatter.h" #include "common/http/header_map_impl.h" +#include "common/json/json_loader.h" #include "common/network/address_impl.h" #include "common/protobuf/utility.h" #include "common/router/string_accessor_impl.h" @@ -272,6 +274,30 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::stringValue("via_upstream"))); } + { + StreamInfoFormatter termination_details_format("CONNECTION_TERMINATION_DETAILS"); + absl::optional details; + EXPECT_CALL(stream_info, connectionTerminationDetails()).WillRepeatedly(ReturnRef(details)); + EXPECT_EQ(absl::nullopt, + termination_details_format.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(termination_details_format.formatValue(request_headers, response_headers, + response_trailers, stream_info, body), + ProtoEq(ValueUtil::nullValue())); + } + + { + StreamInfoFormatter termination_details_format("CONNECTION_TERMINATION_DETAILS"); + absl::optional details{"access_denied"}; + EXPECT_CALL(stream_info, connectionTerminationDetails()).WillRepeatedly(ReturnRef(details)); + EXPECT_EQ("access_denied", + termination_details_format.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(termination_details_format.formatValue(request_headers, response_headers, + response_trailers, stream_info, body), + ProtoEq(ValueUtil::stringValue("access_denied"))); + } + { StreamInfoFormatter bytes_sent_format("BYTES_SENT"); EXPECT_CALL(stream_info, bytesSent()).WillRepeatedly(Return(1)); @@ -465,6 +491,17 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::stringValue("127.0.0.1:0"))); } + { + StreamInfoFormatter upstream_format("CONNECTION_ID"); + uint64_t id = 123; + EXPECT_CALL(stream_info, connectionID()).WillRepeatedly(Return(id)); + EXPECT_EQ("123", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + 
stream_info, body), + ProtoEq(ValueUtil::numberValue(id))); + } + { StreamInfoFormatter upstream_format("REQUESTED_SERVER_NAME"); std::string requested_server_name = "stub_server"; @@ -2158,6 +2195,16 @@ TEST(SubstitutionFormatterTest, CompositeFormatterSuccess) { "%%|%%123456000|1522796769%%123|1%%1522796769", formatter.format(request_header, response_header, response_trailer, stream_info, body)); } +#ifndef WIN32 + { + const std::string format = "%START_TIME(%E4n)%"; + const SystemTime start_time(std::chrono::microseconds(1522796769123456)); + EXPECT_CALL(stream_info, startTime()).WillOnce(Return(start_time)); + FormatterImpl formatter(format); + EXPECT_EQ("%E4n", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); + } +#endif } TEST(SubstitutionFormatterTest, CompositeFormatterEmpty) { @@ -2268,13 +2315,27 @@ TEST(SubstitutionFormatterTest, ParserFailures) { "%FILTER_STATE(TEST", "%FILTER_STATE()%", "%START_TIME(%85n)%", - "%START_TIME(%#__88n)%"}; + "%START_TIME(%#__88n)%", + "%START_TIME(%En%)%", + "%START_TIME(%4En%)%", + "%START_TIME(%On%)%", + "%START_TIME(%4On%)%"}; for (const std::string& test_case : test_cases) { EXPECT_THROW(parser.parse(test_case), EnvoyException) << test_case; } } +TEST(SubstitutionFormatterTest, ParserSuccesses) { + SubstitutionFormatParser parser; + + std::vector test_cases = {"%START_TIME(%E4n%)%", "%START_TIME(%O4n%)%"}; + + for (const std::string& test_case : test_cases) { + EXPECT_NO_THROW(parser.parse(test_case)); + } +} + } // namespace } // namespace Formatter } // namespace Envoy diff --git a/test/common/grpc/BUILD b/test/common/grpc/BUILD index 16c3d937ff69..eac8fbefa8a5 100644 --- a/test/common/grpc/BUILD +++ b/test/common/grpc/BUILD @@ -82,7 +82,6 @@ envoy_cc_test( "//source/common/grpc:common_lib", "//source/common/grpc:context_lib", "//source/common/http:headers_lib", - "//source/common/stats:fake_symbol_table_lib", "//test/mocks/upstream:cluster_info_mocks", "//test/test_common:global_lib", ], diff --git a/test/common/grpc/async_client_impl_test.cc b/test/common/grpc/async_client_impl_test.cc index 6544c33bf952..4e690c9d5a19 100644 --- a/test/common/grpc/async_client_impl_test.cc +++ b/test/common/grpc/async_client_impl_test.cc @@ -1,6 +1,7 @@ #include "envoy/config/core/v3/grpc_service.pb.h" #include "common/grpc/async_client_impl.h" +#include "common/network/address_impl.h" #include "test/mocks/http/mocks.h" #include "test/mocks/tracing/mocks.h" @@ -27,6 +28,11 @@ class EnvoyAsyncClientImplTest : public testing::Test { : method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName("SayHello")) { envoy::config::core::v3::GrpcService config; config.mutable_envoy_grpc()->set_cluster_name("test_cluster"); + + auto& initial_metadata_entry = *config.mutable_initial_metadata()->Add(); + initial_metadata_entry.set_key("downstream-local-address"); + initial_metadata_entry.set_value("%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%"); + grpc_client_ = std::make_unique(cm_, config, test_time_.timeSystem()); ON_CALL(cm_, httpAsyncClientForCluster("test_cluster")).WillByDefault(ReturnRef(http_client_)); } @@ -96,6 +102,44 @@ TEST_F(EnvoyAsyncClientImplTest, HostIsOverrideByConfig) { EXPECT_EQ(grpc_stream, nullptr); } +// Validate that the metadata header is the initial metadata in gRPC service config and the value is +// interpolated. 
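// (The fixture sets initial_metadata to the command operator "%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%";
//  the test below verifies that it is expanded against the parent StreamInfo supplied via
//  StreamOptions::setParentContext, yielding the literal downstream local address "5.5.5.5".)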
+TEST_F(EnvoyAsyncClientImplTest, MetadataIsInitialized) { + NiceMock> grpc_callbacks; + Http::AsyncClient::StreamCallbacks* http_callbacks; + + Http::MockAsyncClientStream http_stream; + EXPECT_CALL(http_client_, start(_, _)) + .WillOnce( + Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks, + const Http::AsyncClient::StreamOptions&) { + http_callbacks = &callbacks; + return &http_stream; + })); + + const std::string expected_downstream_local_address = "5.5.5.5"; + EXPECT_CALL(grpc_callbacks, + onCreateInitialMetadata(testing::Truly([&expected_downstream_local_address]( + Http::RequestHeaderMap& headers) { + return headers.get(Http::LowerCaseString("downstream-local-address"))[0]->value() == + expected_downstream_local_address; + }))); + EXPECT_CALL(http_stream, sendHeaders(_, _)) + .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); })); + + // Prepare the parent context of this call. + StreamInfo::StreamInfoImpl stream_info{test_time_.timeSystem()}; + stream_info.setDownstreamLocalAddress( + std::make_shared(expected_downstream_local_address)); + Http::AsyncClient::ParentContext parent_context{&stream_info}; + + Http::AsyncClient::StreamOptions stream_options; + stream_options.setParentContext(parent_context); + + auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, stream_options); + EXPECT_EQ(grpc_stream, nullptr); +} + // Validate that a failure in the HTTP client returns immediately with status // UNAVAILABLE. TEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) { diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index 0b3f39ed3db1..3f6d88dd0096 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -70,19 +70,22 @@ TEST(GrpcContextTest, GetGrpcMessage) { TEST(GrpcContextTest, GetGrpcTimeout) { Http::TestRequestHeaderMapImpl empty_headers; - EXPECT_EQ(std::chrono::milliseconds(0), Common::getGrpcTimeout(empty_headers)); + EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(empty_headers)); Http::TestRequestHeaderMapImpl empty_grpc_timeout{{"grpc-timeout", ""}}; - EXPECT_EQ(std::chrono::milliseconds(0), Common::getGrpcTimeout(empty_grpc_timeout)); + EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(empty_grpc_timeout)); Http::TestRequestHeaderMapImpl missing_unit{{"grpc-timeout", "123"}}; - EXPECT_EQ(std::chrono::milliseconds(0), Common::getGrpcTimeout(missing_unit)); + EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(missing_unit)); Http::TestRequestHeaderMapImpl illegal_unit{{"grpc-timeout", "123F"}}; - EXPECT_EQ(std::chrono::milliseconds(0), Common::getGrpcTimeout(illegal_unit)); + EXPECT_EQ(absl::nullopt, Common::getGrpcTimeout(illegal_unit)); - Http::TestRequestHeaderMapImpl unit_hours{{"grpc-timeout", "1H"}}; - EXPECT_EQ(std::chrono::milliseconds(60 * 60 * 1000), Common::getGrpcTimeout(unit_hours)); + Http::TestRequestHeaderMapImpl unit_hours{{"grpc-timeout", "0H"}}; + EXPECT_EQ(std::chrono::milliseconds(0), Common::getGrpcTimeout(unit_hours)); + + Http::TestRequestHeaderMapImpl zero_hours{{"grpc-timeout", "1H"}}; + EXPECT_EQ(std::chrono::milliseconds(60 * 60 * 1000), Common::getGrpcTimeout(zero_hours)); Http::TestRequestHeaderMapImpl unit_minutes{{"grpc-timeout", "1M"}}; EXPECT_EQ(std::chrono::milliseconds(60 * 1000), Common::getGrpcTimeout(unit_minutes)); diff --git a/test/common/grpc/context_impl_test.cc b/test/common/grpc/context_impl_test.cc index 64ed11321215..745ddb797f49 100644 --- a/test/common/grpc/context_impl_test.cc +++ 
b/test/common/grpc/context_impl_test.cc @@ -5,7 +5,7 @@ #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/http/utility.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "test/mocks/upstream/cluster_info.h" #include "test/test_common/global.h" diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index 1474899621f1..31e732d98d85 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -4,7 +4,9 @@ #include "common/api/api_impl.h" #include "common/event/dispatcher_impl.h" #include "common/grpc/google_async_client_impl.h" +#include "common/network/address_impl.h" #include "common/stats/isolated_store_impl.h" +#include "common/stream_info/stream_info_impl.h" #include "test/mocks/grpc/mocks.h" #include "test/mocks/tracing/mocks.h" @@ -58,6 +60,11 @@ class EnvoyGoogleAsyncClientImplTest : public testing::Test { auto* google_grpc = config_.mutable_google_grpc(); google_grpc->set_target_uri("fake_address"); google_grpc->set_stat_prefix("test_cluster"); + + auto& initial_metadata_entry = *config_.mutable_initial_metadata()->Add(); + initial_metadata_entry.set_key("downstream-local-address"); + initial_metadata_entry.set_value("%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%"); + tls_ = std::make_unique(*api_); } @@ -94,6 +101,38 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) { EXPECT_TRUE(grpc_stream == nullptr); } +// Validate that the metadata header is the initial metadata in gRPC service config and the value is +// interpolated. +TEST_F(EnvoyGoogleAsyncClientImplTest, MetadataIsInitialized) { + initialize(); + + EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr)); + MockAsyncStreamCallbacks grpc_callbacks; + + const std::string expected_downstream_local_address = "5.5.5.5"; + EXPECT_CALL(grpc_callbacks, + onCreateInitialMetadata(testing::Truly([&expected_downstream_local_address]( + Http::RequestHeaderMap& headers) { + return headers.get(Http::LowerCaseString("downstream-local-address"))[0]->value() == + expected_downstream_local_address; + }))); + + EXPECT_CALL(grpc_callbacks, onReceiveTrailingMetadata_(_)); + EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "")); + + // Prepare the parent context of this call. + StreamInfo::StreamInfoImpl stream_info{test_time_.timeSystem()}; + stream_info.setDownstreamLocalAddress( + std::make_shared(expected_downstream_local_address)); + Http::AsyncClient::ParentContext parent_context{&stream_info}; + + Http::AsyncClient::StreamOptions stream_options; + stream_options.setParentContext(parent_context); + + auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, stream_options); + EXPECT_TRUE(grpc_stream == nullptr); +} + // Validate that a failure in gRPC stub call creation returns immediately with // status UNAVAILABLE. 
TEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) { diff --git a/test/common/grpc/grpc_client_integration_test.cc b/test/common/grpc/grpc_client_integration_test.cc index e347226f0354..f1c8d83d8d83 100644 --- a/test/common/grpc/grpc_client_integration_test.cc +++ b/test/common/grpc/grpc_client_integration_test.cc @@ -413,13 +413,12 @@ class GrpcAccessTokenClientIntegrationTest : public GrpcSslClientIntegrationTest void expectExtraHeaders(FakeStream& fake_stream) override { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - std::vector auth_headers; - Http::HeaderUtility::getAllOfHeader(fake_stream.headers(), "authorization", auth_headers); + const auto auth_headers = fake_stream.headers().get(Http::LowerCaseString("authorization")); if (!access_token_value_.empty()) { - EXPECT_EQ("Bearer " + access_token_value_, auth_headers[0]); + EXPECT_EQ("Bearer " + access_token_value_, auth_headers[0]->value().getStringView()); } if (!access_token_value_2_.empty()) { - EXPECT_EQ("Bearer " + access_token_value_2_, auth_headers[1]); + EXPECT_EQ("Bearer " + access_token_value_2_, auth_headers[1]->value().getStringView()); } } diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index a7d10614dd00..d610a221f8b7 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -20,7 +20,7 @@ #include "common/http/async_client_impl.h" #include "common/http/codes.h" #include "common/http/http2/conn_pool.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/network/connection_impl.h" #include "common/network/raw_buffer_socket.h" @@ -299,7 +299,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { EXPECT_CALL(*mock_host_, cluster()).WillRepeatedly(ReturnRef(*cluster_info_ptr_)); EXPECT_CALL(*mock_host_description_, locality()).WillRepeatedly(ReturnRef(host_locality_)); http_conn_pool_ = std::make_unique( - *dispatcher_, host_ptr_, Upstream::ResourcePriority::Default, nullptr, nullptr); + *dispatcher_, random_, host_ptr_, Upstream::ResourcePriority::Default, nullptr, nullptr); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillRepeatedly(Return(http_conn_pool_.get())); http_async_client_ = std::make_unique( diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 7d09b84f43d9..7b19f269bbdd 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -95,6 +95,18 @@ envoy_cc_fuzz_test( ], ) +envoy_cc_test( + name = "filter_manager_test", + srcs = ["filter_manager_test.cc"], + deps = [ + "//source/common/http:filter_manager_lib", + "//test/mocks/event:event_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/local_reply:local_reply_mocks", + "//test/mocks/network:network_mocks", + ], +) + envoy_cc_test( name = "codec_wrappers_test", srcs = ["codec_wrappers_test.cc"], @@ -173,7 +185,6 @@ envoy_cc_fuzz_test( "//source/common/http:request_id_extension_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/server:drain_manager_lib", "//test/fuzz:utility_lib", "//test/mocks/access_log:access_log_mocks", @@ -193,54 +204,30 @@ envoy_cc_fuzz_test( envoy_cc_test( name = "conn_manager_impl_test", - srcs = ["conn_manager_impl_test.cc"], + srcs = [ + # Split to avoid compiler OOM, especially on ASAN. 
+ "conn_manager_impl_test.cc", + "conn_manager_impl_test_2.cc", + "conn_manager_impl_test_base.cc", + "conn_manager_impl_test_base.h", + ], shard_count = 3, deps = [ - "//include/envoy/access_log:access_log_interface", - "//include/envoy/buffer:buffer_interface", - "//include/envoy/event:dispatcher_interface", - "//include/envoy/http:request_id_extension_interface", - "//include/envoy/tracing:http_tracer_interface", - "//source/common/access_log:access_log_lib", - "//source/common/buffer:buffer_lib", - "//source/common/common:macros", - "//source/common/event:dispatcher_lib", - "//source/common/formatter:substitution_formatter_lib", "//source/common/http:conn_manager_lib", "//source/common/http:context_lib", - "//source/common/http:date_provider_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:headers_lib", - "//source/common/http:request_id_extension_lib", - "//source/common/network:address_lib", - "//source/common/stats:stats_lib", - "//source/common/upstream:upstream_includes", - "//source/common/upstream:upstream_lib", "//source/extensions/access_loggers/file:file_access_log_lib", - "//source/server:drain_manager_lib", - "//test/mocks:common_lib", "//test/mocks/access_log:access_log_mocks", - "//test/mocks/buffer:buffer_mocks", + "//test/mocks/event:event_mocks", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:factory_context_mocks", - "//test/mocks/server:instance_mocks", - "//test/mocks/server:overload_manager_mocks", "//test/mocks/ssl:ssl_mocks", - "//test/mocks/tracing:tracing_mocks", - "//test/mocks/upstream:cluster_manager_mocks", - "//test/mocks/upstream:host_mocks", - "//test/mocks/upstream:thread_local_cluster_mocks", "//test/test_common:logging_lib", + "//test/test_common:simulated_time_system_lib", "//test/test_common:test_runtime_lib", - "//test/test_common:test_time_lib", - "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", - "@envoy_api//envoy/type/tracing/v3:pkg_cc_proto", - "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -287,6 +274,7 @@ envoy_cc_test( "//source/common/http:header_list_view_lib", "//source/common/http:header_map_lib", "//source/common/http:header_utility_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) @@ -320,6 +308,7 @@ envoy_cc_fuzz_test( ":header_map_impl_fuzz_proto_cc_proto", "//source/common/http:header_map_lib", "//test/fuzz:utility_lib", + "//test/test_common:test_runtime_lib", ], ) @@ -328,6 +317,7 @@ envoy_cc_test( srcs = ["header_utility_test.cc"], deps = [ "//source/common/http:header_utility_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], @@ -369,7 +359,6 @@ envoy_cc_test( deps = [ "//source/common/http:exception_lib", "//source/common/http:header_map_lib", - "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/network:address_lib", "//test/mocks/http:http_mocks", diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 5e35a2590d9a..d47719351501 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -155,8 +155,8 @@ TEST_F(AsyncClientImplTest, BasicStream) { } TEST_F(AsyncClientImplTest, Basic) { - message_->body() = 
std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -193,8 +193,8 @@ TEST_F(AsyncClientImplTest, Basic) { TEST_F(AsyncClientImplTracingTest, Basic) { Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - message_->body() = std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -238,8 +238,8 @@ TEST_F(AsyncClientImplTracingTest, Basic) { TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - message_->body() = std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -284,8 +284,8 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { } TEST_F(AsyncClientImplTest, BasicHashPolicy) { - message_->body() = std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -331,8 +331,8 @@ TEST_F(AsyncClientImplTest, Retry) { .WillByDefault(Return(true)); RequestMessage* message_copy = message_.get(); - message_->body() = std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -484,8 +484,8 @@ TEST_F(AsyncClientImplTest, MultipleStreams) { TEST_F(AsyncClientImplTest, MultipleRequests) { // Send request 1 - message_->body() = std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -572,8 +572,8 @@ TEST_F(AsyncClientImplTest, MultipleRequests) { TEST_F(AsyncClientImplTest, StreamAndRequest) { // Send request - message_->body() = std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -664,8 +664,8 @@ TEST_F(AsyncClientImplTest, StreamWithTrailers) { } TEST_F(AsyncClientImplTest, Trailers) { - message_->body() = std::make_unique("test body"); - Buffer::Instance& data = *message_->body(); + message_->body().add("test body"); + Buffer::Instance& data = message_->body(); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](ResponseDecoder& decoder, @@ -854,7 +854,7 @@ TEST_F(AsyncClientImplTest, ResetInOnHeaders) { Http::StreamDecoderFilterCallbacks* filter_callbacks = static_cast(stream); filter_callbacks->encodeHeaders( - ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{":status", "200"}}), false); + ResponseHeaderMapPtr(new TestResponseHeaderMapImpl{{":status", "200"}}), false, "details"); } TEST_F(AsyncClientImplTest, RemoteResetAfterStreamStart) { @@ -1116,7 
+1116,7 @@ TEST_F(AsyncClientImplTest, PoolFailureWithBody) { EXPECT_NE(nullptr, &request); EXPECT_EQ(503, Utility::getResponseStatus(response->headers())); })); - message_->body() = std::make_unique("hello"); + message_->body().add("hello"); EXPECT_EQ(nullptr, client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions())); EXPECT_EQ( diff --git a/test/common/http/codec_impl_corpus/h1_dispatch_after_reset b/test/common/http/codec_impl_corpus/h1_dispatch_after_reset new file mode 100644 index 000000000000..826c7fe08faa --- /dev/null +++ b/test/common/http/codec_impl_corpus/h1_dispatch_after_reset @@ -0,0 +1,37 @@ +h1_settings { +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "blah" + } + headers { + key: "content-length" + value: "55" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + stream_id: 1 + request { + data: 73711616 + end_stream: true + } + } +} diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index e7d1b787d5e0..3829d69b5dc4 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -385,7 +385,8 @@ class HttpStream : public LinkedObject { // the buffer via swap() or modified with mutate(). class ReorderBuffer { public: - ReorderBuffer(Connection& connection) : connection_(connection) {} + ReorderBuffer(Connection& connection, const bool& should_close_connection) + : connection_(connection), should_close_connection_(should_close_connection) {} void add(Buffer::Instance& data) { bufs_.emplace_back(); @@ -397,6 +398,10 @@ class ReorderBuffer { while (!bufs_.empty()) { Buffer::OwnedImpl& buf = bufs_.front(); while (buf.length() > 0) { + if (should_close_connection_) { + ENVOY_LOG_MISC(trace, "Buffer dispatch disabled, stopping drain"); + return codecClientError("preventing buffer drain due to connection closure"); + } status = connection_.dispatch(buf); if (!status.ok()) { ENVOY_LOG_MISC(trace, "Error status: {}", status.message()); @@ -439,6 +444,9 @@ class ReorderBuffer { Connection& connection_; std::deque bufs_; + // A reference to a flag indicating whether the reorder buffer is allowed to dispatch data to + // the connection (reference to should_close_connection). 
+ const bool& should_close_connection_; }; using HttpStreamPtr = std::unique_ptr; @@ -456,6 +464,7 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi NiceMock client_callbacks; NiceMock server_connection; NiceMock server_callbacks; + NiceMock random; uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; uint32_t max_response_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; @@ -471,7 +480,7 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi if (http2) { client = std::make_unique( client_connection, client_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), - client_http2_options, max_request_headers_kb, max_response_headers_count, + random, client_http2_options, max_request_headers_kb, max_response_headers_count, Http2::ProdNghttp2SessionFactory::get()); } else { client = std::make_unique( @@ -484,7 +493,7 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi fromHttp2Settings(input.h2_settings().server())}; server = std::make_unique( server_connection, server_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), - server_http2_options, max_request_headers_kb, max_request_headers_count, + random, server_http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; @@ -494,8 +503,14 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi headers_with_underscores_action); } - ReorderBuffer client_write_buf{*server}; - ReorderBuffer server_write_buf{*client}; + // We track whether the connection should be closed for HTTP/1, since stream resets imply + // connection closes. + bool should_close_connection = false; + + // The buffers will be blocked from dispatching data if should_close_connection is set to true. + // This prevents sending data if a stream reset occurs during the test cleanup when using HTTP/1. + ReorderBuffer client_write_buf{*server, should_close_connection}; + ReorderBuffer server_write_buf{*client, should_close_connection}; ON_CALL(client_connection, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { @@ -545,10 +560,6 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi return status; }; - // We track whether the connection should be closed for HTTP/1, since stream resets imply - // connection closes. 
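// (The should_close_connection flag is now declared before the two ReorderBuffer instances are
//  constructed, replacing its previous declaration removed below, so each buffer can hold a
//  const bool& to it and observe writes made later in the action loop.)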
- bool should_close_connection = false; - constexpr auto max_actions = 1024; bool codec_error = false; for (int i = 0; i < std::min(max_actions, input.actions().size()) && !should_close_connection && diff --git a/test/common/http/codes_speed_test.cc b/test/common/http/codes_speed_test.cc index 0aa91791a20f..f6730603abc6 100644 --- a/test/common/http/codes_speed_test.cc +++ b/test/common/http/codes_speed_test.cc @@ -9,8 +9,8 @@ #include "envoy/stats/stats.h" #include "common/http/codes.h" -#include "common/stats/fake_symbol_table_impl.h" #include "common/stats/isolated_store_impl.h" +#include "common/stats/symbol_table_impl.h" #include "benchmark/benchmark.h" @@ -76,24 +76,7 @@ template class CodeUtilitySpeedTest { } // namespace Http } // namespace Envoy -static void BM_AddResponsesFakeSymtab(benchmark::State& state) { - Envoy::Http::CodeUtilitySpeedTest context; - - for (auto _ : state) { - context.addResponses(); - } -} -BENCHMARK(BM_AddResponsesFakeSymtab); - -static void BM_ResponseTimingFakeSymtab(benchmark::State& state) { - Envoy::Http::CodeUtilitySpeedTest context; - - for (auto _ : state) { - context.responseTiming(); - } -} -BENCHMARK(BM_ResponseTimingFakeSymtab); - +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_AddResponsesRealSymtab(benchmark::State& state) { Envoy::Http::CodeUtilitySpeedTest context; @@ -103,6 +86,7 @@ static void BM_AddResponsesRealSymtab(benchmark::State& state) { } BENCHMARK(BM_AddResponsesRealSymtab); +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_ResponseTimingRealSymtab(benchmark::State& state) { Envoy::Http::CodeUtilitySpeedTest context; diff --git a/test/common/http/codes_test.cc b/test/common/http/codes_test.cc index 9a071f8c122f..bcbcf4820489 100644 --- a/test/common/http/codes_test.cc +++ b/test/common/http/codes_test.cc @@ -8,7 +8,6 @@ #include "common/common/empty_string.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" -#include "common/stats/symbol_table_creator.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/printers.h" diff --git a/test/common/http/conn_manager_impl_fuzz.proto b/test/common/http/conn_manager_impl_fuzz.proto index 92d6e1c32652..004af6a82e4b 100644 --- a/test/common/http/conn_manager_impl_fuzz.proto +++ b/test/common/http/conn_manager_impl_fuzz.proto @@ -19,7 +19,7 @@ message NewStream { enum HeaderStatus { HEADER_CONTINUE = 0; HEADER_STOP_ITERATION = 1; - HEADER_CONTINUE_AND_END_STREAM = 2; + reserved 2; HEADER_STOP_ALL_ITERATION_AND_BUFFER = 3; HEADER_STOP_ALL_ITERATION_AND_WATERMARK = 4; } diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index b7f5056e3aff..2e97cfbd42f1 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -23,7 +23,6 @@ #include "common/http/request_id_extension_impl.h" #include "common/network/address_impl.h" #include "common/network/utility.h" -#include "common/stats/symbol_table_creator.h" #include "test/common/http/conn_manager_impl_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" @@ -299,16 +298,11 @@ class FuzzStream { return Http::okStatus(); })); ON_CALL(*decoder_filter_, decodeHeaders(_, _)) - .WillByDefault(InvokeWithoutArgs([this, decode_header_status, - end_stream]() -> Http::FilterHeadersStatus { - header_status_ = fromHeaderStatus(decode_header_status); - // When a filter should not return ContinueAndEndStream when send with end_stream set - // (see 
https://github.com/envoyproxy/envoy/pull/4885#discussion_r232176826) - if (end_stream && (*header_status_ == Http::FilterHeadersStatus::ContinueAndEndStream)) { - *header_status_ = Http::FilterHeadersStatus::Continue; - } - return *header_status_; - })); + .WillByDefault( + InvokeWithoutArgs([this, decode_header_status]() -> Http::FilterHeadersStatus { + header_status_ = fromHeaderStatus(decode_header_status); + return *header_status_; + })); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); } @@ -324,8 +318,6 @@ class FuzzStream { return Http::FilterHeadersStatus::Continue; case test::common::http::HeaderStatus::HEADER_STOP_ITERATION: return Http::FilterHeadersStatus::StopIteration; - case test::common::http::HeaderStatus::HEADER_CONTINUE_AND_END_STREAM: - return Http::FilterHeadersStatus::ContinueAndEndStream; case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_BUFFER: return Http::FilterHeadersStatus::StopAllIterationAndBuffer; case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_WATERMARK: @@ -489,7 +481,7 @@ class FuzzStream { if (CodeUtility::is1xx(status) && status != enumToInt(Http::Code::SwitchingProtocols)) { headers->setReferenceKey(Headers::get().Status, "200"); } - decoder_filter_->callbacks_->encodeHeaders(std::move(headers), end_stream); + decoder_filter_->callbacks_->encodeHeaders(std::move(headers), end_stream, "details"); state = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; } break; @@ -560,8 +552,8 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { FuzzConfig config(input.forward_client_cert()); NiceMock drain_close; NiceMock random; - Stats::SymbolTablePtr symbol_table(Stats::SymbolTableCreator::makeSymbolTable()); - Http::ContextImpl http_context(*symbol_table); + Stats::SymbolTableImpl symbol_table; + Http::ContextImpl http_context(symbol_table); NiceMock runtime; NiceMock local_info; NiceMock cluster_manager; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 02c556b14a74..a022f18c2cd8 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -1,59 +1,6 @@ -#include -#include -#include -#include -#include - -#include "envoy/access_log/access_log.h" -#include "envoy/buffer/buffer.h" -#include "envoy/event/dispatcher.h" -#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" -#include "envoy/http/request_id_extension.h" -#include "envoy/tracing/http_tracer.h" -#include "envoy/type/tracing/v3/custom_tag.pb.h" -#include "envoy/type/v3/percent.pb.h" - -#include "common/access_log/access_log_impl.h" -#include "common/buffer/buffer_impl.h" -#include "common/common/empty_string.h" -#include "common/common/macros.h" -#include "common/formatter/substitution_formatter.h" -#include "common/http/conn_manager_impl.h" -#include "common/http/context_impl.h" -#include "common/http/date_provider_impl.h" -#include "common/http/exception.h" -#include "common/http/header_map_impl.h" -#include "common/http/headers.h" -#include "common/http/request_id_extension_impl.h" -#include "common/network/address_impl.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" - -#include "extensions/access_loggers/file/file_access_log_impl.h" - -#include "test/mocks/access_log/mocks.h" -#include "test/mocks/buffer/mocks.h" -#include "test/mocks/common.h" -#include "test/mocks/http/mocks.h" 
-#include "test/mocks/local_info/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/router/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/factory_context.h" -#include "test/mocks/server/instance.h" -#include "test/mocks/server/overload_manager.h" -#include "test/mocks/ssl/mocks.h" -#include "test/mocks/tracing/mocks.h" -#include "test/mocks/upstream/cluster_manager.h" -#include "test/mocks/upstream/host.h" -#include "test/mocks/upstream/thread_local_cluster.h" +#include "test/common/http/conn_manager_impl_test_base.h" #include "test/test_common/logging.h" -#include "test/test_common/printers.h" #include "test/test_common/test_runtime.h" -#include "test/test_common/test_time.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" using testing::_; using testing::An; @@ -64,392 +11,12 @@ using testing::HasSubstr; using testing::InSequence; using testing::Invoke; using testing::InvokeWithoutArgs; -using testing::Mock; -using testing::NiceMock; -using testing::Property; -using testing::Ref; using testing::Return; using testing::ReturnRef; namespace Envoy { namespace Http { -class HttpConnectionManagerImplTest : public testing::Test, public ConnectionManagerConfig { -public: - struct RouteConfigProvider : public Router::RouteConfigProvider { - RouteConfigProvider(TimeSource& time_source) : time_source_(time_source) {} - - // Router::RouteConfigProvider - Router::ConfigConstSharedPtr config() override { return route_config_; } - absl::optional configInfo() const override { return {}; } - SystemTime lastUpdated() const override { return time_source_.systemTime(); } - void onConfigUpdate() override {} - - TimeSource& time_source_; - std::shared_ptr route_config_{new NiceMock()}; - }; - - HttpConnectionManagerImplTest() - : http_context_(fake_stats_.symbolTable()), access_log_path_("dummy_path"), - access_logs_{ - AccessLog::InstanceSharedPtr{new Extensions::AccessLoggers::File::FileAccessLog( - access_log_path_, {}, - Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(), log_manager_)}}, - codec_(new NiceMock()), - stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), - POOL_HISTOGRAM(fake_stats_))}, - "", fake_stats_), - - listener_stats_({CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}), - request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)), - local_reply_(LocalReply::Factory::createDefault()) { - - ON_CALL(route_config_provider_, lastUpdated()) - .WillByDefault(Return(test_time_.timeSystem().systemTime())); - ON_CALL(scoped_route_config_provider_, lastUpdated()) - .WillByDefault(Return(test_time_.timeSystem().systemTime())); - // response_encoder_ is not a NiceMock on purpose. This prevents complaining about this - // method only. 
- EXPECT_CALL(response_encoder_, getStream()).Times(AtLeast(0)); - } - - ~HttpConnectionManagerImplTest() override { - filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); - } - - Tracing::CustomTagConstSharedPtr requestHeaderCustomTag(const std::string& header) { - envoy::type::tracing::v3::CustomTag::Header headerTag; - headerTag.set_name(header); - return std::make_shared(header, headerTag); - } - - void setup(bool ssl, const std::string& server_name, bool tracing = true, bool use_srds = false) { - use_srds_ = use_srds; - if (ssl) { - ssl_connection_ = std::make_shared(); - } - - server_name_ = server_name; - ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_connection_)); - ON_CALL(Const(filter_callbacks_.connection_), ssl()).WillByDefault(Return(ssl_connection_)); - filter_callbacks_.connection_.local_address_ = - std::make_shared("127.0.0.1", 443); - filter_callbacks_.connection_.remote_address_ = - std::make_shared("0.0.0.0"); - conn_manager_ = std::make_unique( - *this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_, - overload_manager_, test_time_.timeSystem()); - conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); - - if (tracing) { - envoy::type::v3::FractionalPercent percent1; - percent1.set_numerator(100); - envoy::type::v3::FractionalPercent percent2; - percent2.set_numerator(10000); - percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND); - tracing_config_ = std::make_unique( - TracingConnectionManagerConfig{Tracing::OperationName::Ingress, - {{":method", requestHeaderCustomTag(":method")}}, - percent1, - percent2, - percent1, - false, - 256}); - } - } - - void setupFilterChain(int num_decoder_filters, int num_encoder_filters, int num_requests = 1) { - // NOTE: The length/repetition in this routine allows InSequence to work correctly in an outer - // scope. 
- for (int i = 0; i < num_decoder_filters * num_requests; i++) { - decoder_filters_.push_back(new MockStreamDecoderFilter()); - } - - for (int i = 0; i < num_encoder_filters * num_requests; i++) { - encoder_filters_.push_back(new MockStreamEncoderFilter()); - } - - InSequence s; - for (int req = 0; req < num_requests; req++) { - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([num_decoder_filters, num_encoder_filters, req, - this](FilterChainFactoryCallbacks& callbacks) -> void { - if (log_handler_.get()) { - callbacks.addAccessLogHandler(log_handler_); - } - for (int i = 0; i < num_decoder_filters; i++) { - callbacks.addStreamDecoderFilter( - StreamDecoderFilterSharedPtr{decoder_filters_[req * num_decoder_filters + i]}); - } - - for (int i = 0; i < num_encoder_filters; i++) { - callbacks.addStreamEncoderFilter( - StreamEncoderFilterSharedPtr{encoder_filters_[req * num_encoder_filters + i]}); - } - })); - - for (int i = 0; i < num_decoder_filters; i++) { - EXPECT_CALL(*decoder_filters_[req * num_decoder_filters + i], setDecoderFilterCallbacks(_)); - } - - for (int i = 0; i < num_encoder_filters; i++) { - EXPECT_CALL(*encoder_filters_[req * num_encoder_filters + i], setEncoderFilterCallbacks(_)); - } - } - } - - void setUpBufferLimits() { - ON_CALL(response_encoder_, getStream()).WillByDefault(ReturnRef(stream_)); - EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); - EXPECT_CALL(stream_, addCallbacks(_)) - .WillOnce(Invoke( - [&](Http::StreamCallbacks& callbacks) -> void { stream_callbacks_ = &callbacks; })); - EXPECT_CALL(stream_, setFlushTimeout(_)); - } - - // If request_with_data_and_trailers is true, includes data and trailers in the request. If - // decode_headers_stop_all is true, decoder_filters_[0]'s callback decodeHeaders() returns - // StopAllIterationAndBuffer. - void setUpEncoderAndDecoder(bool request_with_data_and_trailers, bool decode_headers_stop_all) { - setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)) - .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - if (request_with_data_and_trailers) { - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("12345"); - decoder->decodeData(fake_data, false); - - RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); - } else { - decoder->decodeHeaders(std::move(headers), true); - } - return Http::okStatus(); - })); - - setupFilterChain(2, 2); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, _)) - .WillOnce(InvokeWithoutArgs([&, decode_headers_stop_all]() -> FilterHeadersStatus { - Buffer::OwnedImpl data("hello"); - decoder_filters_[0]->callbacks_->addDecodedData(data, true); - if (decode_headers_stop_all) { - return FilterHeadersStatus::StopAllIterationAndBuffer; - } else { - return FilterHeadersStatus::Continue; - } - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - } - - Event::MockTimer* setUpTimer() { - // this timer belongs to whatever by whatever next creates a timer. - // See Envoy::Event::MockTimer for details. 
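// [Illustrative aside, not part of the change] The helper above depends on the mock
// timer attaching itself to whatever next asks the dispatcher for a timer. A
// standalone sketch of that hand-off, with hypothetical FakeDispatcher/FakeTimer
// stand-ins rather than Envoy's Event::MockTimer:
#include <cassert>

struct FakeTimer;

struct FakeDispatcher {
  FakeTimer* pending = nullptr; // armed by the most recently constructed FakeTimer
  FakeTimer* createTimer() {    // called by the code under test
    FakeTimer* armed = pending;
    pending = nullptr;
    return armed;
  }
};

struct FakeTimer {
  explicit FakeTimer(FakeDispatcher& dispatcher) { dispatcher.pending = this; }
};

int main() {
  FakeDispatcher dispatcher;
  FakeTimer armed(dispatcher);                // like setUpTimer() above
  assert(dispatcher.createTimer() == &armed); // the next creation yields that instance
  return 0;
}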
- return new Event::MockTimer(&filter_callbacks_.connection_.dispatcher_); - } - - void sendRequestHeadersAndData() { - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - auto status = streaming_filter_ ? FilterDataStatus::StopIterationAndWatermark - : FilterDataStatus::StopIterationAndBuffer; - EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).WillOnce(Return(status)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - // Kick off the incoming data. |fake_input| is not sent, but instead kicks - // off sending the headers and |data| queued up in setUpEncoderAndDecoder(). - Buffer::OwnedImpl fake_input("asdf"); - conn_manager_->onData(fake_input, false); - } - - ResponseHeaderMap* sendResponseHeaders(ResponseHeaderMapPtr&& response_headers) { - ResponseHeaderMap* altered_response_headers = nullptr; - - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _)) - .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - altered_response_headers = &headers; - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); - return altered_response_headers; - } - - void expectOnDestroy(bool deferred = true) { - for (auto filter : decoder_filters_) { - EXPECT_CALL(*filter, onDestroy()); - } - - auto setup_filter_expect = [](MockStreamEncoderFilter* filter) { - EXPECT_CALL(*filter, onDestroy()); - }; - std::for_each(encoder_filters_.rbegin(), encoder_filters_.rend(), setup_filter_expect); - - if (deferred) { - EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); - } - } - - void doRemoteClose(bool deferred = true) { - EXPECT_CALL(stream_, removeCallbacks(_)); - expectOnDestroy(deferred); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); - } - - // Http::ConnectionManagerConfig - const std::list& accessLogs() override { return access_logs_; } - ServerConnectionPtr createCodec(Network::Connection&, const Buffer::Instance&, - ServerConnectionCallbacks&) override { - return ServerConnectionPtr{codec_}; - } - DateProvider& dateProvider() override { return date_provider_; } - std::chrono::milliseconds drainTimeout() const override { return std::chrono::milliseconds(100); } - FilterChainFactory& filterFactory() override { return filter_factory_; } - bool generateRequestId() const override { return true; } - bool preserveExternalRequestId() const override { return false; } - bool alwaysSetRequestIdInResponse() const override { return false; } - uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } - uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } - absl::optional idleTimeout() const override { return idle_timeout_; } - bool isRoutable() const override { return true; } - absl::optional maxConnectionDuration() const override { - return max_connection_duration_; - } - std::chrono::milliseconds streamIdleTimeout() const override { return stream_idle_timeout_; } - std::chrono::milliseconds requestTimeout() const override { return request_timeout_; } - std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } - absl::optional 
maxStreamDuration() const override { - return max_stream_duration_; - } - bool use_srds_{}; - Router::RouteConfigProvider* routeConfigProvider() override { - if (use_srds_) { - return nullptr; - } - return &route_config_provider_; - } - Config::ConfigProvider* scopedRouteConfigProvider() override { - if (use_srds_) { - return &scoped_route_config_provider_; - } - return nullptr; - } - const std::string& serverName() const override { return server_name_; } - HttpConnectionManagerProto::ServerHeaderTransformation - serverHeaderTransformation() const override { - return server_transformation_; - } - ConnectionManagerStats& stats() override { return stats_; } - ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; } - bool useRemoteAddress() const override { return use_remote_address_; } - const Http::InternalAddressConfig& internalAddressConfig() const override { - return internal_address_config_; - } - uint32_t xffNumTrustedHops() const override { return 0; } - bool skipXffAppend() const override { return false; } - const std::string& via() const override { return EMPTY_STRING; } - Http::ForwardClientCertType forwardClientCert() const override { return forward_client_cert_; } - const std::vector& setCurrentClientCertDetails() const override { - return set_current_client_cert_details_; - } - const Network::Address::Instance& localAddress() override { return local_address_; } - const absl::optional& userAgent() override { return user_agent_; } - Tracing::HttpTracerSharedPtr tracer() override { return tracer_; } - const TracingConnectionManagerConfig* tracingConfig() override { return tracing_config_.get(); } - ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } - bool proxy100Continue() const override { return proxy_100_continue_; } - bool streamErrorOnInvalidHttpMessaging() const override { - return stream_error_on_invalid_http_messaging_; - } - const Http::Http1Settings& http1Settings() const override { return http1_settings_; } - bool shouldNormalizePath() const override { return normalize_path_; } - bool shouldMergeSlashes() const override { return merge_slashes_; } - bool shouldStripMatchingPort() const override { return strip_matching_port_; } - RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headersWithUnderscoresAction() const override { - return headers_with_underscores_action_; - } - const LocalReply::LocalReply& localReply() const override { return *local_reply_; } - - Envoy::Event::SimulatedTimeSystem test_time_; - NiceMock route_config_provider_; - std::shared_ptr route_config_{new NiceMock()}; - NiceMock scoped_route_config_provider_; - Stats::IsolatedStoreImpl fake_stats_; - Http::ContextImpl http_context_; - NiceMock runtime_; - NiceMock log_manager_; - std::string access_log_path_; - std::list access_logs_; - NiceMock filter_callbacks_; - MockServerConnection* codec_; - NiceMock filter_factory_; - ConnectionManagerStats stats_; - ConnectionManagerTracingStats tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}; - NiceMock drain_close_; - std::unique_ptr conn_manager_; - std::string server_name_; - HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{ - HttpConnectionManagerProto::OVERWRITE}; - Network::Address::Ipv4Instance local_address_{"127.0.0.1"}; - bool use_remote_address_{true}; - Http::DefaultInternalAddressConfig internal_address_config_; - 
Http::ForwardClientCertType forward_client_cert_{Http::ForwardClientCertType::Sanitize}; - std::vector set_current_client_cert_details_; - absl::optional user_agent_; - uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; - uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; - absl::optional idle_timeout_; - absl::optional max_connection_duration_; - std::chrono::milliseconds stream_idle_timeout_{}; - std::chrono::milliseconds request_timeout_{}; - std::chrono::milliseconds delayed_close_timeout_{}; - absl::optional max_stream_duration_{}; - NiceMock random_; - NiceMock local_info_; - NiceMock factory_context_; - std::shared_ptr ssl_connection_; - std::shared_ptr> tracer_{ - std::make_shared>()}; - TracingConnectionManagerConfigPtr tracing_config_; - SlowDateProviderImpl date_provider_{test_time_.timeSystem()}; - MockStream stream_; - Http::StreamCallbacks* stream_callbacks_{nullptr}; - NiceMock cluster_manager_; - NiceMock overload_manager_; - uint32_t initial_buffer_limit_{}; - bool streaming_filter_{false}; - Stats::IsolatedStoreImpl fake_listener_stats_; - ConnectionManagerListenerStats listener_stats_; - bool proxy_100_continue_ = false; - bool stream_error_on_invalid_http_messaging_ = false; - bool preserve_external_request_id_ = false; - Http::Http1Settings http1_settings_; - bool normalize_path_ = false; - bool merge_slashes_ = false; - bool strip_matching_port_ = false; - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; - NiceMock upstream_conn_; // for websocket tests - NiceMock conn_pool_; // for websocket tests - RequestIDExtensionSharedPtr request_id_extension_; - const LocalReply::LocalReplyPtr local_reply_; - - // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest. - MockResponseEncoder response_encoder_; - std::vector decoder_filters_; - std::vector encoder_filters_; - std::shared_ptr log_handler_; -}; - TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { setup(false, "envoy-custom-server", false); @@ -480,26 +47,25 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .Times(2) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); // Test not charging stats on the second call. 
if (data.length() == 4) { RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); } else { RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/healthcheck"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); } ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); // Drain 2 so that on the 2nd iteration we will hit zero. data.drain(2); @@ -541,21 +107,20 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); // Test not charging stats on the second call. RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); data.drain(4); return Http::okStatus(); @@ -595,7 +160,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFiltersProxy EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); doRemoteClose(); } @@ -621,7 +186,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFilters) { EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); doRemoteClose(); } @@ -654,7 +219,7 @@ TEST_F(HttpConnectionManagerImplTest, PauseResume100Continue) { EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - 
decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), false); + decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); doRemoteClose(); } @@ -687,28 +252,27 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); // Test not charging stats on the second call. RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); // Allow the decode pipeline to pause. - decoder->decodeData(data, false); + decoder_->decodeData(data, false); ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); // Resume decode pipeline after encoding 100 continue headers, we're now // ready to trigger #10923. - decoder->decodeData(data, true); + decoder_->decodeData(data, true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); data.drain(4); return Http::okStatus(); @@ -800,10 +364,10 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { setup(false, ""); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "http://api.lyft.com/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); })); @@ -824,6 +388,7 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { EXPECT_EQ("absolute_path_rejected", filter->decoder_callbacks_->streamInfo().responseCodeDetails().value()); })); + EXPECT_CALL(*filter, onStreamComplete()); EXPECT_CALL(*filter, onDestroy()); Buffer::OwnedImpl fake_input("1234"); @@ -838,12 +403,12 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { normalize_path_ = true; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/ab%00c"}, // "%00" is not valid in path according to RFC {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); })); @@ -864,6 +429,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { EXPECT_EQ("path_normalization_failed", filter->decoder_callbacks_->streamInfo().responseCodeDetails().value()); })); + EXPECT_CALL(*filter, onStreamComplete()); 
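// [Illustrative aside, not part of the change] The expectations being added across
// these tests always place onStreamComplete() ahead of onDestroy(): the completion
// hook fires while per-stream state is still alive, and destruction follows. A
// standalone sketch of that ordering; Filter and Stream are hypothetical stand-ins,
// not Envoy types:
#include <cassert>
#include <string>
#include <vector>

struct Filter {
  std::vector<std::string>* events;
  void onStreamComplete() { events->push_back("onStreamComplete"); }
  void onDestroy() { events->push_back("onDestroy"); }
};

struct Stream {
  Filter filter;
  // Teardown signals completion first so the filter can still act on stream state
  // (for example enriching data read by access loggers, as tested further down).
  void close() {
    filter.onStreamComplete();
    filter.onDestroy();
  }
};

int main() {
  std::vector<std::string> events;
  Stream stream{Filter{&events}};
  stream.close();
  assert((events == std::vector<std::string>{"onStreamComplete", "onDestroy"}));
  return 0;
}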
EXPECT_CALL(*filter, onDestroy()); Buffer::OwnedImpl fake_input("1234"); @@ -895,10 +461,10 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -906,6 +472,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + EXPECT_CALL(*filter, onStreamComplete()); EXPECT_CALL(*filter, onDestroy()); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } @@ -920,10 +487,10 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { const std::string normalized_path = "/z"; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -955,10 +522,10 @@ TEST_F(HttpConnectionManagerImplTest, RouteOverride) { setup(false, ""); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -1175,10 +742,10 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) { EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", original_host}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -1187,6 +754,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) { conn_manager_->onData(fake_input, false); // Clean up. 
+ EXPECT_CALL(*filter, onStreamComplete()); EXPECT_CALL(*filter, onDestroy()); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } @@ -1201,10 +769,10 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { const std::string normalized_host = "host"; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", original_host}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -1439,28 +1007,27 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); return Http::okStatus(); })); // Should be no 'x-envoy-decorator-operation' response header. - EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(nullptr, headers.EnvoyDecoratorOperation()); })); @@ -1509,28 +1076,27 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); return Http::okStatus(); })); // Verify decorator operation response header has been defined. 
- EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("testOp", headers.getEnvoyDecoratorOperationValue()); })); @@ -1577,28 +1143,27 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); return Http::okStatus(); })); // Verify decorator operation response header has NOT been defined (i.e. not propagated). - EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(nullptr, headers.EnvoyDecoratorOperation()); })); @@ -1643,10 +1208,9 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1654,11 +1218,11 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}, {"x-envoy-decorator-operation", "testOp"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); @@ -1667,7 +1231,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat // Should be no 'x-envoy-decorator-operation' response header, as decorator // was overridden by request header. 
- EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(nullptr, headers.EnvoyDecoratorOperation()); })); @@ -1726,21 +1290,20 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); @@ -1810,21 +1373,20 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); @@ -1894,22 +1456,21 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); 
data.drain(4); @@ -1952,22 +1513,21 @@ TEST_F(HttpConnectionManagerImplTest, use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); data.drain(4); return Http::okStatus(); @@ -2010,10 +1570,9 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLog) { local_address); })); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -2021,11 +1580,59 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLog) { {":path", "/"}, {"x-forwarded-for", xff_address}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder_->decodeHeaders(std::move(headers), true); + + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); + + data.drain(4); + return Http::okStatus(); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, TestFilterCanEnrichAccessLogs) { + setup(false, ""); + + std::shared_ptr filter(new NiceMock()); + std::shared_ptr handler(new NiceMock()); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(filter); + callbacks.addAccessLogHandler(handler); + })); + + EXPECT_CALL(*filter, onStreamComplete()).WillOnce(Invoke([&]() { + ProtobufWkt::Value metadata_value; + metadata_value.set_string_value("value"); + ProtobufWkt::Struct metadata; + metadata.mutable_fields()->insert({"field", metadata_value}); + filter->callbacks_->streamInfo().setDynamicMetadata("metadata_key", metadata); + })); + + EXPECT_CALL(*handler, log(_, _, _, _)) + .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, + const StreamInfo::StreamInfo& stream_info) { + auto dynamic_meta = stream_info.dynamicMetadata().filter_metadata().at("metadata_key"); + EXPECT_EQ("value", dynamic_meta.fields().at("field").string_value()); + })); + + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; decoder->decodeHeaders(std::move(headers), true); 
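// [Illustrative aside, not part of the change] TestFilterCanEnrichAccessLogs above
// checks that metadata written from the filter's onStreamComplete() is visible to
// the access-log handler that runs afterwards. A standalone sketch of that flow,
// using plain maps in place of ProtobufWkt::Struct; StreamInfoLike is a hypothetical
// stand-in, not Envoy's StreamInfo:
#include <cassert>
#include <map>
#include <string>

using Fields = std::map<std::string, std::string>;

struct StreamInfoLike {
  std::map<std::string, Fields> dynamic_metadata;
  void setDynamicMetadata(const std::string& key, const Fields& fields) {
    dynamic_metadata[key] = fields;
  }
};

int main() {
  StreamInfoLike stream_info;

  // Filter onStreamComplete(): enrich the stream info before logging runs.
  stream_info.setDynamicMetadata("metadata_key", Fields{{"field", "value"}});

  // Access-log handler log(): invoked after onStreamComplete(), so the enrichment
  // is observable here.
  const Fields& meta = stream_info.dynamic_metadata.at("metadata_key");
  assert(meta.at("field") == "value");
  return 0;
}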
filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); data.drain(4); return Http::okStatus(); @@ -2057,14 +1664,13 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamDisconnectAccessLog) { EXPECT_EQ("downstream_remote_disconnect", stream_info.responseCodeDetails().value()); })); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); @@ -2099,21 +1705,20 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { EXPECT_NE(nullptr, stream_info.routeEntry()); })); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + filter->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; filter->callbacks_->encodeTrailers(std::move(response_trailers)); @@ -2150,15 +1755,14 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithInvalidRequest) { EXPECT_EQ(nullptr, stream_info.routeEntry()); })); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); // These request headers are missing the necessary ":host" RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(0); return Http::okStatus(); })); @@ -2174,12 +1778,12 @@ class StreamErrorOnInvalidHttpMessageTest : public HttpConnectionManagerImplTest EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); // These request headers are missing the necessary ":host" RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(0); return Http::okStatus(); })); @@ -2208,6 +1812,8 @@ 
class StreamErrorOnInvalidHttpMessageTest : public HttpConnectionManagerImplTest EXPECT_EQ(nullptr, headers.Connection()); } })); + + EXPECT_CALL(*filter, onStreamComplete()); EXPECT_CALL(*filter, onDestroy()); Buffer::OwnedImpl fake_input; @@ -2247,21 +1853,20 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogSsl) { EXPECT_NE(nullptr, stream_info.routeEntry()); })); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + filter->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; filter->callbacks_->encodeTrailers(std::move(response_trailers)); @@ -2292,21 +1897,20 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { callbacks.addStreamDecoderFilter(filter); })); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); data.drain(4); return Http::okStatus(); @@ -2319,17 +1923,16 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { TEST_F(HttpConnectionManagerImplTest, NoPath) { setup(false, ""); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "NOT_CONNECT"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); })); - EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("404", headers.getStatusValue()); })); @@ -2347,11 +1950,11 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNotConfigured) { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_(_)).Times(0); EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> 
Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); data.drain(4); return Http::okStatus(); @@ -2437,6 +2040,8 @@ TEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdl })); EXPECT_CALL(*filter, encodeData(_, _)); EXPECT_CALL(*filter, encodeComplete()); + + EXPECT_CALL(*filter, onStreamComplete()); EXPECT_CALL(*filter, onDestroy()); EXPECT_CALL(response_encoder_, encodeHeaders(_, _)); @@ -2450,7 +2055,6 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { stream_idle_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); @@ -2497,6 +2101,193 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value()); } +// Test timeout variants. +TEST_F(HttpConnectionManagerImplTest, DurationTimeout) { + stream_idle_timeout_ = std::chrono::milliseconds(10); + setup(false, ""); + setupFilterChain(1, 0); + RequestHeaderMap* latched_headers = nullptr; + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + // Create the stream. + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(_, _)); + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + EXPECT_CALL(*idle_timer, enableTimer(_, _)); + EXPECT_CALL(*idle_timer, disableTimer()); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + latched_headers = headers.get(); + decoder->decodeHeaders(std::move(headers), false); + + data.drain(4); + return Http::okStatus(); + })); + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Clear and refresh the route cache (checking clusterInfo refreshes the route cache) + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + + Event::MockTimer* timer = setUpTimer(); + + // Set a max duration of 30ms and make sure a 30ms timer is set. + { + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(30), _)); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration()) + .Times(2) + .WillRepeatedly(Return(std::chrono::milliseconds(30))); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // Clear the timeout and make sure the timer is disabled. + { + EXPECT_CALL(*timer, disableTimer()); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration()) + .Times(1) + .WillRepeatedly(Return(absl::nullopt)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // With no route timeout, but HCM defaults, the HCM defaults will be used. 
+ { + max_stream_duration_ = std::chrono::milliseconds(17); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(17), _)); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration()) + .Times(1) + .WillRepeatedly(Return(absl::nullopt)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + max_stream_duration_ = absl::nullopt; + } + + // Add a gRPC header, but not a gRPC timeout and verify the timer is unchanged. + latched_headers->setGrpcTimeout("1M"); + { + EXPECT_CALL(*timer, disableTimer()); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, maxStreamDuration()) + .Times(1) + .WillRepeatedly(Return(absl::nullopt)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // With a gRPC header of 1M and a gRPC header max of 0, respect the gRPC header. + { + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(0))); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(60000), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // With a gRPC header and a larger gRPC header cap, respect the gRPC header. + { + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(20000000))); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(60000), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // With a gRPC header and a small gRPC header cap, use the cap. + { + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(20))); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(20), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + latched_headers->setGrpcTimeout("0m"); + // With a gRPC header of 0, use the header + { + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(20))); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(0), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + latched_headers->setGrpcTimeout("1M"); + // With a timeout of 20ms and an offset of 10ms, set a timeout for 10ms. 
+ { + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(20))); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, + grpcTimeoutHeaderOffset()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(10))); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(10), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // With a timeout of 20ms and an offset of 30ms, set a timeout for 0ms + { + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(20))); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, + grpcTimeoutHeaderOffset()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(30))); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(0), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // With a gRPC timeout of 20ms, and 5ms used already when the route was + // refreshed, set a timer for 15ms. + { + test_time_.timeSystem().setMonotonicTime(MonotonicTime(std::chrono::milliseconds(5))); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(20))); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, + grpcTimeoutHeaderOffset()) + .Times(AnyNumber()) + .WillRepeatedly(Return(absl::nullopt)); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(15), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // With a gRPC timeout of 20ms, and 25ms used already when the route was + // refreshed, set a timer for now (0ms) + { + test_time_.timeSystem().setMonotonicTime(MonotonicTime(std::chrono::milliseconds(25))); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, grpcTimeoutHeaderMax()) + .Times(AnyNumber()) + .WillRepeatedly(Return(std::chrono::milliseconds(20))); + EXPECT_CALL(route_config_provider_.route_config_->route_->route_entry_, + grpcTimeoutHeaderOffset()) + .Times(AnyNumber()) + .WillRepeatedly(Return(absl::nullopt)); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(0), _)); + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[0]->callbacks_->clusterInfo(); + } + + // Cleanup. + EXPECT_CALL(*timer, disableTimer()); + EXPECT_CALL(*decoder_filters_[0], onStreamComplete()); + EXPECT_CALL(*decoder_filters_[0], onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + // Per-route timeouts override the global stream idle timeout. 
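// [Illustrative aside, not part of the change] The DurationTimeout test above walks
// through how a grpc-timeout request header, the route's grpcTimeoutHeaderMax() cap,
// grpcTimeoutHeaderOffset(), and time already elapsed combine into the per-stream
// duration timer ("1M" meaning one minute, a max of 0 meaning "no cap"). This is a
// standalone sketch of that arithmetic as a simplified reading of the test, matching
// its expected values; it is not Envoy's implementation, and the helper names are
// hypothetical.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <string>

// Parse a grpc-timeout header value ("1M", "0m", ...) into milliseconds.
// Only the units used in the test (H, M, S, m) are handled here.
int64_t parseGrpcTimeoutMs(const std::string& value) {
  const int64_t number = std::stoll(value.substr(0, value.size() - 1));
  switch (value.back()) {
  case 'H': return number * 3600 * 1000;
  case 'M': return number * 60 * 1000;
  case 'S': return number * 1000;
  case 'm': return number;
  default: return 0; // 'u'/'n' omitted for brevity.
  }
}

int64_t effectiveTimerMs(const std::string& grpc_timeout, int64_t cap_ms, int64_t offset_ms,
                         int64_t elapsed_ms) {
  int64_t timeout = parseGrpcTimeoutMs(grpc_timeout);
  if (cap_ms > 0) { // A cap of 0 is treated as "no cap" by the test.
    timeout = std::min(timeout, cap_ms);
  }
  timeout -= offset_ms + elapsed_ms; // Offset and already-elapsed time both shrink the timer.
  return std::max<int64_t>(timeout, 0);
}

int main() {
  assert(effectiveTimerMs("1M", 0, 0, 0) == 60000);        // header respected, no cap
  assert(effectiveTimerMs("1M", 20000000, 0, 0) == 60000); // larger cap, header respected
  assert(effectiveTimerMs("1M", 20, 0, 0) == 20);          // small cap wins
  assert(effectiveTimerMs("0m", 20, 0, 0) == 0);           // explicit zero header
  assert(effectiveTimerMs("1M", 20, 10, 0) == 10);         // offset subtracted
  assert(effectiveTimerMs("1M", 20, 30, 0) == 0);          // offset larger than timeout
  assert(effectiveTimerMs("1M", 20, 0, 5) == 15);          // 5ms already used
  assert(effectiveTimerMs("1M", 20, 0, 25) == 0);          // more than the timeout used
  return 0;
}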
TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteOverride) { stream_idle_timeout_ = std::chrono::milliseconds(10); @@ -2508,13 +2299,13 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteOverride) { .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(30), _)); EXPECT_CALL(*idle_timer, disableTimer()); - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); data.drain(4); return Http::okStatus(); @@ -2538,12 +2329,12 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteZeroOverride) { .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, disableTimer()); - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); data.drain(4); return Http::okStatus(); @@ -2565,13 +2356,13 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders // Codec sends downstream request headers. EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); // Expect resetIdleTimer() to be called for the response // encodeHeaders()/encodeData(). @@ -2607,12 +2398,12 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNormalTermination) { // Codec sends downstream request headers. Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); data.drain(4); return Http::okStatus(); @@ -2636,16 +2427,16 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders // Codec sends downstream request headers. 
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeData(data, false); + decoder_->decodeData(data, false); // Expect resetIdleTimer() to be called for the response // encodeHeaders()/encodeData(). @@ -2690,17 +2481,17 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) // Codec sends downstream request headers, upstream response headers are // encoded. EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + filter->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); EXPECT_CALL(*idle_timer, disableTimer()); idle_timer->invokeCallback(); @@ -2740,13 +2531,12 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { // Codec sends downstream request headers, upstream response headers are // encoded, data events happen in various directions. 
Event::MockTimer* idle_timer = setUpTimer(); - RequestDecoder* decoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); ResponseHeaderMapPtr response_continue_headers{ new TestResponseHeaderMapImpl{{":status", "100"}}}; @@ -2756,14 +2546,14 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + filter->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeData(data, false); + decoder_->decodeData(data, false); RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); - decoder->decodeTrailers(std::move(trailers)); + decoder_->decodeTrailers(std::move(trailers)); Buffer::OwnedImpl fake_response("world"); EXPECT_CALL(*idle_timer, enableTimer(_, _)); @@ -2884,12 +2674,12 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); EXPECT_CALL(*request_timer, disableTimer()).Times(1); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; // the second parameter 'false' leaves the stream open - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); return Http::okStatus(); })); @@ -2910,12 +2700,12 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*request_timer, disableTimer()).Times(2); - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -2936,13 +2726,13 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "POST"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); EXPECT_CALL(*request_timer, disableTimer()).Times(2); - decoder->decodeData(data, true); + 
decoder_->decodeData(data, true); return Http::okStatus(); })); @@ -2962,16 +2752,16 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - decoder->decodeData(data, false); + decoder_->decodeHeaders(std::move(headers), false); + decoder_->decodeData(data, false); EXPECT_CALL(*request_timer, disableTimer()).Times(2); RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); + decoder_->decodeTrailers(std::move(trailers)); return Http::okStatus(); })); @@ -2998,16 +2788,16 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); EXPECT_CALL(*request_timer, disableTimer()).Times(2); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + filter->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); return Http::okStatus(); })); @@ -3025,11 +2815,11 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnConnectionTermin Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); return Http::okStatus(); })); @@ -3103,19 +2893,17 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) { TEST_F(HttpConnectionManagerImplTest, Http10Rejected) { setup(false, ""); - RequestDecoder* decoder = nullptr; - NiceMock encoder; EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10)); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); })); - EXPECT_CALL(encoder, encodeHeaders(_, true)) + 
EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("426", headers.getStatusValue()); EXPECT_EQ("close", headers.getConnectionValue()); @@ -3131,19 +2919,17 @@ TEST_F(HttpConnectionManagerImplTest, Http10ConnCloseLegacy) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.fixed_connection_close", "false"}}); setup(false, ""); - RequestDecoder* decoder = nullptr; - NiceMock encoder; EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10)); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host:80"}, {":method", "CONNECT"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); })); - EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("close", headers.getConnectionValue()); })); @@ -3157,18 +2943,16 @@ TEST_F(HttpConnectionManagerImplTest, ProxyConnectLegacyClose) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.fixed_connection_close", "false"}}); setup(false, ""); - RequestDecoder* decoder = nullptr; - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host:80"}, {":method", "CONNECT"}, {"proxy-connection", "close"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); })); - EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("close", headers.getConnectionValue()); })); @@ -3182,18 +2966,16 @@ TEST_F(HttpConnectionManagerImplTest, ConnectLegacyClose) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.fixed_connection_close", "false"}}); setup(false, ""); - RequestDecoder* decoder = nullptr; - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "CONNECT"}, {"connection", "close"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); data.drain(4); return Http::okStatus(); })); - EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("close", headers.getConnectionValue()); })); @@ -3225,25 +3007,24 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackNotCalledIfResetS TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { setup(false, ""); - NiceMock encoder; EXPECT_CALL(*codec_, 
dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}, {"connection", "Upgrade"}, {"upgrade", "websocket"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); // Try sending trailers after the headers which will be rejected, just to // test the HCM logic that further decoding will not be passed to the // filters once the early response path is kicked off. RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); + decoder_->decodeTrailers(std::move(trailers)); data.drain(4); return Http::okStatus(); })); - EXPECT_CALL(encoder, encodeHeaders(_, true)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("403", headers.getStatusValue()); })); @@ -3271,8 +3052,7 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { .WillRepeatedly(Invoke( [&](HeaderMap&, bool) -> FilterHeadersStatus { return FilterHeadersStatus::Continue; })); - NiceMock encoder; - EXPECT_CALL(encoder, encodeHeaders(_, false)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Connection()); EXPECT_EQ("upgrade", headers.getConnectionValue()); @@ -3292,19 +3072,19 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { // only request into it. Then we respond into the filter. EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}, {"connection", "Upgrade"}, {"upgrade", "foo"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); filter->decoder_callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ {":status", "101"}, {"Connection", "upgrade"}, {"upgrade", "foo"}}}; - filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false); + filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false, "details"); data.drain(4); return Http::okStatus(); @@ -3314,6 +3094,7 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + EXPECT_CALL(*filter, onStreamComplete()); EXPECT_CALL(*filter, onDestroy()); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } @@ -3322,17 +3103,15 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { setup(false, "envoy-custom-server", false); - NiceMock encoder; - EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) .WillRepeatedly(Return(true)); EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); 
RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); data.drain(4); return Http::okStatus(); })); @@ -3348,17 +3127,15 @@ TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { TEST_F(HttpConnectionManagerImplTest, ConnectWithEmptyPath) { setup(false, "envoy-custom-server", false); - NiceMock encoder; - EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) .WillRepeatedly(Return(true)); EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", ""}, {":method", "CONNECT"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); data.drain(4); return Http::okStatus(); })); @@ -3378,23 +3155,20 @@ TEST_F(HttpConnectionManagerImplTest, ConnectLegacy) { setup(false, "envoy-custom-server", false); - NiceMock encoder; - RequestDecoder* decoder = nullptr; - EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) .WillRepeatedly(Return(false)); EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; - decoder->decodeHeaders(std::move(headers), false); + decoder_->decodeHeaders(std::move(headers), false); data.drain(4); return Http::okStatus(); })); - EXPECT_CALL(encoder, encodeHeaders(_, _)) + EXPECT_CALL(response_encoder_, encodeHeaders(_, _)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("403", headers.getStatusValue()); })); @@ -3411,12 +3185,11 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { InSequence s; setup(false, ""); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -3436,7 +3209,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { EXPECT_CALL(*drain_timer, enableTimer(_, _)); expectOnDestroy(); decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); // Fake a protocol error that races with the drain timeout. This will cause a local close. // Also fake the local close not closing immediately. 
@@ -3456,12 +3229,11 @@ TEST_F(HttpConnectionManagerImplTest, InSequence s; setup(false, ""); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -3518,12 +3290,11 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { return FilterHeadersStatus::StopIteration; })); - NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); + decoder_ = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder_->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -3536,7 +3307,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true)); EXPECT_CALL(*codec_, shutdownNotice()); filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); EXPECT_EQ(ssl_connection_.get(), filter->callbacks_->connection()->ssl().get()); EXPECT_CALL(*codec_, goAway()); @@ -3552,3294 +3323,5 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { EXPECT_EQ(1U, listener_stats_.downstream_rq_completed_.value()); } -TEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) { - InSequence s; - setup(false, "envoy-server-test"); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - return Http::okStatus(); - })); - - setupFilterChain(1, 0); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - Buffer::OwnedImpl fake_input; - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) - .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_NE(nullptr, headers.Server()); - EXPECT_EQ("envoy-server-test", headers.getServerValue()); - })); - EXPECT_CALL(*decoder_filters_[0], onDestroy()); - EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); -} - -TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { - InSequence s; - setup(false, "envoy-server-test"); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr 
headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":path", "/"}, {":method", "GET"}, {"proxy-connection", "close"}}}; - decoder->decodeHeaders(std::move(headers), false); - return Http::okStatus(); - })); - - setupFilterChain(1, 0); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - Buffer::OwnedImpl fake_input; - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) - .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_NE(nullptr, headers.Connection()); - EXPECT_EQ("close", headers.getConnectionValue()); - EXPECT_EQ(nullptr, headers.ProxyConnection()); - })); - EXPECT_CALL(*decoder_filters_[0], onDestroy()); - EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); -} - -TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { - setup(false, ""); - - // This is like ResponseBeforeRequestComplete, but it tests the case where we start the reply - // before the request completes, but don't finish the reply until after the request completes. - MockStreamDecoderFilter* filter = new NiceMock(); - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); - })); - - EXPECT_CALL(*filter, decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - // Start the request - NiceMock encoder; - RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - return Http::okStatus(); - })); - - Buffer::OwnedImpl fake_input("hello"); - conn_manager_->onData(fake_input, false); - - // Start the response - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - EXPECT_CALL(encoder, encodeHeaders(_, false)) - .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_NE(nullptr, headers.Server()); - EXPECT_EQ("", headers.getServerValue()); - })); - filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), false); - - // Finish the request. - EXPECT_CALL(*filter, decodeData(_, true)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder->decodeData(data, true); - return Http::okStatus(); - })); - - conn_manager_->onData(fake_input, false); - - // Since we started the response before the request was complete, we will still close the - // connection since we already sent a connection: close header. We won't "reset" the stream - // however. 
- EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - Buffer::OwnedImpl fake_response("world"); - filter->callbacks_->encodeData(fake_response, true); -} - -TEST_F(HttpConnectionManagerImplTest, DownstreamDisconnect) { - InSequence s; - setup(false, ""); - - NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - conn_manager_->newStream(encoder); - data.drain(2); - return Http::okStatus(); - })); - - EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // Now raise a remote disconnection, we should see the filter get reset called. - conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose); -} - -TEST_F(HttpConnectionManagerImplTest, DownstreamProtocolError) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - conn_manager_->newStream(response_encoder_); - return codecProtocolError("protocol error"); - })); - - EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); - EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); - - // A protocol exception should result in reset of the streams followed by a remote or local close - // depending on whether the downstream client closes the connection prior to the delayed close - // timer firing. - EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); -} - -TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAccessLog) { - std::shared_ptr handler(new NiceMock()); - access_logs_ = {handler}; - setup(false, ""); - - EXPECT_CALL(*handler, log(_, _, _, _)) - .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, - const StreamInfo::StreamInfo& stream_info) { - EXPECT_FALSE(stream_info.responseCode()); - EXPECT_TRUE(stream_info.hasAnyResponseFlag()); - EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)); - })); - - NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { - conn_manager_->newStream(encoder); - return codecProtocolError("protocol error"); - })); - - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); -} - -TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAfterHeadersAccessLog) { - setup(false, ""); - - std::shared_ptr filter(new NiceMock()); - std::shared_ptr handler(new NiceMock()); - - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(filter); - callbacks.addAccessLogHandler(handler); - })); - - EXPECT_CALL(*handler, log(_, _, _, _)) - .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, - const StreamInfo::StreamInfo& stream_info) { - EXPECT_FALSE(stream_info.responseCode()); - EXPECT_TRUE(stream_info.hasAnyResponseFlag()); - EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)); - })); - - NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); - - RequestHeaderMapPtr 
headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); - - return codecProtocolError("protocol error"); - })); - - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); -} - -// Verify that FrameFloodException causes connection to be closed abortively. -TEST_F(HttpConnectionManagerImplTest, FrameFloodError) { - std::shared_ptr log_handler = - std::make_shared>(); - access_logs_ = {log_handler}; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - conn_manager_->newStream(response_encoder_); - return bufferFloodError("too many outbound frames."); - })); - - EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); - EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); - - // FrameFloodException should result in reset of the streams followed by abortive close. - EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - - EXPECT_CALL(*log_handler, log(_, _, _, _)) - .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, - const StreamInfo::StreamInfo& stream_info) { - ASSERT_TRUE(stream_info.responseCodeDetails().has_value()); - EXPECT_EQ("codec error: too many outbound frames.", - stream_info.responseCodeDetails().value()); - })); - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - EXPECT_LOG_NOT_CONTAINS("warning", "downstream HTTP flood", - conn_manager_->onData(fake_input, false)); - - EXPECT_TRUE(filter_callbacks_.connection_.streamInfo().hasResponseFlag( - StreamInfo::ResponseFlag::DownstreamProtocolError)); -} - -TEST_F(HttpConnectionManagerImplTest, IdleTimeoutNoCodec) { - // Not used in the test. - delete codec_; - - idle_timeout_ = (std::chrono::milliseconds(10)); - Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*idle_timer, enableTimer(_, _)); - setup(false, ""); - - EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); - EXPECT_CALL(*idle_timer, disableTimer()); - idle_timer->invokeCallback(); - - EXPECT_EQ(1U, stats_.named_.downstream_cx_idle_timeout_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { - idle_timeout_ = (std::chrono::milliseconds(10)); - Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*idle_timer, enableTimer(_, _)); - setup(false, ""); - - MockStreamDecoderFilter* filter = new NiceMock(); - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); - })); - - NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, true); - return Http::okStatus(); - })); - - EXPECT_CALL(*idle_timer, disableTimer()); - EXPECT_CALL(*filter, decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*filter, decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - - // Kick off the incoming data. 
- Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(*idle_timer, enableTimer(_, _)); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - - Event::MockTimer* drain_timer = setUpTimer(); - EXPECT_CALL(*drain_timer, enableTimer(_, _)); - idle_timer->invokeCallback(); - - EXPECT_CALL(*codec_, goAway()); - EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - EXPECT_CALL(*idle_timer, disableTimer()); - EXPECT_CALL(*drain_timer, disableTimer()); - drain_timer->invokeCallback(); - - EXPECT_EQ(1U, stats_.named_.downstream_cx_idle_timeout_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, ConnectionDurationNoCodec) { - // Not used in the test. - delete codec_; - - max_connection_duration_ = (std::chrono::milliseconds(10)); - Event::MockTimer* connection_duration_timer = setUpTimer(); - EXPECT_CALL(*connection_duration_timer, enableTimer(_, _)); - setup(false, ""); - - EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); - EXPECT_CALL(*connection_duration_timer, disableTimer()); - - connection_duration_timer->invokeCallback(); - - EXPECT_EQ(1U, stats_.named_.downstream_cx_max_duration_reached_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { - max_connection_duration_ = (std::chrono::milliseconds(10)); - Event::MockTimer* connection_duration_timer = setUpTimer(); - EXPECT_CALL(*connection_duration_timer, enableTimer(_, _)); - setup(false, ""); - - MockStreamDecoderFilter* filter = new NiceMock(); - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); - })); - - NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, true); - return Http::okStatus(); - })); - - EXPECT_CALL(*filter, decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*filter, decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - - // Kick off the incoming data. 
- Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - - Event::MockTimer* drain_timer = setUpTimer(); - EXPECT_CALL(*drain_timer, enableTimer(_, _)); - connection_duration_timer->invokeCallback(); - - EXPECT_CALL(*codec_, goAway()); - EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - EXPECT_CALL(*connection_duration_timer, disableTimer()); - EXPECT_CALL(*drain_timer, disableTimer()); - drain_timer->invokeCallback(); - - EXPECT_EQ(1U, stats_.named_.downstream_cx_max_duration_reached_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, true); - return Http::okStatus(); - })); - - setupFilterChain(2, 0); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // Mimic a decoder filter that trapped data and now sends on the headers. - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus { - // Now filter 2 will send a complete response. - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), true); - return FilterHeadersStatus::StopIteration; - })); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); - expectOnDestroy(); - - // Response is already complete so we drop buffered body data when we continue. - EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).Times(0); - decoder_filters_[0]->callbacks_->continueDecoding(); -} - -TEST_F(HttpConnectionManagerImplTest, DoubleBuffering) { - InSequence s; - setup(false, ""); - - // The data will get moved so we need to have a copy to compare against. 
- Buffer::OwnedImpl fake_data("hello"); - Buffer::OwnedImpl fake_data_copy("hello"); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - decoder->decodeData(fake_data, true); - return Http::okStatus(); - })); - - setupFilterChain(3, 0); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // Continue iteration and stop and buffer on the 2nd filter. - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - decoder_filters_[0]->callbacks_->continueDecoding(); - - // Continue iteration. We expect the 3rd filter to not receive double data but for the buffered - // data to have been kept inline as it moves through. - EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[2], decodeData(BufferEqual(&fake_data_copy), true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - EXPECT_CALL(*decoder_filters_[2], decodeComplete()); - decoder_filters_[1]->callbacks_->continueDecoding(); - - expectOnDestroy(); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { - InSequence s; - setup(false, ""); - - RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - return Http::okStatus(); - })); - - setupFilterChain(2, 0); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // Continue headers only of filter 1. - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - decoder_filters_[0]->callbacks_->continueDecoding(); - - // Stop zero byte data. - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - Buffer::OwnedImpl zero; - decoder->decodeData(zero, true); - - // Continue. 
- EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - decoder_filters_[0]->callbacks_->continueDecoding(); - - expectOnDestroy(); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, false); - - RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); - return Http::okStatus(); - })); - - setupFilterChain(2, 2); - - Http::LowerCaseString trailer_key("foo"); - std::string trailers_data("trailers"); - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) - .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus { - Http::LowerCaseString key("foo"); - EXPECT_EQ(trailers.get(key), nullptr); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - // Kick off the incoming data. 
- Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // set up encodeHeaders expectations - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - - // invoke encodeHeaders - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - - // set up encodeData expectations - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - - // invoke encodeData - Buffer::OwnedImpl response_body("response"); - decoder_filters_[0]->callbacks_->encodeData(response_body, false); - // set up encodeTrailer expectations - EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) - .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus { - // assert that the trailers set in the previous filter was ignored - Http::LowerCaseString key("foo"); - EXPECT_EQ(trailers.get(key), nullptr); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - expectOnDestroy(); - - // invoke encodeTrailers - decoder_filters_[0]->callbacks_->encodeTrailers( - ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); -} - -TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, true); - return Http::okStatus(); - })); - - setupFilterChain(2, 2); - - std::string trailers_data("trailers"); - Http::LowerCaseString trailer_key("foo"); - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterDataStatus { - decoder_filters_[0]->callbacks_->addDecodedTrailers().addCopy(trailer_key, trailers_data); - return FilterDataStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // ensure that the second decodeData call sees end_stream = false - EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - - // since we added trailers, we should see decodeTrailers - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)).WillOnce(Invoke([&](HeaderMap& trailers) { - // ensure that we see the trailers set in decodeData - 
Http::LowerCaseString key("foo"); - auto t = trailers.get(key); - ASSERT(t); - EXPECT_EQ(t->value(), trailers_data.c_str()); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // set up encodeHeaders expectations - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - - // invoke encodeHeaders - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - - // set up encodeData expectations - EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterDataStatus { - encoder_filters_[1]->callbacks_->addEncodedTrailers().addCopy(trailer_key, trailers_data); - return FilterDataStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - // ensure encodeData calls after setting header sees end_stream = false - EXPECT_CALL(*encoder_filters_[0], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - - EXPECT_CALL(response_encoder_, encodeData(_, false)); - - // since we added trailers, we should see encodeTrailer callbacks - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)).WillOnce(Invoke([&](HeaderMap& trailers) { - // ensure that we see the trailers set in decodeData - Http::LowerCaseString key("foo"); - auto t = trailers.get(key); - EXPECT_EQ(t->value(), trailers_data.c_str()); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - - // Ensure that we call encodeTrailers - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - - expectOnDestroy(); - // invoke encodeData - Buffer::OwnedImpl response_body("response"); - decoder_filters_[0]->callbacks_->encodeData(response_body, true); -} - -TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, false); - - RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); - return Http::okStatus(); - })); - - setupFilterChain(2, 2); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - Buffer::OwnedImpl trailers_data("hello"); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { - 
decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, true); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[1], decodeData(Ref(trailers_data), false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - - decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[1]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - - Buffer::OwnedImpl response_body("response"); - decoder_filters_[1]->callbacks_->encodeData(response_body, false); - EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { - encoder_filters_[1]->callbacks_->addEncodedData(trailers_data, true); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[0], encodeData(Ref(trailers_data), false)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - expectOnDestroy(); - - decoder_filters_[1]->callbacks_->encodeTrailers( - ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); -} - -// Don't send data frames, only headers and trailers. 
-TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFrames) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); - return Http::okStatus(); - })); - - setupFilterChain(2, 1); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - Buffer::OwnedImpl trailers_data("hello"); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { - decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, false); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { - encoder_filters_[0]->callbacks_->addEncodedData(trailers_data, false); - return FilterTrailersStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - expectOnDestroy(); - - decoder_filters_[0]->callbacks_->encodeTrailers( - ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); -} - -// Don't send data frames, only headers and trailers. 
-TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAfterCallback) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); - return Http::okStatus(); - })); - - setupFilterChain(2, 1); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - Buffer::OwnedImpl trailers_data("hello"); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { - decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, false); - return FilterTrailersStatus::StopIteration; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - decoder_filters_[0]->callbacks_->continueDecoding(); - - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { - encoder_filters_[0]->callbacks_->addEncodedData(trailers_data, false); - return FilterTrailersStatus::StopIteration; - })); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - - decoder_filters_[0]->callbacks_->encodeTrailers( - ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - expectOnDestroy(); - - encoder_filters_[0]->callbacks_->continueEncoding(); -} - -// Add*Data during the *Data callbacks. 
-TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl data1("hello"); - decoder->decodeData(data1, false); - - Buffer::OwnedImpl data2("world"); - decoder->decodeData(data2, true); - return Http::okStatus(); - })); - - setupFilterChain(2, 2); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterDataStatus { - decoder_filters_[0]->callbacks_->addDecodedData(data, true); - EXPECT_EQ(decoder_filters_[0]->callbacks_->decodingBuffer()->toString(), "helloworld"); - return FilterDataStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)) - .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterDataStatus { - encoder_filters_[1]->callbacks_->addEncodedData(data, true); - EXPECT_EQ(encoder_filters_[1]->callbacks_->encodingBuffer()->toString(), "goodbye"); - return FilterDataStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeData(_, true)); - expectOnDestroy(); - - decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[1]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - Buffer::OwnedImpl data1("good"); - decoder_filters_[1]->callbacks_->encodeData(data1, false); - Buffer::OwnedImpl data2("bye"); - decoder_filters_[1]->callbacks_->encodeData(data2, true); -} - -TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - 
decoder->decodeHeaders(std::move(headers), true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(2, 2);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
-      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
-        Buffer::OwnedImpl data("hello");
-        decoder_filters_[0]->callbacks_->addDecodedData(data, true);
-        return FilterHeadersStatus::Continue;
-      }));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::StopIteration));
-  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))
-      .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
-  EXPECT_CALL(*decoder_filters_[1], decodeComplete());
-
-  // Kick off the incoming data.
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))
-      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
-        Buffer::OwnedImpl data("hello");
-        encoder_filters_[1]->callbacks_->addEncodedData(data, true);
-        EXPECT_EQ(5UL, encoder_filters_[0]->callbacks_->encodingBuffer()->length());
-        return FilterHeadersStatus::Continue;
-      }));
-  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
-  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
-  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))
-      .WillOnce(Return(FilterDataStatus::Continue));
-  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
-  EXPECT_CALL(response_encoder_, encodeData(_, true));
-  expectOnDestroy();
-
-  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails("");
-  decoder_filters_[1]->callbacks_->encodeHeaders(
-      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true);
-}
-
-TEST_F(HttpConnectionManagerImplTest, Filter) {
-  setup(false, "");
-
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    RequestHeaderMapPtr headers{
-        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
-    decoder->decodeHeaders(std::move(headers), true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(3, 2);
-  const std::string fake_cluster1_name = "fake_cluster1";
-  const std::string fake_cluster2_name = "fake_cluster2";
-
-  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster1 =
-      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();
-  EXPECT_CALL(cluster_manager_, get(_))
-      .WillOnce(Return(fake_cluster1.get()))
-      .WillOnce(Return(nullptr));
-
-  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();
-  EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name));
-  std::shared_ptr<Router::MockRoute> route2 = std::make_shared<NiceMock<Router::MockRoute>>();
-  EXPECT_CALL(route2->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster2_name));
-
-  EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))
-      .WillOnce(Return(route1))
-      .WillOnce(Return(route2))
-      .WillOnce(Return(nullptr));
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
-      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
-        EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());
-        EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry());
-        EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo());
-        decoder_filters_[0]->callbacks_->clearRouteCache();
-        return FilterHeadersStatus::Continue;
-      }));
- EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route()); - EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); - // RDS & CDS consistency problem: route2 points to fake_cluster2, which doesn't exist. - EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->clusterInfo()); - decoder_filters_[1]->callbacks_->clearRouteCache(); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->clusterInfo()); - EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->route()); - EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->streamInfo().routeEntry()); - return FilterHeadersStatus::StopIteration; - })); - EXPECT_CALL(*decoder_filters_[2], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - expectOnDestroy(); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - - // Mimic the upstream connection backing up. The router would call - // onDecoderFilterAboveWriteBufferHighWatermark which should readDisable the stream and increment - // stats. - EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_)); - EXPECT_CALL(stream_, readDisable(true)); - ASSERT(decoder_filters_[0]->callbacks_ != nullptr); - decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); - EXPECT_EQ(1U, stats_.named_.downstream_flow_control_paused_reading_total_.value()); - - // Resume the flow of data. When the router buffer drains it calls - // onDecoderFilterBelowWriteBufferLowWatermark which should re-enable reads on the stream. - EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_)); - EXPECT_CALL(stream_, readDisable(false)); - ASSERT(decoder_filters_[0]->callbacks_ != nullptr); - decoder_filters_[0]->callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); - EXPECT_EQ(1U, stats_.named_.downstream_flow_control_resumed_reading_total_.value()); - - // Backup upstream once again. - EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_)); - EXPECT_CALL(stream_, readDisable(true)); - ASSERT(decoder_filters_[0]->callbacks_ != nullptr); - decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); - EXPECT_EQ(2U, stats_.named_.downstream_flow_control_paused_reading_total_.value()); - - // Send a full response. 
- EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); - expectOnDestroy(); - decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[1]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); -} - -TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWithLazyCreation) { - setup(false, ""); - - // Make sure codec_ is created. - EXPECT_CALL(*codec_, dispatch(_)); - Buffer::OwnedImpl fake_input(""); - conn_manager_->onData(fake_input, false); - - // Mark the connection manger as backed up before the stream is created. - ASSERT_EQ(decoder_filters_.size(), 0); - EXPECT_CALL(*codec_, onUnderlyingConnectionAboveWriteBufferHighWatermark()); - conn_manager_->onAboveWriteBufferHighWatermark(); - - // Create the stream. Defer the creation of the filter chain by not sending - // complete headers. - RequestDecoder* decoder; - { - setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(response_encoder_); - // Call the high buffer callbacks as the codecs do. - stream_callbacks_->onAboveWriteBufferHighWatermark(); - return Http::okStatus(); - })); - - // Send fake data to kick off newStream being created. - Buffer::OwnedImpl fake_input2("asdf"); - conn_manager_->onData(fake_input2, false); - } - - // Now set up the filter chain by sending full headers. The filters should be - // immediately appraised that the low watermark is in effect. - { - setupFilterChain(2, 2); - EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - return Http::okStatus(); - })); - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - Buffer::OwnedImpl data("hello"); - decoder_filters_[0]->callbacks_->addDecodedData(data, true); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - sendRequestHeadersAndData(); - ASSERT_GE(decoder_filters_.size(), 1); - MockDownstreamWatermarkCallbacks callbacks; - EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); - decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); - - // Ensures that when new callbacks are registered they get invoked immediately - // and the already-registered callbacks do not. - MockDownstreamWatermarkCallbacks callbacks2; - EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); - decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2); - } - doRemoteClose(); -} - -TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithLazyCreation) { - setup(false, ""); - - // Make sure codec_ is created. - EXPECT_CALL(*codec_, dispatch(_)); - Buffer::OwnedImpl fake_input(""); - conn_manager_->onData(fake_input, false); - - // Mark the connection manger as backed up before the stream is created. 
- ASSERT_EQ(decoder_filters_.size(), 0); - EXPECT_CALL(*codec_, onUnderlyingConnectionAboveWriteBufferHighWatermark()); - conn_manager_->onAboveWriteBufferHighWatermark(); - - // Create the stream. Defer the creation of the filter chain by not sending - // complete headers. - RequestDecoder* decoder; - { - setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(response_encoder_); - // Call the high buffer callbacks as the codecs do. - stream_callbacks_->onAboveWriteBufferHighWatermark(); - return Http::okStatus(); - })); - - // Send fake data to kick off newStream being created. - Buffer::OwnedImpl fake_input2("asdf"); - conn_manager_->onData(fake_input2, false); - } - - // Now before the filter chain is created, fire the low watermark callbacks - // and ensure it is passed down to the stream. - ASSERT(stream_callbacks_ != nullptr); - EXPECT_CALL(*codec_, onUnderlyingConnectionBelowWriteBufferLowWatermark()) - .WillOnce(Invoke([&]() -> void { stream_callbacks_->onBelowWriteBufferLowWatermark(); })); - conn_manager_->onBelowWriteBufferLowWatermark(); - - // Now set up the filter chain by sending full headers. The filters should - // not get any watermark callbacks. - { - setupFilterChain(2, 2); - EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - return Http::okStatus(); - })); - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - Buffer::OwnedImpl data("hello"); - decoder_filters_[0]->callbacks_->addDecodedData(data, true); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - sendRequestHeadersAndData(); - ASSERT_GE(decoder_filters_.size(), 1); - MockDownstreamWatermarkCallbacks callbacks; - EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0); - EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); - decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); - } - doRemoteClose(); -} - -TEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) { - initial_buffer_limit_ = 100; - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - - // Check initial limits. - EXPECT_EQ(initial_buffer_limit_, decoder_filters_[0]->callbacks_->decoderBufferLimit()); - EXPECT_EQ(initial_buffer_limit_, encoder_filters_[0]->callbacks_->encoderBufferLimit()); - - // Check lowering the limits. - decoder_filters_[0]->callbacks_->setDecoderBufferLimit(initial_buffer_limit_ - 1); - EXPECT_EQ(initial_buffer_limit_ - 1, decoder_filters_[0]->callbacks_->decoderBufferLimit()); - - // Check raising the limits. - decoder_filters_[0]->callbacks_->setDecoderBufferLimit(initial_buffer_limit_ + 1); - EXPECT_EQ(initial_buffer_limit_ + 1, decoder_filters_[0]->callbacks_->decoderBufferLimit()); - EXPECT_EQ(initial_buffer_limit_ + 1, encoder_filters_[0]->callbacks_->encoderBufferLimit()); - - // Verify turning off buffer limits works. - decoder_filters_[0]->callbacks_->setDecoderBufferLimit(0); - EXPECT_EQ(0, decoder_filters_[0]->callbacks_->decoderBufferLimit()); - - // Once the limits are turned off can be turned on again. 
- decoder_filters_[0]->callbacks_->setDecoderBufferLimit(100); - EXPECT_EQ(100, decoder_filters_[0]->callbacks_->decoderBufferLimit()); - - doRemoteClose(); -} - -TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { - log_handler_ = std::make_shared>(); - - initial_buffer_limit_ = 1; - streaming_filter_ = true; - setup(false, ""); - setUpEncoderAndDecoder(false, false); - - // The filter is a streaming filter. Sending 4 bytes should hit the - // watermark limit and disable reads on the stream. - EXPECT_CALL(stream_, readDisable(true)); - sendRequestHeadersAndData(); - - // Change the limit so the buffered data is below the new watermark. The - // stream should be read-enabled - EXPECT_CALL(stream_, readDisable(false)); - int buffer_len = decoder_filters_[0]->callbacks_->decodingBuffer()->length(); - decoder_filters_[0]->callbacks_->setDecoderBufferLimit((buffer_len + 1) * 2); - - // Start the response - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); - - MockDownstreamWatermarkCallbacks callbacks; - decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); - MockDownstreamWatermarkCallbacks callbacks2; - decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2); - - // Now overload the buffer with response data. The downstream watermark - // callbacks should be called. - EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); - EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); - Buffer::OwnedImpl fake_response("A long enough string to go over watermarks"); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark)); - decoder_filters_[0]->callbacks_->encodeData(fake_response, false); - - // unregister callbacks2 - decoder_filters_[0]->callbacks_->removeDownstreamWatermarkCallbacks(callbacks2); - - // Change the limit so the buffered data is below the new watermark. - buffer_len = encoder_filters_[1]->callbacks_->encodingBuffer()->length(); - EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()); - EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); - encoder_filters_[1]->callbacks_->setEncoderBufferLimit((buffer_len + 1) * 2); - - EXPECT_CALL(*log_handler_, log(_, _, _, _)) - .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, - const StreamInfo::StreamInfo& stream_info) { - EXPECT_FALSE(stream_info.hasAnyResponseFlag()); - })); - - expectOnDestroy(); - EXPECT_CALL(stream_, removeCallbacks(_)); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); -} - -TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimits) { - initial_buffer_limit_ = 10; - streaming_filter_ = false; - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - - // Set the filter to be a buffering filter. Sending any data will hit the - // watermark limit and result in a 413 being sent to the user. 
- Http::TestResponseHeaderMapImpl response_headers{ - {":status", "413"}, {"content-length", "17"}, {"content-type", "text/plain"}}; - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(HeaderMapEqualRef(&response_headers), false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark)); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - Buffer::OwnedImpl data("A longer string"); - decoder_filters_[0]->callbacks_->addDecodedData(data, false); - const auto rc_details = encoder_filters_[1]->callbacks_->streamInfo().responseCodeDetails(); - EXPECT_EQ("request_payload_too_large", rc_details.value()); - - doRemoteClose(); -} - -// Return 413 from an intermediate filter and make sure we don't continue the filter chain. -TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) { - InSequence s; - initial_buffer_limit_ = 10; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, false); - - Buffer::OwnedImpl fake_data2("world world"); - decoder->decodeData(fake_data2, true); - return Http::okStatus(); - })); - - setUpBufferLimits(); - setupFilterChain(2, 1); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - Http::TestResponseHeaderMapImpl response_headers{ - {":status", "413"}, {"content-length", "17"}, {"content-type", "text/plain"}}; - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(HeaderMapEqualRef(&response_headers), false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - doRemoteClose(false); -} - -TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { - initial_buffer_limit_ = 10; - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - - // Start the response without processing the request headers through all - // filters. - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); - - // Now overload the buffer with response data. The filter returns - // StopIterationAndBuffer, which will trigger an early response. 
- - expectOnDestroy(); - Buffer::OwnedImpl fake_response("A long enough string to go over watermarks"); - // Fake response starts doing through the filter. - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - std::string response_body; - // The 500 goes directly to the encoder. - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) - .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - // Make sure this is a 500 - EXPECT_EQ("500", headers.getStatusValue()); - // Make sure Envoy standard sanitization has been applied. - EXPECT_TRUE(headers.Date() != nullptr); - EXPECT_EQ("response_payload_too_large", - decoder_filters_[0]->callbacks_->streamInfo().responseCodeDetails().value()); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); - decoder_filters_[0]->callbacks_->encodeData(fake_response, false); - EXPECT_EQ("Internal Server Error", response_body); - - EXPECT_EQ(1U, stats_.named_.rs_too_large_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) { - initial_buffer_limit_ = 10; - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - - // Start the response, and make sure the request headers are fully processed. - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); - - // Now overload the buffer with response data. The filter returns - // StopIterationAndBuffer, which will trigger an early reset. - const std::string data = "A long enough string to go over watermarks"; - Buffer::OwnedImpl fake_response(data); - InSequence s; - EXPECT_CALL(stream_, removeCallbacks(_)); - expectOnDestroy(false); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(stream_, resetStream(_)); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); - EXPECT_LOG_CONTAINS( - "debug", - "Resetting stream due to response_payload_too_large. 
Prior headers have already been sent", - decoder_filters_[0]->callbacks_->encodeData(fake_response, false);); - EXPECT_EQ(1U, stats_.named_.rs_too_large_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "HEAD"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - return Http::okStatus(); - })); - - setupFilterChain(1, 1); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, "Bad request", nullptr, - absl::nullopt, ""); - return FilterHeadersStatus::Continue; - })); - - EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true)) - .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_EQ("11", headers.getContentLengthValue()); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); - expectOnDestroy(); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); - conn_manager_->onData(fake_input, false); -} - -// Verify that if an encoded stream has been ended, but gets stopped by a filter chain, we end -// up resetting the stream in the doEndStream() path (e.g., via filter reset due to timeout, etc.), -// we emit a reset to the codec. -TEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)) - .WillOnce(Invoke([&](Buffer::Instance& data) -> Envoy::Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - return Http::okStatus(); - })); - - setupFilterChain(1, 1); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, "Bad request", nullptr, - absl::nullopt, ""); - return FilterHeadersStatus::Continue; - })); - - EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_EQ("11", headers.getContentLengthValue()); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) - .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterDataStatus { - return FilterDataStatus::StopIterationAndBuffer; - })); - - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // Kick off the incoming data. 
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  EXPECT_CALL(response_encoder_.stream_, resetStream(_));
-  expectOnDestroy();
-  encoder_filters_[0]->callbacks_->resetStream();
-}
-
-TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamHeaders) {
-  InSequence s;
-  setup(false, "");
-
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    auto headers = std::make_unique<TestRequestHeaderMapImpl>(
-        std::initializer_list<std::pair<std::string, std::string>>(
-            {{":authority", "host"}, {":path", "/"}, {":method", "GET"}}));
-    decoder->decodeHeaders(std::move(headers), false);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(2, 2);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(*decoder_filters_[1], decodeComplete());
-
-  // Kick off the incoming data.
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, true);
-
-  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));
-  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
-  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));
-
-  expectOnDestroy();
-
-  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails("");
-  decoder_filters_[1]->callbacks_->encodeHeaders(
-      makeHeaderMap<TestResponseHeaderMapImpl>({{":status", "200"}}), false);
-
-  Buffer::OwnedImpl response_body("response");
-  decoder_filters_[1]->callbacks_->encodeData(response_body, true);
-}
-
-TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamData) {
-  InSequence s;
-  setup(false, "");
-
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    auto headers = makeHeaderMap<TestRequestHeaderMapImpl>(
-        {{":authority", "host"}, {":path", "/"}, {":method", "GET"}});
-    decoder->decodeHeaders(std::move(headers), false);
-
-    Buffer::OwnedImpl fake_data("hello");
-    decoder->decodeData(fake_data, true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(2, 2);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(*decoder_filters_[1], decodeComplete());
-
-  // Kick off the incoming data.
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));
-  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
-  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));
-
-  expectOnDestroy();
-
-  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails("");
-  decoder_filters_[1]->callbacks_->encodeHeaders(
-      makeHeaderMap<TestResponseHeaderMapImpl>({{":status", "200"}}), false);
-
-  Buffer::OwnedImpl response_body("response");
-  decoder_filters_[1]->callbacks_->encodeData(response_body, true);
-}
-
-TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) {
-  InSequence s;
-  setup(false, "");
-
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    auto headers = makeHeaderMap<TestRequestHeaderMapImpl>(
-        {{":authority", "host"}, {":path", "/"}, {":method", "GET"}});
-    decoder->decodeHeaders(std::move(headers), false);
-
-    Buffer::OwnedImpl fake_data("hello");
-    decoder->decodeData(fake_data, false);
-
-    auto trailers = makeHeaderMap<TestRequestTrailerMapImpl>({{"foo", "bar"}});
-    decoder->decodeTrailers(std::move(trailers));
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(2, 2);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(*decoder_filters_[1], decodeComplete());
-
-  // Kick off the incoming data.
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndEndStream));
-  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
-  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));
-
-  expectOnDestroy();
-
-  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails("");
-  decoder_filters_[1]->callbacks_->encodeHeaders(
-      makeHeaderMap<TestResponseHeaderMapImpl>({{":status", "200"}}), false);
-
-  Buffer::OwnedImpl response_body("response");
-  decoder_filters_[1]->callbacks_->encodeData(response_body, false);
-
-  auto response_trailers = makeHeaderMap<TestResponseTrailerMapImpl>({{"x-trailer", "1"}});
-  decoder_filters_[1]->callbacks_->encodeTrailers(std::move(response_trailers));
-}
-
-// Filter continues headers iteration without ending the stream, then injects a body later.
-TEST_F(HttpConnectionManagerImplTest, FilterContinueDontEndStreamInjectBody) {
-  InSequence s;
-  setup(false, "");
-
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    auto headers = makeHeaderMap<TestRequestHeaderMapImpl>(
-        {{":authority", "host"}, {":path", "/"}, {":method", "GET"}});
-    decoder->decodeHeaders(std::move(headers), true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(2, 2);
-
-  // Decode filter 0 changes end_stream to false.
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndDontEndStream));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-
-  // Kick off the incoming data.
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, true);
-
-  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))
-      .WillOnce(Return(FilterDataStatus::Continue));
-  EXPECT_CALL(*decoder_filters_[1], decodeComplete());
-
-  // Decode filter 0 injects request body later.
-  Buffer::OwnedImpl data("hello");
-  decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(data, true);
-
-  // Encode filter 1 changes end_stream to false.
-  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::ContinueAndDontEndStream));
-  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
-  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
-
-  decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails("");
-  decoder_filters_[1]->callbacks_->encodeHeaders(
-      makeHeaderMap<TestResponseHeaderMapImpl>({{":status", "200"}}), true);
-
-  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))
-      .WillOnce(Return(FilterDataStatus::Continue));
-  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
-  EXPECT_CALL(response_encoder_, encodeData(_, true));
-  expectOnDestroy();
-
-  // Encode filter 1 injects response body later.
-  Buffer::OwnedImpl data2("hello");
-  encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(data2, true);
-}
-
-TEST_F(HttpConnectionManagerImplTest, FilterAddBodyContinuation) {
-  InSequence s;
-  setup(false, "");
-
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    RequestHeaderMapPtr headers{
-        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
-    decoder->decodeHeaders(std::move(headers), true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(2, 2);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::StopIteration));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-
-  // Kick off the incoming data.
- Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - Buffer::OwnedImpl data("hello"); - decoder_filters_[0]->callbacks_->addDecodedData(data, true); - decoder_filters_[0]->callbacks_->continueDecoding(); - - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - - decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[1]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeData(_, true)); - expectOnDestroy(); - - Buffer::OwnedImpl data2("hello"); - encoder_filters_[1]->callbacks_->addEncodedData(data2, true); - encoder_filters_[1]->callbacks_->continueEncoding(); -} - -// This test verifies proper sequences of decodeData() and encodeData() are called -// when all filers return "CONTINUE" in following case: -// -// 3 decode filters: -// -// filter0->decodeHeaders(_, true) -// return CONTINUE -// filter1->decodeHeaders(_, true) -// filter1->addDecodeData() -// return CONTINUE -// filter2->decodeHeaders(_, false) -// return CONTINUE -// filter2->decodeData(_, true) -// return CONTINUE -// -// filter0->decodeData(, true) is NOT called. -// filter1->decodeData(, true) is NOT called. -// -// 3 encode filters: -// -// filter2->encodeHeaders(_, true) -// return CONTINUE -// filter1->encodeHeaders(_, true) -// filter1->addEncodeData() -// return CONTINUE -// filter0->decodeHeaders(_, false) -// return CONTINUE -// filter0->decodeData(_, true) -// return CONTINUE -// -// filter2->encodeData(, true) is NOT called. -// filter1->encodeData(, true) is NOT called. 
-// -TEST_F(HttpConnectionManagerImplTest, AddDataWithAllContinue) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - return Http::okStatus(); - })); - - setupFilterChain(3, 3); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - Buffer::OwnedImpl data2("hello"); - decoder_filters_[1]->callbacks_->addDecodedData(data2, true); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - - EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[2], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[2], decodeComplete()); - - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)).Times(0); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).Times(0); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, true); - - // For encode direction - EXPECT_CALL(*encoder_filters_[2], encodeHeaders(_, true)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[2], encodeComplete()); - - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - Buffer::OwnedImpl data2("goodbyte"); - encoder_filters_[1]->callbacks_->addEncodedData(data2, true); - return FilterHeadersStatus::Continue; - })); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeData(_, true)); - expectOnDestroy(); - - EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0); - - decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[2]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); -} - -// This test verifies proper sequences of decodeData() and encodeData() are called -// when the first filer is "stopped" and "continue" in following case: -// -// 3 decode filters: -// -// filter0->decodeHeaders(_, true) -// return STOP -// filter0->continueDecoding() -// filter1->decodeHeaders(_, true) -// filter1->addDecodeData() -// return CONTINUE -// filter2->decodeHeaders(_, false) -// return CONTINUE -// filter2->decodeData(_, true) -// return CONTINUE -// -// filter0->decodeData(, true) is NOT called. -// filter1->decodeData(, true) is NOT called. 
-//
-// 3 encode filters:
-//
-//   filter2->encodeHeaders(_, true)
-//     return STOP
-//   filter2->continueEncoding()
-//   filter1->encodeHeaders(_, true)
-//     filter1->addEncodeData()
-//     return CONTINUE
-//   filter0->encodeHeaders(_, false)
-//     return CONTINUE
-//   filter0->encodeData(_, true)
-//     return CONTINUE
-//
-// filter2->encodeData(, true) is NOT called.
-// filter1->encodeData(, true) is NOT called.
-//
-TEST_F(HttpConnectionManagerImplTest, AddDataWithStopAndContinue) {
-  InSequence s;
-  setup(false, "");
-
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    RequestHeaderMapPtr headers{
-        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
-    decoder->decodeHeaders(std::move(headers), true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(3, 3);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::StopIteration));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-
-  // Kick off the incoming data.
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, true);
-
-  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))
-      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
-        Buffer::OwnedImpl data2("hello");
-        decoder_filters_[1]->callbacks_->addDecodedData(data2, true);
-        return FilterHeadersStatus::Continue;
-      }));
-  EXPECT_CALL(*decoder_filters_[1], decodeComplete());
-
-  EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  // This fails: it is called twice.
-  EXPECT_CALL(*decoder_filters_[2], decodeData(_, true))
-      .WillOnce(Return(FilterDataStatus::Continue));
-  EXPECT_CALL(*decoder_filters_[2], decodeComplete());
-
-  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)).Times(0);
-  // This fails: it is called once.
-  EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).Times(0);
-
-  decoder_filters_[0]->callbacks_->continueDecoding();
-
-  // For encode direction
-  EXPECT_CALL(*encoder_filters_[2], encodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::StopIteration));
-  EXPECT_CALL(*encoder_filters_[2], encodeComplete());
-
-  decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails("");
-  decoder_filters_[2]->callbacks_->encodeHeaders(
-      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true);
-
-  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))
-      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
-        Buffer::OwnedImpl data2("goodbyte");
-        encoder_filters_[1]->callbacks_->addEncodedData(data2, true);
-        return FilterHeadersStatus::Continue;
-      }));
-  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
-
-  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::Continue));
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
-
-  EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))
-      .WillOnce(Return(FilterDataStatus::Continue));
-  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
-  EXPECT_CALL(response_encoder_, encodeData(_, true));
-  expectOnDestroy();
-
-  EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0);
-  EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0);
-
-  encoder_filters_[2]->callbacks_->continueEncoding();
-}
-
-// Use filter direct decode/encodeData() calls without trailers.
-TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, true); - return Http::okStatus(); - })); - - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); - setupFilterChain(2, 2); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - Buffer::OwnedImpl decode_buffer; - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Invoke([&](Buffer::Instance& data, bool) { - decode_buffer.move(data); - return FilterDataStatus::StopIterationNoBuffer; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); - conn_manager_->onData(fake_input, false); - - Buffer::OwnedImpl decoded_data_to_forward; - decoded_data_to_forward.move(decode_buffer, 2); - EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("he"), false)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decoded_data_to_forward, false); - - EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("llo"), true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decode_buffer, true); - - // Response path. 
- EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - - Buffer::OwnedImpl encoder_buffer; - EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)) - .WillOnce(Invoke([&](Buffer::Instance& data, bool) { - encoder_buffer.move(data); - return FilterDataStatus::StopIterationNoBuffer; - })); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - - decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[1]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - Buffer::OwnedImpl response_body("response"); - decoder_filters_[1]->callbacks_->encodeData(response_body, true); - - Buffer::OwnedImpl encoded_data_to_forward; - encoded_data_to_forward.move(encoder_buffer, 3); - EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("res"), false)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoded_data_to_forward, false); - - EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("ponse"), true)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeData(_, true)); - expectOnDestroy(); - encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoder_buffer, true); -} - -// Use filter direct decode/encodeData() calls with trailers. -TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, false); - - RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; - decoder->decodeTrailers(std::move(trailers)); - return Http::okStatus(); - })); - - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); - setupFilterChain(2, 2); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - - Buffer::OwnedImpl decode_buffer; - EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) - .WillOnce(Invoke([&](Buffer::Instance& data, bool) { - decode_buffer.move(data); - return FilterDataStatus::StopIterationNoBuffer; - })); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // Kick off the incoming data. 
- Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - Buffer::OwnedImpl decoded_data_to_forward; - decoded_data_to_forward.move(decode_buffer, 2); - EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("he"), false)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decoded_data_to_forward, false); - - EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("llo"), false)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decode_buffer, false); - - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - decoder_filters_[0]->callbacks_->continueDecoding(); - - // Response path. - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - - Buffer::OwnedImpl encoder_buffer; - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Invoke([&](Buffer::Instance& data, bool) { - encoder_buffer.move(data); - return FilterDataStatus::StopIterationNoBuffer; - })); - EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::StopIteration)); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - - decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[1]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - Buffer::OwnedImpl response_body("response"); - decoder_filters_[1]->callbacks_->encodeData(response_body, false); - decoder_filters_[1]->callbacks_->encodeTrailers( - ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); - - Buffer::OwnedImpl encoded_data_to_forward; - encoded_data_to_forward.move(encoder_buffer, 3); - EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("res"), false)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoded_data_to_forward, false); - - EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("ponse"), false)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoder_buffer, false); - - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - expectOnDestroy(); - encoder_filters_[1]->callbacks_->continueEncoding(); -} - -TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("hello"); - decoder->decodeData(fake_data, false); - - Buffer::OwnedImpl fake_data2("world"); - decoder->decodeData(fake_data2, true); - return Http::okStatus(); - })); - - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); - setupFilterChain(3, 2); - - 
EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - EXPECT_EQ(route_config_provider_.route_config_->route_, - decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(ssl_connection_.get(), - decoder_filters_[0]->callbacks_->connection()->ssl().get()); - return FilterHeadersStatus::StopIteration; - })); - - EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); - conn_manager_->onData(fake_input, false); - - // Mimic a decoder filter that trapped data and now sends it on, since the data was buffered - // by the first filter, we expect to get it in 1 decodeData() call. - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - EXPECT_EQ(route_config_provider_.route_config_->route_, - decoder_filters_[1]->callbacks_->route()); - EXPECT_EQ(ssl_connection_.get(), - decoder_filters_[1]->callbacks_->connection()->ssl().get()); - return FilterHeadersStatus::StopIteration; - })); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[2], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - EXPECT_CALL(*decoder_filters_[2], decodeComplete()); - decoder_filters_[0]->callbacks_->continueDecoding(); - - // Now start encoding and mimic trapping in the encoding filter. - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); - EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::StopIteration)); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - EXPECT_EQ(ssl_connection_.get(), encoder_filters_[1]->callbacks_->connection()->ssl().get()); - decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[2]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - Buffer::OwnedImpl response_body("response"); - decoder_filters_[2]->callbacks_->encodeData(response_body, false); - decoder_filters_[2]->callbacks_->encodeTrailers( - ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); - EXPECT_EQ(ssl_connection_.get(), decoder_filters_[2]->callbacks_->connection()->ssl().get()); - - // Now finish the encode. 
- EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, false)) - .WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeData(_, false)); - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - expectOnDestroy(); - encoder_filters_[1]->callbacks_->continueEncoding(); - - EXPECT_EQ(ssl_connection_.get(), encoder_filters_[0]->callbacks_->connection()->ssl().get()); -} - -TEST(HttpConnectionManagerTracingStatsTest, verifyTracingStats) { - Stats::IsolatedStoreImpl stats; - ConnectionManagerTracingStats tracing_stats{CONN_MAN_TRACING_STATS(POOL_COUNTER(stats))}; - - EXPECT_THROW( - ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::HealthCheck, tracing_stats), - std::invalid_argument); - - ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::ClientForced, tracing_stats); - EXPECT_EQ(1UL, tracing_stats.client_enabled_.value()); - - ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::NotTraceableRequestId, tracing_stats); - EXPECT_EQ(1UL, tracing_stats.not_traceable_.value()); - - ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::Sampling, tracing_stats); - EXPECT_EQ(1UL, tracing_stats.random_sampling_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { - Server::OverloadActionState stop_accepting_requests = Server::OverloadActionState::saturated(); - ON_CALL(overload_manager_.overload_state_, - getState(Server::OverloadActionNames::get().StopAcceptingRequests)) - .WillByDefault(ReturnRef(stop_accepting_requests)); - - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - return Http::okStatus(); - })); - - // 503 direct response when overloaded. 
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))
-      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {
-        EXPECT_EQ("503", headers.getStatusValue());
-      }));
-  std::string response_body;
-  EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));
-
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  EXPECT_EQ("envoy overloaded", response_body);
-  EXPECT_EQ(1U, stats_.named_.downstream_rq_overload_close_.value());
-}
-
-TEST_F(HttpConnectionManagerImplTest, DisableHttp1KeepAliveWhenOverloaded) {
-  Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState::saturated();
-  ON_CALL(overload_manager_.overload_state_,
-          getState(Server::OverloadActionNames::get().DisableHttpKeepAlive))
-      .WillByDefault(ReturnRef(disable_http_keep_alive));
-
-  codec_->protocol_ = Protocol::Http11;
-  setup(false, "");
-
-  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());
-  EXPECT_CALL(filter_factory_, createFilterChain(_))
-      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {
-        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});
-      }));
-
-  EXPECT_CALL(*codec_, dispatch(_))
-      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {
-        RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"},
-                                                                 {":path", "/"},
-                                                                 {":method", "GET"},
-                                                                 {"connection", "keep-alive"}}};
-        decoder->decodeHeaders(std::move(headers), true);
-
-        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}};
-        filter->callbacks_->streamInfo().setResponseCodeDetails("");
-        filter->callbacks_->encodeHeaders(std::move(response_headers), true);
-
-        data.drain(4);
-        return Http::okStatus();
-      }));
-
-  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))
-      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {
-        EXPECT_EQ("close", headers.getConnectionValue());
-      }));
-
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-  EXPECT_EQ(1U, stats_.named_.downstream_cx_overload_disable_keepalive_.value());
-}
-
-class DrainH2HttpConnectionManagerImplTest : public HttpConnectionManagerImplTest,
-                                             public testing::WithParamInterface<bool> {
-public:
-  DrainH2HttpConnectionManagerImplTest() {
-    Runtime::LoaderSingleton::getExisting()->mergeValues(
-        {{"envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2", "true"}});
-  }
-
-private:
-  TestScopedRuntime runtime_;
-};
-
-// Verify that, if the runtime option is enabled, HTTP2 connections will receive
-// a GOAWAY message when the overload action is triggered.
-TEST_P(DrainH2HttpConnectionManagerImplTest, DisableHttp2KeepAliveWhenOverloaded) { - Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState::saturated(); - ON_CALL(overload_manager_.overload_state_, - getState(Server::OverloadActionNames::get().DisableHttpKeepAlive)) - .WillByDefault(ReturnRef(disable_http_keep_alive)); - - codec_->protocol_ = Protocol::Http2; - setup(false, ""); - if (GetParam()) { - EXPECT_CALL(*codec_, shutdownNotice); - } - - std::shared_ptr filter(new NiceMock()); - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); - })); - - EXPECT_CALL(*codec_, dispatch(_)) - .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, - {":path", "/"}, - {":method", "GET"}, - {"connection", "keep-alive"}}}; - decoder->decodeHeaders(std::move(headers), true); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - - data.drain(4); - return Http::okStatus(); - })); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); - - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - Mock::VerifyAndClearExpectations(codec_); - EXPECT_EQ(1, stats_.named_.downstream_cx_overload_disable_keepalive_.value()); -} - -INSTANTIATE_TEST_SUITE_P(WithRuntimeOverride, DrainH2HttpConnectionManagerImplTest, - testing::Bool()); - -TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathFirstFilter) { - setup(false, "envoy-custom-server", false); - setUpEncoderAndDecoder(true, true); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // Verify that once the decoder_filters_[0]'s continueDecoding() is called, decoder_filters_[1]'s - // decodeHeaders() is called, and both filters receive data and trailers consequently. - EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - decoder_filters_[0]->callbacks_->continueDecoding(); - - doRemoteClose(); -} - -TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathSecondFilter) { - setup(false, "envoy-custom-server", false); - setUpEncoderAndDecoder(true, false); - - // Verify headers go through both filters, and data and trailers go through the first filter only. 
- EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _)) - .WillOnce(Return(FilterHeadersStatus::StopAllIterationAndBuffer)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - // Verify that once the decoder_filters_[1]'s continueDecoding() is called, both data and trailers - // go through the second filter. - EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*decoder_filters_[1], decodeComplete()); - decoder_filters_[1]->callbacks_->continueDecoding(); - - doRemoteClose(); -} - -TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnEncodingPath) { - setup(false, "envoy-custom-server", false); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - - // encoder_filters_[1] is the first filter in the chain. - EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) - .WillOnce(Invoke([&](HeaderMap&, bool) -> FilterHeadersStatus { - return FilterHeadersStatus::StopAllIterationAndBuffer; - })); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); - - // Invoke encodeData while all iteration is stopped and make sure the filters do not have - // encodeData called. - EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).Times(0); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).Times(0); - Buffer::OwnedImpl response_body("response"); - decoder_filters_[0]->callbacks_->encodeData(response_body, false); - decoder_filters_[0]->callbacks_->encodeTrailers( - ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); - - // Verify that once encoder_filters_[1]'s continueEncoding() is called, encoder_filters_[0]'s - // encodeHeaders() is called, and both filters receive data and trailers consequently. 
- EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _)) - .WillOnce(Return(FilterHeadersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeData(_, _)); - EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::Continue)); - EXPECT_CALL(response_encoder_, encodeTrailers(_)); - EXPECT_CALL(*encoder_filters_[0], encodeComplete()); - EXPECT_CALL(*encoder_filters_[1], encodeComplete()); - expectOnDestroy(); - encoder_filters_[1]->callbacks_->continueEncoding(); -} - -TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenDraining) { - setup(false, ""); - - EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true)); - - std::shared_ptr filter(new NiceMock()); - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); - })); - - EXPECT_CALL(*codec_, dispatch(_)) - .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, - {":path", "/"}, - {":method", "GET"}, - {"connection", "keep-alive"}}}; - decoder->decodeHeaders(std::move(headers), true); - - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->streamInfo().setResponseCodeDetails(""); - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - - data.drain(4); - return Http::okStatus(); - })); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) - .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", headers.getConnectionValue()); - })); - - Buffer::OwnedImpl fake_input; - conn_manager_->onData(fake_input, false); -} - -TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { - setup(false, ""); - - // Set up the codec. - EXPECT_CALL(*codec_, dispatch(_)) - .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - data.drain(4); - return Http::okStatus(); - })); - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - setupFilterChain(1, 1); - - // Create a new stream - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - - // Send headers to that stream, and verify we both set and clear the tracked object. - { - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "POST"}}}; - EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_)) - .Times(2) - .WillOnce(Invoke([](const ScopeTrackedObject* object) -> const ScopeTrackedObject* { - ASSERT(object != nullptr); // On the first call, this should be the active stream. 
- std::stringstream out; - object->dumpState(out); - std::string state = out.str(); - EXPECT_THAT(state, - testing::HasSubstr("filter_manager_callbacks_.requestHeaders(): empty")); - EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); - return nullptr; - })) - .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Invoke([](HeaderMap&, bool) -> FilterHeadersStatus { - return FilterHeadersStatus::StopIteration; - })); - decoder->decodeHeaders(std::move(headers), false); - } - - // Send trailers to that stream, and verify by this point headers are in logged state. - { - RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; - EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_)) - .Times(2) - .WillOnce(Invoke([](const ScopeTrackedObject* object) -> const ScopeTrackedObject* { - ASSERT(object != nullptr); // On the first call, this should be the active stream. - std::stringstream out; - object->dumpState(out); - std::string state = out.str(); - EXPECT_THAT(state, testing::HasSubstr("filter_manager_callbacks_.requestHeaders(): \n")); - EXPECT_THAT(state, testing::HasSubstr("':authority', 'host'\n")); - EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); - return nullptr; - })) - .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) - .WillOnce(Return(FilterTrailersStatus::StopIteration)); - decoder->decodeTrailers(std::move(trailers)); - } - - expectOnDestroy(); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -// SRDS no scope found. -TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) { - setup(false, "", true, true); - setupFilterChain(1, 0); // Recreate the chain for second stream. - - EXPECT_CALL(*static_cast( - scopedRouteConfigProvider()->config().get()), - getRouteConfig(_)) - .Times(2) - .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - return Http::okStatus(); - })); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - EXPECT_EQ(nullptr, decoder_filters_[0]->callbacks_->route()); - return FilterHeadersStatus::StopIteration; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true. - - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - expectOnDestroy(); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -// SRDS updating scopes affects routing. -TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { - setup(false, "", true, true); - - EXPECT_CALL(*static_cast( - scopedRouteConfigProvider()->config().get()), - getRouteConfig(_)) - .Times(3) - .WillOnce(Return(nullptr)) - .WillOnce(Return(nullptr)) // refreshCachedRoute first time. - .WillOnce(Return(route_config_)); // triggered by callbacks_->route(), SRDS now updated. 
- EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - return Http::okStatus(); - })); - const std::string fake_cluster1_name = "fake_cluster1"; - std::shared_ptr route1 = std::make_shared>(); - EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name)); - std::shared_ptr fake_cluster1 = - std::make_shared>(); - EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get())); - EXPECT_CALL(*route_config_, route(_, _, _, _)).WillOnce(Return(route1)); - // First no-scope-found request will be handled by decoder_filters_[0]. - setupFilterChain(1, 0); - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - EXPECT_EQ(nullptr, decoder_filters_[0]->callbacks_->route()); - - // Clear route and next call on callbacks_->route() will trigger a re-snapping of the - // snapped_route_config_. - decoder_filters_[0]->callbacks_->clearRouteCache(); - - // Now route config provider returns something. - EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); - EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); - return FilterHeadersStatus::StopIteration; - - return FilterHeadersStatus::StopIteration; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true. - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - expectOnDestroy(); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -// SRDS Scope header update cause cross-scope reroute. -TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { - setup(false, "", true, true); - - std::shared_ptr route_config1 = - std::make_shared>(); - std::shared_ptr route_config2 = - std::make_shared>(); - std::shared_ptr route1 = std::make_shared>(); - std::shared_ptr route2 = std::make_shared>(); - EXPECT_CALL(*route_config1, route(_, _, _, _)).WillRepeatedly(Return(route1)); - EXPECT_CALL(*route_config2, route(_, _, _, _)).WillRepeatedly(Return(route2)); - EXPECT_CALL(*static_cast( - scopedRouteConfigProvider()->config().get()), - getRouteConfig(_)) - // 1. Snap scoped route config; - // 2. refreshCachedRoute (both in decodeHeaders(headers,end_stream); - // 3. then refreshCachedRoute triggered by decoder_filters_[1]->callbacks_->route(). 
-      .Times(3)
-      .WillRepeatedly(Invoke([&](const HeaderMap& headers) -> Router::ConfigConstSharedPtr {
-        auto& test_headers = dynamic_cast<const TestRequestHeaderMapImpl&>(headers);
-        if (test_headers.get_("scope_key") == "foo") {
-          return route_config1;
-        }
-        return route_config2;
-      }));
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{
-        {":authority", "host"}, {":method", "GET"}, {"scope_key", "foo"}, {":path", "/foo"}}};
-    decoder->decodeHeaders(std::move(headers), false);
-    data.drain(4);
-    return Http::okStatus();
-  }));
-  setupFilterChain(2, 0);
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
-      .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus {
-        EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());
-        auto& test_headers = dynamic_cast<TestRequestHeaderMapImpl&>(headers);
-        // Clear cached route and change scope key to "bar".
-        decoder_filters_[0]->callbacks_->clearRouteCache();
-        test_headers.remove("scope_key");
-        test_headers.addCopy("scope_key", "bar");
-        return FilterHeadersStatus::Continue;
-      }));
-  EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
-      .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus {
-        auto& test_headers = dynamic_cast<TestRequestHeaderMapImpl&>(headers);
-        EXPECT_EQ(test_headers.get_("scope_key"), "bar");
-        // Route now switched to route2 as header "scope_key" has changed.
-        EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route());
-        EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry());
-        return FilterHeadersStatus::StopIteration;
-      }));
-
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  expectOnDestroy();
-  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
-}
-
-// SRDS scoped RouteConfiguration found and route found.
-TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) {
-  setup(false, "", true, true);
-  setupFilterChain(1, 0);
-
-  const std::string fake_cluster1_name = "fake_cluster1";
-  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();
-  EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name));
-  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster1 =
-      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();
-  EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get()));
-  EXPECT_CALL(*scopedRouteConfigProvider()->config<Router::MockScopedConfig>(), getRouteConfig(_))
-      // 1. decodeHeaders() snapping route config.
-      // 2. refreshCachedRoute() later in the same decodeHeaders().
- .Times(2); - EXPECT_CALL( - *static_cast( - scopedRouteConfigProvider()->config()->route_config_.get()), - route(_, _, _, _)) - .WillOnce(Return(route1)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - return Http::okStatus(); - })); - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { - EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); - EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); - return FilterHeadersStatus::StopIteration; - })); - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - expectOnDestroy(); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -TEST_F(HttpConnectionManagerImplTest, NewConnection) { - setup(false, "", true, true); - - filter_callbacks_.connection_.stream_info_.protocol_ = absl::nullopt; - EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol()); - EXPECT_EQ(Network::FilterStatus::Continue, conn_manager_->onNewConnection()); - EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_total_.value()); - EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_active_.value()); - - filter_callbacks_.connection_.stream_info_.protocol_ = Envoy::Http::Protocol::Http3; - codec_->protocol_ = Http::Protocol::Http3; - EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol()); - EXPECT_CALL(*codec_, protocol()).Times(AtLeast(1)); - EXPECT_EQ(Network::FilterStatus::StopIteration, conn_manager_->onNewConnection()); - EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_total_.value()); - EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_active_.value()); -} - -TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestHeadersSize) { - // Test with Headers only request, No Data, No response. 
-  setup(false, "");
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    RequestHeaderMapPtr headers{
-        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
-    decoder->decodeHeaders(std::move(headers), true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(1, 0);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
-      .WillOnce(Return(FilterHeadersStatus::StopIteration));
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-
-  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> host_{
-      new NiceMock<Upstream::MockHostDescription>()};
-  filter_callbacks_.upstreamHost(host_);
-
-  EXPECT_CALL(
-      host_->cluster_.request_response_size_stats_store_,
-      deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30));
-  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,
-              deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 0));
-  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,
-              deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0));
-
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  expectOnDestroy();
-  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
-}
-
-TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestBodySize) {
-  // Test Request with Headers and Data, No response.
-  setup(false, "");
-  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
-    RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-    RequestHeaderMapPtr headers{
-        new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
-    decoder->decodeHeaders(std::move(headers), false);
-
-    Buffer::OwnedImpl fake_data("12345");
-    decoder->decodeData(fake_data, true);
-    return Http::okStatus();
-  }));
-
-  setupFilterChain(1, 0);
-
-  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
-      .WillOnce(Return(FilterHeadersStatus::StopIteration));
-  EXPECT_CALL(*decoder_filters_[0], decodeData(_, true))
-      .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer));
-
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-
-  std::shared_ptr<NiceMock<Upstream::MockHostDescription>> host_{
-      new NiceMock<Upstream::MockHostDescription>()};
-  filter_callbacks_.upstreamHost(host_);
-
-  EXPECT_CALL(
-      host_->cluster_.request_response_size_stats_store_,
-      deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30));
-  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,
-              deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 5));
-  EXPECT_CALL(host_->cluster_.request_response_size_stats_store_,
-              deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0));
-
-  Buffer::OwnedImpl fake_input("1234");
-  conn_manager_->onData(fake_input, false);
-
-  expectOnDestroy();
-  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
-}
-
-TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseHeadersSize) {
-  // Test with Header only response.
- setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("1234"); - decoder->decodeData(fake_data, true); - - return Http::okStatus(); - })); - - setupFilterChain(1, 0); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - std::shared_ptr> host_{ - new NiceMock()}; - filter_callbacks_.upstreamHost(host_); - - EXPECT_CALL( - host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); - - // Response headers are internally mutated and we record final response headers. - // for example in the below test case, response headers are modified as - // {':status', '200' 'date', 'Mon, 06 Jul 2020 06:08:55 GMT' 'server', ''} - // whose size is 49 instead of original response headers size 10({":status", "200"}). - EXPECT_CALL( - host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); - EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); - EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); - - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); - expectOnDestroy(); - - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); -} - -TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseBodySize) { - // Test with response headers and body. 
- setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - - Buffer::OwnedImpl fake_data("1234"); - decoder->decodeData(fake_data, true); - - return Http::okStatus(); - })); - - setupFilterChain(1, 0); - - EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) - .WillOnce(Return(FilterHeadersStatus::StopIteration)); - EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) - .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); - - EXPECT_CALL(*decoder_filters_[0], decodeComplete()); - - std::shared_ptr> host_{ - new NiceMock()}; - filter_callbacks_.upstreamHost(host_); - - EXPECT_CALL( - host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); - EXPECT_CALL( - host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); - EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); - EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 11)); - - Buffer::OwnedImpl fake_input("1234"); - conn_manager_->onData(fake_input, false); - - EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); - - decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); - decoder_filters_[0]->callbacks_->encodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - - EXPECT_CALL(response_encoder_, encodeData(_, true)); - expectOnDestroy(); - - Buffer::OwnedImpl fake_response("hello-world"); - decoder_filters_[0]->callbacks_->encodeData(fake_response, true); -} - -TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { - setup(false, "envoy-custom-server", false); - - filter_callbacks_.connection_.stream_info_.protocol_ = Envoy::Http::Protocol::Http3; - codec_->protocol_ = Http::Protocol::Http3; - EXPECT_EQ(Network::FilterStatus::StopIteration, conn_manager_->onNewConnection()); - - // Store the basic request encoder during filter chain setup. - std::shared_ptr filter(new NiceMock()); - - EXPECT_CALL(*filter, decodeHeaders(_, true)) - .WillOnce(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("http", headers.getForwardedProtoValue()); - return FilterHeadersStatus::StopIteration; - })); - - EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); - - EXPECT_CALL(filter_factory_, createFilterChain(_)) - .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamDecoderFilter(filter); - })); - - EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); - - // Pretend to get a new stream and then fire a headers only request into it. Then we respond into - // the filter. 
-  NiceMock<MockResponseEncoder> encoder;
-  RequestDecoder& decoder = conn_manager_->newStream(encoder);
-  RequestHeaderMapPtr headers{
-      new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
-  decoder.decodeHeaders(std::move(headers), true);
-
-  ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}};
-  filter->callbacks_->streamInfo().setResponseCodeDetails("");
-  filter->callbacks_->encodeHeaders(std::move(response_headers), true);
-
-  EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value());
-  EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value());
-  EXPECT_EQ(1U, stats_.named_.downstream_rq_completed_.value());
-  EXPECT_EQ(1U, listener_stats_.downstream_rq_completed_.value());
-  EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_total_.value());
-  filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList();
-  conn_manager_.reset();
-  EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_active_.value());
-}
-
-namespace {
-
-class SimpleType : public StreamInfo::FilterState::Object {
-public:
-  SimpleType(int value) : value_(value) {}
-  int access() const { return value_; }
-
-private:
-  int value_;
-};
-
-} // namespace
-
-TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) {
-  filter_callbacks_.connection_.stream_info_.filter_state_->setData(
-      "connection_provided_data", std::make_shared<SimpleType>(555),
-      StreamInfo::FilterState::StateType::ReadOnly);
-
-  setup(false, "envoy-custom-server", false);
-  setupFilterChain(1, 0, /* num_requests = */ 3);
-
-  EXPECT_CALL(*codec_, dispatch(_))
-      .Times(2)
-      .WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {
-        RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
-        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{
-            {":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
-        decoder->decodeHeaders(std::move(headers), true);
-        return Http::okStatus();
-      }));
-  {
-    InSequence s;
-    EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
-        .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus {
-          decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData(
-              "per_filter_chain", std::make_unique<SimpleType>(1),
-              StreamInfo::FilterState::StateType::ReadOnly,
-              StreamInfo::FilterState::LifeSpan::FilterChain);
-          decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData(
-              "per_downstream_request", std::make_unique<SimpleType>(2),
-              StreamInfo::FilterState::StateType::ReadOnly,
-              StreamInfo::FilterState::LifeSpan::Request);
-          decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData(
-              "per_downstream_connection", std::make_unique<SimpleType>(3),
-              StreamInfo::FilterState::StateType::ReadOnly,
-              StreamInfo::FilterState::LifeSpan::Connection);
-          return FilterHeadersStatus::StopIteration;
-        }));
-    EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))
-        .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus {
-          EXPECT_FALSE(
-              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "per_filter_chain"));
-          EXPECT_TRUE(
-              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "per_downstream_request"));
-          EXPECT_TRUE(
-              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "per_downstream_connection"));
-          EXPECT_TRUE(
-              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "connection_provided_data"));
-          return FilterHeadersStatus::StopIteration;
-        }));
-    EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true))
-        .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus {
-          EXPECT_FALSE(
-              decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "per_filter_chain"));
-          EXPECT_FALSE(
-              decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "per_downstream_request"));
-          EXPECT_TRUE(
-              decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "per_downstream_connection"));
-          EXPECT_TRUE(
-              decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData<SimpleType>(
-                  "connection_provided_data"));
-          return FilterHeadersStatus::StopIteration;
-        }));
-  }
-
-  EXPECT_CALL(*decoder_filters_[0], decodeComplete());
-  EXPECT_CALL(*decoder_filters_[0], onDestroy());
-  EXPECT_CALL(*decoder_filters_[1], decodeComplete());
-  EXPECT_CALL(*decoder_filters_[2], decodeComplete());
-
-  Buffer::OwnedImpl fake_input;
-  conn_manager_->onData(fake_input, false);
-  decoder_filters_[0]->callbacks_->recreateStream();
-  conn_manager_->onData(fake_input, false);
-
-  // The connection life time data should have been written to the connection filter state.
-  EXPECT_TRUE(filter_callbacks_.connection_.stream_info_.filter_state_->hasData<SimpleType>(
-      "per_downstream_connection"));
-  EXPECT_CALL(*decoder_filters_[1], onDestroy());
-  EXPECT_CALL(*decoder_filters_[2], onDestroy());
-  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
-}
-
-class HttpConnectionManagerImplDeathTest : public HttpConnectionManagerImplTest {
-public:
-  Router::RouteConfigProvider* routeConfigProvider() override {
-    return route_config_provider2_.get();
-  }
-  Config::ConfigProvider* scopedRouteConfigProvider() override {
-    return scoped_route_config_provider2_.get();
-  }
-
-  std::shared_ptr<Router::MockRouteConfigProvider> route_config_provider2_;
-  std::shared_ptr<Router::MockScopedRouteConfigProvider> scoped_route_config_provider2_;
-};
-
-// HCM config can only have either RouteConfigProvider or ScopedRoutesConfigProvider.
-TEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) {
-  setup(false, "");
-
-  Buffer::OwnedImpl fake_input("1234");
-  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {
-    conn_manager_->newStream(response_encoder_);
-    return Http::okStatus();
-  }));
-  // Either RDS or SRDS should be set.
-  EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false),
-                     "Either routeConfigProvider or scopedRouteConfigProvider should be set in "
-                     "ConnectionManagerImpl.");
-
-  route_config_provider2_ = std::make_shared<NiceMock<Router::MockRouteConfigProvider>>();
-
-  // Only route config provider valid.
-  EXPECT_NO_THROW(conn_manager_->onData(fake_input, false));
-
-  scoped_route_config_provider2_ =
-      std::make_shared<NiceMock<Router::MockScopedRouteConfigProvider>>();
-  // Can't have RDS and SRDS provider in the same time.
-  EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false),
-                     "Either routeConfigProvider or scopedRouteConfigProvider should be set in "
-                     "ConnectionManagerImpl.");
-
-  route_config_provider2_.reset();
-  // Only scoped route config provider valid.
- EXPECT_NO_THROW(conn_manager_->onData(fake_input, false)); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); -} - } // namespace Http } // namespace Envoy diff --git a/test/common/http/conn_manager_impl_test_2.cc b/test/common/http/conn_manager_impl_test_2.cc new file mode 100644 index 000000000000..6a7ed3ab088a --- /dev/null +++ b/test/common/http/conn_manager_impl_test_2.cc @@ -0,0 +1,2970 @@ +#include "test/common/http/conn_manager_impl_test_base.h" +#include "test/test_common/logging.h" +#include "test/test_common/test_runtime.h" + +using testing::_; +using testing::AtLeast; +using testing::HasSubstr; +using testing::InSequence; +using testing::Invoke; +using testing::InvokeWithoutArgs; +using testing::Mock; +using testing::Property; +using testing::Ref; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Http { + +TEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) { + setup(false, "envoy-server-test"); + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + startRequest(); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_NE(nullptr, headers.Server()); + EXPECT_EQ("envoy-server-test", headers.getServerValue()); + })); + EXPECT_CALL(*decoder_filters_[0], onStreamComplete()); + EXPECT_CALL(*decoder_filters_[0], onDestroy()); + EXPECT_CALL(filter_callbacks_.connection_, + close(Network::ConnectionCloseType::FlushWriteAndDelay)); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); +} + +TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { + setup(false, "envoy-server-test"); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + startRequest(); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_NE(nullptr, headers.Connection()); + EXPECT_EQ("close", headers.getConnectionValue()); + EXPECT_EQ(nullptr, headers.ProxyConnection()); + })); + EXPECT_CALL(*decoder_filters_[0], onStreamComplete()); + EXPECT_CALL(*decoder_filters_[0], onDestroy()); + EXPECT_CALL(filter_callbacks_.connection_, + close(Network::ConnectionCloseType::FlushWriteAndDelay)); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); +} + +TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { + setup(false, ""); + + // This is like ResponseBeforeRequestComplete, but it tests the case where we start the reply + // before the request completes, but don't finish the reply until after the request completes. 
+ MockStreamDecoderFilter* filter = new NiceMock(); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*filter, decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + // Start the request + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + return Http::okStatus(); + })); + + Buffer::OwnedImpl fake_input("hello"); + conn_manager_->onData(fake_input, false); + + // Start the response + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_NE(nullptr, headers.Server()); + EXPECT_EQ("", headers.getServerValue()); + })); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); + + // Finish the request. + EXPECT_CALL(*filter, decodeData(_, true)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder_->decodeData(data, true); + return Http::okStatus(); + })); + + conn_manager_->onData(fake_input, false); + + // Since we started the response before the request was complete, we will still close the + // connection since we already sent a connection: close header. We won't "reset" the stream + // however. + EXPECT_CALL(filter_callbacks_.connection_, + close(Network::ConnectionCloseType::FlushWriteAndDelay)); + Buffer::OwnedImpl fake_response("world"); + filter->callbacks_->encodeData(fake_response, true); +} + +TEST_F(HttpConnectionManagerImplTest, DownstreamDisconnect) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + conn_manager_->newStream(response_encoder_); + data.drain(2); + return Http::okStatus(); + })); + + EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Now raise a remote disconnection, we should see the filter get reset called. + conn_manager_->onEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, DownstreamProtocolError) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + conn_manager_->newStream(response_encoder_); + return codecProtocolError("protocol error"); + })); + + EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); + EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); + + // A protocol exception should result in reset of the streams followed by a remote or local close + // depending on whether the downstream client closes the connection prior to the delayed close + // timer firing. + EXPECT_CALL(filter_callbacks_.connection_, + close(Network::ConnectionCloseType::FlushWriteAndDelay)); + + // Kick off the incoming data. 
+  Buffer::OwnedImpl fake_input("1234");
+  conn_manager_->onData(fake_input, false);
+}
+
+TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAccessLog) {
+  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());
+  access_logs_ = {handler};
+  setup(false, "");
+
+  EXPECT_CALL(*handler, log(_, _, _, _))
+      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,
+                          const StreamInfo::StreamInfo& stream_info) {
+        EXPECT_FALSE(stream_info.responseCode());
+        EXPECT_TRUE(stream_info.hasAnyResponseFlag());
+        EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError));
+      }));
+
+  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {
+    conn_manager_->newStream(response_encoder_);
+    return codecProtocolError("protocol error");
+  }));
+
+  Buffer::OwnedImpl fake_input("1234");
+  conn_manager_->onData(fake_input, false);
+}
+
+TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAfterHeadersAccessLog) {
+  setup(false, "");
+
+  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());
+  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());
+
+  EXPECT_CALL(filter_factory_, createFilterChain(_))
+      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {
+        callbacks.addStreamDecoderFilter(filter);
+        callbacks.addAccessLogHandler(handler);
+      }));
+
+  EXPECT_CALL(*handler, log(_, _, _, _))
+      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,
+                          const StreamInfo::StreamInfo& stream_info) {
+        EXPECT_FALSE(stream_info.responseCode());
+        EXPECT_TRUE(stream_info.hasAnyResponseFlag());
+        EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError));
+      }));
+
+  EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status {
+    decoder_ = &conn_manager_->newStream(response_encoder_);
+
+    RequestHeaderMapPtr headers{
+        new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}};
+    decoder_->decodeHeaders(std::move(headers), true);
+
+    return codecProtocolError("protocol error");
+  }));
+
+  Buffer::OwnedImpl fake_input("1234");
+  conn_manager_->onData(fake_input, false);
+}
+
+// Verify that FrameFloodException causes connection to be closed abortively.
+TEST_F(HttpConnectionManagerImplTest, FrameFloodError) {
+  std::shared_ptr<AccessLog::MockInstance> log_handler =
+      std::make_shared<NiceMock<AccessLog::MockInstance>>();
+  access_logs_ = {log_handler};
+  setup(false, "");
+
+  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
+    conn_manager_->newStream(response_encoder_);
+    return bufferFloodError("too many outbound frames");
+  }));
+
+  EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_));
+  EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0);
+
+  // FrameFloodException should result in reset of the streams followed by abortive close.
+  EXPECT_CALL(filter_callbacks_.connection_,
+              close(Network::ConnectionCloseType::FlushWriteAndDelay));
+
+  EXPECT_CALL(*log_handler, log(_, _, _, _))
+      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,
+                          const StreamInfo::StreamInfo& stream_info) {
+        ASSERT_TRUE(stream_info.responseCodeDetails().has_value());
+        EXPECT_EQ("codec_error:too_many_outbound_frames",
+                  stream_info.responseCodeDetails().value());
+      }));
+  // Kick off the incoming data.
+ Buffer::OwnedImpl fake_input("1234"); + EXPECT_LOG_NOT_CONTAINS("warning", "downstream HTTP flood", + conn_manager_->onData(fake_input, false)); + + EXPECT_TRUE(filter_callbacks_.connection_.streamInfo().hasResponseFlag( + StreamInfo::ResponseFlag::DownstreamProtocolError)); +} + +TEST_F(HttpConnectionManagerImplTest, IdleTimeoutNoCodec) { + // Not used in the test. + delete codec_; + + idle_timeout_ = (std::chrono::milliseconds(10)); + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(_, _)); + setup(false, ""); + + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); + EXPECT_CALL(*idle_timer, disableTimer()); + idle_timer->invokeCallback(); + + EXPECT_EQ(1U, stats_.named_.downstream_cx_idle_timeout_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { + idle_timeout_ = (std::chrono::milliseconds(10)); + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(_, _)); + setup(false, ""); + + MockStreamDecoderFilter* filter = new NiceMock(); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*idle_timer, disableTimer()); + EXPECT_CALL(*filter, decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*filter, decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + startRequest(true, "hello"); + + EXPECT_CALL(*idle_timer, enableTimer(_, _)); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); + + Event::MockTimer* drain_timer = setUpTimer(); + EXPECT_CALL(*drain_timer, enableTimer(_, _)); + idle_timer->invokeCallback(); + + EXPECT_CALL(*codec_, goAway()); + EXPECT_CALL(filter_callbacks_.connection_, + close(Network::ConnectionCloseType::FlushWriteAndDelay)); + EXPECT_CALL(*idle_timer, disableTimer()); + EXPECT_CALL(*drain_timer, disableTimer()); + drain_timer->invokeCallback(); + + EXPECT_EQ(1U, stats_.named_.downstream_cx_idle_timeout_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, ConnectionDurationNoCodec) { + // Not used in the test. 
+ delete codec_; + + max_connection_duration_ = (std::chrono::milliseconds(10)); + Event::MockTimer* connection_duration_timer = setUpTimer(); + EXPECT_CALL(*connection_duration_timer, enableTimer(_, _)); + setup(false, ""); + + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); + EXPECT_CALL(*connection_duration_timer, disableTimer()); + + connection_duration_timer->invokeCallback(); + + EXPECT_EQ(1U, stats_.named_.downstream_cx_max_duration_reached_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { + max_connection_duration_ = (std::chrono::milliseconds(10)); + Event::MockTimer* connection_duration_timer = setUpTimer(); + EXPECT_CALL(*connection_duration_timer, enableTimer(_, _)); + setup(false, ""); + + MockStreamDecoderFilter* filter = new NiceMock(); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*filter, decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*filter, decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + // Kick off the incoming data. + startRequest(true, "hello"); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); + + Event::MockTimer* drain_timer = setUpTimer(); + EXPECT_CALL(*drain_timer, enableTimer(_, _)); + connection_duration_timer->invokeCallback(); + + EXPECT_CALL(*codec_, goAway()); + EXPECT_CALL(filter_callbacks_.connection_, + close(Network::ConnectionCloseType::FlushWriteAndDelay)); + EXPECT_CALL(*connection_duration_timer, disableTimer()); + EXPECT_CALL(*drain_timer, disableTimer()); + drain_timer->invokeCallback(); + + EXPECT_EQ(1U, stats_.named_.downstream_cx_max_duration_reached_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { + setup(false, ""); + + setupFilterChain(2, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the request. + startRequest(true, "hello"); + + // Mimic a decoder filter that trapped data and now sends on the headers. + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus { + // Now filter 2 will send a complete response. + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), true, + "details"); + return FilterHeadersStatus::StopIteration; + })); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); + expectOnDestroy(); + + // Response is already complete so we drop buffered body data when we continue. 
+ EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).Times(0); + decoder_filters_[0]->callbacks_->continueDecoding(); +} + +TEST_F(HttpConnectionManagerImplTest, DoubleBuffering) { + setup(false, ""); + setupFilterChain(3, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_data_copy("hello"); + startRequest(true, "hello"); + + // Continue iteration and stop and buffer on the 2nd filter. + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[0]->callbacks_->continueDecoding(); + + // Continue iteration. We expect the 3rd filter to not receive double data but for the buffered + // data to have been kept inline as it moves through. + EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[2], decodeData(BufferEqual(&fake_data_copy), true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + EXPECT_CALL(*decoder_filters_[2], decodeComplete()); + decoder_filters_[1]->callbacks_->continueDecoding(); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { + setup(false, ""); + setupFilterChain(2, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + startRequest(); + + // Continue headers only of filter 1. + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->continueDecoding(); + + // Stop zero byte data. + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + Buffer::OwnedImpl zero; + decoder_->decodeData(zero, true); + + // Continue. 
+ EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[0]->callbacks_->continueDecoding(); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("hello"); + decoder_->decodeData(fake_data, false); + + RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}}; + decoder_->decodeTrailers(std::move(trailers)); + return Http::okStatus(); + })); + + setupFilterChain(2, 2); + + Http::LowerCaseString trailer_key("foo"); + std::string trailers_data("trailers"); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus { + Http::LowerCaseString key("foo"); + EXPECT_TRUE(trailers.get(key).empty()); + return FilterTrailersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // set up encodeHeaders expectations + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + // invoke encodeHeaders + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + + // set up encodeData expectations + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + + // invoke encodeData + Buffer::OwnedImpl response_body("response"); + decoder_filters_[0]->callbacks_->encodeData(response_body, false); + // set up encodeTrailer expectations + EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) + .WillOnce(Invoke([&](Http::HeaderMap& trailers) -> FilterTrailersStatus { + // assert that the trailers set in the previous filter was ignored + Http::LowerCaseString key("foo"); + EXPECT_TRUE(trailers.get(key).empty()); + return FilterTrailersStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + expectOnDestroy(); + + // invoke encodeTrailers + decoder_filters_[0]->callbacks_->encodeTrailers( + ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); +} + +TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) { + setup(false, ""); + setupFilterChain(2, 2); + + std::string trailers_data("trailers"); + Http::LowerCaseString trailer_key("foo"); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterDataStatus { + decoder_filters_[0]->callbacks_->addDecodedTrailers().addCopy(trailer_key, trailers_data); + return FilterDataStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // ensure that the second decodeData call sees end_stream = false + EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::Continue)); + + // since we added trailers, we should see decodeTrailers + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)).WillOnce(Invoke([&](HeaderMap& trailers) { + // ensure that we see the trailers set in decodeData + Http::LowerCaseString key("foo"); + auto t = trailers.get(key); + ASSERT(!t.empty()); + EXPECT_EQ(t[0]->value(), trailers_data.c_str()); + return FilterTrailersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + // Kick off the request. 
+ startRequest(true, "hello");
+
+ // set up encodeHeaders expectations
+ EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
+
+ // invoke encodeHeaders
+ decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails("");
+ decoder_filters_[0]->callbacks_->encodeHeaders(
+ ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details");
+
+ // set up encodeData expectations
+ EXPECT_CALL(*encoder_filters_[1], encodeData(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterDataStatus {
+ encoder_filters_[1]->callbacks_->addEncodedTrailers().addCopy(trailer_key, trailers_data);
+ return FilterDataStatus::Continue;
+ }));
+ EXPECT_CALL(*encoder_filters_[1], encodeComplete());
+ // ensure encodeData calls after adding trailers see end_stream = false
+ EXPECT_CALL(*encoder_filters_[0], encodeData(_, false))
+ .WillOnce(Return(FilterDataStatus::Continue));
+
+ EXPECT_CALL(response_encoder_, encodeData(_, false));
+
+ // since we added trailers, we should see encodeTrailers callbacks
+ EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)).WillOnce(Invoke([&](HeaderMap& trailers) {
+ // ensure that we see the trailers set in encodeData
+ Http::LowerCaseString key("foo");
+ auto t = trailers.get(key);
+ EXPECT_EQ(t[0]->value(), trailers_data.c_str());
+ return FilterTrailersStatus::Continue;
+ }));
+ EXPECT_CALL(*encoder_filters_[0], encodeComplete());
+
+ // Ensure that we call encodeTrailers
+ EXPECT_CALL(response_encoder_, encodeTrailers(_));
+
+ expectOnDestroy();
+ // invoke encodeData
+ Buffer::OwnedImpl response_body("response");
+ decoder_filters_[0]->callbacks_->encodeData(response_body, true);
+}
+
+TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) {
+ InSequence s;
+ setup(false, "");
+
+ EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
+ decoder_ = &conn_manager_->newStream(response_encoder_);
+ RequestHeaderMapPtr headers{
+ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
+ decoder_->decodeHeaders(std::move(headers), false);
+
+ Buffer::OwnedImpl fake_data("hello");
+ decoder_->decodeData(fake_data, false);
+
+ RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}};
+ decoder_->decodeTrailers(std::move(trailers));
+ return Http::okStatus();
+ }));
+
+ setupFilterChain(2, 2);
+
+ EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::StopIteration));
+ EXPECT_CALL(*decoder_filters_[0], decodeData(_, false))
+ .WillOnce(Return(FilterDataStatus::Continue));
+ EXPECT_CALL(*decoder_filters_[1], decodeData(_, false))
+ .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
+ Buffer::OwnedImpl trailers_data("hello");
+ EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus {
+ decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, true);
+ return FilterTrailersStatus::Continue;
+ }));
+ EXPECT_CALL(*decoder_filters_[1], decodeData(Ref(trailers_data), false))
+ .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
+ EXPECT_CALL(*decoder_filters_[0], decodeComplete());
+
EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[1]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + + Buffer::OwnedImpl response_body("response"); + decoder_filters_[1]->callbacks_->encodeData(response_body, false); + EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { + encoder_filters_[1]->callbacks_->addEncodedData(trailers_data, true); + return FilterTrailersStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[0], encodeData(Ref(trailers_data), false)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + expectOnDestroy(); + + decoder_filters_[1]->callbacks_->encodeTrailers( + ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); +} + +// Don't send data frames, only headers and trailers. 
+TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFrames) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; + decoder_->decodeTrailers(std::move(trailers)); + return Http::okStatus(); + })); + + setupFilterChain(2, 1); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + Buffer::OwnedImpl trailers_data("hello"); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { + decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, false); + return FilterTrailersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { + encoder_filters_[0]->callbacks_->addEncodedData(trailers_data, false); + return FilterTrailersStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + expectOnDestroy(); + + decoder_filters_[0]->callbacks_->encodeTrailers( + ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); +} + +// Don't send data frames, only headers and trailers. 
+TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAfterCallback) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; + decoder_->decodeTrailers(std::move(trailers)); + return Http::okStatus(); + })); + + setupFilterChain(2, 1); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + Buffer::OwnedImpl trailers_data("hello"); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { + decoder_filters_[0]->callbacks_->addDecodedData(trailers_data, false); + return FilterTrailersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + decoder_filters_[0]->callbacks_->continueDecoding(); + + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterTrailersStatus { + encoder_filters_[0]->callbacks_->addEncodedData(trailers_data, false); + return FilterTrailersStatus::StopIteration; + })); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + + decoder_filters_[0]->callbacks_->encodeTrailers( + ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + expectOnDestroy(); + + encoder_filters_[0]->callbacks_->continueEncoding(); +} + +// Add*Data during the *Data callbacks. 
+TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl data1("hello"); + decoder_->decodeData(data1, false); + + Buffer::OwnedImpl data2("world"); + decoder_->decodeData(data2, true); + return Http::okStatus(); + })); + + setupFilterChain(2, 2); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterDataStatus { + decoder_filters_[0]->callbacks_->addDecodedData(data, true); + EXPECT_EQ(decoder_filters_[0]->callbacks_->decodingBuffer()->toString(), "helloworld"); + return FilterDataStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> FilterDataStatus { + encoder_filters_[1]->callbacks_->addEncodedData(data, true); + EXPECT_EQ(encoder_filters_[1]->callbacks_->encodingBuffer()->toString(), "goodbye"); + return FilterDataStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeData(_, true)); + expectOnDestroy(); + + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[1]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + Buffer::OwnedImpl data1("good"); + decoder_filters_[1]->callbacks_->encodeData(data1, false); + Buffer::OwnedImpl data2("bye"); + decoder_filters_[1]->callbacks_->encodeData(data2, true); +} + +TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { + setup(false, ""); + setupFilterChain(2, 2); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + Buffer::OwnedImpl data("hello"); + decoder_filters_[0]->callbacks_->addDecodedData(data, true); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + 
EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::StopIteration));
+ EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))
+ .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer));
+ EXPECT_CALL(*decoder_filters_[1], decodeComplete());
+
+ // Kick off the incoming data.
+ startRequest(true);
+
+ EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+ Buffer::OwnedImpl data("hello");
+ encoder_filters_[1]->callbacks_->addEncodedData(data, true);
+ EXPECT_EQ(5UL, encoder_filters_[0]->callbacks_->encodingBuffer()->length());
+ return FilterHeadersStatus::Continue;
+ }));
+ EXPECT_CALL(*encoder_filters_[1], encodeComplete());
+ EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
+ EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))
+ .WillOnce(Return(FilterDataStatus::Continue));
+ EXPECT_CALL(*encoder_filters_[0], encodeComplete());
+ EXPECT_CALL(response_encoder_, encodeData(_, true));
+ expectOnDestroy();
+
+ decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails("");
+ decoder_filters_[1]->callbacks_->encodeHeaders(
+ ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true, "details");
+}
+
+TEST_F(HttpConnectionManagerImplTest, Filter) {
+ setup(false, "");
+
+ setupFilterChain(3, 2);
+ const std::string fake_cluster1_name = "fake_cluster1";
+ const std::string fake_cluster2_name = "fake_cluster2";
+
+ std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster1 =
+ std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();
+ EXPECT_CALL(cluster_manager_, get(_))
+ .WillOnce(Return(fake_cluster1.get()))
+ .WillOnce(Return(nullptr));
+
+ std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();
+ EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name));
+ std::shared_ptr<Router::MockRoute> route2 = std::make_shared<NiceMock<Router::MockRoute>>();
+ EXPECT_CALL(route2->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster2_name));
+
+ EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _))
+ .WillOnce(Return(route1))
+ .WillOnce(Return(route2))
+ .WillOnce(Return(nullptr));
+
+ EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+ EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());
+ EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry());
+ EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo());
+ decoder_filters_[0]->callbacks_->clearRouteCache();
+ return FilterHeadersStatus::Continue;
+ }));
+ EXPECT_CALL(*decoder_filters_[0], decodeComplete());
+ EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+ EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route());
+ EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry());
+ // RDS & CDS consistency problem: route2 points to fake_cluster2, which doesn't exist.
+ EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->clusterInfo()); + decoder_filters_[1]->callbacks_->clearRouteCache(); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->clusterInfo()); + EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->route()); + EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->streamInfo().routeEntry()); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[2], decodeComplete()); + + // Kick off the incoming data. + startRequest(true); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + + // Mimic the upstream connection backing up. The router would call + // onDecoderFilterAboveWriteBufferHighWatermark which should readDisable the stream and increment + // stats. + EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_)); + EXPECT_CALL(stream_, readDisable(true)); + ASSERT(decoder_filters_[0]->callbacks_ != nullptr); + decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); + EXPECT_EQ(1U, stats_.named_.downstream_flow_control_paused_reading_total_.value()); + + // Resume the flow of data. When the router buffer drains it calls + // onDecoderFilterBelowWriteBufferLowWatermark which should re-enable reads on the stream. + EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_)); + EXPECT_CALL(stream_, readDisable(false)); + ASSERT(decoder_filters_[0]->callbacks_ != nullptr); + decoder_filters_[0]->callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); + EXPECT_EQ(1U, stats_.named_.downstream_flow_control_resumed_reading_total_.value()); + + // Backup upstream once again. + EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_)); + EXPECT_CALL(stream_, readDisable(true)); + ASSERT(decoder_filters_[0]->callbacks_ != nullptr); + decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); + EXPECT_EQ(2U, stats_.named_.downstream_flow_control_paused_reading_total_.value()); + + // Send a full response. + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); + expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[1]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true, "details"); +} + +TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWithLazyCreation) { + setup(false, ""); + + // Make sure codec_ is created. + EXPECT_CALL(*codec_, dispatch(_)); + Buffer::OwnedImpl fake_input(""); + conn_manager_->onData(fake_input, false); + + // Mark the connection manger as backed up before the stream is created. + ASSERT_EQ(decoder_filters_.size(), 0); + EXPECT_CALL(*codec_, onUnderlyingConnectionAboveWriteBufferHighWatermark()); + conn_manager_->onAboveWriteBufferHighWatermark(); + + // Create the stream. 
Defer the creation of the filter chain by not sending
+ // complete headers.
+ {
+ setUpBufferLimits();
+ EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
+ decoder_ = &conn_manager_->newStream(response_encoder_);
+ // Call the high buffer callbacks as the codecs do.
+ stream_callbacks_->onAboveWriteBufferHighWatermark();
+ return Http::okStatus();
+ }));
+
+ // Send fake data to kick off newStream being created.
+ Buffer::OwnedImpl fake_input2("asdf");
+ conn_manager_->onData(fake_input2, false);
+ }
+
+ // Now set up the filter chain by sending full headers. The filters should be
+ // immediately apprised that the high watermark is in effect.
+ {
+ setupFilterChain(2, 2);
+ EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0);
+ EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
+ RequestHeaderMapPtr headers{
+ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
+ decoder_->decodeHeaders(std::move(headers), true);
+ return Http::okStatus();
+ }));
+ EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+ Buffer::OwnedImpl data("hello");
+ decoder_filters_[0]->callbacks_->addDecodedData(data, true);
+ return FilterHeadersStatus::Continue;
+ }));
+ EXPECT_CALL(*decoder_filters_[0], decodeComplete());
+ sendRequestHeadersAndData();
+ ASSERT_GE(decoder_filters_.size(), 1);
+ MockDownstreamWatermarkCallbacks callbacks;
+ EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark());
+ decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks);
+
+ // Ensures that when new callbacks are registered they get invoked immediately
+ // and the already-registered callbacks do not.
+ MockDownstreamWatermarkCallbacks callbacks2;
+ EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark());
+ decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2);
+ }
+ doRemoteClose();
+}
+
+TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithLazyCreation) {
+ setup(false, "");
+
+ // Make sure codec_ is created.
+ EXPECT_CALL(*codec_, dispatch(_));
+ Buffer::OwnedImpl fake_input("");
+ conn_manager_->onData(fake_input, false);
+
+ // Mark the connection manager as backed up before the stream is created.
+ ASSERT_EQ(decoder_filters_.size(), 0);
+ EXPECT_CALL(*codec_, onUnderlyingConnectionAboveWriteBufferHighWatermark());
+ conn_manager_->onAboveWriteBufferHighWatermark();
+
+ // Create the stream. Defer the creation of the filter chain by not sending
+ // complete headers.
+ {
+ setUpBufferLimits();
+ EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
+ decoder_ = &conn_manager_->newStream(response_encoder_);
+ // Call the high buffer callbacks as the codecs do.
+ stream_callbacks_->onAboveWriteBufferHighWatermark();
+ return Http::okStatus();
+ }));
+
+ // Send fake data to kick off newStream being created.
+ Buffer::OwnedImpl fake_input2("asdf");
+ conn_manager_->onData(fake_input2, false);
+ }
+
+ // Now before the filter chain is created, fire the low watermark callbacks
+ // and ensure they are passed down to the stream.
+ ASSERT(stream_callbacks_ != nullptr);
+ EXPECT_CALL(*codec_, onUnderlyingConnectionBelowWriteBufferLowWatermark())
+ .WillOnce(Invoke([&]() -> void { stream_callbacks_->onBelowWriteBufferLowWatermark(); }));
+ conn_manager_->onBelowWriteBufferLowWatermark();
+
+ // Now set up the filter chain by sending full headers. The filters should
+ // not get any watermark callbacks.
+ {
+ setupFilterChain(2, 2);
+ EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0);
+ EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
+ RequestHeaderMapPtr headers{
+ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
+ decoder_->decodeHeaders(std::move(headers), true);
+ return Http::okStatus();
+ }));
+ EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+ Buffer::OwnedImpl data("hello");
+ decoder_filters_[0]->callbacks_->addDecodedData(data, true);
+ return FilterHeadersStatus::Continue;
+ }));
+ EXPECT_CALL(*decoder_filters_[0], decodeComplete());
+ sendRequestHeadersAndData();
+ ASSERT_GE(decoder_filters_.size(), 1);
+ MockDownstreamWatermarkCallbacks callbacks;
+ EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0);
+ EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0);
+ decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks);
+ }
+ doRemoteClose();
+}
+
+TEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) {
+ initial_buffer_limit_ = 100;
+ setup(false, "");
+ setUpEncoderAndDecoder(false, false);
+ sendRequestHeadersAndData();
+
+ // Check initial limits.
+ EXPECT_EQ(initial_buffer_limit_, decoder_filters_[0]->callbacks_->decoderBufferLimit());
+ EXPECT_EQ(initial_buffer_limit_, encoder_filters_[0]->callbacks_->encoderBufferLimit());
+
+ // Check lowering the limits.
+ decoder_filters_[0]->callbacks_->setDecoderBufferLimit(initial_buffer_limit_ - 1);
+ EXPECT_EQ(initial_buffer_limit_ - 1, decoder_filters_[0]->callbacks_->decoderBufferLimit());
+
+ // Check raising the limits.
+ decoder_filters_[0]->callbacks_->setDecoderBufferLimit(initial_buffer_limit_ + 1);
+ EXPECT_EQ(initial_buffer_limit_ + 1, decoder_filters_[0]->callbacks_->decoderBufferLimit());
+ EXPECT_EQ(initial_buffer_limit_ + 1, encoder_filters_[0]->callbacks_->encoderBufferLimit());
+
+ // Verify turning off buffer limits works.
+ decoder_filters_[0]->callbacks_->setDecoderBufferLimit(0);
+ EXPECT_EQ(0, decoder_filters_[0]->callbacks_->decoderBufferLimit());
+
+ // Once the limits are turned off, they can be turned on again.
+ decoder_filters_[0]->callbacks_->setDecoderBufferLimit(100);
+ EXPECT_EQ(100, decoder_filters_[0]->callbacks_->decoderBufferLimit());
+
+ doRemoteClose();
+}
+
+TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) {
+ log_handler_ = std::make_shared<NiceMock<AccessLog::MockInstance>>();
+
+ initial_buffer_limit_ = 1;
+ streaming_filter_ = true;
+ setup(false, "");
+ setUpEncoderAndDecoder(false, false);
+
+ // The filter is a streaming filter. Sending 4 bytes should hit the
+ // watermark limit and disable reads on the stream.
+ EXPECT_CALL(stream_, readDisable(true));
+ sendRequestHeadersAndData();
+
+ // Change the limit so the buffered data is below the new watermark.
The + // stream should be read-enabled + EXPECT_CALL(stream_, readDisable(false)); + int buffer_len = decoder_filters_[0]->callbacks_->decodingBuffer()->length(); + decoder_filters_[0]->callbacks_->setDecoderBufferLimit((buffer_len + 1) * 2); + + // Start the response + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); + + MockDownstreamWatermarkCallbacks callbacks; + decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); + MockDownstreamWatermarkCallbacks callbacks2; + decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2); + + // Now overload the buffer with response data. The downstream watermark + // callbacks should be called. + EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); + Buffer::OwnedImpl fake_response("A long enough string to go over watermarks"); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark)); + decoder_filters_[0]->callbacks_->encodeData(fake_response, false); + + // unregister callbacks2 + decoder_filters_[0]->callbacks_->removeDownstreamWatermarkCallbacks(callbacks2); + + // Change the limit so the buffered data is below the new watermark. + buffer_len = encoder_filters_[1]->callbacks_->encodingBuffer()->length(); + EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()); + EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); + encoder_filters_[1]->callbacks_->setEncoderBufferLimit((buffer_len + 1) * 2); + + EXPECT_CALL(*log_handler_, log(_, _, _, _)) + .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, + const StreamInfo::StreamInfo& stream_info) { + EXPECT_FALSE(stream_info.hasAnyResponseFlag()); + })); + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); +} + +TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimits) { + initial_buffer_limit_ = 10; + streaming_filter_ = false; + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + + // Set the filter to be a buffering filter. Sending any data will hit the + // watermark limit and result in a 413 being sent to the user. + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "413"}, {"content-length", "17"}, {"content-type", "text/plain"}}; + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(HeaderMapEqualRef(&response_headers), false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark)); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + Buffer::OwnedImpl data("A longer string"); + decoder_filters_[0]->callbacks_->addDecodedData(data, false); + const auto rc_details = encoder_filters_[1]->callbacks_->streamInfo().responseCodeDetails(); + EXPECT_EQ("request_payload_too_large", rc_details.value()); + + doRemoteClose(); +} + +// Return 413 from an intermediate filter and make sure we don't continue the filter chain. 
+TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) { + InSequence s; + initial_buffer_limit_ = 10; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("hello"); + decoder_->decodeData(fake_data, false); + + Buffer::OwnedImpl fake_data2("world world"); + decoder_->decodeData(fake_data2, true); + return Http::okStatus(); + })); + + setUpBufferLimits(); + setupFilterChain(2, 1); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "413"}, {"content-length", "17"}, {"content-type", "text/plain"}}; + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(HeaderMapEqualRef(&response_headers), false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + doRemoteClose(false); +} + +TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { + initial_buffer_limit_ = 10; + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + + // Start the response without processing the request headers through all + // filters. + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); + + // Now overload the buffer with response data. The filter returns + // StopIterationAndBuffer, which will trigger an early response. + + expectOnDestroy(); + Buffer::OwnedImpl fake_response("A long enough string to go over watermarks"); + // Fake response starts doing through the filter. + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + std::string response_body; + // The 500 goes directly to the encoder. + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) + .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { + // Make sure this is a 500 + EXPECT_EQ("500", headers.getStatusValue()); + // Make sure Envoy standard sanitization has been applied. 
+ EXPECT_TRUE(headers.Date() != nullptr); + EXPECT_EQ("response_payload_too_large", + decoder_filters_[0]->callbacks_->streamInfo().responseCodeDetails().value()); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); + decoder_filters_[0]->callbacks_->encodeData(fake_response, false); + EXPECT_EQ("Internal Server Error", response_body); + + EXPECT_EQ(1U, stats_.named_.rs_too_large_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) { + initial_buffer_limit_ = 10; + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + + // Start the response, and make sure the request headers are fully processed. + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); + + // Now overload the buffer with response data. The filter returns + // StopIterationAndBuffer, which will trigger an early reset. + const std::string data = "A long enough string to go over watermarks"; + Buffer::OwnedImpl fake_response(data); + InSequence s; + EXPECT_CALL(stream_, removeCallbacks(_)); + expectOnDestroy(false); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(stream_, resetStream(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + EXPECT_LOG_CONTAINS( + "debug", + "Resetting stream due to response_payload_too_large. Prior headers have already been sent", + decoder_filters_[0]->callbacks_->encodeData(fake_response, false);); + EXPECT_EQ(1U, stats_.named_.rs_too_large_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "HEAD"}}}; + decoder_->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + setupFilterChain(1, 1); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, "Bad request", nullptr, + absl::nullopt, ""); + return FilterHeadersStatus::Continue; + })); + + EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true)); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true)) + .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { + EXPECT_EQ("11", headers.getContentLengthValue()); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); + expectOnDestroy(); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); + conn_manager_->onData(fake_input, false); +} + +// Verify that if an encoded stream has been ended, but gets stopped by a filter chain, we end +// up resetting the stream in the doEndStream() path (e.g., via filter reset due to timeout, etc.), +// we emit a reset to the codec. +TEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) { + setup(false, ""); + setupFilterChain(1, 1); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, "Bad request", nullptr, + absl::nullopt, ""); + return FilterHeadersStatus::Continue; + })); + + EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true)); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { + EXPECT_EQ("11", headers.getContentLengthValue()); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterDataStatus { + return FilterDataStatus::StopIterationAndBuffer; + })); + + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the request + startRequest(true); + + EXPECT_CALL(response_encoder_.stream_, resetStream(_)); + expectOnDestroy(); + encoder_filters_[0]->callbacks_->resetStream(); +} + +// Filter continues headers iteration without ending the stream, then injects a body later. +TEST_F(HttpConnectionManagerImplTest, FilterContinueDontEndStreamInjectBody) { + setup(false, ""); + setupFilterChain(2, 2); + + // Decode filter 0 changes end_stream to false. + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Return(FilterHeadersStatus::ContinueAndDontEndStream)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + + // Kick off the incoming data. + startRequest(true); + + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + // Decode filter 0 injects request body later. + Buffer::OwnedImpl data("hello"); + decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(data, true); + + // Encode filter 1 changes end_stream to false. + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)) + .WillOnce(Return(FilterHeadersStatus::ContinueAndDontEndStream)); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[1]->callbacks_->encodeHeaders( + makeHeaderMap({{":status", "200"}}), true, "details"); + + EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeData(_, true)); + expectOnDestroy(); + + // Encode filter 1 injects request body later. 
+ Buffer::OwnedImpl data2("hello");
+ encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(data2, true);
+}
+
+TEST_F(HttpConnectionManagerImplTest, FilterAddBodyContinuation) {
+ setup(false, "");
+ setupFilterChain(2, 2);
+
+ EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
+ .WillOnce(Return(FilterHeadersStatus::StopIteration));
+ EXPECT_CALL(*decoder_filters_[0], decodeComplete());
+
+ // Kick off the incoming request.
+ startRequest(true);
+
+ EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(*decoder_filters_[1], decodeData(_, true))
+ .WillOnce(Return(FilterDataStatus::Continue));
+ EXPECT_CALL(*decoder_filters_[1], decodeComplete());
+
+ Buffer::OwnedImpl data("hello");
+ decoder_filters_[0]->callbacks_->addDecodedData(data, true);
+ decoder_filters_[0]->callbacks_->continueDecoding();
+
+ EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))
+ .WillOnce(Return(FilterHeadersStatus::StopIteration));
+ EXPECT_CALL(*encoder_filters_[1], encodeComplete());
+
+ decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails("");
+ decoder_filters_[1]->callbacks_->encodeHeaders(
+ ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true, "details");
+
+ EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
+ EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))
+ .WillOnce(Return(FilterDataStatus::Continue));
+ EXPECT_CALL(*encoder_filters_[0], encodeComplete());
+ EXPECT_CALL(response_encoder_, encodeData(_, true));
+ expectOnDestroy();
+
+ Buffer::OwnedImpl data2("hello");
+ encoder_filters_[1]->callbacks_->addEncodedData(data2, true);
+ encoder_filters_[1]->callbacks_->continueEncoding();
+}
+
+// This test verifies proper sequences of decodeData() and encodeData() are called
+// when all filters return "CONTINUE" in the following case:
+//
+// 3 decode filters:
+//
+// filter0->decodeHeaders(_, true)
+// return CONTINUE
+// filter1->decodeHeaders(_, true)
+// filter1->addDecodedData()
+// return CONTINUE
+// filter2->decodeHeaders(_, false)
+// return CONTINUE
+// filter2->decodeData(_, true)
+// return CONTINUE
+//
+// filter0->decodeData(_, true) is NOT called.
+// filter1->decodeData(_, true) is NOT called.
+//
+// 3 encode filters:
+//
+// filter2->encodeHeaders(_, true)
+// return CONTINUE
+// filter1->encodeHeaders(_, true)
+// filter1->addEncodedData()
+// return CONTINUE
+// filter0->encodeHeaders(_, false)
+// return CONTINUE
+// filter0->encodeData(_, true)
+// return CONTINUE
+//
+// filter2->encodeData(_, true) is NOT called.
+// filter1->encodeData(_, true) is NOT called.
+//
+TEST_F(HttpConnectionManagerImplTest, AddDataWithAllContinue) {
+ setup(false, "");
+ setupFilterChain(3, 3);
+
+ EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(*decoder_filters_[0], decodeComplete());
+
+ EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+ Buffer::OwnedImpl data2("hello");
+ decoder_filters_[1]->callbacks_->addDecodedData(data2, true);
+ return FilterHeadersStatus::Continue;
+ }));
+ EXPECT_CALL(*decoder_filters_[1], decodeComplete());
+
+ EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(*decoder_filters_[2], decodeData(_, true))
+ .WillOnce(Return(FilterDataStatus::Continue));
+ EXPECT_CALL(*decoder_filters_[2], decodeComplete());
+
+ EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)).Times(0);
+ EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).Times(0);
+
+ // Kick off the incoming data.
+ startRequest(true);
+
+ // For encode direction
+ EXPECT_CALL(*encoder_filters_[2], encodeHeaders(_, true))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(*encoder_filters_[2], encodeComplete());
+
+ EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true))
+ .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+ Buffer::OwnedImpl data2("goodbyte");
+ encoder_filters_[1]->callbacks_->addEncodedData(data2, true);
+ return FilterHeadersStatus::Continue;
+ }));
+ EXPECT_CALL(*encoder_filters_[1], encodeComplete());
+
+ EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false))
+ .WillOnce(Return(FilterHeadersStatus::Continue));
+ EXPECT_CALL(response_encoder_, encodeHeaders(_, false));
+ EXPECT_CALL(*encoder_filters_[0], encodeData(_, true))
+ .WillOnce(Return(FilterDataStatus::Continue));
+ EXPECT_CALL(*encoder_filters_[0], encodeComplete());
+ EXPECT_CALL(response_encoder_, encodeData(_, true));
+ expectOnDestroy();
+
+ EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0);
+ EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0);
+
+ decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails("");
+ decoder_filters_[2]->callbacks_->encodeHeaders(
+ ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true, "details");
+}
+
+// This test verifies proper sequences of decodeData() and encodeData() are called
+// when the first filter is "stopped" and then "continued" in the following case:
+//
+// 3 decode filters:
+//
+// filter0->decodeHeaders(_, true)
+// return STOP
+// filter0->continueDecoding()
+// filter1->decodeHeaders(_, true)
+// filter1->addDecodedData()
+// return CONTINUE
+// filter2->decodeHeaders(_, false)
+// return CONTINUE
+// filter2->decodeData(_, true)
+// return CONTINUE
+//
+// filter0->decodeData(_, true) is NOT called.
+// filter1->decodeData(_, true) is NOT called.
+//
+// 3 encode filters:
+//
+// filter2->encodeHeaders(_, true)
+// return STOP
+// filter2->continueEncoding()
+// filter1->encodeHeaders(_, true)
+// filter1->addEncodedData()
+// return CONTINUE
+// filter0->encodeHeaders(_, false)
+// return CONTINUE
+// filter0->encodeData(_, true)
+// return CONTINUE
+//
+// filter2->encodeData(_, true) is NOT called.
+// filter1->encodeData(_, true) is NOT called.
+// +TEST_F(HttpConnectionManagerImplTest, AddDataWithStopAndContinue) { + setup(false, ""); + + setupFilterChain(3, 3); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the request. + startRequest(true); + + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + Buffer::OwnedImpl data2("hello"); + decoder_filters_[1]->callbacks_->addDecodedData(data2, true); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + // This fail, it is called twice. + EXPECT_CALL(*decoder_filters_[2], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[2], decodeComplete()); + + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)).Times(0); + // This fail, it is called once + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).Times(0); + + decoder_filters_[0]->callbacks_->continueDecoding(); + + // For encode direction + EXPECT_CALL(*encoder_filters_[2], encodeHeaders(_, true)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*encoder_filters_[2], encodeComplete()); + + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[2]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true, "details"); + + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + Buffer::OwnedImpl data2("goodbyte"); + encoder_filters_[1]->callbacks_->addEncodedData(data2, true); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + EXPECT_CALL(*encoder_filters_[0], encodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeData(_, true)); + expectOnDestroy(); + + EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0); + + encoder_filters_[2]->callbacks_->continueEncoding(); +} + +// Use filter direct decode/encodeData() calls without trailers. +TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { + setup(false, ""); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); + setupFilterChain(2, 2); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + Buffer::OwnedImpl decode_buffer; + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) { + decode_buffer.move(data); + return FilterDataStatus::StopIterationNoBuffer; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the request. 
+ EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); + startRequest(true, "hello"); + + Buffer::OwnedImpl decoded_data_to_forward; + decoded_data_to_forward.move(decode_buffer, 2); + EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("he"), false)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decoded_data_to_forward, false); + + EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("llo"), true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decode_buffer, true); + + // Response path. + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + Buffer::OwnedImpl encoder_buffer; + EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) { + encoder_buffer.move(data); + return FilterDataStatus::StopIterationNoBuffer; + })); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[1]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + Buffer::OwnedImpl response_body("response"); + decoder_filters_[1]->callbacks_->encodeData(response_body, true); + + Buffer::OwnedImpl encoded_data_to_forward; + encoded_data_to_forward.move(encoder_buffer, 3); + EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("res"), false)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoded_data_to_forward, false); + + EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("ponse"), true)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeData(_, true)); + expectOnDestroy(); + encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoder_buffer, true); +} + +// Use filter direct decode/encodeData() calls with trailers. 
+TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("hello"); + decoder_->decodeData(fake_data, false); + + RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; + decoder_->decodeTrailers(std::move(trailers)); + return Http::okStatus(); + })); + + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); + setupFilterChain(2, 2); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + + Buffer::OwnedImpl decode_buffer; + EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) { + decode_buffer.move(data); + return FilterDataStatus::StopIterationNoBuffer; + })); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + Buffer::OwnedImpl decoded_data_to_forward; + decoded_data_to_forward.move(decode_buffer, 2); + EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("he"), false)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decoded_data_to_forward, false); + + EXPECT_CALL(*decoder_filters_[1], decodeData(BufferStringEqual("llo"), false)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + decoder_filters_[0]->callbacks_->injectDecodedDataToFilterChain(decode_buffer, false); + + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[0]->callbacks_->continueDecoding(); + + // Response path. 
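+  // The response half below mirrors the request half: encoder_filters_[1] swallows the body in
+  // encodeData() and later pushes it back to the rest of the chain in two slices through
+  // injectEncodedDataToFilterChain(). From a real filter this looks roughly like the following
+  // (illustrative sketch only; encoder_callbacks_ stands in for the filter's stored
+  // StreamEncoderFilterCallbacks):
+  //
+  //   Buffer::OwnedImpl slice("res");
+  //   encoder_callbacks_->injectEncodedDataToFilterChain(slice, /*end_stream=*/false);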
+ EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + Buffer::OwnedImpl encoder_buffer; + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) { + encoder_buffer.move(data); + return FilterDataStatus::StopIterationNoBuffer; + })); + EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::StopIteration)); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[1]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + Buffer::OwnedImpl response_body("response"); + decoder_filters_[1]->callbacks_->encodeData(response_body, false); + decoder_filters_[1]->callbacks_->encodeTrailers( + ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); + + Buffer::OwnedImpl encoded_data_to_forward; + encoded_data_to_forward.move(encoder_buffer, 3); + EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("res"), false)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoded_data_to_forward, false); + + EXPECT_CALL(*encoder_filters_[0], encodeData(BufferStringEqual("ponse"), false)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + encoder_filters_[1]->callbacks_->injectEncodedDataToFilterChain(encoder_buffer, false); + + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + expectOnDestroy(); + encoder_filters_[1]->callbacks_->continueEncoding(); +} + +TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { + InSequence s; + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("hello"); + decoder_->decodeData(fake_data, false); + + Buffer::OwnedImpl fake_data2("world"); + decoder_->decodeData(fake_data2, true); + return Http::okStatus(); + })); + + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); + setupFilterChain(3, 2); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(route_config_provider_.route_config_->route_, + decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(ssl_connection_.get(), + decoder_filters_[0]->callbacks_->connection()->ssl().get()); + return FilterHeadersStatus::StopIteration; + })); + + EXPECT_CALL(*decoder_filters_[0], decodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); + conn_manager_->onData(fake_input, false); + + // Mimic a decoder filter that trapped data and now sends it on, since the data was buffered + // by the first filter, we expect to get it in 1 decodeData() call. + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(route_config_provider_.route_config_->route_, + decoder_filters_[1]->callbacks_->route()); + EXPECT_EQ(ssl_connection_.get(), + decoder_filters_[1]->callbacks_->connection()->ssl().get()); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[2], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + EXPECT_CALL(*decoder_filters_[2], decodeComplete()); + decoder_filters_[0]->callbacks_->continueDecoding(); + + // Now start encoding and mimic trapping in the encoding filter. + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); + EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::StopIteration)); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + EXPECT_EQ(ssl_connection_.get(), encoder_filters_[1]->callbacks_->connection()->ssl().get()); + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[2]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + Buffer::OwnedImpl response_body("response"); + decoder_filters_[2]->callbacks_->encodeData(response_body, false); + decoder_filters_[2]->callbacks_->encodeTrailers( + ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); + EXPECT_EQ(ssl_connection_.get(), decoder_filters_[2]->callbacks_->connection()->ssl().get()); + + // Now finish the encode. 
+ EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, false)) + .WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeData(_, false)); + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + expectOnDestroy(); + encoder_filters_[1]->callbacks_->continueEncoding(); + + EXPECT_EQ(ssl_connection_.get(), encoder_filters_[0]->callbacks_->connection()->ssl().get()); +} + +TEST(HttpConnectionManagerTracingStatsTest, verifyTracingStats) { + Stats::IsolatedStoreImpl stats; + ConnectionManagerTracingStats tracing_stats{CONN_MAN_TRACING_STATS(POOL_COUNTER(stats))}; + + EXPECT_THROW( + ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::HealthCheck, tracing_stats), + std::invalid_argument); + + ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::ClientForced, tracing_stats); + EXPECT_EQ(1UL, tracing_stats.client_enabled_.value()); + + ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::NotTraceableRequestId, tracing_stats); + EXPECT_EQ(1UL, tracing_stats.not_traceable_.value()); + + ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::Sampling, tracing_stats); + EXPECT_EQ(1UL, tracing_stats.random_sampling_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { + Server::OverloadActionState stop_accepting_requests = Server::OverloadActionState(0.8); + ON_CALL(overload_manager_.overload_state_, + getState(Server::OverloadActionNames::get().StopAcceptingRequests)) + .WillByDefault(ReturnRef(stop_accepting_requests)); + + setup(false, ""); + + EXPECT_CALL(random_, random()) + .WillRepeatedly(Return(static_cast(Random::RandomGenerator::max()) * 0.5)); + + // 503 direct response when overloaded. 
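+  // The StopAcceptingRequests action above is a scaled overload action: with its state at 0.8
+  // and random() pinned to half of the generator's range, this request appears to land in the
+  // rejected fraction, so instead of creating a stream the connection manager is expected to
+  // answer directly with the 503 below (a reading of the setup; the exact threshold comparison
+  // lives in the connection manager implementation).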
+  EXPECT_CALL(response_encoder_, encodeHeaders(_, false))
+      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {
+        EXPECT_EQ("503", headers.getStatusValue());
+      }));
+  std::string response_body;
+  EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body));
+
+  startRequest();
+
+  EXPECT_EQ("envoy overloaded", response_body);
+  EXPECT_EQ(1U, stats_.named_.downstream_rq_overload_close_.value());
+}
+
+TEST_F(HttpConnectionManagerImplTest, DisableHttp1KeepAliveWhenOverloaded) {
+  Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState(0.8);
+  ON_CALL(overload_manager_.overload_state_,
+          getState(Server::OverloadActionNames::get().DisableHttpKeepAlive))
+      .WillByDefault(ReturnRef(disable_http_keep_alive));
+
+  codec_->protocol_ = Protocol::Http11;
+  setup(false, "");
+
+  EXPECT_CALL(random_, random())
+      .WillRepeatedly(Return(static_cast<double>(Random::RandomGenerator::max()) * 0.5));
+
+  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());
+  EXPECT_CALL(filter_factory_, createFilterChain(_))
+      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {
+        callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter});
+      }));
+
+  EXPECT_CALL(*codec_, dispatch(_))
+      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {
+        decoder_ = &conn_manager_->newStream(response_encoder_);
+        RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"},
+                                                                 {":path", "/"},
+                                                                 {":method", "GET"},
+                                                                 {"connection", "keep-alive"}}};
+        decoder_->decodeHeaders(std::move(headers), true);
+
+        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}};
+        filter->callbacks_->streamInfo().setResponseCodeDetails("");
+        filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details");
+
+        data.drain(4);
+        return Http::okStatus();
+      }));
+
+  EXPECT_CALL(response_encoder_, encodeHeaders(_, true))
+      .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void {
+        EXPECT_EQ("close", headers.getConnectionValue());
+      }));
+
+  Buffer::OwnedImpl fake_input("1234");
+  conn_manager_->onData(fake_input, false);
+  EXPECT_EQ(1U, stats_.named_.downstream_cx_overload_disable_keepalive_.value());
+}
+
+class DrainH2HttpConnectionManagerImplTest : public HttpConnectionManagerImplTest,
+                                             public testing::WithParamInterface<bool> {
+public:
+  DrainH2HttpConnectionManagerImplTest() {
+    Runtime::LoaderSingleton::getExisting()->mergeValues(
+        {{"envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2", "true"}});
+  }
+
+private:
+  TestScopedRuntime runtime_;
+};
+
+// Verify that, if the runtime option is enabled, HTTP2 connections will receive
+// a GOAWAY message when the overload action is triggered.
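+// TEST_P runs the body once per parameter produced by the INSTANTIATE_TEST_SUITE_P below
+// (testing::Bool() yields GetParam() == false and true), and the GOAWAY itself is observed in
+// the test as a call to the codec's shutdownNotice().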
+TEST_P(DrainH2HttpConnectionManagerImplTest, DisableHttp2KeepAliveWhenOverloaded) { + Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState::saturated(); + ON_CALL(overload_manager_.overload_state_, + getState(Server::OverloadActionNames::get().DisableHttpKeepAlive)) + .WillByDefault(ReturnRef(disable_http_keep_alive)); + + codec_->protocol_ = Protocol::Http2; + setup(false, ""); + if (GetParam()) { + EXPECT_CALL(*codec_, shutdownNotice); + } + + std::shared_ptr filter(new NiceMock()); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "keep-alive"}}}; + decoder_->decodeHeaders(std::move(headers), true); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); + + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + Mock::VerifyAndClearExpectations(codec_); + EXPECT_EQ(1, stats_.named_.downstream_cx_overload_disable_keepalive_.value()); +} + +INSTANTIATE_TEST_SUITE_P(WithRuntimeOverride, DrainH2HttpConnectionManagerImplTest, + testing::Bool()); + +TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathFirstFilter) { + setup(false, "envoy-custom-server", false); + setUpEncoderAndDecoder(true, true); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Verify that once the decoder_filters_[0]'s continueDecoding() is called, decoder_filters_[1]'s + // decodeHeaders() is called, and both filters receive data and trailers consequently. + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[0]->callbacks_->continueDecoding(); + + doRemoteClose(); +} + +TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathSecondFilter) { + setup(false, "envoy-custom-server", false); + setUpEncoderAndDecoder(true, false); + + // Verify headers go through both filters, and data and trailers go through the first filter only. 
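+  // decoder_filters_[1] answers decodeHeaders() with StopAllIterationAndBuffer which, unlike
+  // plain StopIteration, also holds back the buffered data and trailers from the second filter
+  // until its continueDecoding() is invoked further down; decoder_filters_[0] sits ahead of the
+  // stopping filter and therefore still sees the data and trailers immediately.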
+ EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _)) + .WillOnce(Return(FilterHeadersStatus::StopAllIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Verify that once the decoder_filters_[1]'s continueDecoding() is called, both data and trailers + // go through the second filter. + EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[1]->callbacks_->continueDecoding(); + + doRemoteClose(); +} + +TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnEncodingPath) { + setup(false, "envoy-custom-server", false); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + + // encoder_filters_[1] is the first filter in the chain. + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Invoke([&](HeaderMap&, bool) -> FilterHeadersStatus { + return FilterHeadersStatus::StopAllIterationAndBuffer; + })); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); + + // Invoke encodeData while all iteration is stopped and make sure the filters do not have + // encodeData called. + EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).Times(0); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).Times(0); + Buffer::OwnedImpl response_body("response"); + decoder_filters_[0]->callbacks_->encodeData(response_body, false); + decoder_filters_[0]->callbacks_->encodeTrailers( + ResponseTrailerMapPtr{new TestResponseTrailerMapImpl{{"some", "trailer"}}}); + + // Verify that once encoder_filters_[1]'s continueEncoding() is called, encoder_filters_[0]'s + // encodeHeaders() is called, and both filters receive data and trailers consequently. 
+ EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeData(_, _)); + EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + expectOnDestroy(); + encoder_filters_[1]->callbacks_->continueEncoding(); +} + +TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenDraining) { + setup(false, ""); + + EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true)); + + std::shared_ptr filter(new NiceMock()); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "keep-alive"}}}; + decoder_->decodeHeaders(std::move(headers), true); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); + + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("close", headers.getConnectionValue()); + })); + + Buffer::OwnedImpl fake_input; + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { + setup(false, ""); + + // Set up the codec. + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + data.drain(4); + return Http::okStatus(); + })); + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + setupFilterChain(1, 1); + + // Create a new stream + decoder_ = &conn_manager_->newStream(response_encoder_); + + // Send headers to that stream, and verify we both set and clear the tracked object. + { + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "POST"}}}; + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_)) + .Times(2) + .WillOnce(Invoke([](const ScopeTrackedObject* object) -> const ScopeTrackedObject* { + ASSERT(object != nullptr); // On the first call, this should be the active stream. 
+ std::stringstream out; + object->dumpState(out); + std::string state = out.str(); + EXPECT_THAT(state, + testing::HasSubstr("filter_manager_callbacks_.requestHeaders(): empty")); + EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); + return nullptr; + })) + .WillRepeatedly(Return(nullptr)); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Invoke([](HeaderMap&, bool) -> FilterHeadersStatus { + return FilterHeadersStatus::StopIteration; + })); + decoder_->decodeHeaders(std::move(headers), false); + } + + // Send trailers to that stream, and verify by this point headers are in logged state. + { + RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_)) + .Times(2) + .WillOnce(Invoke([](const ScopeTrackedObject* object) -> const ScopeTrackedObject* { + ASSERT(object != nullptr); // On the first call, this should be the active stream. + std::stringstream out; + object->dumpState(out); + std::string state = out.str(); + EXPECT_THAT(state, testing::HasSubstr("filter_manager_callbacks_.requestHeaders(): \n")); + EXPECT_THAT(state, testing::HasSubstr("':authority', 'host'\n")); + EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); + return nullptr; + })) + .WillRepeatedly(Return(nullptr)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::StopIteration)); + decoder_->decodeTrailers(std::move(trailers)); + } + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +// SRDS no scope found. +TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) { + setup(false, "", true, true); + setupFilterChain(1, 0); // Recreate the chain for second stream. + + EXPECT_CALL(*static_cast( + scopedRouteConfigProvider()->config().get()), + getRouteConfig(_)) + .Times(2) + .WillRepeatedly(Return(nullptr)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; + decoder_->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(nullptr, decoder_filters_[0]->callbacks_->route()); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true. + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +// SRDS updating scopes affects routing. +TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { + setup(false, "", true, true); + + EXPECT_CALL(*static_cast( + scopedRouteConfigProvider()->config().get()), + getRouteConfig(_)) + .Times(3) + .WillOnce(Return(nullptr)) + .WillOnce(Return(nullptr)) // refreshCachedRoute first time. + .WillOnce(Return(route_config_)); // triggered by callbacks_->route(), SRDS now updated. 
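+  // Seen from a filter, the re-resolution exercised below is roughly (illustrative sketch;
+  // decoder_callbacks_ stands in for the filter's StreamDecoderFilterCallbacks):
+  //
+  //   decoder_callbacks_->clearRouteCache();    // drop the previously cached (null) route
+  //   auto route = decoder_callbacks_->route();  // re-snaps the scoped config and re-resolves
+  //
+  // which is what the decodeHeaders() lambda on decoder_filters_[0] does next.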
+  EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {
+    decoder_ = &conn_manager_->newStream(response_encoder_);
+    RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{
+        {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}};
+    decoder_->decodeHeaders(std::move(headers), true);
+    data.drain(4);
+    return Http::okStatus();
+  }));
+  const std::string fake_cluster1_name = "fake_cluster1";
+  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();
+  EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name));
+  std::shared_ptr<Upstream::MockThreadLocalCluster> fake_cluster1 =
+      std::make_shared<NiceMock<Upstream::MockThreadLocalCluster>>();
+  EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get()));
+  EXPECT_CALL(*route_config_, route(_, _, _, _)).WillOnce(Return(route1));
+  // The first no-scope-found request will be handled by decoder_filters_[0].
+  setupFilterChain(1, 0);
+  EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
+      .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
+        EXPECT_EQ(nullptr, decoder_filters_[0]->callbacks_->route());
+
+        // Clear the cached route; the next call on callbacks_->route() will trigger a
+        // re-snapping of the snapped_route_config_.
+        decoder_filters_[0]->callbacks_->clearRouteCache();
+
+        // Now the route config provider returns something.
+        EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route());
+        EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry());
+        EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo());
+        return FilterHeadersStatus::StopIteration;
+      }));
+  EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true.
+  Buffer::OwnedImpl fake_input("1234");
+  conn_manager_->onData(fake_input, false);
+
+  expectOnDestroy();
+  filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose);
+}
+
+// SRDS scope header update causes a cross-scope reroute.
+TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) {
+  setup(false, "", true, true);
+
+  std::shared_ptr<Router::MockConfig> route_config1 =
+      std::make_shared<NiceMock<Router::MockConfig>>();
+  std::shared_ptr<Router::MockConfig> route_config2 =
+      std::make_shared<NiceMock<Router::MockConfig>>();
+  std::shared_ptr<Router::MockRoute> route1 = std::make_shared<NiceMock<Router::MockRoute>>();
+  std::shared_ptr<Router::MockRoute> route2 = std::make_shared<NiceMock<Router::MockRoute>>();
+  EXPECT_CALL(*route_config1, route(_, _, _, _)).WillRepeatedly(Return(route1));
+  EXPECT_CALL(*route_config2, route(_, _, _, _)).WillRepeatedly(Return(route2));
+  EXPECT_CALL(*static_cast<const Router::MockScopedConfig*>(
+                  scopedRouteConfigProvider()->config().get()),
+              getRouteConfig(_))
+      // 1. Snap the scoped route config;
+      // 2. refreshCachedRoute() (both in decodeHeaders(headers, end_stream));
+      // 3. then refreshCachedRoute() triggered by decoder_filters_[1]->callbacks_->route().
+ .Times(3) + .WillRepeatedly(Invoke([&](const HeaderMap& headers) -> Router::ConfigConstSharedPtr { + auto& test_headers = dynamic_cast(headers); + if (test_headers.get_("scope_key") == "foo") { + return route_config1; + } + return route_config2; + })); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":method", "GET"}, {"scope_key", "foo"}, {":path", "/foo"}}}; + decoder_->decodeHeaders(std::move(headers), false); + data.drain(4); + return Http::okStatus(); + })); + setupFilterChain(2, 0); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus { + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); + auto& test_headers = dynamic_cast(headers); + // Clear cached route and change scope key to "bar". + decoder_filters_[0]->callbacks_->clearRouteCache(); + test_headers.remove("scope_key"); + test_headers.addCopy("scope_key", "bar"); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) -> FilterHeadersStatus { + auto& test_headers = dynamic_cast(headers); + EXPECT_EQ(test_headers.get_("scope_key"), "bar"); + // Route now switched to route2 as header "scope_key" has changed. + EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route()); + EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + return FilterHeadersStatus::StopIteration; + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +// SRDS scoped RouteConfiguration found and route found. +TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { + setup(false, "", true, true); + setupFilterChain(1, 0); + + const std::string fake_cluster1_name = "fake_cluster1"; + std::shared_ptr route1 = std::make_shared>(); + EXPECT_CALL(route1->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster1_name)); + std::shared_ptr fake_cluster1 = + std::make_shared>(); + EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get())); + EXPECT_CALL(*scopedRouteConfigProvider()->config(), getRouteConfig(_)) + // 1. decodeHeaders() snapping route config. + // 2. refreshCachedRoute() later in the same decodeHeaders(). 
+ .Times(2); + EXPECT_CALL( + *static_cast( + scopedRouteConfigProvider()->config()->route_config_.get()), + route(_, _, _, _)) + .WillOnce(Return(route1)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; + decoder_->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, NewConnection) { + setup(false, "", true, true); + + filter_callbacks_.connection_.stream_info_.protocol_ = absl::nullopt; + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol()); + EXPECT_EQ(Network::FilterStatus::Continue, conn_manager_->onNewConnection()); + EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_total_.value()); + EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_active_.value()); + + filter_callbacks_.connection_.stream_info_.protocol_ = Envoy::Http::Protocol::Http3; + codec_->protocol_ = Http::Protocol::Http3; + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol()); + EXPECT_CALL(*codec_, protocol()).Times(AtLeast(1)); + EXPECT_EQ(Network::FilterStatus::StopIteration, conn_manager_->onNewConnection()); + EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_total_.value()); + EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_active_.value()); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestHeadersSize) { + // Test with Headers only request, No Data, No response. 
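+  // The 30-byte expectation below appears to be the total size of the request header keys and
+  // values: ":authority"(10) + "host"(4) + ":path"(5) + "/"(1) + ":method"(7) + "GET"(3) = 30.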
+ setup(false, ""); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 0)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestBodySize) { + // Test Request with Headers and Data, No response. + setup(false, ""); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("12345"); + decoder_->decodeData(fake_data, true); + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 5)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseHeadersSize) { + // Test with Header only response. 
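+  // The 49-byte response figure used below corresponds to the mutated header set described in
+  // the comment further down, counted the same way: ":status"(7) + "200"(3) + "date"(4) +
+  // "Mon, 06 Jul 2020 06:08:55 GMT"(29) + "server"(6) + ""(0) = 49, versus 10 bytes for the
+  // original {":status", "200"}.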
+ setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("1234"); + decoder_->decodeData(fake_data, true); + + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + + // Response headers are internally mutated and we record final response headers. + // for example in the below test case, response headers are modified as + // {':status', '200' 'date', 'Mon, 06 Jul 2020 06:08:55 GMT' 'server', ''} + // whose size is 49 instead of original response headers size 10({":status", "200"}). + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); + expectOnDestroy(); + + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true, "details"); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseBodySize) { + // Test with response headers and body. 
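+  // Body sizes are plain byte counts of the fake payloads: upstream_rq_body_size is 4 for "1234"
+  // and upstream_rs_body_size is 11 for "hello-world"; the header sizes (30 and 49) follow the
+  // same accounting as the two preceding tests.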
+ setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("1234"); + decoder_->decodeData(fake_data, true); + + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 11)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false, "details"); + + EXPECT_CALL(response_encoder_, encodeData(_, true)); + expectOnDestroy(); + + Buffer::OwnedImpl fake_response("hello-world"); + decoder_filters_[0]->callbacks_->encodeData(fake_response, true); +} + +TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { + setup(false, "envoy-custom-server", false); + + filter_callbacks_.connection_.stream_info_.protocol_ = Envoy::Http::Protocol::Http3; + codec_->protocol_ = Http::Protocol::Http3; + EXPECT_EQ(Network::FilterStatus::StopIteration, conn_manager_->onNewConnection()); + + // Store the basic request encoder during filter chain setup. + std::shared_ptr filter(new NiceMock()); + + EXPECT_CALL(*filter, decodeHeaders(_, true)) + .WillOnce(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { + EXPECT_NE(nullptr, headers.ForwardedFor()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); + return FilterHeadersStatus::StopIteration; + })); + + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(filter); + })); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); + + // Pretend to get a new stream and then fire a headers only request into it. Then we respond into + // the filter. 
+ RequestDecoder& decoder = conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder.decodeHeaders(std::move(headers), true); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); + filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); + + EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value()); + EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value()); + EXPECT_EQ(1U, stats_.named_.downstream_rq_completed_.value()); + EXPECT_EQ(1U, listener_stats_.downstream_rq_completed_.value()); + EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_total_.value()); + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + conn_manager_.reset(); + EXPECT_EQ(0U, stats_.named_.downstream_cx_http3_active_.value()); +} + +namespace { + +class SimpleType : public StreamInfo::FilterState::Object { +public: + SimpleType(int value) : value_(value) {} + int access() const { return value_; } + +private: + int value_; +}; + +} // namespace + +TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { + filter_callbacks_.connection_.stream_info_.filter_state_->setData( + "connection_provided_data", std::make_shared(555), + StreamInfo::FilterState::StateType::ReadOnly); + + setup(false, "envoy-custom-server", false); + setupFilterChain(1, 0, /* num_requests = */ 3); + + EXPECT_CALL(*codec_, dispatch(_)) + .Times(2) + .WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + { + InSequence s; + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus { + decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData( + "per_filter_chain", std::make_unique(1), + StreamInfo::FilterState::StateType::ReadOnly, + StreamInfo::FilterState::LifeSpan::FilterChain); + decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData( + "per_downstream_request", std::make_unique(2), + StreamInfo::FilterState::StateType::ReadOnly, + StreamInfo::FilterState::LifeSpan::Request); + decoder_filters_[0]->callbacks_->streamInfo().filterState()->setData( + "per_downstream_connection", std::make_unique(3), + StreamInfo::FilterState::StateType::ReadOnly, + StreamInfo::FilterState::LifeSpan::Connection); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) + .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus { + EXPECT_FALSE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "per_filter_chain")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "per_downstream_request")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "per_downstream_connection")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "connection_provided_data")); + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true)) + .WillOnce(Invoke([this](HeaderMap&, bool) -> FilterHeadersStatus { + 
EXPECT_FALSE( + decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData( + "per_filter_chain")); + EXPECT_FALSE( + decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData( + "per_downstream_request")); + EXPECT_TRUE( + decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData( + "per_downstream_connection")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "connection_provided_data")); + return FilterHeadersStatus::StopIteration; + })); + } + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[0], onStreamComplete()); + EXPECT_CALL(*decoder_filters_[0], onDestroy()); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + EXPECT_CALL(*decoder_filters_[2], decodeComplete()); + + Buffer::OwnedImpl fake_input; + conn_manager_->onData(fake_input, false); + decoder_filters_[0]->callbacks_->recreateStream(); + conn_manager_->onData(fake_input, false); + + // The connection life time data should have been written to the connection filter state. + EXPECT_TRUE(filter_callbacks_.connection_.stream_info_.filter_state_->hasData( + "per_downstream_connection")); + EXPECT_CALL(*decoder_filters_[1], onStreamComplete()); + EXPECT_CALL(*decoder_filters_[1], onDestroy()); + EXPECT_CALL(*decoder_filters_[2], onStreamComplete()); + EXPECT_CALL(*decoder_filters_[2], onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +class HttpConnectionManagerImplDeathTest : public HttpConnectionManagerImplTest { +public: + Router::RouteConfigProvider* routeConfigProvider() override { + return route_config_provider2_.get(); + } + Config::ConfigProvider* scopedRouteConfigProvider() override { + return scoped_route_config_provider2_.get(); + } + + std::shared_ptr route_config_provider2_; + std::shared_ptr scoped_route_config_provider2_; +}; + +// HCM config can only have either RouteConfigProvider or ScopedRoutesConfigProvider. +TEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) { + setup(false, ""); + + Buffer::OwnedImpl fake_input("1234"); + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { + conn_manager_->newStream(response_encoder_); + return Http::okStatus(); + })); + // Either RDS or SRDS should be set. + EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false), + "Either routeConfigProvider or scopedRouteConfigProvider should be set in " + "ConnectionManagerImpl."); + + route_config_provider2_ = std::make_shared>(); + + // Only route config provider valid. + EXPECT_NO_THROW(conn_manager_->onData(fake_input, false)); + + scoped_route_config_provider2_ = + std::make_shared>(); + // Can't have RDS and SRDS provider in the same time. + EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false), + "Either routeConfigProvider or scopedRouteConfigProvider should be set in " + "ConnectionManagerImpl."); + + route_config_provider2_.reset(); + // Only scoped route config provider valid. 
+ EXPECT_NO_THROW(conn_manager_->onData(fake_input, false)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/conn_manager_impl_test_base.cc b/test/common/http/conn_manager_impl_test_base.cc new file mode 100644 index 000000000000..2d8278d4ad75 --- /dev/null +++ b/test/common/http/conn_manager_impl_test_base.cc @@ -0,0 +1,256 @@ +#include "test/common/http/conn_manager_impl_test_base.h" + +using testing::AtLeast; +using testing::InSequence; +using testing::InvokeWithoutArgs; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Http { + +HttpConnectionManagerImplTest::HttpConnectionManagerImplTest() + : http_context_(fake_stats_.symbolTable()), access_log_path_("dummy_path"), + access_logs_{AccessLog::InstanceSharedPtr{new Extensions::AccessLoggers::File::FileAccessLog( + access_log_path_, {}, Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(), + log_manager_)}}, + codec_(new NiceMock()), + stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), + POOL_HISTOGRAM(fake_stats_))}, + "", fake_stats_), + + listener_stats_({CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}), + request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)), + local_reply_(LocalReply::Factory::createDefault()) { + + ON_CALL(route_config_provider_, lastUpdated()) + .WillByDefault(Return(test_time_.timeSystem().systemTime())); + ON_CALL(scoped_route_config_provider_, lastUpdated()) + .WillByDefault(Return(test_time_.timeSystem().systemTime())); + // response_encoder_ is not a NiceMock on purpose. This prevents complaining about this + // method only. + EXPECT_CALL(response_encoder_, getStream()).Times(AtLeast(0)); +} + +HttpConnectionManagerImplTest::~HttpConnectionManagerImplTest() { + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); +} + +Tracing::CustomTagConstSharedPtr +HttpConnectionManagerImplTest::requestHeaderCustomTag(const std::string& header) { + envoy::type::tracing::v3::CustomTag::Header headerTag; + headerTag.set_name(header); + return std::make_shared(header, headerTag); +} + +void HttpConnectionManagerImplTest::setup(bool ssl, const std::string& server_name, bool tracing, + bool use_srds) { + use_srds_ = use_srds; + if (ssl) { + ssl_connection_ = std::make_shared(); + } + + server_name_ = server_name; + ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_connection_)); + ON_CALL(Const(filter_callbacks_.connection_), ssl()).WillByDefault(Return(ssl_connection_)); + filter_callbacks_.connection_.local_address_ = + std::make_shared("127.0.0.1", 443); + filter_callbacks_.connection_.remote_address_ = + std::make_shared("0.0.0.0"); + conn_manager_ = std::make_unique( + *this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_, + overload_manager_, test_time_.timeSystem()); + conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); + + if (tracing) { + envoy::type::v3::FractionalPercent percent1; + percent1.set_numerator(100); + envoy::type::v3::FractionalPercent percent2; + percent2.set_numerator(10000); + percent2.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND); + tracing_config_ = std::make_unique( + TracingConnectionManagerConfig{Tracing::OperationName::Ingress, + {{":method", requestHeaderCustomTag(":method")}}, + percent1, + percent2, + percent1, + false, + 256}); + } +} + +void 
HttpConnectionManagerImplTest::setupFilterChain(int num_decoder_filters, + int num_encoder_filters, int num_requests) { + // NOTE: The length/repetition in this routine allows InSequence to work correctly in an outer + // scope. + for (int i = 0; i < num_decoder_filters * num_requests; i++) { + decoder_filters_.push_back(new MockStreamDecoderFilter()); + } + + for (int i = 0; i < num_encoder_filters * num_requests; i++) { + encoder_filters_.push_back(new MockStreamEncoderFilter()); + } + + InSequence s; + for (int req = 0; req < num_requests; req++) { + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([num_decoder_filters, num_encoder_filters, req, + this](FilterChainFactoryCallbacks& callbacks) -> void { + if (log_handler_.get()) { + callbacks.addAccessLogHandler(log_handler_); + } + for (int i = 0; i < num_decoder_filters; i++) { + callbacks.addStreamDecoderFilter( + StreamDecoderFilterSharedPtr{decoder_filters_[req * num_decoder_filters + i]}); + } + + for (int i = 0; i < num_encoder_filters; i++) { + callbacks.addStreamEncoderFilter( + StreamEncoderFilterSharedPtr{encoder_filters_[req * num_encoder_filters + i]}); + } + })); + + for (int i = 0; i < num_decoder_filters; i++) { + EXPECT_CALL(*decoder_filters_[req * num_decoder_filters + i], setDecoderFilterCallbacks(_)); + } + + for (int i = 0; i < num_encoder_filters; i++) { + EXPECT_CALL(*encoder_filters_[req * num_encoder_filters + i], setEncoderFilterCallbacks(_)); + } + } +} + +void HttpConnectionManagerImplTest::setUpBufferLimits() { + ON_CALL(response_encoder_, getStream()).WillByDefault(ReturnRef(stream_)); + EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); + EXPECT_CALL(stream_, addCallbacks(_)) + .WillOnce(Invoke( + [&](Http::StreamCallbacks& callbacks) -> void { stream_callbacks_ = &callbacks; })); + EXPECT_CALL(stream_, setFlushTimeout(_)); +} + +void HttpConnectionManagerImplTest::setUpEncoderAndDecoder(bool request_with_data_and_trailers, + bool decode_headers_stop_all) { + setUpBufferLimits(); + EXPECT_CALL(*codec_, dispatch(_)) + .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + if (request_with_data_and_trailers) { + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("12345"); + decoder->decodeData(fake_data, false); + + RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; + decoder->decodeTrailers(std::move(trailers)); + } else { + decoder->decodeHeaders(std::move(headers), true); + } + return Http::okStatus(); + })); + + setupFilterChain(2, 2); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, _)) + .WillOnce(InvokeWithoutArgs([&, decode_headers_stop_all]() -> FilterHeadersStatus { + Buffer::OwnedImpl data("hello"); + decoder_filters_[0]->callbacks_->addDecodedData(data, true); + if (decode_headers_stop_all) { + return FilterHeadersStatus::StopAllIterationAndBuffer; + } else { + return FilterHeadersStatus::Continue; + } + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); +} + +void HttpConnectionManagerImplTest::startRequest(bool end_stream, + absl::optional body) { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + decoder_ = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new 
TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder_->decodeHeaders(std::move(headers), end_stream && !body.has_value()); + if (body.has_value()) { + Buffer::OwnedImpl fake_data(body.value()); + decoder_->decodeData(fake_data, end_stream); + } + return Http::okStatus(); + })); + Buffer::OwnedImpl fake_input; + conn_manager_->onData(fake_input, false); +} + +Event::MockTimer* HttpConnectionManagerImplTest::setUpTimer() { + // this timer belongs to whatever by whatever next creates a timer. + // See Envoy::Event::MockTimer for details. + return new Event::MockTimer(&filter_callbacks_.connection_.dispatcher_); +} + +void HttpConnectionManagerImplTest::sendRequestHeadersAndData() { + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + auto status = streaming_filter_ ? FilterDataStatus::StopIterationAndWatermark + : FilterDataStatus::StopIterationAndBuffer; + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)).WillOnce(Return(status)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + + // Kick off the incoming data. |fake_input| is not sent, but instead kicks + // off sending the headers and |data| queued up in setUpEncoderAndDecoder(). + Buffer::OwnedImpl fake_input("asdf"); + conn_manager_->onData(fake_input, false); +} + +ResponseHeaderMap* +HttpConnectionManagerImplTest::sendResponseHeaders(ResponseHeaderMapPtr&& response_headers) { + ResponseHeaderMap* altered_response_headers = nullptr; + + EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _)) + .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { + altered_response_headers = &headers; + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false, "details"); + return altered_response_headers; +} + +void HttpConnectionManagerImplTest::expectOnDestroy(bool deferred) { + for (auto filter : decoder_filters_) { + EXPECT_CALL(*filter, onStreamComplete()); + } + { + auto setup_filter_expect = [](MockStreamEncoderFilter* filter) { + EXPECT_CALL(*filter, onStreamComplete()); + }; + std::for_each(encoder_filters_.rbegin(), encoder_filters_.rend(), setup_filter_expect); + } + + for (auto filter : decoder_filters_) { + EXPECT_CALL(*filter, onDestroy()); + } + { + auto setup_filter_expect = [](MockStreamEncoderFilter* filter) { + EXPECT_CALL(*filter, onDestroy()); + }; + std::for_each(encoder_filters_.rbegin(), encoder_filters_.rend(), setup_filter_expect); + } + + if (deferred) { + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); + } +} + +void HttpConnectionManagerImplTest::doRemoteClose(bool deferred) { + EXPECT_CALL(stream_, removeCallbacks(_)); + expectOnDestroy(deferred); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/conn_manager_impl_test_base.h b/test/common/http/conn_manager_impl_test_base.h new file mode 100644 index 000000000000..067ca0a1f369 --- /dev/null +++ b/test/common/http/conn_manager_impl_test_base.h @@ -0,0 +1,214 @@ +#pragma once + +#include "common/http/conn_manager_impl.h" +#include "common/http/context_impl.h" +#include 
"common/http/date_provider_impl.h" +#include "common/network/address_impl.h" + +#include "extensions/access_loggers/file/file_access_log_impl.h" + +#include "test/mocks/access_log/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/router/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/ssl/mocks.h" +#include "test/test_common/simulated_time_system.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Http { + +class HttpConnectionManagerImplTest : public testing::Test, public ConnectionManagerConfig { +public: + struct RouteConfigProvider : public Router::RouteConfigProvider { + RouteConfigProvider(TimeSource& time_source) : time_source_(time_source) {} + + // Router::RouteConfigProvider + Router::ConfigConstSharedPtr config() override { return route_config_; } + absl::optional configInfo() const override { return {}; } + SystemTime lastUpdated() const override { return time_source_.systemTime(); } + void onConfigUpdate() override {} + + TimeSource& time_source_; + std::shared_ptr route_config_{new NiceMock()}; + }; + + HttpConnectionManagerImplTest(); + ~HttpConnectionManagerImplTest() override; + Tracing::CustomTagConstSharedPtr requestHeaderCustomTag(const std::string& header); + void setup(bool ssl, const std::string& server_name, bool tracing = true, bool use_srds = false); + void setupFilterChain(int num_decoder_filters, int num_encoder_filters, int num_requests = 1); + void setUpBufferLimits(); + + // If request_with_data_and_trailers is true, includes data and trailers in the request. If + // decode_headers_stop_all is true, decoder_filters_[0]'s callback decodeHeaders() returns + // StopAllIterationAndBuffer. 
+ void setUpEncoderAndDecoder(bool request_with_data_and_trailers, bool decode_headers_stop_all); + + // Sends request headers, and stashes the new stream in decoder_; + void startRequest(bool end_stream = false, absl::optional body = absl::nullopt); + + Event::MockTimer* setUpTimer(); + void sendRequestHeadersAndData(); + ResponseHeaderMap* sendResponseHeaders(ResponseHeaderMapPtr&& response_headers); + void expectOnDestroy(bool deferred = true); + void doRemoteClose(bool deferred = true); + + // Http::ConnectionManagerConfig + const std::list& accessLogs() override { return access_logs_; } + ServerConnectionPtr createCodec(Network::Connection&, const Buffer::Instance&, + ServerConnectionCallbacks&) override { + return ServerConnectionPtr{codec_}; + } + DateProvider& dateProvider() override { return date_provider_; } + std::chrono::milliseconds drainTimeout() const override { return std::chrono::milliseconds(100); } + FilterChainFactory& filterFactory() override { return filter_factory_; } + bool generateRequestId() const override { return true; } + bool preserveExternalRequestId() const override { return false; } + bool alwaysSetRequestIdInResponse() const override { return false; } + uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } + uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } + absl::optional idleTimeout() const override { return idle_timeout_; } + bool isRoutable() const override { return true; } + absl::optional maxConnectionDuration() const override { + return max_connection_duration_; + } + std::chrono::milliseconds streamIdleTimeout() const override { return stream_idle_timeout_; } + std::chrono::milliseconds requestTimeout() const override { return request_timeout_; } + std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } + absl::optional maxStreamDuration() const override { + return max_stream_duration_; + } + bool use_srds_{}; + Router::RouteConfigProvider* routeConfigProvider() override { + if (use_srds_) { + return nullptr; + } + return &route_config_provider_; + } + Config::ConfigProvider* scopedRouteConfigProvider() override { + if (use_srds_) { + return &scoped_route_config_provider_; + } + return nullptr; + } + const std::string& serverName() const override { return server_name_; } + HttpConnectionManagerProto::ServerHeaderTransformation + serverHeaderTransformation() const override { + return server_transformation_; + } + ConnectionManagerStats& stats() override { return stats_; } + ConnectionManagerTracingStats& tracingStats() override { return tracing_stats_; } + bool useRemoteAddress() const override { return use_remote_address_; } + const Http::InternalAddressConfig& internalAddressConfig() const override { + return internal_address_config_; + } + uint32_t xffNumTrustedHops() const override { return 0; } + bool skipXffAppend() const override { return false; } + const std::string& via() const override { return EMPTY_STRING; } + Http::ForwardClientCertType forwardClientCert() const override { return forward_client_cert_; } + const std::vector& setCurrentClientCertDetails() const override { + return set_current_client_cert_details_; + } + const Network::Address::Instance& localAddress() override { return local_address_; } + const absl::optional& userAgent() override { return user_agent_; } + Tracing::HttpTracerSharedPtr tracer() override { return tracer_; } + const TracingConnectionManagerConfig* tracingConfig() override { return 
tracing_config_.get(); } + ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } + bool proxy100Continue() const override { return proxy_100_continue_; } + bool streamErrorOnInvalidHttpMessaging() const override { + return stream_error_on_invalid_http_messaging_; + } + const Http::Http1Settings& http1Settings() const override { return http1_settings_; } + bool shouldNormalizePath() const override { return normalize_path_; } + bool shouldMergeSlashes() const override { return merge_slashes_; } + bool shouldStripMatchingPort() const override { return strip_matching_port_; } + RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return headers_with_underscores_action_; + } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } + + Envoy::Event::SimulatedTimeSystem test_time_; + NiceMock route_config_provider_; + std::shared_ptr route_config_{new NiceMock()}; + NiceMock scoped_route_config_provider_; + Stats::IsolatedStoreImpl fake_stats_; + Http::ContextImpl http_context_; + NiceMock runtime_; + NiceMock log_manager_; + std::string access_log_path_; + std::list access_logs_; + NiceMock filter_callbacks_; + MockServerConnection* codec_; + NiceMock filter_factory_; + ConnectionManagerStats stats_; + ConnectionManagerTracingStats tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}; + NiceMock drain_close_; + std::unique_ptr conn_manager_; + std::string server_name_; + HttpConnectionManagerProto::ServerHeaderTransformation server_transformation_{ + HttpConnectionManagerProto::OVERWRITE}; + Network::Address::Ipv4Instance local_address_{"127.0.0.1"}; + bool use_remote_address_{true}; + Http::DefaultInternalAddressConfig internal_address_config_; + Http::ForwardClientCertType forward_client_cert_{Http::ForwardClientCertType::Sanitize}; + std::vector set_current_client_cert_details_; + absl::optional user_agent_; + uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; + uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; + absl::optional idle_timeout_; + absl::optional max_connection_duration_; + std::chrono::milliseconds stream_idle_timeout_{}; + std::chrono::milliseconds request_timeout_{}; + std::chrono::milliseconds delayed_close_timeout_{}; + absl::optional max_stream_duration_{}; + NiceMock random_; + NiceMock local_info_; + NiceMock factory_context_; + RequestDecoder* decoder_{}; + std::shared_ptr ssl_connection_; + std::shared_ptr> tracer_{ + std::make_shared>()}; + TracingConnectionManagerConfigPtr tracing_config_; + SlowDateProviderImpl date_provider_{test_time_.timeSystem()}; + MockStream stream_; + Http::StreamCallbacks* stream_callbacks_{nullptr}; + NiceMock cluster_manager_; + NiceMock overload_manager_; + uint32_t initial_buffer_limit_{}; + bool streaming_filter_{false}; + Stats::IsolatedStoreImpl fake_listener_stats_; + ConnectionManagerListenerStats listener_stats_; + bool proxy_100_continue_ = false; + bool stream_error_on_invalid_http_messaging_ = false; + bool preserve_external_request_id_ = false; + Http::Http1Settings http1_settings_; + bool normalize_path_ = false; + bool merge_slashes_ = false; + bool strip_matching_port_ = false; + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + 
NiceMock upstream_conn_; // for websocket tests + NiceMock conn_pool_; // for websocket tests + RequestIDExtensionSharedPtr request_id_extension_; + const LocalReply::LocalReplyPtr local_reply_; + + // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest. + NiceMock response_encoder_; + std::vector decoder_filters_; + std::vector encoder_filters_; + std::shared_ptr log_handler_; +}; + +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/filter_manager_test.cc b/test/common/http/filter_manager_test.cc new file mode 100644 index 000000000000..fdc6e6a4b6be --- /dev/null +++ b/test/common/http/filter_manager_test.cc @@ -0,0 +1,144 @@ +#include "envoy/stream_info/filter_state.h" + +#include "common/http/filter_manager.h" +#include "common/stream_info/filter_state_impl.h" +#include "common/stream_info/stream_info_impl.h" + +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/local_reply/mocks.h" +#include "test/mocks/network/mocks.h" + +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Http { +namespace { +class FilterManagerTest : public testing::Test { +public: + void initialize() { + filter_manager_ = std::make_unique( + filter_manager_callbacks_, dispatcher_, connection_, 0, true, 10000, filter_factory_, + local_reply_, protocol_, time_source_, filter_state_, + StreamInfo::FilterState::LifeSpan::Connection); + } + + std::unique_ptr filter_manager_; + NiceMock filter_manager_callbacks_; + Event::MockDispatcher dispatcher_; + NiceMock connection_; + Envoy::Http::MockFilterChainFactory filter_factory_; + LocalReply::MockLocalReply local_reply_; + Protocol protocol_{Protocol::Http2}; + NiceMock time_source_; + StreamInfo::FilterStateSharedPtr filter_state_ = + std::make_shared(StreamInfo::FilterState::LifeSpan::Connection); +}; + +// Verifies that the local reply persists the gRPC classification even if the request headers are +// modified. 
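+// Concretely: the decoder filter below rewrites content-type to "text/plain" before
+// calling sendLocalReply(), yet the generated response is still expected to carry
+// "application/grpc". The reading here (inferred from the expectations in the test body,
+// not from the FilterManager sources) is that the gRPC classification is captured at
+// requestHeadersInitialized() time, before filters can mutate the request headers.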
+TEST_F(FilterManagerTest, SendLocalReplyDuringDecodingGrpcClassiciation) { + initialize(); + + std::shared_ptr filter(new NiceMock()); + + EXPECT_CALL(*filter, decodeHeaders(_, true)) + .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { + headers.setContentType("text/plain"); + + filter->callbacks_->sendLocalReply(Code::InternalServerError, "", nullptr, absl::nullopt, + ""); + + return FilterHeadersStatus::StopIteration; + })); + + RequestHeaderMapPtr grpc_headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"content-type", "application/grpc"}}}; + + ON_CALL(filter_manager_callbacks_, requestHeaders()) + .WillByDefault(Return(absl::make_optional(std::ref(*grpc_headers)))); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(filter); + })); + + filter_manager_->createFilterChain(); + + filter_manager_->requestHeadersInitialized(); + EXPECT_CALL(local_reply_, rewrite(_, _, _, _, _, _)); + EXPECT_CALL(filter_manager_callbacks_, setResponseHeaders_(_)) + .WillOnce(Invoke([](auto& response_headers) { + EXPECT_THAT(response_headers, + HeaderHasValueRef(Http::Headers::get().ContentType, "application/grpc")); + })); + EXPECT_CALL(filter_manager_callbacks_, resetIdleTimer()); + EXPECT_CALL(filter_manager_callbacks_, encodeHeaders(_, _)); + EXPECT_CALL(filter_manager_callbacks_, endStream()); + filter_manager_->decodeHeaders(*grpc_headers, true); + filter_manager_->destroyFilters(); +} + +// Verifies that the local reply persists the gRPC classification even if the request headers are +// modified when directly encoding a response. +TEST_F(FilterManagerTest, SendLocalReplyDuringEncodingGrpcClassiciation) { + initialize(); + + std::shared_ptr decoder_filter(new NiceMock()); + + EXPECT_CALL(*decoder_filter, decodeHeaders(_, true)) + .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { + headers.setContentType("text/plain"); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filter->callbacks_->encodeHeaders(std::move(response_headers), true, "test"); + + return FilterHeadersStatus::StopIteration; + })); + + std::shared_ptr encoder_filter(new NiceMock()); + + EXPECT_CALL(*encoder_filter, encodeHeaders(_, true)) + .WillRepeatedly(Invoke([&](auto&, bool) -> FilterHeadersStatus { + encoder_filter->encoder_callbacks_->sendLocalReply(Code::InternalServerError, "", nullptr, + absl::nullopt, ""); + return FilterHeadersStatus::StopIteration; + })); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(decoder_filter); + callbacks.addStreamFilter(encoder_filter); + })); + + RequestHeaderMapPtr grpc_headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"content-type", "application/grpc"}}}; + + ON_CALL(filter_manager_callbacks_, requestHeaders()) + .WillByDefault(Return(absl::make_optional(std::ref(*grpc_headers)))); + filter_manager_->createFilterChain(); + + filter_manager_->requestHeadersInitialized(); + EXPECT_CALL(local_reply_, rewrite(_, _, _, _, _, _)); + EXPECT_CALL(filter_manager_callbacks_, setResponseHeaders_(_)) + .WillOnce(Invoke([](auto&) {})) + .WillOnce(Invoke([](auto& response_headers) { + EXPECT_THAT(response_headers, + 
HeaderHasValueRef(Http::Headers::get().ContentType, "application/grpc")); + })); + EXPECT_CALL(filter_manager_callbacks_, encodeHeaders(_, _)); + EXPECT_CALL(filter_manager_callbacks_, endStream()); + filter_manager_->decodeHeaders(*grpc_headers, true); + filter_manager_->destroyFilters(); +} +} // namespace +} // namespace Http +} // namespace Envoy \ No newline at end of file diff --git a/test/common/http/header_map_impl_corpus/example_lazymap b/test/common/http/header_map_impl_corpus/example_lazymap new file mode 100644 index 000000000000..4f3bcda6fc4c --- /dev/null +++ b/test/common/http/header_map_impl_corpus/example_lazymap @@ -0,0 +1,236 @@ +actions { + add_reference { + key: "foo" + value: "bar" + } +} +actions { + add_reference { + key: "foo" + value: "baz" + } +} +actions { + add_reference_key { + key: "foo_string_key" + string_value: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr" + } +} +actions { + add_reference_key { + key: "foo_string_key" + string_value: "baz" + } +} +actions { + add_reference_key { + key: "foo_uint64_key" + uint64_value: 42 + } +} +actions { + add_reference_key { + key: "foo_uint64_key" + uint64_value: 37 + } +} +actions { + add_copy { + key: "foo_string_key" + string_value: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr" + } +} +actions { + add_copy { + key: "foo_string_key" + string_value: "baz" + } +} +actions { + add_copy { + key: "foo_uint64_key" + uint64_value: 42 + } +} +actions { + add_copy { + key: "foo_uint64_key" + uint64_value: 37 + } +} +actions { + set_reference { + key: "foo" + value: "bar" + } +} +actions { + set_reference { + key: "foo" + value: "baz" + } +} +actions { + set_reference_key { + key: "foo" + value: "bar" + } +} +actions { + set_reference_key { + key: "foo" + value: "baz" + } +} + +actions { + add_reference { + key: ":method" + value: "bar" + } +} +actions { + add_reference { + key: ":method" + value: "baz" + } +} +actions { + add_reference_key { + key: ":method" + string_value: "bar" + } +} +actions { + add_reference_key { + key: ":method" + string_value: "baz" + } +} +actions { + add_reference_key { + key: ":method" + uint64_value: 42 + } +} +actions { + add_reference_key { + key: ":method" + uint64_value: 37 + } +} +actions { + add_copy { + key: ":method" + string_value: "bar" + } +} +actions { + add_copy { + key: ":method" + string_value: "baz" + } +} +actions { + add_copy { + key: ":method" + uint64_value: 42 + } +} +actions { + add_copy { + key: ":method" + uint64_value: 37 + } +} +actions { + set_reference { + key: ":method" + value: "bar" + } +} +actions { + set_reference { + key: ":method" + value: "baz" + } +} +actions { + set_reference_key { + key: ":method" + value: "bar" + } +} +actions { + set_reference_key { + key: ":method" + value: "baz" + } +} + +actions { + get_and_mutate { + key: ":method" + append: 
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz" + } +} +actions { + get_and_mutate { + key: ":method" + append: "aa" + } +} +actions { + get_and_mutate { + key: ":method" + clear: {} + } +} +actions { + get_and_mutate { + key: ":method" + find: "a" + } +} +actions { + get_and_mutate { + key: ":method" + set_copy: "a" + } +} +actions { + get_and_mutate { + key: ":method" + set_integer: 0 + } +} +actions { + get_and_mutate { + key: ":method" + set_reference: "a" + } +} +actions { + copy: {} +} +actions { + lookup: ":method" +} +actions { + lookup: "foo" +} +actions { + remove: "f" +} +actions { + remove_prefix: "foo" +} +actions { + remove: ":m" +} +actions { + remove_prefix: ":m" +} +config { + lazy_map_min_size: 0 +} diff --git a/test/common/http/header_map_impl_fuzz.proto b/test/common/http/header_map_impl_fuzz.proto index 69e4ae244a0a..ea06c5c1b593 100644 --- a/test/common/http/header_map_impl_fuzz.proto +++ b/test/common/http/header_map_impl_fuzz.proto @@ -88,6 +88,12 @@ message Action { } } +message Config { + uint32 lazy_map_min_size = 1; +} + message HeaderMapImplFuzzTestCase { repeated Action actions = 1; + // Optional threshold value configuration for the lazy header-map. + Config config = 2; } diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc index 97e327ce57f1..abbe77e6ca1c 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -7,6 +7,7 @@ #include "test/common/http/header_map_impl_fuzz.pb.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" +#include "test/test_common/test_runtime.h" #include "absl/strings/ascii.h" @@ -16,6 +17,14 @@ namespace Envoy { // Fuzz the header map implementation. DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) { + TestScopedRuntime runtime; + // Set the lazy header-map threshold if found. + if (input.has_config()) { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.http.headermap.lazy_map_min_size", + absl::StrCat(input.config().lazy_map_min_size())}}); + } + auto header_map = Http::RequestHeaderMapImpl::create(); std::vector> lower_case_strings; std::vector> strings; @@ -85,14 +94,14 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) } case test::common::http::Action::kGet: { const auto& get = action.get(); - const auto* header_entry = + const auto header_entry = header_map->get(Http::LowerCaseString(replaceInvalidCharacters(get.key()))); - if (header_entry != nullptr) { + for (size_t i = 0; i < header_entry.size(); i++) { // Do some read-only stuff. 
- (void)strlen(std::string(header_entry->key().getStringView()).c_str()); - (void)strlen(std::string(header_entry->value().getStringView()).c_str()); - header_entry->key().empty(); - header_entry->value().empty(); + (void)strlen(std::string(header_entry[i]->key().getStringView()).c_str()); + (void)strlen(std::string(header_entry[i]->value().getStringView()).c_str()); + header_entry[i]->key().empty(); + header_entry[i]->value().empty(); } break; } diff --git a/test/common/http/header_map_impl_speed_test.cc b/test/common/http/header_map_impl_speed_test.cc index 32020e841c90..62a0c12972ff 100644 --- a/test/common/http/header_map_impl_speed_test.cc +++ b/test/common/http/header_map_impl_speed_test.cc @@ -1,4 +1,5 @@ #include "common/http/header_map_impl.h" +#include "common/http/headers.h" #include "benchmark/benchmark.h" @@ -9,8 +10,8 @@ namespace Http { * Add several dummy headers to a HeaderMap. * @param num_headers the number of dummy headers to add. */ -static void addDummyHeaders(HeaderMap& headers, size_t num_headers) { - const std::string prefix("dummy-key-"); +static void addDummyHeaders(HeaderMap& headers, size_t num_headers, + const std::string prefix = "dummy-key-") { for (size_t i = 0; i < num_headers; i++) { headers.addCopy(LowerCaseString(prefix + std::to_string(i)), "abcd"); } @@ -44,7 +45,7 @@ static void headerMapImplSetReference(benchmark::State& state) { } benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(headerMapImplSetReference)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetReference)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** * Measure the speed of retrieving a header value. The numeric Arg passed by the @@ -62,11 +63,11 @@ static void headerMapImplGet(benchmark::State& state) { headers->setReference(key, value); size_t successes = 0; for (auto _ : state) { // NOLINT - successes += (headers->get(key) != nullptr); + successes += !headers->get(key).empty(); } benchmark::DoNotOptimize(successes); } -BENCHMARK(headerMapImplGet)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGet)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** * Measure the retrieval speed of a header for which HeaderMapImpl is expected to @@ -83,7 +84,7 @@ static void headerMapImplGetInline(benchmark::State& state) { } benchmark::DoNotOptimize(size); } -BENCHMARK(headerMapImplGetInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGetInline)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** * Measure the speed of writing to a header for which HeaderMapImpl is expected to @@ -98,7 +99,7 @@ static void headerMapImplSetInlineMacro(benchmark::State& state) { } benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(headerMapImplSetInlineMacro)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetInlineMacro)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** * Measure the speed of writing to a header for which HeaderMapImpl is expected to @@ -113,7 +114,7 @@ static void headerMapImplSetInlineInteger(benchmark::State& state) { } benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(headerMapImplSetInlineInteger)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetInlineInteger)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** Measure the speed of the byteSize() estimation method. 
*/ static void headerMapImplGetByteSize(benchmark::State& state) { @@ -125,7 +126,7 @@ static void headerMapImplGetByteSize(benchmark::State& state) { } benchmark::DoNotOptimize(size); } -BENCHMARK(headerMapImplGetByteSize)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGetByteSize)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** Measure the speed of iteration with a lightweight callback. */ static void headerMapImplIterate(benchmark::State& state) { @@ -141,7 +142,7 @@ static void headerMapImplIterate(benchmark::State& state) { } benchmark::DoNotOptimize(num_callbacks); } -BENCHMARK(headerMapImplIterate)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplIterate)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** * Measure the speed of removing a header by key name. @@ -159,7 +160,7 @@ static void headerMapImplRemove(benchmark::State& state) { } benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(headerMapImplRemove)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplRemove)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** * Measure the speed of removing a header by key name, for the special case of @@ -178,7 +179,7 @@ static void headerMapImplRemoveInline(benchmark::State& state) { } benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(headerMapImplRemoveInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplRemoveInline)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); /** * Measure the speed of creating a HeaderMapImpl and populating it with a realistic @@ -207,5 +208,92 @@ static void headerMapImplPopulate(benchmark::State& state) { } BENCHMARK(headerMapImplPopulate); +/** + * Measure the speed of encoding headers as part of upgraded requests (HTTP/1 to HTTP/2) + * @note The measured time for each iteration includes the time needed to add + * a varying number of headers (set by the benchmark's argument). + */ +static void headerMapImplEmulateH1toH2Upgrade(benchmark::State& state) { + uint32_t total_len = 0; // Accumulates the length of all header keys and values. + auto headers = Http::RequestHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + headers->setConnection(Http::Headers::get().ConnectionValues.Upgrade); + headers->setUpgrade(Http::Headers::get().UpgradeValues.H2c); + + for (auto _ : state) { // NOLINT + // Emulate the encodeHeaders method upgrade part. + Http::RequestHeaderMapPtr modified_headers = createHeaderMap(*headers); + benchmark::DoNotOptimize(headers->getUpgradeValue()); + // Emulate the Http::Utility::transformUpgradeRequestFromH1toH2 function. + modified_headers->setReferenceMethod(Http::Headers::get().MethodValues.Connect); + modified_headers->setProtocol(headers->getUpgradeValue()); + modified_headers->removeUpgrade(); + modified_headers->removeConnection(); + if (modified_headers->getContentLengthValue() == "0") { + modified_headers->removeContentLength(); + } + // Emulate the headers iteration in the buildHeaders method. + modified_headers->iterate([&total_len](const HeaderEntry& header) -> HeaderMap::Iterate { + const absl::string_view header_key = header.key().getStringView(); + const absl::string_view header_value = header.value().getStringView(); + total_len += header_key.length() + header_value.length(); + return HeaderMap::Iterate::Continue; + }); + // modified_headers destruction time also being measured. 
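+    // (Per the note above, each iteration therefore pays for copying the header map, the
+    // CONNECT-style rewrite, one full iteration over the headers, and destruction of the
+    // copy, so the reported time reflects the whole emulated upgrade rather than a single
+    // header-map operation.)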
+ } + benchmark::DoNotOptimize(headers->size()); + benchmark::DoNotOptimize(total_len); +} +BENCHMARK(headerMapImplEmulateH1toH2Upgrade)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); + +/** + * Measure the speed of decoding headers as part of upgraded responses (HTTP/2 to HTTP/1) + * @note The measured time for each iteration includes the time needed to add + * a varying number of headers (set by the benchmark's argument). + */ +static void headerMapImplEmulateH2toH1Upgrade(benchmark::State& state) { + uint32_t total_len = 0; // Accumulates the length of all header keys and values. + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + headers->setStatus(200); + + for (auto _ : state) { // NOLINT + // Emulate the Http::Utility::transformUpgradeResponseFromH2toH1 function. + benchmark::DoNotOptimize(headers->getStatusValue()); + headers->setUpgrade(Http::Headers::get().UpgradeValues.H2c); + headers->setReferenceConnection(Http::Headers::get().ConnectionValues.Upgrade); + headers->setStatus(101); + // Emulate a decodeHeaders function that iterates over the headers. + headers->iterate([&total_len](const HeaderEntry& header) -> HeaderMap::Iterate { + const absl::string_view header_key = header.key().getStringView(); + const absl::string_view header_value = header.value().getStringView(); + total_len += header_key.length() + header_value.length(); + return HeaderMap::Iterate::Continue; + }); + } + benchmark::DoNotOptimize(headers->size()); + benchmark::DoNotOptimize(total_len); +} +BENCHMARK(headerMapImplEmulateH2toH1Upgrade)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); + +/** + * Measure the speed of removing a varying number of headers by key name prefix from + * a header-map that contains 80 headers that do not have that prefix. + */ +static void headerMapImplRemovePrefix(benchmark::State& state) { + const LowerCaseString prefix("X-prefix"); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, 80); + for (auto _ : state) { // NOLINT + // Add the headers with the prefix + state.PauseTiming(); + addDummyHeaders(*headers, state.range(0), prefix.get()); + state.ResumeTiming(); + headers->removePrefix(prefix); + } + benchmark::DoNotOptimize(headers->size()); +} +BENCHMARK(headerMapImplRemovePrefix)->Arg(0)->Arg(1)->Arg(5)->Arg(10)->Arg(50); + } // namespace Http } // namespace Envoy diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 0e5b0c3df8cd..88888a7d67e0 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -7,6 +7,7 @@ #include "common/http/header_utility.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -359,8 +360,27 @@ Http::RegisterCustomInlineHeader custom_header_1_copy(Http::LowerCaseString{"foo_custom_header"}); +class HeaderMapImplTest : public testing::TestWithParam { +public: + HeaderMapImplTest() { + // Set the lazy map threshold using the test parameter. 
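+    // The parameter is presumably the minimum header count at which the map-backed
+    // lookup structure is built: 0 should force the lazy map on from the first header,
+    // while the numeric_limits max() value should keep lookups purely list-backed, so
+    // each TEST_P below exercises both storage strategies.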
+ Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.http.headermap.lazy_map_min_size", absl::StrCat(GetParam())}}); + } + + static std::string testParamsToString(const ::testing::TestParamInfo& params) { + return absl::StrCat(params.param); + } + + TestScopedRuntime runtime; +}; + +INSTANTIATE_TEST_SUITE_P(HeaderMapThreshold, HeaderMapImplTest, + testing::Values(0, 1, std::numeric_limits::max()), + HeaderMapImplTest::testParamsToString); + // Make sure that the same header registered twice points to the same location. -TEST(HeaderMapImplTest, CustomRegisteredHeaders) { +TEST_P(HeaderMapImplTest, CustomRegisteredHeaders) { TestRequestHeaderMapImpl headers; EXPECT_EQ(custom_header_1.handle(), custom_header_1_copy.handle()); EXPECT_EQ(nullptr, headers.getInline(custom_header_1.handle())); @@ -377,10 +397,10 @@ TEST(HeaderMapImplTest, CustomRegisteredHeaders) { header_map->remove##name(); \ EXPECT_EQ(nullptr, header_map->name()); \ header_map->set##name(#name); \ - EXPECT_EQ(header_map->get(Headers::get().name)->value().getStringView(), #name); + EXPECT_EQ(header_map->get(Headers::get().name)[0]->value().getStringView(), #name); // Make sure that the O(1) headers are wired up properly. -TEST(HeaderMapImplTest, AllInlineHeaders) { +TEST_P(HeaderMapImplTest, AllInlineHeaders) { { auto header_map = RequestHeaderMapImpl::create(); INLINE_REQ_HEADERS(TEST_INLINE_HEADER_FUNCS) @@ -400,7 +420,7 @@ TEST(HeaderMapImplTest, AllInlineHeaders) { } } -TEST(HeaderMapImplTest, InlineInsert) { +TEST_P(HeaderMapImplTest, InlineInsert) { TestRequestHeaderMapImpl headers; EXPECT_TRUE(headers.empty()); EXPECT_EQ(0, headers.size()); @@ -410,10 +430,10 @@ TEST(HeaderMapImplTest, InlineInsert) { EXPECT_EQ(1, headers.size()); EXPECT_EQ(":authority", headers.Host()->key().getStringView()); EXPECT_EQ("hello", headers.getHostValue()); - EXPECT_EQ("hello", headers.get(Headers::get().Host)->value().getStringView()); + EXPECT_EQ("hello", headers.get(Headers::get().Host)[0]->value().getStringView()); } -TEST(HeaderMapImplTest, InlineAppend) { +TEST_P(HeaderMapImplTest, InlineAppend) { { TestRequestHeaderMapImpl headers; // Create via header and append. @@ -459,7 +479,7 @@ TEST(HeaderMapImplTest, InlineAppend) { } } -TEST(HeaderMapImplTest, MoveIntoInline) { +TEST_P(HeaderMapImplTest, MoveIntoInline) { TestRequestHeaderMapImpl headers; HeaderString key; key.setCopy(Headers::get().EnvoyRetryOn.get()); @@ -478,19 +498,19 @@ TEST(HeaderMapImplTest, MoveIntoInline) { EXPECT_EQ("hello,there", headers.getEnvoyRetryOnValue()); } -TEST(HeaderMapImplTest, Remove) { +TEST_P(HeaderMapImplTest, Remove) { TestRequestHeaderMapImpl headers; // Add random header and then remove by name. 
LowerCaseString static_key("hello"); std::string ref_value("value"); headers.addReference(static_key, ref_value); - EXPECT_EQ("value", headers.get(static_key)->value().getStringView()); - EXPECT_TRUE(headers.get(static_key)->value().isReference()); + EXPECT_EQ("value", headers.get(static_key)[0]->value().getStringView()); + EXPECT_TRUE(headers.get(static_key)[0]->value().isReference()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1UL, headers.remove(static_key)); - EXPECT_EQ(nullptr, headers.get(static_key)); + EXPECT_TRUE(headers.get(static_key).empty()); EXPECT_EQ(0UL, headers.size()); EXPECT_TRUE(headers.empty()); @@ -520,7 +540,7 @@ TEST(HeaderMapImplTest, Remove) { EXPECT_EQ(0UL, headers.remove(Headers::get().ContentLength)); } -TEST(HeaderMapImplTest, RemoveHost) { +TEST_P(HeaderMapImplTest, RemoveHost) { TestRequestHeaderMapImpl headers; headers.setHost("foo"); EXPECT_EQ("foo", headers.get_("host")); @@ -534,28 +554,50 @@ TEST(HeaderMapImplTest, RemoveHost) { EXPECT_EQ(nullptr, headers.Host()); } -TEST(HeaderMapImplTest, RemoveIf) { +TEST_P(HeaderMapImplTest, RemoveIf) { LowerCaseString key1 = LowerCaseString("X-postfix-foo"); LowerCaseString key2 = LowerCaseString("X-postfix-"); LowerCaseString key3 = LowerCaseString("x-postfix-eep"); - TestRequestHeaderMapImpl headers; - headers.addReference(key1, "value"); - headers.addReference(key2, "value"); - headers.addReference(key3, "value"); + { + TestRequestHeaderMapImpl headers; + headers.addReference(key1, "value"); + headers.addReference(key2, "value"); + headers.addReference(key3, "value"); - EXPECT_EQ(0UL, headers.removeIf([](const HeaderEntry&) -> bool { return false; })); + EXPECT_EQ(0UL, headers.removeIf([](const HeaderEntry&) -> bool { return false; })); - EXPECT_EQ(2UL, headers.removeIf([](const HeaderEntry& entry) -> bool { - return absl::EndsWith(entry.key().getStringView(), "foo") || - absl::EndsWith(entry.key().getStringView(), "eep"); - })); + EXPECT_EQ(2UL, headers.removeIf([](const HeaderEntry& entry) -> bool { + return absl::EndsWith(entry.key().getStringView(), "foo") || + absl::EndsWith(entry.key().getStringView(), "eep"); + })); - TestRequestHeaderMapImpl expected{{"X-postfix-", "value"}}; - EXPECT_EQ(expected, headers); + TestRequestHeaderMapImpl expected{{"X-postfix-", "value"}}; + EXPECT_EQ(expected, headers); + } + + // Test multiple entries with same key but different value. + { + TestRequestHeaderMapImpl headers; + headers.addReference(key1, "valueA"); + headers.addReference(key1, "valueB"); + headers.addReference(key1, "valueC"); + headers.addReference(key2, "valueB"); + headers.addReference(key3, "valueC"); + + EXPECT_EQ(5UL, headers.size()); + EXPECT_EQ(2UL, headers.removeIf([](const HeaderEntry& entry) -> bool { + return absl::EndsWith(entry.value().getStringView(), "B"); + })); + + // Make sure key1 other values still exist. + TestRequestHeaderMapImpl expected{ + {key1.get(), "valueA"}, {key1.get(), "valueC"}, {key3.get(), "valueC"}}; + EXPECT_EQ(expected, headers); + } } -TEST(HeaderMapImplTest, RemovePrefix) { +TEST_P(HeaderMapImplTest, RemovePrefix) { // These will match. LowerCaseString key1 = LowerCaseString("X-prefix-foo"); LowerCaseString key3 = LowerCaseString("X-Prefix-"); @@ -573,19 +615,19 @@ TEST(HeaderMapImplTest, RemovePrefix) { // Test removing the first header, middle headers, and the end header. 
EXPECT_EQ(3UL, headers.removePrefix(LowerCaseString("x-prefix-"))); - EXPECT_EQ(nullptr, headers.get(key1)); - EXPECT_NE(nullptr, headers.get(key2)); - EXPECT_EQ(nullptr, headers.get(key3)); - EXPECT_NE(nullptr, headers.get(key4)); - EXPECT_EQ(nullptr, headers.get(key5)); + EXPECT_TRUE(headers.get(key1).empty()); + EXPECT_FALSE(headers.get(key2).empty()); + EXPECT_TRUE(headers.get(key3).empty()); + EXPECT_FALSE(headers.get(key4).empty()); + EXPECT_TRUE(headers.get(key5).empty()); // Try to remove headers with no prefix match. EXPECT_EQ(0UL, headers.removePrefix(LowerCaseString("foo"))); // Remove all headers. EXPECT_EQ(2UL, headers.removePrefix(LowerCaseString(""))); - EXPECT_EQ(nullptr, headers.get(key2)); - EXPECT_EQ(nullptr, headers.get(key4)); + EXPECT_TRUE(headers.get(key2).empty()); + EXPECT_TRUE(headers.get(key4).empty()); // Add inline and remove by prefix headers.setContentLength(5); @@ -607,7 +649,7 @@ class HeaderAndValueCb } }; -TEST(HeaderMapImplTest, SetRemovesAllValues) { +TEST_P(HeaderMapImplTest, SetRemovesAllValues) { TestRequestHeaderMapImpl headers; LowerCaseString key1("hello"); @@ -648,7 +690,7 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) { } } -TEST(HeaderMapImplTest, DoubleInlineAdd) { +TEST_P(HeaderMapImplTest, DoubleInlineAdd) { { TestRequestHeaderMapImpl headers; const std::string foo("foo"); @@ -684,7 +726,7 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { // Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't // combine set-cookie headers -TEST(HeaderMapImplTest, DoubleCookieAdd) { +TEST_P(HeaderMapImplTest, DoubleCookieAdd) { TestRequestHeaderMapImpl headers; const std::string foo("foo"); const std::string bar("bar"); @@ -693,14 +735,13 @@ TEST(HeaderMapImplTest, DoubleCookieAdd) { headers.addReference(set_cookie, bar); EXPECT_EQ(2UL, headers.size()); - std::vector out; - Http::HeaderUtility::getAllOfHeader(headers, "set-cookie", out); - ASSERT_EQ(out.size(), 2); - ASSERT_EQ(out[0], "foo"); - ASSERT_EQ(out[1], "bar"); + const auto set_cookie_value = headers.get(LowerCaseString("set-cookie")); + ASSERT_EQ(set_cookie_value.size(), 2); + ASSERT_EQ(set_cookie_value[0]->value().getStringView(), "foo"); + ASSERT_EQ(set_cookie_value[1]->value().getStringView(), "bar"); } -TEST(HeaderMapImplTest, DoubleInlineSet) { +TEST_P(HeaderMapImplTest, DoubleInlineSet) { TestRequestHeaderMapImpl headers; headers.setReferenceKey(Headers::get().ContentType, "blah"); headers.setReferenceKey(Headers::get().ContentType, "text/html"); @@ -708,35 +749,38 @@ TEST(HeaderMapImplTest, DoubleInlineSet) { EXPECT_EQ(1UL, headers.size()); } -TEST(HeaderMapImplTest, AddReferenceKey) { +TEST_P(HeaderMapImplTest, AddReferenceKey) { TestRequestHeaderMapImpl headers; LowerCaseString foo("hello"); headers.addReferenceKey(foo, "world"); - EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); - EXPECT_EQ("world", headers.get(foo)->value().getStringView()); + EXPECT_NE("world", headers.get(foo)[0]->value().getStringView().data()); + EXPECT_EQ("world", headers.get(foo)[0]->value().getStringView()); } -TEST(HeaderMapImplTest, SetReferenceKey) { +TEST_P(HeaderMapImplTest, SetReferenceKey) { TestRequestHeaderMapImpl headers; LowerCaseString foo("hello"); headers.setReferenceKey(foo, "world"); - EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); - EXPECT_EQ("world", headers.get(foo)->value().getStringView()); + EXPECT_NE("world", headers.get(foo)[0]->value().getStringView().data()); + EXPECT_EQ("world", 
headers.get(foo)[0]->value().getStringView()); headers.setReferenceKey(foo, "monde"); - EXPECT_NE("monde", headers.get(foo)->value().getStringView().data()); - EXPECT_EQ("monde", headers.get(foo)->value().getStringView()); + EXPECT_NE("monde", headers.get(foo)[0]->value().getStringView().data()); + EXPECT_EQ("monde", headers.get(foo)[0]->value().getStringView()); } -TEST(HeaderMapImplTest, SetCopy) { +TEST_P(HeaderMapImplTest, SetCopyOldBehavior) { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http_set_copy_replace_all_headers", "false"}}); + TestRequestHeaderMapImpl headers; LowerCaseString foo("hello"); headers.setCopy(foo, "world"); - EXPECT_EQ("world", headers.get(foo)->value().getStringView()); + EXPECT_EQ("world", headers.get(foo)[0]->value().getStringView()); // Overwrite value. headers.setCopy(foo, "monde"); - EXPECT_EQ("monde", headers.get(foo)->value().getStringView()); + EXPECT_EQ("monde", headers.get(foo)[0]->value().getStringView()); // Add another foo header. headers.addCopy(foo, "monde2"); @@ -760,7 +804,51 @@ TEST(HeaderMapImplTest, SetCopy) { headers.setCopy(foo, empty); EXPECT_EQ(headers.size(), 1); headers.setCopy(foo, "not-empty"); - EXPECT_EQ(headers.get(foo)->value().getStringView(), "not-empty"); + EXPECT_EQ(headers.get(foo)[0]->value().getStringView(), "not-empty"); + + // Use setCopy with inline headers both indirectly and directly. + headers.clear(); + EXPECT_EQ(headers.size(), 0); + headers.setCopy(Headers::get().Path, "/"); + EXPECT_EQ(headers.size(), 1); + EXPECT_EQ(headers.getPathValue(), "/"); + headers.setPath("/foo"); + EXPECT_EQ(headers.size(), 1); + EXPECT_EQ(headers.getPathValue(), "/foo"); +} + +TEST_P(HeaderMapImplTest, SetCopyNewBehavior) { + TestRequestHeaderMapImpl headers; + LowerCaseString foo("hello"); + headers.setCopy(foo, "world"); + EXPECT_EQ("world", headers.get(foo)[0]->value().getStringView()); + + // Overwrite value. + headers.setCopy(foo, "monde"); + EXPECT_EQ("monde", headers.get(foo)[0]->value().getStringView()); + + // Add another foo header. + headers.addCopy(foo, "monde2"); + EXPECT_EQ(headers.size(), 2); + + // The foo header is overridden. + headers.setCopy(foo, "override-monde"); + EXPECT_EQ(headers.size(), 1); + + HeaderAndValueCb cb; + + InSequence seq; + EXPECT_CALL(cb, Call("hello", "override-monde")); + headers.iterate(cb.asIterateCb()); + + // Test setting an empty string and then overriding. + EXPECT_EQ(1UL, headers.remove(foo)); + EXPECT_EQ(headers.size(), 0); + const std::string empty; + headers.setCopy(foo, empty); + EXPECT_EQ(headers.size(), 1); + headers.setCopy(foo, "not-empty"); + EXPECT_EQ(headers.get(foo)[0]->value().getStringView(), "not-empty"); // Use setCopy with inline headers both indirectly and directly. headers.clear(); @@ -773,21 +861,21 @@ TEST(HeaderMapImplTest, SetCopy) { EXPECT_EQ(headers.getPathValue(), "/foo"); } -TEST(HeaderMapImplTest, AddCopy) { +TEST_P(HeaderMapImplTest, AddCopy) { TestRequestHeaderMapImpl headers; // Start with a string value. 
std::unique_ptr lcKeyPtr(new LowerCaseString("hello")); headers.addCopy(*lcKeyPtr, "world"); - const HeaderString& value = headers.get(*lcKeyPtr)->value(); + const HeaderString& value = headers.get(*lcKeyPtr)[0]->value(); EXPECT_EQ("world", value.getStringView()); EXPECT_EQ(5UL, value.size()); lcKeyPtr.reset(); - const HeaderString& value2 = headers.get(LowerCaseString("hello"))->value(); + const HeaderString& value2 = headers.get(LowerCaseString("hello"))[0]->value(); EXPECT_EQ("world", value2.getStringView()); EXPECT_EQ(5UL, value2.size()); @@ -809,14 +897,14 @@ TEST(HeaderMapImplTest, AddCopy) { headers.addCopy(*lcKeyPtr, 42); - const HeaderString& value3 = headers.get(*lcKeyPtr)->value(); + const HeaderString& value3 = headers.get(*lcKeyPtr)[0]->value(); EXPECT_EQ("42", value3.getStringView()); EXPECT_EQ(2UL, value3.size()); lcKeyPtr.reset(); - const HeaderString& value4 = headers.get(LowerCaseString("hello"))->value(); + const HeaderString& value4 = headers.get(LowerCaseString("hello"))[0]->value(); EXPECT_EQ("42", value4.getStringView()); EXPECT_EQ(2UL, value4.size()); @@ -826,25 +914,25 @@ TEST(HeaderMapImplTest, AddCopy) { LowerCaseString lcKey3(std::string("he") + "ll" + "o"); EXPECT_STREQ("hello", lcKey3.get().c_str()); - EXPECT_EQ("42", headers.get(lcKey3)->value().getStringView()); - EXPECT_EQ(2UL, headers.get(lcKey3)->value().size()); + EXPECT_EQ("42", headers.get(lcKey3)[0]->value().getStringView()); + EXPECT_EQ(2UL, headers.get(lcKey3)[0]->value().size()); LowerCaseString envoy_retry_on("x-envoy-retry-on"); headers.addCopy(envoy_retry_on, "max-age=1345"); - EXPECT_EQ("max-age=1345", headers.get(envoy_retry_on)->value().getStringView()); + EXPECT_EQ("max-age=1345", headers.get(envoy_retry_on)[0]->value().getStringView()); EXPECT_EQ("max-age=1345", headers.getEnvoyRetryOnValue()); headers.addCopy(envoy_retry_on, "public"); - EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)->value().getStringView()); + EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)[0]->value().getStringView()); headers.addCopy(envoy_retry_on, ""); - EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)->value().getStringView()); + EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)[0]->value().getStringView()); headers.addCopy(envoy_retry_on, 123); - EXPECT_EQ("max-age=1345,public,123", headers.get(envoy_retry_on)->value().getStringView()); + EXPECT_EQ("max-age=1345,public,123", headers.get(envoy_retry_on)[0]->value().getStringView()); headers.addCopy(envoy_retry_on, std::numeric_limits::max()); EXPECT_EQ("max-age=1345,public,123,18446744073709551615", - headers.get(envoy_retry_on)->value().getStringView()); + headers.get(envoy_retry_on)[0]->value().getStringView()); } -TEST(HeaderMapImplTest, Equality) { +TEST_P(HeaderMapImplTest, Equality) { TestRequestHeaderMapImpl headers1; TestRequestHeaderMapImpl headers2; EXPECT_EQ(headers1, headers2); @@ -856,15 +944,15 @@ TEST(HeaderMapImplTest, Equality) { EXPECT_FALSE(headers1 == headers2); } -TEST(HeaderMapImplTest, LargeCharInHeader) { +TEST_P(HeaderMapImplTest, LargeCharInHeader) { TestRequestHeaderMapImpl headers; LowerCaseString static_key("\x90hello"); std::string ref_value("value"); headers.addReference(static_key, ref_value); - EXPECT_EQ("value", headers.get(static_key)->value().getStringView()); + EXPECT_EQ("value", headers.get(static_key)[0]->value().getStringView()); } -TEST(HeaderMapImplTest, Iterate) { +TEST_P(HeaderMapImplTest, Iterate) { TestRequestHeaderMapImpl headers; 
headers.addCopy(LowerCaseString("hello"), "world"); headers.addCopy(LowerCaseString("foo"), "xxx"); @@ -881,7 +969,7 @@ TEST(HeaderMapImplTest, Iterate) { headers.iterate(cb.asIterateCb()); } -TEST(HeaderMapImplTest, IterateReverse) { +TEST_P(HeaderMapImplTest, IterateReverse) { TestRequestHeaderMapImpl headers; headers.addCopy(LowerCaseString("hello"), "world"); headers.addCopy(LowerCaseString("foo"), "bar"); @@ -904,12 +992,12 @@ TEST(HeaderMapImplTest, IterateReverse) { }); } -TEST(HeaderMapImplTest, Get) { +TEST_P(HeaderMapImplTest, Get) { { auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, {"hello", "world"}}); - EXPECT_EQ("/", headers.get(LowerCaseString(":path"))->value().getStringView()); - EXPECT_EQ("world", headers.get(LowerCaseString("hello"))->value().getStringView()); - EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); + EXPECT_EQ("/", headers.get(LowerCaseString(":path"))[0]->value().getStringView()); + EXPECT_EQ("world", headers.get(LowerCaseString("hello"))[0]->value().getStringView()); + EXPECT_TRUE(headers.get(LowerCaseString("foo")).empty()); } { @@ -917,24 +1005,24 @@ TEST(HeaderMapImplTest, Get) { // There is not HeaderMap method to set a header and copy both the key and value. const LowerCaseString path(":path"); headers.setReferenceKey(path, "/new_path"); - EXPECT_EQ("/new_path", headers.get(LowerCaseString(":path"))->value().getStringView()); + EXPECT_EQ("/new_path", headers.get(LowerCaseString(":path"))[0]->value().getStringView()); const LowerCaseString foo("hello"); headers.setReferenceKey(foo, "world2"); - EXPECT_EQ("world2", headers.get(foo)->value().getStringView()); - EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); + EXPECT_EQ("world2", headers.get(foo)[0]->value().getStringView()); + EXPECT_TRUE(headers.get(LowerCaseString("foo")).empty()); } } -TEST(HeaderMapImplTest, CreateHeaderMapFromIterator) { +TEST_P(HeaderMapImplTest, CreateHeaderMapFromIterator) { std::vector> iter_headers{ {LowerCaseString(Headers::get().Path), "/"}, {LowerCaseString("hello"), "world"}}; auto headers = createHeaderMap(iter_headers.cbegin(), iter_headers.cend()); - EXPECT_EQ("/", headers->get(LowerCaseString(":path"))->value().getStringView()); - EXPECT_EQ("world", headers->get(LowerCaseString("hello"))->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); + EXPECT_EQ("/", headers->get(LowerCaseString(":path"))[0]->value().getStringView()); + EXPECT_EQ("world", headers->get(LowerCaseString("hello"))[0]->value().getStringView()); + EXPECT_TRUE(headers->get(LowerCaseString("foo")).empty()); } -TEST(HeaderMapImplTest, TestHeaderList) { +TEST_P(HeaderMapImplTest, TestHeaderList) { std::array keys{Headers::get().Path.get(), "hello"}; std::array values{"/", "world"}; @@ -952,14 +1040,14 @@ TEST(HeaderMapImplTest, TestHeaderList) { EXPECT_THAT(to_string_views(header_list.values()), ElementsAre("/", "world")); } -TEST(HeaderMapImplTest, TestAppendHeader) { +TEST_P(HeaderMapImplTest, TestAppendHeader) { // Test appending to a string with a value. { TestRequestHeaderMapImpl headers; LowerCaseString foo("key1"); headers.addCopy(foo, "some;"); headers.appendCopy(foo, "test"); - EXPECT_EQ(headers.get(foo)->value().getStringView(), "some;,test"); + EXPECT_EQ(headers.get(foo)[0]->value().getStringView(), "some;,test"); } // Test appending to an empty string. 
@@ -967,7 +1055,7 @@ TEST(HeaderMapImplTest, TestAppendHeader) { TestRequestHeaderMapImpl headers; LowerCaseString key2("key2"); headers.appendCopy(key2, "my tag data"); - EXPECT_EQ(headers.get(key2)->value().getStringView(), "my tag data"); + EXPECT_EQ(headers.get(key2)[0]->value().getStringView(), "my tag data"); } // Test empty data case. @@ -976,7 +1064,7 @@ TEST(HeaderMapImplTest, TestAppendHeader) { LowerCaseString key3("key3"); headers.addCopy(key3, "empty"); headers.appendCopy(key3, ""); - EXPECT_EQ(headers.get(key3)->value().getStringView(), "empty"); + EXPECT_EQ(headers.get(key3)[0]->value().getStringView(), "empty"); } // Regression test for appending to an empty string with a short string, then // setting integer. @@ -1015,7 +1103,7 @@ TEST(TestHeaderMapImplDeathTest, TestHeaderLengthChecks) { "Trying to allocate overly large headers."); } -TEST(HeaderMapImplTest, PseudoHeaderOrder) { +TEST_P(HeaderMapImplTest, PseudoHeaderOrder) { HeaderAndValueCb cb; { @@ -1164,21 +1252,21 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Validate that TestRequestHeaderMapImpl copy construction and assignment works. This is a // regression for where we were missing a valid copy constructor and had the // default (dangerous) move semantics takeover. -TEST(HeaderMapImplTest, TestRequestHeaderMapImplyCopy) { +TEST_P(HeaderMapImplTest, TestRequestHeaderMapImplCopy) { TestRequestHeaderMapImpl foo; foo.addCopy(LowerCaseString("foo"), "bar"); auto headers = std::make_unique(foo); - EXPECT_EQ("bar", headers->get(LowerCaseString("foo"))->value().getStringView()); + EXPECT_EQ("bar", headers->get(LowerCaseString("foo"))[0]->value().getStringView()); TestRequestHeaderMapImpl baz{{"foo", "baz"}}; baz = *headers; - EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); + EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))[0]->value().getStringView()); const TestRequestHeaderMapImpl& baz2 = baz; baz = baz2; - EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); + EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))[0]->value().getStringView()); } // Make sure 'host' -> ':authority' auto translation only occurs for request headers. -TEST(HeaderMapImplTest, HostHeader) { +TEST_P(HeaderMapImplTest, HostHeader) { TestRequestHeaderMapImpl request_headers{{"host", "foo"}}; EXPECT_EQ(request_headers.size(), 1); EXPECT_EQ(request_headers.get_(":authority"), "foo"); @@ -1196,26 +1284,26 @@ TEST(HeaderMapImplTest, HostHeader) { EXPECT_EQ(response_trailers.get_("host"), "foo"); } -TEST(HeaderMapImplTest, TestInlineHeaderAdd) { +TEST_P(HeaderMapImplTest, TestInlineHeaderAdd) { TestRequestHeaderMapImpl foo; foo.addCopy(LowerCaseString(":path"), "GET"); EXPECT_EQ(foo.size(), 1); EXPECT_TRUE(foo.Path() != nullptr); } -TEST(HeaderMapImplTest, ClearHeaderMap) { +TEST_P(HeaderMapImplTest, ClearHeaderMap) { TestRequestHeaderMapImpl headers; LowerCaseString static_key("hello"); std::string ref_value("value"); // Add random header and then clear. 
headers.addReference(static_key, ref_value); - EXPECT_EQ("value", headers.get(static_key)->value().getStringView()); - EXPECT_TRUE(headers.get(static_key)->value().isReference()); + EXPECT_EQ("value", headers.get(static_key)[0]->value().getStringView()); + EXPECT_TRUE(headers.get(static_key)[0]->value().isReference()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); headers.clear(); - EXPECT_EQ(nullptr, headers.get(static_key)); + EXPECT_TRUE(headers.get(static_key).empty()); EXPECT_EQ(0UL, headers.size()); EXPECT_EQ(headers.byteSize(), 0); EXPECT_TRUE(headers.empty()); @@ -1245,7 +1333,7 @@ TEST(HeaderMapImplTest, ClearHeaderMap) { } // Validates byte size is properly accounted for in different inline header setting scenarios. -TEST(HeaderMapImplTest, InlineHeaderByteSize) { +TEST_P(HeaderMapImplTest, InlineHeaderByteSize) { { TestRequestHeaderMapImpl headers; std::string foo = "foo"; @@ -1299,7 +1387,7 @@ TEST(HeaderMapImplTest, InlineHeaderByteSize) { } } -TEST(HeaderMapImplTest, ValidHeaderString) { +TEST_P(HeaderMapImplTest, ValidHeaderString) { EXPECT_TRUE(validHeaderString("abc")); EXPECT_FALSE(validHeaderString(absl::string_view("a\000bc", 4))); EXPECT_FALSE(validHeaderString("abc\n")); diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index ae0aaba39c42..47c39a36311e 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -8,6 +8,7 @@ #include "common/http/header_utility.h" #include "common/json/json_loader.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -71,6 +72,53 @@ TEST_F(HeaderUtilityTest, RemovePortsFromHostConnect) { } } +TEST(GetAllOfHeaderAsStringTest, All) { + const LowerCaseString test_header("test"); + { + TestRequestHeaderMapImpl headers; + const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header); + EXPECT_FALSE(ret.result().has_value()); + EXPECT_TRUE(ret.backingString().empty()); + } + { + TestRequestHeaderMapImpl headers{{"test", "foo"}}; + const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header); + EXPECT_EQ("foo", ret.result().value()); + EXPECT_TRUE(ret.backingString().empty()); + } + { + TestRequestHeaderMapImpl headers{{"test", "foo"}, {"test", "bar"}}; + const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header); + EXPECT_EQ("foo,bar", ret.result().value()); + EXPECT_EQ("foo,bar", ret.backingString()); + } + { + TestRequestHeaderMapImpl headers{{"test", ""}, {"test", "bar"}}; + const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header); + EXPECT_EQ(",bar", ret.result().value()); + EXPECT_EQ(",bar", ret.backingString()); + } + { + TestRequestHeaderMapImpl headers{{"test", ""}, {"test", ""}}; + const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header); + EXPECT_EQ(",", ret.result().value()); + EXPECT_EQ(",", ret.backingString()); + } + { + TestRequestHeaderMapImpl headers{ + {"test", "a"}, {"test", "b"}, {"test", "c"}, {"test", ""}, {"test", ""}}; + const auto ret = HeaderUtility::getAllOfHeaderAsString(headers, test_header); + EXPECT_EQ("a,b,c,,", ret.result().value()); + EXPECT_EQ("a,b,c,,", ret.backingString()); + // Make sure copying the return value works correctly. 
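+    // (The pointer comparisons below suggest the returned object owns a backing string
+    // and exposes result() as a string_view into it, so a copy has to re-point the view
+    // at its own backing storage rather than at the original's.)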
+ const auto ret2 = ret; // NOLINT(performance-unnecessary-copy-initialization) + EXPECT_EQ(ret2.result(), ret.result()); + EXPECT_EQ(ret2.backingString(), ret.backingString()); + EXPECT_EQ(ret2.result().value().data(), ret2.backingString().data()); + EXPECT_NE(ret2.result().value().data(), ret.backingString().data()); + } +} + TEST(HeaderDataConstructorTest, NoSpecifierSet) { const std::string yaml = R"EOF( name: test-header @@ -201,27 +249,6 @@ invert_match: true EXPECT_EQ(true, header_data.invert_match_); } -TEST(HeaderDataConstructorTest, GetAllOfHeader) { - TestRequestHeaderMapImpl headers{ - {"foo", "val1"}, {"bar", "bar2"}, {"foo", "eep, bar"}, {"foo", ""}}; - - std::vector foo_out; - Http::HeaderUtility::getAllOfHeader(headers, "foo", foo_out); - ASSERT_EQ(foo_out.size(), 3); - ASSERT_EQ(foo_out[0], "val1"); - ASSERT_EQ(foo_out[1], "eep, bar"); - ASSERT_EQ(foo_out[2], ""); - - std::vector bar_out; - Http::HeaderUtility::getAllOfHeader(headers, "bar", bar_out); - ASSERT_EQ(bar_out.size(), 1); - ASSERT_EQ(bar_out[0], "bar2"); - - std::vector eep_out; - Http::HeaderUtility::getAllOfHeader(headers, "eep", eep_out); - ASSERT_EQ(eep_out.size(), 0); -} - TEST(MatchHeadersTest, MayMatchOneOrMoreRequestHeader) { TestRequestHeaderMapImpl headers{{"some-header", "a"}, {"other-header", "b"}}; @@ -236,8 +263,32 @@ regex_match: (a|b) EXPECT_FALSE(HeaderUtility::matchHeaders(headers, header_data)); headers.addCopy("match-header", "a"); + // With a single "match-header" this regex will match. EXPECT_TRUE(HeaderUtility::matchHeaders(headers, header_data)); + headers.addCopy("match-header", "b"); + // With two "match-header" we now logically have "a,b" as the value, so the regex will not match. + EXPECT_FALSE(HeaderUtility::matchHeaders(headers, header_data)); + + header_data[0] = std::make_unique(parseHeaderMatcherFromYaml(R"EOF( +name: match-header +exact_match: a,b + )EOF")); + // Make sure that an exact match on "a,b" does in fact work. + EXPECT_TRUE(HeaderUtility::matchHeaders(headers, header_data)); + + TestScopedRuntime runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http_match_on_all_headers", "false"}}); + // Flipping runtime to false should make "a,b" no longer match because we will match on the first + // header only. + EXPECT_FALSE(HeaderUtility::matchHeaders(headers, header_data)); + + header_data[0] = std::make_unique(parseHeaderMatcherFromYaml(R"EOF( +name: match-header +exact_match: a + )EOF")); + // With runtime off, exact match on "a" should pass. 
EXPECT_TRUE(HeaderUtility::matchHeaders(headers, header_data)); } @@ -596,7 +647,7 @@ TEST(HeaderAddTest, HeaderAdd) { headers_to_add.iterate([&headers](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value().getStringView(), headers.get(lower_key)->value().getStringView()); + EXPECT_EQ(entry.value().getStringView(), headers.get(lower_key)[0]->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }); } diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 0a78208255ef..200ba2423191 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -1073,42 +1073,6 @@ TEST_P(Http1ServerConnectionImplTest, FloodProtection) { } } -TEST_P(Http1ServerConnectionImplTest, FloodProtectionOff) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.http1_flood_protection", "false"}}); - initialize(); - - NiceMock decoder; - Buffer::OwnedImpl local_buffer; - // With flood protection off, many responses can be queued up. - for (int i = 0; i < 4; ++i) { - Http::ResponseEncoder* response_encoder = nullptr; - EXPECT_CALL(callbacks_, newStream(_, _)) - .WillOnce(Invoke([&](Http::ResponseEncoder& encoder, bool) -> Http::RequestDecoder& { - response_encoder = &encoder; - return decoder; - })); - - Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - auto status = codec_->dispatch(buffer); - EXPECT_TRUE(status.ok()); - EXPECT_EQ(0U, buffer.length()); - - // In most tests the write output is serialized to a buffer here it is - // ignored to build up queued "end connection" sentinels. - EXPECT_CALL(connection_, write(_, _)) - - .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> void { - // Move the response out of data while preserving the buffer fragment sentinels. - local_buffer.move(data); - })); - - TestResponseHeaderMapImpl headers{{":status", "200"}}; - response_encoder->encodeHeaders(headers, true); - } -} - TEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) { initialize(); diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index cd7a933b86d1..c919f9499cea 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -53,7 +53,7 @@ class ConnPoolImplForTest : public ConnPoolImpl { ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::ClusterInfoConstSharedPtr cluster, NiceMock* upstream_ready_cb) - : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), + : ConnPoolImpl(dispatcher, random_, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), Upstream::ResourcePriority::Default, nullptr, nullptr), api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), mock_upstream_ready_cb_(upstream_ready_cb) {} @@ -125,6 +125,7 @@ class ConnPoolImplForTest : public ConnPoolImpl { Api::ApiPtr api_; Event::MockDispatcher& mock_dispatcher_; + NiceMock random_; NiceMock* mock_upstream_ready_cb_; std::vector test_clients_; }; diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index a32ee0d8add4..8b95de6b6cf4 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -40,10 +40,6 @@ CODEC_TEST_DEPS = [ envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], - # The default codec is the legacy codec. 
Override runtime flag for testing new codec. - args = [ - "--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior", - ], shard_count = 5, deps = CODEC_TEST_DEPS, ) @@ -51,7 +47,7 @@ envoy_cc_test( envoy_cc_test( name = "codec_impl_legacy_test", srcs = ["codec_impl_test.cc"], - # The default codec is the legacy codec. Verify the runtime flag for the new codec is disabled. + # The default codec is the new codec. Disable runtime flag for testing old codec. args = [ "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", ], @@ -165,6 +161,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "protocol_constraints_test", + srcs = ["protocol_constraints_test.cc"], + deps = [ + "//source/common/http/http2:protocol_constraints_lib", + "//test/common/stats:stat_test_utility_lib", + "//test/test_common:test_runtime_lib", + ], +) + envoy_cc_fuzz_test( name = "response_header_fuzz_test", srcs = ["response_header_fuzz_test.cc"], diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 9b4fdeccc1b9..d8ec1e0adb55 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -333,7 +333,7 @@ class Http2CodecImplTest : public ::testing::TestWithParamencodeHeaders(request_headers, true); } + +// Validate the keepalive PINGs are sent and received correctly. +TEST_P(Http2CodecImplTest, ConnectionKeepalive) { + constexpr uint32_t interval_ms = 100; + constexpr uint32_t timeout_ms = 200; + client_http2_options_.mutable_connection_keepalive()->mutable_interval()->set_nanos(interval_ms * + 1000 * 1000); + client_http2_options_.mutable_connection_keepalive()->mutable_timeout()->set_nanos(timeout_ms * + 1000 * 1000); + client_http2_options_.mutable_connection_keepalive()->mutable_interval_jitter()->set_value(0); + auto timeout_timer = new Event::MockTimer(&client_connection_.dispatcher_); /* */ + auto send_timer = new Event::MockTimer(&client_connection_.dispatcher_); + EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*send_timer, enableTimer(std::chrono::milliseconds(interval_ms), _)); + initialize(); + + // Trigger sending a PING, and validate that an ACK is received based on the timeout timer + // being disabled and the interval being re-enabled. + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(timeout_ms), _)); + EXPECT_CALL(*timeout_timer, disableTimer()); // This indicates that an ACK was received. + EXPECT_CALL(*send_timer, enableTimer(std::chrono::milliseconds(interval_ms), _)); + send_timer->callback_(); + + // Test that a timeout closes the connection. + EXPECT_CALL(client_connection_, close(Network::ConnectionCloseType::NoFlush)); + timeout_timer->callback_(); +} + +// Validate that jitter is added as expected based on configuration. 
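+// A rough sketch of the expected scheduling (an illustrative assumption, not necessarily the
+// codec's exact computation): with a 1000ms interval and 10% interval_jitter,
+//   const uint64_t jitter_ms = random() % (1000 * 10 / 100); // 0..99
+//   send_timer->enableTimer(std::chrono::milliseconds(1000 + jitter_ms), ...);
+// so every scheduled value should land in [1000ms, 1100ms); the loop below drives random()
+// with the values 0..249 and checks that the observed minimum and maximum are 1000ms and 1099ms.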
+TEST_P(Http2CodecImplTest, ConnectionKeepaliveJitter) { + client_http2_options_.mutable_connection_keepalive()->mutable_interval()->set_seconds(1); + client_http2_options_.mutable_connection_keepalive()->mutable_timeout()->set_seconds(1); + client_http2_options_.mutable_connection_keepalive()->mutable_interval_jitter()->set_value(10); + /*auto timeout_timer = */ new NiceMock(&client_connection_.dispatcher_); + auto send_timer = new Event::MockTimer(&client_connection_.dispatcher_); + + constexpr std::chrono::milliseconds min_expected(1000); + constexpr std::chrono::milliseconds max_expected(1099); // 1000ms + 10% + std::chrono::milliseconds min_observed(5000); + std::chrono::milliseconds max_observed(0); + EXPECT_CALL(*send_timer, enableTimer(_, _)) + .WillRepeatedly(Invoke([&](const std::chrono::milliseconds& ms, const ScopeTrackedObject*) { + EXPECT_GE(ms, std::chrono::milliseconds(1000)); + EXPECT_LE(ms, std::chrono::milliseconds(1100)); + max_observed = std::max(max_observed, ms); + min_observed = std::min(min_observed, ms); + })); + initialize(); + + for (uint64_t i = 0; i < 250; i++) { + EXPECT_CALL(client_->random_generator_, random()).WillOnce(Return(i)); + send_timer->callback_(); + } + + EXPECT_EQ(min_observed.count(), min_expected.count()); + EXPECT_EQ(max_observed.count(), max_expected.count()); +} + class Http2CodecImplDeferredResetTest : public Http2CodecImplTest {}; TEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) { @@ -1307,6 +1365,125 @@ TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeoutAfterGoaway) { EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); } +// Verify detection of downstream outbound frame queue by the WINDOW_UPDATE frames +// sent when codec resumes reading. +TEST_P(Http2CodecImplFlowControlTest, WindowUpdateOnReadResumingFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + TestRequestHeaderMapImpl expected_headers; + HttpTestUtility::addDefaultHeaders(expected_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + // Force the server stream to be read disabled. This will cause it to stop sending window + // updates to the client. + server_->getStream(1)->readDisable(true); + + uint32_t initial_stream_window = + nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); + // If this limit is changed, this test will fail due to the initial large writes being divided + // into more than 4 frames. Fast fail here with this explanatory comment. + ASSERT_EQ(65535, initial_stream_window); + // Make sure the limits were configured properly in test set up. + EXPECT_EQ(initial_stream_window, server_->getStream(1)->bufferLimit()); + EXPECT_EQ(initial_stream_window, client_->getStream(1)->bufferLimit()); + + // One large write gets broken into smaller frames. 
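+ // The client writes initial_stream_window / 2 bytes in a single encodeData() call; because
+ // the server stream was read-disabled above, those bytes stay unconsumed and no
+ // WINDOW_UPDATE frames are sent back yet.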
+ EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AnyNumber()); + Buffer::OwnedImpl long_data(std::string(initial_stream_window / 2, 'a')); + request_encoder_->encodeData(long_data, false); + + EXPECT_EQ(initial_stream_window / 2, server_->getStreamUnconsumedBytes(1)); + + // pre-fill downstream outbound frame queue + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above and pre-fill outbound queue with 1 byte DATA frames + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 2; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_FALSE(violation_callback->enabled_); + + // Now unblock the server's stream. This will cause the bytes to be consumed, 2 flow control + // updates to be sent, and overflow outbound frame queue. + server_->getStream(1)->readDisable(false); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify detection of outbound queue flooding by the RST_STREAM frame sent by the pending flush +// timeout. +TEST_P(Http2CodecImplFlowControlTest, RstStreamOnPendingFlushTimeoutFlood) { + // This test sets initial stream window to 65535 bytes. + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above and pre-fill outbound queue with 6 byte DATA frames + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 2; ++i) { + Buffer::OwnedImpl data(std::string(6, '0')); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + // client stream windows should have 5535 bytes left and the next frame should overflow it. + // nghttp2 sends 1 DATA frame for the remainder of the client window and it should make + // outbound frame queue 1 away from overflow. + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl large_body(std::string(6 * 1024, '1')); + response_encoder_->encodeData(large_body, true); + + EXPECT_FALSE(violation_callback->enabled_); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)); + + // Pending flush timeout causes RST_STREAM to be sent and overflow the outbound frame queue. 
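+ // (The outbound queue was pre-filled to within one frame of DEFAULT_MAX_OUTBOUND_FRAMES
+ // above, so the single RST_STREAM emitted here is enough to trip the flood check.)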
+ flush_timer->invokeCallback(); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + TEST_P(Http2CodecImplTest, WatermarkUnderEndStream) { initialize(); MockStreamCallbacks callbacks; @@ -1889,7 +2066,6 @@ TEST_P(Http2CodecImplTest, PingFlood) { Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") ? "Too many control frames in the outbound queue." : "Too many frames in the outbound queue."); - EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } @@ -1983,15 +2159,16 @@ TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { buffer.move(frame); })); + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); TestResponseHeaderMapImpl response_headers{{":status", "200"}}; for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1; ++i) { EXPECT_NO_THROW(response_encoder_->encodeHeaders(response_headers, false)); } - // Presently flood mitigation is done only when processing downstream data - // So we need to send stream from downstream client to trigger mitigation - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many frames in the outbound queue."); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); @@ -2014,6 +2191,9 @@ TEST_P(Http2CodecImplTest, ResponseDataFlood) { buffer.move(frame); })); + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; response_encoder_->encodeHeaders(response_headers, false); // Account for the single HEADERS frame above @@ -2021,11 +2201,10 @@ TEST_P(Http2CodecImplTest, ResponseDataFlood) { Buffer::OwnedImpl data("0"); EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); } - // Presently flood mitigation is done only when processing downstream data - // So we need to send stream from downstream client to trigger mitigation - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many frames in the outbound queue."); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); @@ -2091,16 +2270,17 @@ TEST_P(Http2CodecImplTest, ResponseDataFloodCounterReset) { // Drain kMaxOutboundFrames / 2 slices from the send buffer buffer.drain(buffer.length() / 2); + auto* violation_callback = + new 
NiceMock(&server_connection_.dispatcher_); + for (uint32_t i = 0; i < kMaxOutboundFrames / 2 + 1; ++i) { Buffer::OwnedImpl data("0"); EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); } - // Presently flood mitigation is done only when processing downstream data - // So we need to send a frame from downstream client to trigger mitigation - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many frames in the outbound queue."); + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); } // Verify that control frames are added to the counter of outbound frames of all types. @@ -2132,7 +2312,94 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, "Too many frames in the outbound queue."); - EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that codec detects flood of outbound trailers +TEST_P(Http2CodecImplTest, ResponseTrailersFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_FALSE(violation_callback->enabled_); + EXPECT_NO_THROW(response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"foo", "bar"}})); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that codec detects flood of outbound METADATA frames +TEST_P(Http2CodecImplTest, MetadataFlood) { + allow_metadata_ = true; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single 
HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_FALSE(violation_callback->enabled_); + + MetadataMapVector metadata_map_vector; + MetadataMap metadata_map = { + {"header_key1", "header_value1"}, + {"header_key2", "header_value2"}, + }; + MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); + metadata_map_vector.push_back(std::move(metadata_map_ptr)); + + response_encoder_->encodeMetadata(metadata_map_vector); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } @@ -2175,12 +2442,15 @@ TEST_P(Http2CodecImplTest, EmptyDataFlood) { EXPECT_CALL(request_decoder_, decodeData(_, false)); auto status = server_wrapper_.dispatch(data, *server_); EXPECT_FALSE(status.ok()); - EXPECT_TRUE(isBufferFloodError(status)); - // Legacy codec does not propagate error details and uses generic error message - EXPECT_EQ(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") - ? "Too many consecutive frames with an empty payload" - : "Flooding was detected in this HTTP/2 session, and it must be closed", - status.message()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(status)); + EXPECT_EQ("Too many consecutive frames with an empty payload", status.message()); + } else { + // Legacy codec does not propagate error details and uses generic error message + EXPECT_TRUE(isBufferFloodError(status)); + EXPECT_EQ("Flooding was detected in this HTTP/2 session, and it must be closed", + status.message()); + } } TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { @@ -2195,6 +2465,186 @@ TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { EXPECT_TRUE(status.ok()); } +// Verify that codec detects flood of outbound frames caused by goAway() method +TEST_P(Http2CodecImplTest, GoAwayCausesOutboundFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_FALSE(violation_callback->enabled_); + + server_->goAway(); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + 
EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that codec detects flood of outbound frames caused by shutdownNotice() method +TEST_P(Http2CodecImplTest, ShudowNoticeCausesOutboundFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_FALSE(violation_callback->enabled_); + + server_->shutdownNotice(); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that codec detects flood of outbound PING frames caused by the keep alive timer +TEST_P(Http2CodecImplTest, KeepAliveCausesOutboundFlood) { + // set-up server to send PING frames + constexpr uint32_t interval_ms = 100; + constexpr uint32_t timeout_ms = 200; + server_http2_options_.mutable_connection_keepalive()->mutable_interval()->set_nanos(interval_ms * + 1000 * 1000); + server_http2_options_.mutable_connection_keepalive()->mutable_timeout()->set_nanos(timeout_ms * + 1000 * 1000); + server_http2_options_.mutable_connection_keepalive()->mutable_interval_jitter()->set_value(0); + auto timeout_timer = new Event::MockTimer(&server_connection_.dispatcher_); /* */ + auto send_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*send_timer, enableTimer(std::chrono::milliseconds(interval_ms), _)); + + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Pre-fill outbound frame queue 1 away from overflow (account for the single HEADERS frame above) + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_FALSE(violation_callback->enabled_); + + // Trigger sending a PING, which should overflow the outbound frame queue and 
cause + // client to be disconnected + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + // new codec does not schedule timeout callback if the PING had triggered flood protection + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(timeout_ms), _)); + } + send_timer->callback_(); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that codec detects flood of RST_STREAM frame caused by resetStream() method +TEST_P(Http2CodecImplTest, ResetStreamCausesOutboundFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + auto* violation_callback = + new NiceMock(&server_connection_.dispatcher_); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_FALSE(violation_callback->enabled_); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); + + server_->getStream(1)->resetStream(StreamResetReason::RemoteReset); + + EXPECT_TRUE(violation_callback->enabled_); + EXPECT_CALL(server_connection_, close(Envoy::Network::ConnectionCloseType::NoFlush)); + violation_callback->invokeCallback(); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + // CONNECT without upgrade type gets tagged with "bytestream" TEST_P(Http2CodecImplTest, ConnectTest) { client_http2_options_.set_allow_connect(true); @@ -2213,6 +2663,10 @@ TEST_P(Http2CodecImplTest, ConnectTest) { expected_headers.setReferenceKey(Headers::get().Protocol, "bytestream"); EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); request_encoder_->encodeHeaders(request_headers, false); + + EXPECT_CALL(callbacks, onResetStream(StreamResetReason::ConnectError, _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::ConnectError, _)); + response_encoder_->getStream().resetStream(StreamResetReason::ConnectError); } template class TestNghttp2SessionFactory; @@ -2303,14 +2757,9 @@ TestNghttp2SessionFactory ssize_t { // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
- auto status_or_len = - static_cast*>( - static_cast(user_data)) - ->onSend(data, length); - if (status_or_len.ok()) { - return status_or_len.value(); - } - return NGHTTP2_ERR_CALLBACK_FAILURE; + return static_cast*>( + static_cast(user_data)) + ->onSend(data, length); }); nghttp2_option_new(&options_); nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 6049876ef844..20438d42f26e 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -6,6 +6,8 @@ #include "common/http/http2/codec_impl_legacy.h" #include "common/http/utility.h" +#include "test/mocks/common.h" + namespace Envoy { namespace Http { namespace Http2 { @@ -83,7 +85,7 @@ class TestServerConnectionImpl : public TestServerConnection, public CodecImplTy envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) : TestServerConnection(scope), - CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + CodecImplType(connection, callbacks, http2CodecStats(), random_, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action) {} @@ -103,6 +105,8 @@ class TestServerConnectionImpl : public TestServerConnection, public CodecImplTy protected: // Overrides ServerConnectionImpl::onSettingsForTest(). void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } + + testing::NiceMock random_; }; using TestServerConnectionImplLegacy = @@ -123,6 +127,8 @@ class TestClientConnection : public TestCodecStatsProvider, public ClientCodecFacade { public: TestClientConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} + + testing::NiceMock random_generator_; }; template @@ -134,7 +140,7 @@ class TestClientConnectionImpl : public TestClientConnection, public CodecImplTy uint32_t max_request_headers_kb, uint32_t max_request_headers_count, typename CodecImplType::SessionFactory& http2_session_factory) : TestClientConnection(scope), - CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + CodecImplType(connection, callbacks, http2CodecStats(), random_generator_, http2_options, max_request_headers_kb, max_request_headers_count, http2_session_factory) {} // ClientCodecFacade diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 6e23074f691a..c8ea9f0c89c7 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -65,7 +65,7 @@ class Http2ConnPoolImplTest : public testing::Test { Http2ConnPoolImplTest() : api_(Api::createApiForTest(stats_store_)), pool_(std::make_unique( - dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr)) { + dispatcher_, random_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr)) { // Default connections to 1024 because the tests shouldn't be relying on the // connection resource limit for most tests. 
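// (The resetResourceManager() call below raises the circuit breaker limits so that connection
// accounting does not interfere with the pool behavior exercised by these tests.)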
cluster_->resetResourceManager(1024, 1024, 1024, 1, 1); @@ -196,6 +196,7 @@ class Http2ConnPoolImplTest : public testing::Test { std::unique_ptr pool_; std::vector test_clients_; NiceMock runtime_; + Random::MockRandomGenerator random_; }; class ActiveTestRequest { @@ -315,7 +316,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) { // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at // our test transport socket factory. host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80"); - pool_ = std::make_unique(dispatcher_, host_, + pool_ = std::make_unique(dispatcher_, random_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr); // This requires some careful set up of expectations ordering: the call to createTransportSocket @@ -1318,7 +1319,7 @@ TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { TEST_F(Http2ConnPoolImplTest, PrefetchWithoutMultiplexing) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); // With one request per connection, and prefetch 1.5, the first request will // kick off 2 connections. @@ -1348,7 +1349,7 @@ TEST_F(Http2ConnPoolImplTest, PrefetchOff) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.allow_prefetch", "false"}}); cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); // Despite the prefetch ratio, no prefetch will happen due to the runtime // disable. @@ -1363,7 +1364,7 @@ TEST_F(Http2ConnPoolImplTest, PrefetchOff) { TEST_F(Http2ConnPoolImplTest, PrefetchWithMultiplexing) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(2); - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); // With two requests per connection, and prefetch 1.5, the first request will // only kick off 1 connection. @@ -1384,7 +1385,7 @@ TEST_F(Http2ConnPoolImplTest, PrefetchWithMultiplexing) { TEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); // With one request per connection, and prefetch 1.5, the first request will // kick off 2 connections. @@ -1410,7 +1411,7 @@ TEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) { TEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); expectClientsCreate(2); ActiveTestRequest r1(*this, 0, false); @@ -1431,7 +1432,7 @@ TEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) { TEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.00)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.00)); // First request prefetches an additional connection. 
expectClientsCreate(1); @@ -1442,7 +1443,7 @@ TEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) { ActiveTestRequest r2(*this, 0, false); // Change the prefetch ratio to force the connection to no longer be excess. - ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(2)); + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(2)); // Closing off the second request should bring us back to 1 request in queue, // desired capacity 2, so will not close the connection. EXPECT_CALL(*this, onClientDestroy()).Times(0); @@ -1454,6 +1455,19 @@ TEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) { closeAllClients(); } +// Test that maybePrefetch is passed up to the base class implementation. +TEST_F(Http2ConnPoolImplTest, MaybePrefetch) { + ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + + EXPECT_FALSE(pool_->maybePrefetch(0)); + + expectClientsCreate(1); + EXPECT_TRUE(pool_->maybePrefetch(2)); + + pool_->drainConnections(); + closeAllClients(); +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/http2_frame.cc b/test/common/http/http2/http2_frame.cc index 142e353f28f4..4d8a8c85cc15 100644 --- a/test/common/http/http2/http2_frame.cc +++ b/test/common/http/http2/http2_frame.cc @@ -10,8 +10,8 @@ namespace { -// Make request stream ID in the network byte order -uint32_t makeRequestStreamId(uint32_t stream_id) { return htonl((stream_id << 1) | 1); } +// Converts stream ID to the network byte order. Supports all values in the range [0, 2^30). +uint32_t makeNetworkOrderStreamId(uint32_t stream_id) { return htonl(stream_id); } // All this templatized stuff is for the typesafe constexpr bitwise ORing of the "enum class" values template struct FirstArgType { @@ -147,28 +147,30 @@ Http2Frame Http2Frame::makeEmptySettingsFrame(SettingsFlags flags) { Http2Frame Http2Frame::makeEmptyHeadersFrame(uint32_t stream_index, HeadersFlags flags) { Http2Frame frame; frame.buildHeader(Type::Headers, 0, static_cast(flags), - makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); return frame; } Http2Frame Http2Frame::makeEmptyContinuationFrame(uint32_t stream_index, HeadersFlags flags) { Http2Frame frame; frame.buildHeader(Type::Continuation, 0, static_cast(flags), - makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); return frame; } Http2Frame Http2Frame::makeEmptyDataFrame(uint32_t stream_index, DataFlags flags) { Http2Frame frame; - frame.buildHeader(Type::Data, 0, static_cast(flags), makeRequestStreamId(stream_index)); + frame.buildHeader(Type::Data, 0, static_cast(flags), + makeNetworkOrderStreamId(stream_index)); return frame; } Http2Frame Http2Frame::makePriorityFrame(uint32_t stream_index, uint32_t dependent_index) { static constexpr size_t kPriorityPayloadSize = 5; Http2Frame frame; - frame.buildHeader(Type::Priority, kPriorityPayloadSize, 0, makeRequestStreamId(stream_index)); - const uint32_t dependent_net = makeRequestStreamId(dependent_index); + frame.buildHeader(Type::Priority, kPriorityPayloadSize, 0, + makeNetworkOrderStreamId(stream_index)); + const uint32_t dependent_net = makeNetworkOrderStreamId(dependent_index); ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); memcpy(&frame.data_[HeaderSize], reinterpret_cast(&dependent_net), sizeof(uint32_t)); return frame; @@ -180,8 +182,8 @@ Http2Frame Http2Frame::makeEmptyPushPromiseFrame(uint32_t stream_index, static constexpr size_t kEmptyPushPromisePayloadSize = 4; Http2Frame frame; 
frame.buildHeader(Type::PushPromise, kEmptyPushPromisePayloadSize, static_cast(flags), - makeRequestStreamId(stream_index)); - const uint32_t promised_stream_id = makeRequestStreamId(promised_stream_index); + makeNetworkOrderStreamId(stream_index)); + const uint32_t promised_stream_id = makeNetworkOrderStreamId(promised_stream_index); ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); memcpy(&frame.data_[HeaderSize], reinterpret_cast(&promised_stream_id), sizeof(uint32_t)); @@ -191,7 +193,8 @@ Http2Frame Http2Frame::makeEmptyPushPromiseFrame(uint32_t stream_index, Http2Frame Http2Frame::makeResetStreamFrame(uint32_t stream_index, ErrorCode error_code) { static constexpr size_t kResetStreamPayloadSize = 4; Http2Frame frame; - frame.buildHeader(Type::RstStream, kResetStreamPayloadSize, 0, makeRequestStreamId(stream_index)); + frame.buildHeader(Type::RstStream, kResetStreamPayloadSize, 0, + makeNetworkOrderStreamId(stream_index)); const uint32_t error = static_cast(error_code); ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); memcpy(&frame.data_[HeaderSize], reinterpret_cast(&error), sizeof(uint32_t)); @@ -201,8 +204,8 @@ Http2Frame Http2Frame::makeResetStreamFrame(uint32_t stream_index, ErrorCode err Http2Frame Http2Frame::makeEmptyGoAwayFrame(uint32_t last_stream_index, ErrorCode error_code) { static constexpr size_t kEmptyGoAwayPayloadSize = 8; Http2Frame frame; - frame.buildHeader(Type::GoAway, kEmptyGoAwayPayloadSize, 0, makeRequestStreamId(0)); - const uint32_t last_stream_id = makeRequestStreamId(last_stream_index); + frame.buildHeader(Type::GoAway, kEmptyGoAwayPayloadSize, 0); + const uint32_t last_stream_id = makeNetworkOrderStreamId(last_stream_index); ASSERT(frame.data_.capacity() >= HeaderSize + 4 + sizeof(uint32_t)); memcpy(&frame.data_[HeaderSize], reinterpret_cast(&last_stream_id), sizeof(uint32_t)); @@ -215,7 +218,7 @@ Http2Frame Http2Frame::makeWindowUpdateFrame(uint32_t stream_index, uint32_t inc static constexpr size_t kWindowUpdatePayloadSize = 4; Http2Frame frame; frame.buildHeader(Type::WindowUpdate, kWindowUpdatePayloadSize, 0, - makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); const uint32_t increment_net = htonl(increment); ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); memcpy(&frame.data_[HeaderSize], reinterpret_cast(&increment_net), sizeof(uint32_t)); @@ -251,7 +254,7 @@ Http2Frame Http2Frame::makeMetadataFrameFromMetadataMap(uint32_t stream_index, Http2Frame frame; frame.buildHeader(Type::Metadata, numberOfBytesInMetadataPayload, static_cast(flags), - makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); std::vector bufferVector(buffer, buffer + numberOfBytesInMetadataPayload); frame.appendDataAfterHeaders(bufferVector); delete[] buffer; @@ -262,7 +265,7 @@ Http2Frame Http2Frame::makeMetadataFrameFromMetadataMap(uint32_t stream_index, Http2Frame Http2Frame::makeMalformedRequest(uint32_t stream_index) { Http2Frame frame; frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndStream, HeadersFlags::EndHeaders), - makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); frame.appendStaticHeader( StaticHeaderIndex::Status200); // send :status as request header, which is invalid frame.adjustPayloadSize(); @@ -274,7 +277,7 @@ Http2Frame Http2Frame::makeMalformedRequestWithZerolenHeader(uint32_t stream_ind absl::string_view path) { Http2Frame frame; frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndStream, HeadersFlags::EndHeaders), 
- makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); frame.appendStaticHeader(StaticHeaderIndex::MethodGet); frame.appendStaticHeader(StaticHeaderIndex::SchemeHttps); frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Path, path); @@ -288,7 +291,7 @@ Http2Frame Http2Frame::makeRequest(uint32_t stream_index, absl::string_view host absl::string_view path) { Http2Frame frame; frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndStream, HeadersFlags::EndHeaders), - makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); frame.appendStaticHeader(StaticHeaderIndex::MethodGet); frame.appendStaticHeader(StaticHeaderIndex::SchemeHttps); frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Path, path); @@ -312,7 +315,7 @@ Http2Frame Http2Frame::makePostRequest(uint32_t stream_index, absl::string_view absl::string_view path) { Http2Frame frame; frame.buildHeader(Type::Headers, 0, orFlags(HeadersFlags::EndHeaders), - makeRequestStreamId(stream_index)); + makeNetworkOrderStreamId(stream_index)); frame.appendStaticHeader(StaticHeaderIndex::MethodPost); frame.appendStaticHeader(StaticHeaderIndex::SchemeHttps); frame.appendHeaderWithoutIndexing(StaticHeaderIndex::Path, path); @@ -321,6 +324,18 @@ Http2Frame Http2Frame::makePostRequest(uint32_t stream_index, absl::string_view return frame; } +Http2Frame Http2Frame::makePostRequest(uint32_t stream_index, absl::string_view host, + absl::string_view path, + const std::vector
extra_headers) { + + auto frame = makePostRequest(stream_index, host, path); + for (const auto& header : extra_headers) { + frame.appendHeaderWithoutIndexing(header); + } + frame.adjustPayloadSize(); + return frame; +} + Http2Frame Http2Frame::makeGenericFrame(absl::string_view contents) { Http2Frame frame; frame.appendData(contents); @@ -333,6 +348,16 @@ Http2Frame Http2Frame::makeGenericFrameFromHexDump(absl::string_view contents) { return frame; } +Http2Frame Http2Frame::makeDataFrame(uint32_t stream_index, absl::string_view data, + DataFlags flags) { + Http2Frame frame; + frame.buildHeader(Type::Data, 0, static_cast(flags), + makeNetworkOrderStreamId(stream_index)); + frame.appendData(data); + frame.adjustPayloadSize(); + return frame; +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/http2_frame.h b/test/common/http/http2/http2_frame.h index 43225793a542..d9a32fab64f2 100644 --- a/test/common/http/http2/http2_frame.h +++ b/test/common/http/http2/http2_frame.h @@ -99,6 +99,15 @@ class Http2Frame { std::string value_; }; + /** + * Make client stream ID out of the given ID in the host byte order, ensuring that the stream id + * is odd as required by https://tools.ietf.org/html/rfc7540#section-5.1.1 + * Use this function to create client stream ids for methods creating HTTP/2 frames. + * @param stream_id some stream id that will be used to create the client stream id. + * @return an odd number client stream id. + */ + static uint32_t makeClientStreamId(uint32_t stream_id) { return (stream_id << 1) | 1; } + // Methods for creating HTTP2 frames static Http2Frame makePingFrame(absl::string_view data = {}); static Http2Frame makeEmptySettingsFrame(SettingsFlags flags = SettingsFlags::None); @@ -129,6 +138,12 @@ class Http2Frame { absl::string_view path, const std::vector
extra_headers); static Http2Frame makePostRequest(uint32_t stream_index, absl::string_view host, absl::string_view path); + static Http2Frame makePostRequest(uint32_t stream_index, absl::string_view host, + absl::string_view path, + const std::vector
extra_headers); + static Http2Frame makeDataFrame(uint32_t stream_index, absl::string_view data, + DataFlags flags = DataFlags::None); + /** * Creates a frame with the given contents. This frame can be * malformed/invalid depending on the given contents. diff --git a/test/common/http/http2/http2_frame_test.cc b/test/common/http/http2/http2_frame_test.cc index ac409d876735..7de8769d8cd3 100644 --- a/test/common/http/http2/http2_frame_test.cc +++ b/test/common/http/http2/http2_frame_test.cc @@ -21,8 +21,8 @@ TEST(EqualityMetadataFrame, Http2FrameTest) { ASSERT_EQ(static_cast(http2FrameFromUtility.type()), 0x4D); // type ASSERT_EQ(payloadFromHttp2Frame[4], 4); // flags ASSERT_EQ(std::to_string(payloadFromHttp2Frame[8]), - std::to_string(3)); // stream_id (extra bit at the end) + std::to_string(1)); // stream_id } } // namespace Http2 } // namespace Http -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/common/http/http2/protocol_constraints_test.cc b/test/common/http/http2/protocol_constraints_test.cc new file mode 100644 index 000000000000..e3084146650a --- /dev/null +++ b/test/common/http/http2/protocol_constraints_test.cc @@ -0,0 +1,205 @@ +#include "common/http/http2/protocol_constraints.h" + +#include "test/common/stats/stat_test_utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +class ProtocolConstraintsTest : public ::testing::Test { +protected: + Http::Http2::CodecStats& http2CodecStats() { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, stats_store_); + } + + Stats::TestUtil::TestStore stats_store_; + Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; + envoy::config::core::v3::Http2ProtocolOptions options_; +}; + +TEST_F(ProtocolConstraintsTest, DefaultStatusOk) { + ProtocolConstraints constraints(http2CodecStats(), options_); + EXPECT_TRUE(constraints.status().ok()); +} + +TEST_F(ProtocolConstraintsTest, OutboundControlFrameFlood) { + options_.mutable_max_outbound_frames()->set_value(20); + options_.mutable_max_outbound_control_frames()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + constraints.incrementOutboundFrameCount(true); + constraints.incrementOutboundFrameCount(true); + EXPECT_TRUE(constraints.checkOutboundFrameLimits().ok()); + constraints.incrementOutboundFrameCount(true); + EXPECT_FALSE(constraints.checkOutboundFrameLimits().ok()); + EXPECT_TRUE(isBufferFloodError(constraints.status())); + EXPECT_EQ("Too many control frames in the outbound queue.", constraints.status().message()); + EXPECT_EQ(1, stats_store_.counter("http2.outbound_control_flood").value()); +} + +TEST_F(ProtocolConstraintsTest, OutboundFrameFlood) { + options_.mutable_max_outbound_frames()->set_value(5); + options_.mutable_max_outbound_control_frames()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + constraints.incrementOutboundFrameCount(false); + constraints.incrementOutboundFrameCount(false); + constraints.incrementOutboundFrameCount(false); + EXPECT_TRUE(constraints.checkOutboundFrameLimits().ok()); + constraints.incrementOutboundFrameCount(false); + constraints.incrementOutboundFrameCount(false); + constraints.incrementOutboundFrameCount(false); + EXPECT_FALSE(constraints.checkOutboundFrameLimits().ok()); + EXPECT_TRUE(isBufferFloodError(constraints.status())); + EXPECT_EQ("Too many frames in the outbound queue.", constraints.status().message()); + EXPECT_EQ(1, 
stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that the `status()` method reflects the first violation and is not modified by subsequent +// violations of outbound flood limits +TEST_F(ProtocolConstraintsTest, OutboundFrameFloodStatusIsIdempotent) { + options_.mutable_max_outbound_frames()->set_value(5); + options_.mutable_max_outbound_control_frames()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + // First trigger control frame flood + constraints.incrementOutboundFrameCount(true); + constraints.incrementOutboundFrameCount(true); + constraints.incrementOutboundFrameCount(true); + EXPECT_TRUE(isBufferFloodError(constraints.checkOutboundFrameLimits())); + EXPECT_EQ("Too many control frames in the outbound queue.", constraints.status().message()); + // Then trigger flood check for all frame types + constraints.incrementOutboundFrameCount(false); + constraints.incrementOutboundFrameCount(false); + constraints.incrementOutboundFrameCount(false); + EXPECT_FALSE(constraints.checkOutboundFrameLimits().ok()); + EXPECT_TRUE(isBufferFloodError(constraints.status())); + // The status should still reflect the first violation + EXPECT_EQ("Too many control frames in the outbound queue.", constraints.status().message()); + EXPECT_EQ(1, stats_store_.counter("http2.outbound_control_flood").value()); + EXPECT_EQ(0, stats_store_.counter("http2.outbound_flood").value()); +} + +TEST_F(ProtocolConstraintsTest, InboundZeroLenData) { + options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + nghttp2_frame_hd frame; + frame.type = NGHTTP2_DATA; + frame.length = 0; + frame.flags = 0; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 0))); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.status())); + EXPECT_EQ(1, stats_store_.counter("http2.inbound_empty_frames_flood").value()); +} + +// Verify that the `status()` method reflects the first violation and is not modified by subsequent +// violations of outbound or inbound flood limits +TEST_F(ProtocolConstraintsTest, OutboundAndInboundFrameFloodStatusIsIdempotent) { + options_.mutable_max_outbound_frames()->set_value(5); + options_.mutable_max_outbound_control_frames()->set_value(2); + options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + // First trigger inbound frame flood + nghttp2_frame_hd frame; + frame.type = NGHTTP2_DATA; + frame.length = 0; + frame.flags = 0; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 0))); + + // Then trigger outbound control flood + constraints.incrementOutboundFrameCount(true); + constraints.incrementOutboundFrameCount(true); + constraints.incrementOutboundFrameCount(true); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.checkOutboundFrameLimits())); + EXPECT_EQ(1, stats_store_.counter("http2.inbound_empty_frames_flood").value()); + EXPECT_EQ(0, stats_store_.counter("http2.outbound_control_flood").value()); +} + +TEST_F(ProtocolConstraintsTest, InboundZeroLenDataWithPadding) { + 
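+ // A DATA frame whose advertised length is entirely padding (the second argument to
+ // trackInboundFrames() below is assumed to be the padding length) carries no payload, so it
+ // still counts toward the consecutive-empty-frame limit.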
options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + nghttp2_frame_hd frame; + frame.type = NGHTTP2_DATA; + frame.length = 8; + frame.flags = 0; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 8).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 8).ok()); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 8))); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.status())); + EXPECT_EQ(1, stats_store_.counter("http2.inbound_empty_frames_flood").value()); +} + +TEST_F(ProtocolConstraintsTest, InboundZeroLenDataEndStreamResetCounter) { + options_.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + nghttp2_frame_hd frame; + frame.type = NGHTTP2_DATA; + frame.length = 0; + frame.flags = 0; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + frame.flags = NGHTTP2_FLAG_END_STREAM; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + frame.flags = 0; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.trackInboundFrames(&frame, 0))); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(constraints.status())); + EXPECT_EQ(1, stats_store_.counter("http2.inbound_empty_frames_flood").value()); +} + +TEST_F(ProtocolConstraintsTest, Priority) { + options_.mutable_max_inbound_priority_frames_per_stream()->set_value(2); + ProtocolConstraints constraints(http2CodecStats(), options_); + // Create one stream + nghttp2_frame_hd frame; + frame.type = NGHTTP2_HEADERS; + frame.length = 1; + frame.flags = NGHTTP2_FLAG_END_HEADERS; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + + frame.type = NGHTTP2_PRIORITY; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(isBufferFloodError(constraints.trackInboundFrames(&frame, 0))); + EXPECT_TRUE(isBufferFloodError(constraints.status())); + EXPECT_EQ("Too many PRIORITY frames", constraints.status().message()); + EXPECT_EQ(1, stats_store_.counter("http2.inbound_priority_frames_flood").value()); +} + +TEST_F(ProtocolConstraintsTest, WindowUpdate) { + options_.mutable_max_inbound_window_update_frames_per_data_frame_sent()->set_value(1); + ProtocolConstraints constraints(http2CodecStats(), options_); + // Create one stream + nghttp2_frame_hd frame; + frame.type = NGHTTP2_HEADERS; + frame.length = 1; + frame.flags = NGHTTP2_FLAG_END_HEADERS; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + // Send 2 DATA frames + constraints.incrementOutboundDataFrameCount(); + constraints.incrementOutboundDataFrameCount(); + + frame.type = NGHTTP2_WINDOW_UPDATE; + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 0).ok()); + EXPECT_TRUE(constraints.trackInboundFrames(&frame, 
0).ok()); + EXPECT_TRUE(isBufferFloodError(constraints.trackInboundFrames(&frame, 0))); + EXPECT_TRUE(isBufferFloodError(constraints.status())); + EXPECT_EQ("Too many WINDOW_UPDATE frames", constraints.status().message()); + EXPECT_EQ(1, stats_store_.counter("http2.inbound_window_update_frames_flood").value()); +} + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/path_utility_corpus/clusterfuzz-testcase-minimized-path_utility_fuzz_test-5770162224234496 b/test/common/http/path_utility_corpus/clusterfuzz-testcase-minimized-path_utility_fuzz_test-5770162224234496 new file mode 100644 index 000000000000..2e0a0c0132fc --- /dev/null +++ b/test/common/http/path_utility_corpus/clusterfuzz-testcase-minimized-path_utility_fuzz_test-5770162224234496 @@ -0,0 +1,7 @@ +merge_slashes { + request_headers { + headers { + key: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + } +} diff --git a/test/common/http/path_utility_fuzz.proto b/test/common/http/path_utility_fuzz.proto index d64a274a6c56..db3eee46f14d 100644 --- a/test/common/http/path_utility_fuzz.proto +++ b/test/common/http/path_utility_fuzz.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package test.common.http; +import "validate/validate.proto"; import "test/fuzz/common.proto"; message CanonicalPath { diff --git a/test/common/http/path_utility_fuzz_test.cc b/test/common/http/path_utility_fuzz_test.cc index ebe94d2f854e..3ae691e7ecf7 100644 --- a/test/common/http/path_utility_fuzz_test.cc +++ b/test/common/http/path_utility_fuzz_test.cc @@ -1,6 +1,6 @@ #include "common/http/path_utility.h" -#include "test/common/http/path_utility_fuzz.pb.h" +#include "test/common/http/path_utility_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" @@ -8,6 +8,13 @@ namespace Envoy { namespace Fuzz { namespace { DEFINE_PROTO_FUZZER(const test::common::http::PathUtilityTestCase& input) { + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + switch (input.path_utility_selector_case()) { case test::common::http::PathUtilityTestCase::kCanonicalPath: { auto request_headers = fromHeaders( diff --git a/test/common/http/request_id_extension_uuid_impl_test.cc b/test/common/http/request_id_extension_uuid_impl_test.cc index 0b471c3a88cf..a975a8a24d69 100644 --- a/test/common/http/request_id_extension_uuid_impl_test.cc +++ b/test/common/http/request_id_extension_uuid_impl_test.cc @@ -48,7 +48,7 @@ TEST(UUIDRequestIDExtensionTest, PreserveRequestIDInResponse) { TestResponseHeaderMapImpl response_headers; uuid_utils.setInResponse(response_headers, request_headers); - EXPECT_EQ(nullptr, response_headers.get(Headers::get().RequestId)); + EXPECT_TRUE(response_headers.get(Headers::get().RequestId).empty()); request_headers.setRequestId("some-request-id"); uuid_utils.setInResponse(response_headers, request_headers); diff --git a/test/common/http/status_test.cc b/test/common/http/status_test.cc index 327bba34a5a5..152cc1925fa6 100644 --- a/test/common/http/status_test.cc +++ b/test/common/http/status_test.cc @@ -16,6 +16,7 @@ TEST(Status, Ok) { EXPECT_FALSE(isBufferFloodError(status)); EXPECT_FALSE(isPrematureResponseError(status)); EXPECT_FALSE(isCodecClientError(status)); + 
EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status)); } TEST(Status, CodecProtocolError) { @@ -28,6 +29,7 @@ TEST(Status, CodecProtocolError) { EXPECT_FALSE(isBufferFloodError(status)); EXPECT_FALSE(isPrematureResponseError(status)); EXPECT_FALSE(isCodecClientError(status)); + EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status)); } TEST(Status, BufferFloodError) { @@ -40,6 +42,7 @@ TEST(Status, BufferFloodError) { EXPECT_TRUE(isBufferFloodError(status)); EXPECT_FALSE(isPrematureResponseError(status)); EXPECT_FALSE(isCodecClientError(status)); + EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status)); } TEST(Status, PrematureResponseError) { @@ -53,6 +56,7 @@ TEST(Status, PrematureResponseError) { EXPECT_TRUE(isPrematureResponseError(status)); EXPECT_EQ(Http::Code::ProxyAuthenticationRequired, getPrematureResponseHttpCode(status)); EXPECT_FALSE(isCodecClientError(status)); + EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status)); } TEST(Status, CodecClientError) { @@ -65,6 +69,21 @@ TEST(Status, CodecClientError) { EXPECT_FALSE(isBufferFloodError(status)); EXPECT_FALSE(isPrematureResponseError(status)); EXPECT_TRUE(isCodecClientError(status)); + EXPECT_FALSE(isInboundFramesWithEmptyPayloadError(status)); +} + +TEST(Status, InboundFramesWithEmptyPayload) { + auto status = inboundFramesWithEmptyPayloadError(); + EXPECT_FALSE(status.ok()); + EXPECT_EQ("Too many consecutive frames with an empty payload", status.message()); + EXPECT_EQ("InboundFramesWithEmptyPayloadError: Too many consecutive frames with an empty payload", + toString(status)); + EXPECT_EQ(StatusCode::InboundFramesWithEmptyPayload, getStatusCode(status)); + EXPECT_FALSE(isCodecProtocolError(status)); + EXPECT_FALSE(isBufferFloodError(status)); + EXPECT_FALSE(isPrematureResponseError(status)); + EXPECT_FALSE(isCodecClientError(status)); + EXPECT_TRUE(isInboundFramesWithEmptyPayloadError(status)); } TEST(Status, ReturnIfError) { diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index ec31fee046a0..447ed9a54d84 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -9,7 +9,6 @@ #include "common/common/fmt.h" #include "common/http/exception.h" #include "common/http/header_map_impl.h" -#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/network/address_impl.h" @@ -609,6 +608,7 @@ TEST(HttpUtility, SendLocalReply) { EXPECT_CALL(callbacks, encodeHeaders_(_, false)); EXPECT_CALL(callbacks, encodeData(_, true)); + EXPECT_CALL(callbacks, streamInfo()); Utility::sendLocalReply( is_reset, callbacks, Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, "large", absl::nullopt, false}); @@ -618,6 +618,7 @@ TEST(HttpUtility, SendLocalGrpcReply) { MockStreamDecoderFilterCallbacks callbacks; bool is_reset = false; + EXPECT_CALL(callbacks, streamInfo()); EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(headers.getStatusValue(), "200"); @@ -645,6 +646,7 @@ TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { } )EOF"; + EXPECT_CALL(callbacks, streamInfo()); EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(headers.getStatusValue(), "200"); @@ -663,6 +665,7 @@ TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { TEST(HttpUtility, RateLimitedGrpcStatus) { MockStreamDecoderFilterCallbacks callbacks; + EXPECT_CALL(callbacks, 
streamInfo()).Times(testing::AnyNumber()); EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); @@ -691,6 +694,7 @@ TEST(HttpUtility, SendLocalReplyDestroyedEarly) { MockStreamDecoderFilterCallbacks callbacks; bool is_reset = false; + EXPECT_CALL(callbacks, streamInfo()); EXPECT_CALL(callbacks, encodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() -> void { is_reset = true; })); @@ -703,6 +707,7 @@ TEST(HttpUtility, SendLocalReplyDestroyedEarly) { TEST(HttpUtility, SendLocalReplyHeadRequest) { MockStreamDecoderFilterCallbacks callbacks; bool is_reset = false; + EXPECT_CALL(callbacks, streamInfo()); EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(headers.getContentLengthValue(), fmt::format("{}", strlen("large"))); @@ -749,6 +754,13 @@ TEST(HttpUtility, TestExtractHostPathFromUri) { EXPECT_EQ(path, "/:/adsf"); } +TEST(HttpUtility, LocalPathFromFilePath) { + EXPECT_EQ("/", Utility::localPathFromFilePath("")); + EXPECT_EQ("c:/", Utility::localPathFromFilePath("c:/")); + EXPECT_EQ("Z:/foo/bar", Utility::localPathFromFilePath("Z:/foo/bar")); + EXPECT_EQ("/foo/bar", Utility::localPathFromFilePath("foo/bar")); +} + TEST(HttpUtility, TestPrepareHeaders) { envoy::config::core::v3::HttpUri http_uri; http_uri.set_uri("scheme://dns.name/x/y/z"); @@ -778,6 +790,8 @@ TEST(HttpUtility, ResetReasonToString) { EXPECT_EQ("remote reset", Utility::resetReasonToString(Http::StreamResetReason::RemoteReset)); EXPECT_EQ("remote refused stream reset", Utility::resetReasonToString(Http::StreamResetReason::RemoteRefusedStreamReset)); + EXPECT_EQ("remote error with CONNECT request", + Utility::resetReasonToString(Http::StreamResetReason::ConnectError)); } // Verify that it resolveMostSpecificPerFilterConfigGeneric works with nil routes. @@ -808,8 +822,8 @@ TEST(HttpUtility, ResolveMostSpecificPerFilterConfig) { EXPECT_EQ(&testConfig, resolved_filter_config); } -// Verify that resolveMostSpecificPerFilterConfigGeneric indeed returns the most specific per filter -// config. +// Verify that resolveMostSpecificPerFilterConfigGeneric indeed returns the most specific per +// filter config. TEST(HttpUtility, ResolveMostSpecificPerFilterConfigGeneric) { const std::string filter_name = "envoy.filter"; NiceMock filter_callbacks; @@ -1233,110 +1247,87 @@ TEST(HttpUtility, TestRejectTeHeaderTooLong) { TEST(Url, ParsingFails) { Utility::Url url; - const bool is_connect = true; - EXPECT_FALSE(url.initialize("", !is_connect)); - EXPECT_FALSE(url.initialize("foo", !is_connect)); - EXPECT_FALSE(url.initialize("http://", !is_connect)); - EXPECT_FALSE(url.initialize("random_scheme://host.com/path", !is_connect)); - // Only port value in valid range (1-65535) is allowed. - EXPECT_FALSE(url.initialize("http://host.com:65536/path", !is_connect)); - EXPECT_FALSE(url.initialize("http://host.com:0/path", !is_connect)); - EXPECT_FALSE(url.initialize("http://host.com:-1/path", !is_connect)); - EXPECT_FALSE(url.initialize("http://host.com:port/path", !is_connect)); - - // Test parsing fails for CONNECT request URLs. - EXPECT_FALSE(url.initialize("http://www.foo.com", is_connect)); - EXPECT_FALSE(url.initialize("foo.com", is_connect)); - // Only port value in valid range (1-65535) is allowed. 
- EXPECT_FALSE(url.initialize("foo.com:65536", is_connect)); - EXPECT_FALSE(url.initialize("foo.com:0", is_connect)); - EXPECT_FALSE(url.initialize("foo.com:-1", is_connect)); - EXPECT_FALSE(url.initialize("foo.com:port", is_connect)); + EXPECT_FALSE(url.initialize("", false)); + EXPECT_FALSE(url.initialize("foo", false)); + EXPECT_FALSE(url.initialize("http://", false)); + EXPECT_FALSE(url.initialize("random_scheme://host.com/path", false)); + EXPECT_FALSE(url.initialize("http://www.foo.com", true)); + EXPECT_FALSE(url.initialize("foo.com", true)); } void validateUrl(absl::string_view raw_url, absl::string_view expected_scheme, - absl::string_view expected_host_port, absl::string_view expected_path, - uint16_t expected_port) { + absl::string_view expected_host_port, absl::string_view expected_path) { Utility::Url url; - ASSERT_TRUE(url.initialize(raw_url, /*is_connect=*/false)) << "Failed to initialize " << raw_url; + ASSERT_TRUE(url.initialize(raw_url, false)) << "Failed to initialize " << raw_url; EXPECT_EQ(url.scheme(), expected_scheme); EXPECT_EQ(url.hostAndPort(), expected_host_port); EXPECT_EQ(url.pathAndQueryParams(), expected_path); - EXPECT_EQ(url.port(), expected_port); +} + +void validateConnectUrl(absl::string_view raw_url, absl::string_view expected_host_port) { + Utility::Url url; + ASSERT_TRUE(url.initialize(raw_url, true)) << "Failed to initialize " << raw_url; + EXPECT_TRUE(url.scheme().empty()); + EXPECT_TRUE(url.pathAndQueryParams().empty()); + EXPECT_EQ(url.hostAndPort(), expected_host_port); } TEST(Url, ParsingTest) { - // Test url with no explicit path (with and without port). - validateUrl("http://www.host.com", "http", "www.host.com", "/", 80); - validateUrl("http://www.host.com:80", "http", "www.host.com", "/", 80); + // Test url with no explicit path (with and without port) + validateUrl("http://www.host.com", "http", "www.host.com", "/"); + validateUrl("http://www.host.com:80", "http", "www.host.com:80", "/"); // Test url with "/" path. - validateUrl("http://www.host.com:80/", "http", "www.host.com", "/", 80); - validateUrl("http://www.host.com/", "http", "www.host.com", "/", 80); + validateUrl("http://www.host.com:80/", "http", "www.host.com:80", "/"); + validateUrl("http://www.host.com/", "http", "www.host.com", "/"); // Test url with "?". - validateUrl("http://www.host.com:80/?", "http", "www.host.com", "/?", 80); - validateUrl("http://www.host.com/?", "http", "www.host.com", "/?", 80); + validateUrl("http://www.host.com:80/?", "http", "www.host.com:80", "/?"); + validateUrl("http://www.host.com/?", "http", "www.host.com", "/?"); // Test url with "?" but without slash. - validateUrl("http://www.host.com:80?", "http", "www.host.com", "/?", 80); - validateUrl("http://www.host.com?", "http", "www.host.com", "/?", 80); + validateUrl("http://www.host.com:80?", "http", "www.host.com:80", "?"); + validateUrl("http://www.host.com?", "http", "www.host.com", "?"); - // Test url with multi-character path. - validateUrl("http://www.host.com:80/path", "http", "www.host.com", "/path", 80); - validateUrl("http://www.host.com/path", "http", "www.host.com", "/path", 80); + // Test url with multi-character path + validateUrl("http://www.host.com:80/path", "http", "www.host.com:80", "/path"); + validateUrl("http://www.host.com/path", "http", "www.host.com", "/path"); - // Test url with multi-character path and ? at the end. 
- validateUrl("http://www.host.com:80/path?", "http", "www.host.com", "/path?", 80); - validateUrl("http://www.host.com/path?", "http", "www.host.com", "/path?", 80); + // Test url with multi-character path and ? at the end + validateUrl("http://www.host.com:80/path?", "http", "www.host.com:80", "/path?"); + validateUrl("http://www.host.com/path?", "http", "www.host.com", "/path?"); - // Test https scheme. - validateUrl("https://www.host.com", "https", "www.host.com", "/", 443); + // Test https scheme + validateUrl("https://www.host.com", "https", "www.host.com", "/"); - // Test url with query parameter. - validateUrl("http://www.host.com:80/?query=param", "http", "www.host.com", "/?query=param", 80); - validateUrl("http://www.host.com/?query=param", "http", "www.host.com", "/?query=param", 80); + // Test url with query parameter + validateUrl("http://www.host.com:80/?query=param", "http", "www.host.com:80", "/?query=param"); + validateUrl("http://www.host.com/?query=param", "http", "www.host.com", "/?query=param"); - // Test url with query parameter but without slash. It will be normalized. - validateUrl("http://www.host.com:80?query=param", "http", "www.host.com", "/?query=param", 80); - validateUrl("http://www.host.com?query=param", "http", "www.host.com", "/?query=param", 80); + // Test url with query parameter but without slash + validateUrl("http://www.host.com:80?query=param", "http", "www.host.com:80", "?query=param"); + validateUrl("http://www.host.com?query=param", "http", "www.host.com", "?query=param"); - // Test url with multi-character path and query parameter. - validateUrl("http://www.host.com:80/path?query=param", "http", "www.host.com", - "/path?query=param", 80); - validateUrl("http://www.host.com/path?query=param", "http", "www.host.com", "/path?query=param", - 80); + // Test url with multi-character path and query parameter + validateUrl("http://www.host.com:80/path?query=param", "http", "www.host.com:80", + "/path?query=param"); + validateUrl("http://www.host.com/path?query=param", "http", "www.host.com", "/path?query=param"); - // Test url with multi-character path and more than one query parameter. - validateUrl("http://www.host.com:80/path?query=param&query2=param2", "http", "www.host.com", - "/path?query=param&query2=param2", 80); + // Test url with multi-character path and more than one query parameter + validateUrl("http://www.host.com:80/path?query=param&query2=param2", "http", "www.host.com:80", + "/path?query=param&query2=param2"); validateUrl("http://www.host.com/path?query=param&query2=param2", "http", "www.host.com", - "/path?query=param&query2=param2", 80); - + "/path?query=param&query2=param2"); // Test url with multi-character path, more than one query parameter and fragment validateUrl("http://www.host.com:80/path?query=param&query2=param2#fragment", "http", - "www.host.com", "/path?query=param&query2=param2#fragment", 80); + "www.host.com:80", "/path?query=param&query2=param2#fragment"); validateUrl("http://www.host.com/path?query=param&query2=param2#fragment", "http", "www.host.com", - "/path?query=param&query2=param2#fragment", 80); - - // Test url with non-default ports. 
- validateUrl("https://www.host.com:8443", "https", "www.host.com:8443", "/", 8443); - validateUrl("http://www.host.com:8080", "http", "www.host.com:8080", "/", 8080); -} - -void validateConnectUrl(absl::string_view raw_url, absl::string_view expected_host_port, - uint16_t expected_port) { - Utility::Url url; - ASSERT_TRUE(url.initialize(raw_url, /*is_connect=*/true)) << "Failed to initialize " << raw_url; - EXPECT_TRUE(url.scheme().empty()); - EXPECT_TRUE(url.pathAndQueryParams().empty()); - EXPECT_EQ(url.hostAndPort(), expected_host_port); - EXPECT_EQ(url.port(), expected_port); + "/path?query=param&query2=param2#fragment"); } TEST(Url, ParsingForConnectTest) { - validateConnectUrl("host.com:443", "host.com:443", 443); - validateConnectUrl("host.com:80", "host.com:80", 80); + validateConnectUrl("host.com:443", "host.com:443"); + validateConnectUrl("host.com:80", "host.com:80"); } void validatePercentEncodingEncodeDecode(absl::string_view source, diff --git a/test/common/local_reply/local_reply_test.cc b/test/common/local_reply/local_reply_test.cc index 892c8f2364bc..e635bfea2d43 100644 --- a/test/common/local_reply/local_reply_test.cc +++ b/test/common/local_reply/local_reply_test.cc @@ -329,11 +329,90 @@ TEST_F(LocalReplyTest, TestHeaderAddition) { EXPECT_EQ(response_headers_.get_("foo-1"), "bar1"); EXPECT_EQ(response_headers_.get_("foo-2"), "override-bar2"); - std::vector out; - Http::HeaderUtility::getAllOfHeader(response_headers_, "foo-3", out); + const auto out = response_headers_.get(Http::LowerCaseString("foo-3")); ASSERT_EQ(out.size(), 2); - ASSERT_EQ(out[0], "bar3"); - ASSERT_EQ(out[1], "append-bar3"); + ASSERT_EQ(out[0]->value().getStringView(), "bar3"); + ASSERT_EQ(out[1]->value().getStringView(), "append-bar3"); +} + +TEST_F(LocalReplyTest, TestMapperWithContentType) { + // Match with response_code, and rewrite the code and body. + const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body: + inline_string: "401 body text" + body_format_override: + text_format: "
%LOCAL_REPLY_BODY%
" + content_type: "text/html; charset=UTF-8" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 410 + runtime_key: key_b + status_code: 411 + body: + inline_string: "411 body text" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 420 + runtime_key: key_b + status_code: 421 + body: + inline_string: "421 body text" + body_format_override: + text_format: "%LOCAL_REPLY_BODY%" + body_format: + text_format: "
%LOCAL_REPLY_BODY%
%RESPONSE_CODE% default formatter" + content_type: "text/html; charset=UTF-8" +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + // code=400 matches the first filter; rewrite code and body + // has its own formatter. + // content-type is explicitly set to text/html; charset=UTF-8. + resetData(400); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast<Http::Code>(401)); + EXPECT_EQ(stream_info_.response_code_, 401U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "401"); + EXPECT_EQ(body_, "
401 body text
"); + EXPECT_EQ(content_type_, "text/html; charset=UTF-8"); + + // code=410 matches the second filter; rewrite code and body + // but using default formatter. + // content-type is explicitly set to text/html; charset=UTF-8. + resetData(410); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast<Http::Code>(411)); + EXPECT_EQ(stream_info_.response_code_, 411U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "411"); + EXPECT_EQ(body_, "
411 body text
411 default formatter"); + EXPECT_EQ(content_type_, "text/html; charset=UTF-8"); + + // code=420 matches the third filter; rewrite code and body + // has its own formatter. + // default content-type is set based on reply format type. + resetData(420); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(421)); + EXPECT_EQ(stream_info_.response_code_, 421U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "421"); + EXPECT_EQ(body_, "421 body text"); + EXPECT_EQ(content_type_, "text/plain"); } } // namespace LocalReply diff --git a/test/common/memory/heap_shrinker_test.cc b/test/common/memory/heap_shrinker_test.cc index e23336191dc9..346a74899ff9 100644 --- a/test/common/memory/heap_shrinker_test.cc +++ b/test/common/memory/heap_shrinker_test.cc @@ -25,8 +25,8 @@ class HeapShrinkerTest : public testing::Test { dispatcher_("test_thread", *api_, time_system_) {} void step() { - time_system_.advanceTimeAsync(std::chrono::milliseconds(10000)); - dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10000), dispatcher_, + Event::Dispatcher::RunType::NonBlock); } Envoy::Stats::TestUtil::TestStore stats_; @@ -67,7 +67,7 @@ TEST_F(HeapShrinkerTest, ShrinkWhenTriggered) { const uint64_t physical_mem_after_shrink = Stats::totalCurrentlyReserved() - Stats::totalPageHeapUnmapped(); -#ifdef TCMALLOC +#if defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC) EXPECT_GE(physical_mem_before_shrink, physical_mem_after_shrink); #else EXPECT_EQ(physical_mem_before_shrink, physical_mem_after_shrink); diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 31b558b10c50..ef62c131dba5 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -83,6 +83,7 @@ envoy_cc_test( "//source/common/network:listen_socket_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", + "//test/mocks/api:api_mocks", "//test/mocks/buffer:buffer_mocks", "//test/mocks/event:event_mocks", "//test/mocks/network:network_mocks", @@ -91,13 +92,51 @@ envoy_cc_test( "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:test_time_lib", + "//test/test_common:threadsafe_singleton_injector_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) +envoy_cc_test( + name = "apple_dns_impl_test", + srcs = select({ + "//bazel:apple": ["apple_dns_impl_test.cc"], + "//conditions:default": [], + }), + deps = [ + "//include/envoy/event:dispatcher_interface", + "//include/envoy/network:address_interface", + "//include/envoy/network:dns_interface", + "//source/common/buffer:buffer_lib", + "//source/common/event:dispatcher_includes", + "//source/common/event:dispatcher_lib", + "//source/common/network:address_lib", + "//source/common/network:filter_lib", + "//source/common/network:listen_socket_lib", + "//source/common/stats:stats_lib", + "//source/common/stream_info:stream_info_lib", + "//test/mocks/network:network_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/thread_local:thread_local_mocks", + ] + select({ + "//bazel:apple": ["//source/common/network:dns_lib"], + "//conditions:default": [], + }), +) + envoy_cc_test( name = 
"dns_impl_test", srcs = ["dns_impl_test.cc"], + args = [ + # Used in createDnsResolver to force creation of DnsResolverImpl when running test on macOS. + "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/network:address_interface", @@ -166,6 +205,7 @@ envoy_cc_test( "//source/common/network:address_lib", "//source/common/network:listen_socket_lib", "//source/common/network:utility_lib", + "//test/mocks/network:io_handle_mocks", "//test/mocks/network:network_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", @@ -377,6 +417,8 @@ envoy_cc_test( deps = [ "//source/common/common:utility_lib", "//source/common/network:address_lib", + "//test/mocks/api:api_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", ], ) diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index fa9436142c14..d1a01ca6734b 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -154,6 +154,8 @@ TEST(Ipv4InstanceTest, SocketAddress) { EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress("1.2.3.4"), address)); EXPECT_EQ(nullptr, address.ip()->ipv6()); EXPECT_TRUE(address.ip()->isUnicastAddress()); + EXPECT_EQ(nullptr, address.pipe()); + EXPECT_EQ(nullptr, address.envoyInternalAddress()); } TEST(Ipv4InstanceTest, AddressOnly) { @@ -241,6 +243,8 @@ TEST(Ipv6InstanceTest, SocketAddress) { EXPECT_TRUE(addressesEqual(Network::Utility::parseInternetAddress("1:0023::0Ef"), address)); EXPECT_EQ(nullptr, address.ip()->ipv4()); EXPECT_TRUE(address.ip()->isUnicastAddress()); + EXPECT_EQ(nullptr, address.pipe()); + EXPECT_EQ(nullptr, address.envoyInternalAddress()); } TEST(Ipv6InstanceTest, AddressOnly) { @@ -317,8 +321,22 @@ TEST(PipeInstanceTest, Basic) { EXPECT_EQ("/foo", address.asString()); EXPECT_EQ(Type::Pipe, address.type()); EXPECT_EQ(nullptr, address.ip()); + EXPECT_EQ(nullptr, address.envoyInternalAddress()); } +TEST(InteralInstanceTest, Basic) { + EnvoyInternalInstance address("listener_foo"); + EXPECT_EQ("envoy://listener_foo", address.asString()); + EXPECT_EQ(Type::EnvoyInternal, address.type()); + EXPECT_EQ(nullptr, address.ip()); + EXPECT_EQ(nullptr, address.pipe()); + EXPECT_NE(nullptr, address.envoyInternalAddress()); + EXPECT_EQ(nullptr, address.sockAddr()); + EXPECT_EQ(static_cast(0), address.sockAddrLen()); +} + +// Excluding Windows; chmod(2) against Windows AF_UNIX socket files succeeds, +// but stat(2) against those returns ENOENT. #ifndef WIN32 TEST(PipeInstanceTest, BasicPermission) { std::string path = TestEnvironment::unixDomainSocketPath("foo.sock"); @@ -509,7 +527,7 @@ TEST(AddressFromSockAddrDeathTest, Pipe) { // Test comparisons between all the different (known) test classes. 
struct TestCase { - enum InstanceType { Ipv4, Ipv6, Pipe }; + enum InstanceType { Ipv4, Ipv6, Pipe, Internal }; TestCase() = default; TestCase(enum InstanceType type, const std::string& address, uint32_t port) @@ -543,6 +561,9 @@ class MixedAddressTest : public testing::TestWithParam<::testing::tuple(test_case.address_); break; + case TestCase::Internal: + return std::make_shared(test_case.address_); + break; } return nullptr; } @@ -561,10 +582,11 @@ TEST_P(MixedAddressTest, Equality) { } struct TestCase test_cases[] = { - {TestCase::Ipv4, "1.2.3.4", 1}, {TestCase::Ipv4, "1.2.3.4", 2}, - {TestCase::Ipv4, "1.2.3.5", 1}, {TestCase::Ipv6, "01:023::00ef", 1}, - {TestCase::Ipv6, "01:023::00ef", 2}, {TestCase::Ipv6, "01:023::00ed", 1}, - {TestCase::Pipe, "/path/to/pipe/1", 0}, {TestCase::Pipe, "/path/to/pipe/2", 0}}; + {TestCase::Ipv4, "1.2.3.4", 1}, {TestCase::Ipv4, "1.2.3.4", 2}, + {TestCase::Ipv4, "1.2.3.5", 1}, {TestCase::Ipv6, "01:023::00ef", 1}, + {TestCase::Ipv6, "01:023::00ef", 2}, {TestCase::Ipv6, "01:023::00ed", 1}, + {TestCase::Pipe, "/path/to/pipe/1", 0}, {TestCase::Pipe, "/path/to/pipe/2", 0}, + {TestCase::Internal, "listener_foo", 0}, {TestCase::Internal, "listener_bar", 0}}; INSTANTIATE_TEST_SUITE_P(AddressCrossProduct, MixedAddressTest, ::testing::Combine(::testing::ValuesIn(test_cases), diff --git a/test/common/network/apple_dns_impl_test.cc b/test/common/network/apple_dns_impl_test.cc new file mode 100644 index 000000000000..fb0a20bb546d --- /dev/null +++ b/test/common/network/apple_dns_impl_test.cc @@ -0,0 +1,199 @@ +#include +#include +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/config/core/v3/address.pb.h" +#include "envoy/event/dispatcher.h" +#include "envoy/network/address.h" +#include "envoy/network/dns.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/utility.h" +#include "common/event/dispatcher_impl.h" +#include "common/network/address_impl.h" +#include "common/network/apple_dns_impl.h" +#include "common/network/filter_impl.h" +#include "common/network/listen_socket_impl.h" +#include "common/network/utility.h" +#include "common/runtime/runtime_impl.h" +#include "common/stream_info/stream_info_impl.h" + +#include "test/mocks/common.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "absl/container/fixed_array.h" +#include "absl/container/node_hash_map.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Contains; +using testing::InSequence; +using testing::IsSupersetOf; +using testing::NiceMock; +using testing::Not; +using testing::Return; + +namespace Envoy { +namespace Network { +namespace { + +// Note: this test suite is, unfortunately, not hermetic. Apple's APIs do not allow overriding the +// IP address used for resolution via API calls (only in system settings), and worse +// yet does not allow overriding the port number used _at all_. Therefore, the tests do not use a +// test DNS server like in dns_impl_test, and thus affords less flexibility in testing scenarios: no +// concurrent requests, no expressive error responses, etc. 
Further experiments could be done in +// order to create a test connection that is reachable locally (potentially by binding port 53 -- +// default for DNS). However, @junr03's initial attempts were not successful. +class AppleDnsImplTest : public testing::Test { +public: + AppleDnsImplTest() + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + + void SetUp() override { resolver_ = dispatcher_->createDnsResolver({}, false); } + + ActiveDnsQuery* resolveWithExpectations(const std::string& address, + const DnsLookupFamily lookup_family, + const DnsResolver::ResolutionStatus expected_status, + const bool expected_results) { + return resolver_->resolve( + address, lookup_family, + [=](DnsResolver::ResolutionStatus status, std::list&& results) -> void { + EXPECT_EQ(expected_status, status); + if (expected_results) { + EXPECT_FALSE(results.empty()); + for (const auto& result : results) { + if (lookup_family == DnsLookupFamily::V4Only) { + EXPECT_NE(nullptr, result.address_->ip()->ipv4()); + } else if (lookup_family == DnsLookupFamily::V6Only) { + EXPECT_NE(nullptr, result.address_->ip()->ipv6()); + } + } + } + dispatcher_->exit(); + }); + } + + ActiveDnsQuery* resolveWithUnreferencedParameters(const std::string& address, + const DnsLookupFamily lookup_family, + bool expected_to_execute) { + return resolver_->resolve(address, lookup_family, + [expected_to_execute](DnsResolver::ResolutionStatus status, + std::list&& results) -> void { + if (!expected_to_execute) { + FAIL(); + } + UNREFERENCED_PARAMETER(status); + UNREFERENCED_PARAMETER(results); + }); + } + + template + ActiveDnsQuery* resolveWithException(const std::string& address, + const DnsLookupFamily lookup_family, T exception_object) { + return resolver_->resolve(address, lookup_family, + [exception_object](DnsResolver::ResolutionStatus status, + std::list&& results) -> void { + UNREFERENCED_PARAMETER(status); + UNREFERENCED_PARAMETER(results); + throw exception_object; + }); + } + +protected: + Api::ApiPtr api_; + Event::DispatcherPtr dispatcher_; + DnsResolverSharedPtr resolver_; +}; + +TEST_F(AppleDnsImplTest, InvalidConfigOptions) { + EXPECT_DEATH( + dispatcher_->createDnsResolver({}, true), + "using TCP for DNS lookups is not possible when using Apple APIs for DNS resolution"); + EXPECT_DEATH( + dispatcher_->createDnsResolver({nullptr}, false), + "defining custom resolvers is not possible when using Apple APIs for DNS resolution"); +} + +// Validate that when AppleDnsResolverImpl is destructed with outstanding requests, +// that we don't invoke any callbacks if the query was cancelled. This is a regression test from +// development, where segfaults were encountered due to callback invocations on +// destruction. 
+TEST_F(AppleDnsImplTest, DestructPending) { + ActiveDnsQuery* query = resolveWithUnreferencedParameters("", DnsLookupFamily::V4Only, 0); + ASSERT_NE(nullptr, query); + query->cancel(); +} + +TEST_F(AppleDnsImplTest, LocalLookup) { + EXPECT_NE(nullptr, resolveWithExpectations("localhost", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, true)); +} + +TEST_F(AppleDnsImplTest, DnsIpAddressVersion) { + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, true)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V4Only, + DnsResolver::ResolutionStatus::Success, true)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V6Only, + DnsResolver::ResolutionStatus::Success, true)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_F(AppleDnsImplTest, CallbackException) { + EXPECT_NE(nullptr, resolveWithException("1.2.3.4", DnsLookupFamily::V4Only, + EnvoyException("Envoy exception"))); + EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException, + "Envoy exception"); +} + +TEST_F(AppleDnsImplTest, CallbackException2) { + EXPECT_NE(nullptr, resolveWithException("1.2.3.4", DnsLookupFamily::V4Only, + std::runtime_error("runtime error"))); + EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException, + "runtime error"); +} + +TEST_F(AppleDnsImplTest, CallbackException3) { + EXPECT_NE(nullptr, + resolveWithException("1.2.3.4", DnsLookupFamily::V4Only, std::string())); + EXPECT_THROW_WITH_MESSAGE(dispatcher_->run(Event::Dispatcher::RunType::Block), EnvoyException, + "unknown"); +} + +// Validate working of cancellation provided by ActiveDnsQuery return. 
+TEST_F(AppleDnsImplTest, Cancel) { + ActiveDnsQuery* query = + resolveWithUnreferencedParameters("some.domain", DnsLookupFamily::Auto, false); + + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, true)); + + ASSERT_NE(nullptr, query); + query->cancel(); + + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_F(AppleDnsImplTest, Timeout) { + EXPECT_NE(nullptr, resolveWithExpectations("some.domain", DnsLookupFamily::V6Only, + DnsResolver::ResolutionStatus::Failure, false)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +} // namespace +} // namespace Network +} // namespace Envoy diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index d62524e262d4..086551ade592 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -5,6 +5,7 @@ #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "common/api/os_sys_calls_impl.h" #include "common/buffer/buffer_impl.h" #include "common/common/empty_string.h" #include "common/common/fmt.h" @@ -16,6 +17,7 @@ #include "common/network/utility.h" #include "common/runtime/runtime_impl.h" +#include "test/mocks/api/mocks.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/network/mocks.h" @@ -24,6 +26,7 @@ #include "test/test_common/network_utility.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -129,9 +132,30 @@ class ConnectionImplTest : public testing::TestWithParam { Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE); +#if defined(__clang__) && defined(__has_feature) && __has_feature(address_sanitizer) + // There is a bug in clang with AddressSanitizer on the CI such that the code below reports: + // + // runtime error: constructor call on address 0x6190000b4a80 with insufficient space for + // an object of type 'Envoy::Network::(anonymous namespace)::TestClientConnectionImpl' + // 0x6190000b4a80: note: pointer points here + // 05 01 80 39 be be be be be be be be be be be be be be be be be be be be be be be be + // be be be be + // + // However, the workaround below trips gcc on the CI, which reports: + // + // size check failed 2304 1280 38 + // CorrectSize(p, size, tcmalloc::DefaultAlignPolicy()) + // + // so we only use it for clang with AddressSanitizer builds. 
+ auto x = malloc(sizeof(TestClientConnectionImpl) + 1024); + new (x) TestClientConnectionImpl(*dispatcher_, socket_->localAddress(), source_address_, + Network::Test::createRawBufferSocket(), socket_options_); + client_connection_.reset(reinterpret_cast(x)); +#else client_connection_ = std::make_unique( *dispatcher_, socket_->localAddress(), source_address_, Network::Test::createRawBufferSocket(), socket_options_); +#endif client_connection_->addConnectionCallbacks(client_callbacks_); EXPECT_EQ(nullptr, client_connection_->ssl()); const Network::ClientConnection& const_connection = *client_connection_; @@ -169,14 +193,14 @@ class ConnectionImplTest : public testing::TestWithParam { if (client_write_buffer_) { EXPECT_CALL(*client_write_buffer_, drain(_)) .Times(AnyNumber()) - .WillOnce(Invoke([&](uint64_t size) -> void { client_write_buffer_->baseDrain(size); })); + .WillRepeatedly( + Invoke([&](uint64_t size) -> void { client_write_buffer_->baseDrain(size); })); } EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); client_connection_->close(ConnectionCloseType::NoFlush); if (wait_for_remote_close) { EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); - dispatcher_->run(Event::Dispatcher::RunType::Block); } else { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -454,6 +478,20 @@ struct NiceMockConnectionStats { NiceMock delayed_close_timeouts_; }; +TEST_P(ConnectionImplTest, ConnectionHash) { + setUpBasicConnection(); + + MockConnectionStats client_connection_stats; + client_connection_->setConnectionStats(client_connection_stats.toBufferStats()); + + std::vector hash1; + std::vector hash2; + ConnectionImplBase::addIdToHashKey(hash1, client_connection_->id()); + client_connection_->hashKey(hash2); + ASSERT_EQ(hash1, hash2); + disconnect(false); +} + TEST_P(ConnectionImplTest, ConnectionStats) { setUpBasicConnection(); @@ -779,8 +817,6 @@ TEST_P(ConnectionImplTest, WriteWatermarks) { // Stick 5 bytes in the connection buffer. 
std::unique_ptr buffer(new Buffer::OwnedImpl("hello")); int buffer_len = buffer->length(); - EXPECT_CALL(*client_write_buffer_, write(_)) - .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::failWrite)); EXPECT_CALL(*client_write_buffer_, move(_)); client_write_buffer_->move(*buffer); @@ -950,8 +986,6 @@ TEST_P(ConnectionImplTest, BasicWrite) { EXPECT_CALL(*client_write_buffer_, move(_)) .WillRepeatedly(DoAll(AddBufferToStringWithoutDraining(&data_written), Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove))); - EXPECT_CALL(*client_write_buffer_, write(_)) - .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackWrites)); EXPECT_CALL(*client_write_buffer_, drain(_)) .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackDrains)); client_connection_->write(buffer_to_write, false); @@ -977,8 +1011,6 @@ TEST_P(ConnectionImplTest, WriteWithWatermarks) { EXPECT_CALL(*client_write_buffer_, move(_)) .WillRepeatedly(DoAll(AddBufferToStringWithoutDraining(&data_written), Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove))); - EXPECT_CALL(*client_write_buffer_, write(_)) - .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackWrites)); EXPECT_CALL(*client_write_buffer_, drain(_)) .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackDrains)); // The write() call on the connection will buffer enough data to bring the connection above the @@ -987,21 +1019,26 @@ TEST_P(ConnectionImplTest, WriteWithWatermarks) { // connection_impl, and try an immediate drain inside of write() to avoid thrashing here. EXPECT_CALL(client_callbacks_, onAboveWriteBufferHighWatermark()); EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()); + client_connection_->write(first_buffer_to_write, false); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(data_to_write, data_written); - // Now do the write again, but this time configure buffer_ to reject the write - // with errno set to EAGAIN via failWrite(). This should result in going above the high + // Now do the write again, but this time configure os_sys_calls to reject the write + // with errno set to EAGAIN. This should result in going above the high // watermark and not returning. Buffer::OwnedImpl second_buffer_to_write(data_to_write); EXPECT_CALL(*client_write_buffer_, move(_)) .WillRepeatedly(DoAll(AddBufferToStringWithoutDraining(&data_written), Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove))); - EXPECT_CALL(*client_write_buffer_, write(_)) - .WillOnce(Invoke([&](IoHandle& io_handle) -> Api::IoCallUint64Result { + NiceMock os_sys_calls; + TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, writev(_, _, _)) + .WillOnce(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult { dispatcher_->exit(); - return client_write_buffer_->failWrite(io_handle); + // Return to default os_sys_calls implementation + os_calls.~TestThreadsafeSingletonInjector(); + return {-1, SOCKET_ERROR_AGAIN}; })); // The write() call on the connection will buffer enough data to bring the connection above the // high watermark and as the data will not flush it should not return below the watermark. @@ -1012,8 +1049,6 @@ TEST_P(ConnectionImplTest, WriteWithWatermarks) { // Clean up the connection. The close() (called via disconnect) will attempt to flush. The // call to write() will succeed, bringing the connection back under the low watermark. 
- EXPECT_CALL(*client_write_buffer_, write(_)) - .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackWrites)); EXPECT_CALL(client_callbacks_, onBelowWriteBufferLowWatermark()).Times(1); disconnect(true); @@ -1034,8 +1069,12 @@ TEST_P(ConnectionImplTest, WatermarkFuzzing) { bool is_below = true; bool is_above = false; - ON_CALL(*client_write_buffer_, write(_)) - .WillByDefault(testing::Invoke(client_write_buffer_, &MockWatermarkBuffer::failWrite)); + NiceMock os_sys_calls; + TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + ON_CALL(os_sys_calls, writev(_, _, _)) + .WillByDefault(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult { + return {-1, SOCKET_ERROR_AGAIN}; + })); ON_CALL(*client_write_buffer_, drain(_)) .WillByDefault(testing::Invoke(client_write_buffer_, &MockWatermarkBuffer::baseDrain)); EXPECT_CALL(*client_write_buffer_, drain(_)).Times(AnyNumber()); @@ -1078,15 +1117,18 @@ TEST_P(ConnectionImplTest, WatermarkFuzzing) { } // Do the actual work. Write |buffer_to_write| bytes to the connection and - // drain |bytes_to_flush| before having the buffer failWrite() + // drain |bytes_to_flush| before having writev syscall fail with EAGAIN EXPECT_CALL(*client_write_buffer_, move(_)) .WillOnce(Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove)); - EXPECT_CALL(*client_write_buffer_, write(_)) - .WillOnce( - DoAll(Invoke([&](IoHandle&) -> void { client_write_buffer_->drain(bytes_to_flush); }), - Return(testing::ByMove(Api::IoCallUint64Result( - bytes_to_flush, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})))))) - .WillRepeatedly(testing::Invoke(client_write_buffer_, &MockWatermarkBuffer::failWrite)); + EXPECT_CALL(os_sys_calls, writev(_, _, _)) + .WillOnce(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult { + client_write_buffer_->drain(bytes_to_flush); + return {-1, SOCKET_ERROR_AGAIN}; + })) + .WillRepeatedly(Invoke([&](os_fd_t, const iovec*, int) -> Api::SysCallSizeResult { + return {-1, SOCKET_ERROR_AGAIN}; + })); + client_connection_->write(buffer_to_write, false); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index fb1fc5d0bde7..339cfb4abc89 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -281,7 +281,7 @@ class TestDnsServer : public TcpListenerCallbacks { queries_.emplace_back(query); } - void onReject() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void onReject(RejectCause) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } void addHosts(const std::string& hostname, const IpList& ip, const RecordType& type) { if (type == RecordType::A) { @@ -390,6 +390,9 @@ class CustomInstance : public Address::Instance { const std::string& logicalName() const override { return antagonistic_name_; } const Address::Ip* ip() const override { return instance_.ip(); } const Address::Pipe* pipe() const override { return instance_.pipe(); } + const Address::EnvoyInternalAddress* envoyInternalAddress() const override { + return instance_.envoyInternalAddress(); + } const sockaddr* sockAddr() const override { return instance_.sockAddr(); } socklen_t sockAddrLen() const override { return instance_.sockAddrLen(); } Address::Type type() const override { return instance_.type(); } diff --git a/test/common/network/filter_manager_impl_test.cc b/test/common/network/filter_manager_impl_test.cc index 705f1370e01c..c40e2804ed0c 100644 --- a/test/common/network/filter_manager_impl_test.cc +++ 
b/test/common/network/filter_manager_impl_test.cc @@ -403,7 +403,7 @@ stat_prefix: name EXPECT_CALL(*rl_client, limit(_, "foo", testing::ContainerEq( std::vector{{{{"hello", "world"}}}}), - testing::A())) + testing::A(), _)) .WillOnce(WithArgs<0>( Invoke([&](Extensions::Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks = &callbacks; diff --git a/test/common/network/io_socket_handle_impl_test.cc b/test/common/network/io_socket_handle_impl_test.cc index a1a1a506b158..97b859fb942e 100644 --- a/test/common/network/io_socket_handle_impl_test.cc +++ b/test/common/network/io_socket_handle_impl_test.cc @@ -2,9 +2,20 @@ #include "common/network/io_socket_error_impl.h" #include "common/network/io_socket_handle_impl.h" +#include "test/mocks/api/mocks.h" +#include "test/test_common/threadsafe_singleton_injector.h" + #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::_; +using testing::DoAll; +using testing::Eq; +using testing::Invoke; +using testing::NiceMock; +using testing::Return; +using testing::WithArg; + namespace Envoy { namespace Network { namespace { @@ -50,6 +61,45 @@ TEST(IoSocketHandleImplTest, TestIoSocketError) { EXPECT_EQ(errorDetails(123), error9.getErrorDetails()); } +#ifdef TCP_INFO + +TEST(IoSocketHandleImpl, LastRoundTripTimeReturnsEmptyOptionalIfGetSocketFails) { + NiceMock os_sys_calls; + auto os_calls = + std::make_unique>( + &os_sys_calls); + EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _)).WillOnce(Return(-1)); + + IoSocketHandleImpl io_handle; + EXPECT_THAT(io_handle.lastRoundTripTime(), Eq(absl::optional{})); +} + +TEST(IoSocketHandleImpl, LastRoundTripTimeReturnsRttIfSuccessful) { + NiceMock os_sys_calls; + auto os_calls = + std::make_unique>( + &os_sys_calls); + EXPECT_CALL(os_sys_calls, getsockopt_(_, _, _, _, _)) + .WillOnce(DoAll(WithArg<3>(Invoke([](void* optval) { + static_cast(optval)->tcpi_rtt = 35; + })), + Return(0))); + + IoSocketHandleImpl io_handle; + EXPECT_THAT(io_handle.lastRoundTripTime(), Eq(absl::optional{35})); +} + +#endif + +#ifndef TCP_INFO + +TEST(IoSocketHandleImpl, LastRoundTripTimeAlwaysReturnsEmptyOptional) { + IoSocketHandleImpl io_handle; + EXPECT_THAT(io_handle.lastRoundTripTime(), Eq(absl::optional{})); +} + +#endif + } // namespace } // namespace Network } // namespace Envoy diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 8056826de91a..21e4673ce643 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/config/core/v3/base.pb.h" #include "envoy/network/exception.h" @@ -65,10 +67,11 @@ TEST_P(ListenerImplDeathTest, ErrorCallback) { class TestTcpListenerImpl : public TcpListenerImpl { public: - TestTcpListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket, - TcpListenerCallbacks& cb, bool bind_to_port, + TestTcpListenerImpl(Event::DispatcherImpl& dispatcher, Random::RandomGenerator& random_generator, + SocketSharedPtr socket, TcpListenerCallbacks& cb, bool bind_to_port, uint32_t tcp_backlog = ENVOY_TCP_BACKLOG_SIZE) - : TcpListenerImpl(dispatcher, std::move(socket), cb, bind_to_port, tcp_backlog) {} + : TcpListenerImpl(dispatcher, random_generator, std::move(socket), cb, bind_to_port, + tcp_backlog) {} MOCK_METHOD(Address::InstanceConstSharedPtr, getLocalAddress, (os_fd_t fd)); }; @@ -82,6 +85,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, TcpListenerImplTest, TEST_P(TcpListenerImplTest, SetListeningSocketOptionsSuccess) { 
Network::MockTcpListenerCallbacks listener_callbacks; Network::MockConnectionHandler connection_handler; + Random::MockRandomGenerator random_generator; auto socket = std::make_shared( Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); @@ -89,13 +93,15 @@ TEST_P(TcpListenerImplTest, SetListeningSocketOptionsSuccess) { socket->addOption(option); EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_LISTENING)) .WillOnce(Return(true)); - TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true); + TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, + true); } // Test that an exception is thrown if there is an error setting socket options. TEST_P(TcpListenerImplTest, SetListeningSocketOptionsError) { Network::MockTcpListenerCallbacks listener_callbacks; Network::MockConnectionHandler connection_handler; + Random::MockRandomGenerator random_generator; auto socket = std::make_shared( Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); @@ -103,10 +109,11 @@ TEST_P(TcpListenerImplTest, SetListeningSocketOptionsError) { socket->addOption(option); EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_LISTENING)) .WillOnce(Return(false)); - EXPECT_THROW_WITH_MESSAGE(TestTcpListenerImpl(dispatcherImpl(), socket, listener_callbacks, true), - CreateListenerException, - fmt::format("cannot set post-listen socket option on socket: {}", - socket->localAddress()->asString())); + EXPECT_THROW_WITH_MESSAGE( + TestTcpListenerImpl(dispatcherImpl(), random_generator, socket, listener_callbacks, true), + CreateListenerException, + fmt::format("cannot set post-listen socket option on socket: {}", + socket->localAddress()->asString())); } TEST_P(TcpListenerImplTest, UseActualDst) { @@ -115,10 +122,13 @@ TEST_P(TcpListenerImplTest, UseActualDst) { auto socketDst = std::make_shared(alt_address_, nullptr, false); Network::MockTcpListenerCallbacks listener_callbacks1; Network::MockConnectionHandler connection_handler; + Random::MockRandomGenerator random_generator; // Do not redirect since use_original_dst is false. - Network::TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks1, true); + Network::TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, + listener_callbacks1, true); Network::MockTcpListenerCallbacks listener_callbacks2; - Network::TestTcpListenerImpl listenerDst(dispatcherImpl(), socketDst, listener_callbacks2, false); + Network::TestTcpListenerImpl listenerDst(dispatcherImpl(), random_generator, socketDst, + listener_callbacks2, false); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->localAddress(), Network::Address::InstanceConstSharedPtr(), @@ -175,7 +185,8 @@ TEST_P(TcpListenerImplTest, GlobalConnectionLimitEnforcement) { }; initiate_connections(5); - EXPECT_CALL(listener_callbacks, onReject()).Times(3); + EXPECT_CALL(listener_callbacks, onReject(TcpListenerCallbacks::RejectCause::GlobalCxLimit)) + .Times(3); dispatcher_->run(Event::Dispatcher::RunType::Block); // We expect any server-side connections that get created to populate 'server_connections'. 
@@ -185,7 +196,8 @@ TEST_P(TcpListenerImplTest, GlobalConnectionLimitEnforcement) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"overload.global_downstream_max_connections", "3"}}); initiate_connections(5); - EXPECT_CALL(listener_callbacks, onReject()).Times(4); + EXPECT_CALL(listener_callbacks, onReject(TcpListenerCallbacks::RejectCause::GlobalCxLimit)) + .Times(4); dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_EQ(3, server_connections.size()); @@ -214,8 +226,10 @@ TEST_P(TcpListenerImplTest, WildcardListenerUseActualDst) { Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); Network::MockTcpListenerCallbacks listener_callbacks; Network::MockConnectionHandler connection_handler; + Random::MockRandomGenerator random_generator; // Do not redirect since use_original_dst is false. - Network::TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true); + Network::TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, + listener_callbacks, true); auto local_dst_address = Network::Utility::getAddressWithPort( *Network::Test::getCanonicalLoopbackAddress(version_), socket->localAddress()->ip()->port()); @@ -253,11 +267,13 @@ TEST_P(TcpListenerImplTest, WildcardListenerIpv4Compat) { options, true); Network::MockTcpListenerCallbacks listener_callbacks; Network::MockConnectionHandler connection_handler; + Random::MockRandomGenerator random_generator; ASSERT_TRUE(socket->localAddress()->ip()->isAnyAddress()); // Do not redirect since use_original_dst is false. - Network::TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true); + Network::TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, + listener_callbacks, true); auto listener_address = Network::Utility::getAddressWithPort( *Network::Test::getCanonicalLoopbackAddress(version_), socket->localAddress()->ip()->port()); @@ -291,7 +307,9 @@ TEST_P(TcpListenerImplTest, DisableAndEnableListener) { Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); MockTcpListenerCallbacks listener_callbacks; MockConnectionCallbacks connection_callbacks; - TestTcpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true); + Random::MockRandomGenerator random_generator; + TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, + true); // When listener is disabled, the timer should fire before any connection is accepted. listener.disable(); @@ -325,6 +343,132 @@ TEST_P(TcpListenerImplTest, DisableAndEnableListener) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_P(TcpListenerImplTest, SetListenerRejectFractionZero) { + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + MockTcpListenerCallbacks listener_callbacks; + MockConnectionCallbacks connection_callbacks; + Random::MockRandomGenerator random_generator; + TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, + true); + + listener.setRejectFraction(0); + + // This connection will be accepted and not rejected. 
+ { + testing::InSequence s1; + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::Connected)); + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::LocalClose)); + } + EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce([&] { dispatcher_->exit(); }); + + ClientConnectionPtr client_connection = + dispatcher_->createClientConnection(socket->localAddress(), Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); + client_connection->addConnectionCallbacks(connection_callbacks); + client_connection->connect(); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + // Now that we've seen that the connection hasn't been closed by the listener, make sure to close + // it. + client_connection->close(ConnectionCloseType::NoFlush); +} + +TEST_P(TcpListenerImplTest, SetListenerRejectFractionIntermediate) { + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + MockTcpListenerCallbacks listener_callbacks; + MockConnectionCallbacks connection_callbacks; + Random::MockRandomGenerator random_generator; + TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, + true); + + listener.setRejectFraction(0.5f); + + // The first connection will be rejected because the random value is too small. + { + testing::InSequence s1; + EXPECT_CALL(random_generator, random()).WillOnce(Return(0)); + EXPECT_CALL(listener_callbacks, onReject(TcpListenerCallbacks::RejectCause::OverloadAction)); + } + { + testing::InSequence s2; + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::Connected)); + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::RemoteClose)).WillOnce([&] { + dispatcher_->exit(); + }); + } + + { + ClientConnectionPtr client_connection = dispatcher_->createClientConnection( + socket->localAddress(), Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); + client_connection->addConnectionCallbacks(connection_callbacks); + client_connection->connect(); + dispatcher_->run(Event::Dispatcher::RunType::Block); + } + + // The second connection rolls better on initiative and is accepted. + { + testing::InSequence s1; + EXPECT_CALL(random_generator, random()).WillOnce(Return(std::numeric_limits::max())); + EXPECT_CALL(listener_callbacks, onAccept_(_)); + } + { + testing::InSequence s2; + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::Connected)).WillOnce([&] { + dispatcher_->exit(); + }); + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::RemoteClose)).Times(0); + } + + { + ClientConnectionPtr client_connection = dispatcher_->createClientConnection( + socket->localAddress(), Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); + client_connection->addConnectionCallbacks(connection_callbacks); + client_connection->connect(); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::LocalClose)); + // Now that we've seen that the connection hasn't been closed by the listener, make sure to + // close it. 
+ client_connection->close(ConnectionCloseType::NoFlush); + } +} + +TEST_P(TcpListenerImplTest, SetListenerRejectFractionAll) { + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + MockTcpListenerCallbacks listener_callbacks; + MockConnectionCallbacks connection_callbacks; + Random::MockRandomGenerator random_generator; + TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, + true); + + listener.setRejectFraction(1); + + { + testing::InSequence s1; + EXPECT_CALL(listener_callbacks, onReject(TcpListenerCallbacks::RejectCause::OverloadAction)); + } + + { + testing::InSequence s2; + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::Connected)); + EXPECT_CALL(connection_callbacks, onEvent(ConnectionEvent::RemoteClose)).WillOnce([&] { + dispatcher_->exit(); + }); + } + + ClientConnectionPtr client_connection = + dispatcher_->createClientConnection(socket->localAddress(), Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); + client_connection->addConnectionCallbacks(connection_callbacks); + client_connection->connect(); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + } // namespace } // namespace Network } // namespace Envoy diff --git a/test/common/network/resolver_impl_test.cc b/test/common/network/resolver_impl_test.cc index 6e7c0a5e8b9e..c9d9b3fce901 100644 --- a/test/common/network/resolver_impl_test.cc +++ b/test/common/network/resolver_impl_test.cc @@ -63,6 +63,20 @@ TEST(ResolverTest, FromProtoAddress) { EXPECT_EQ("/foo/bar", resolveProtoAddress(pipe_address)->asString()); } +TEST(ResolverTest, InternalListenerNameFromProtoAddress) { + envoy::config::core::v3::Address internal_listener_address; + internal_listener_address.mutable_envoy_internal_address()->set_server_listener_name( + "internal_listener_foo"); + EXPECT_EQ("envoy://internal_listener_foo", + resolveProtoAddress(internal_listener_address)->asString()); +} + +TEST(ResolverTest, UninitializedInternalAddressFromProtoAddress) { + envoy::config::core::v3::Address internal_address; + internal_address.mutable_envoy_internal_address(); + EXPECT_DEATH(resolveProtoAddress(internal_address), "panic"); +} + // Validate correct handling of ipv4_compat field. 
TEST(ResolverTest, FromProtoAddressV4Compat) { { @@ -156,8 +170,7 @@ TEST(ResolverTest, NonStandardResolver) { TEST(ResolverTest, UninitializedAddress) { envoy::config::core::v3::Address address; - EXPECT_THROW_WITH_MESSAGE(resolveProtoAddress(address), EnvoyException, - "Address must be a socket or pipe: "); + EXPECT_THROW_WITH_MESSAGE(resolveProtoAddress(address), EnvoyException, "Address must be set: "); } TEST(ResolverTest, NoSuchResolver) { diff --git a/test/common/network/transport_socket_options_impl_test.cc b/test/common/network/transport_socket_options_impl_test.cc index a96fbc53bdd3..213c6778b037 100644 --- a/test/common/network/transport_socket_options_impl_test.cc +++ b/test/common/network/transport_socket_options_impl_test.cc @@ -44,6 +44,8 @@ TEST_F(TransportSocketOptionsImplTest, UpstreamServer) { auto transport_socket_options = TransportSocketOptionsUtility::fromFilterState(filter_state_); EXPECT_EQ(absl::make_optional("www.example.com"), transport_socket_options->serverNameOverride()); + EXPECT_EQ("202.168.0.13:52000", + transport_socket_options->proxyProtocolOptions()->src_addr_->asStringView()); EXPECT_TRUE(transport_socket_options->applicationProtocolListOverride().empty()); } diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index 96f42f40dc97..9588f02bb364 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -175,6 +175,11 @@ TEST(NetworkUtility, GetOriginalDst) { EXPECT_CALL(socket, ipVersion()).WillOnce(testing::Return(absl::nullopt)); #endif EXPECT_EQ(nullptr, Utility::getOriginalDst(socket)); + +#ifdef SOL_IP + EXPECT_CALL(socket, addressType()).WillOnce(testing::Return(Address::Type::Pipe)); +#endif + EXPECT_EQ(nullptr, Utility::getOriginalDst(socket)); } TEST(NetworkUtility, LocalConnection) { diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index bb018981c290..4aa4300922dc 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -46,6 +46,14 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "type_util_test", + srcs = ["type_util_test.cc"], + deps = [ + "//source/common/protobuf:type_util_lib", + ], +) + envoy_cc_fuzz_test( name = "value_util_fuzz_test", srcs = ["value_util_fuzz_test.cc"], diff --git a/test/common/protobuf/type_util_test.cc b/test/common/protobuf/type_util_test.cc new file mode 100644 index 000000000000..d78395163637 --- /dev/null +++ b/test/common/protobuf/type_util_test.cc @@ -0,0 +1,18 @@ +#include "common/protobuf/type_util.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Config { +namespace { +TEST(TypeUtilTest, TypeUrlHelperFunction) { + EXPECT_EQ("envoy.config.filter.http.ip_tagging.v2.IPTagging", + TypeUtil::typeUrlToDescriptorFullName( + "type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging")); + EXPECT_EQ( + "type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging", + TypeUtil::descriptorFullNameToTypeUrl("envoy.config.filter.http.ip_tagging.v2.IPTagging")); +} +} // namespace +} // namespace Config +} // namespace Envoy \ No newline at end of file diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 2132fd25e2d2..13aa42bf5372 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -32,15 +32,39 @@ #include "gtest/gtest.h" #include "udpa/type/v1/typed_struct.pb.h" +using namespace std::chrono_literals; + namespace Envoy { -class ProtobufUtilityTest : public testing::Test { -protected: - 
ProtobufUtilityTest() : api_(Api::createApiForTest()) {} +class RuntimeStatsHelper { +public: + RuntimeStatsHelper() + : api_(Api::createApiForTest(store_)), + runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")), + deprecated_feature_seen_since_process_start_( + store_.gauge("runtime.deprecated_feature_seen_since_process_start", + Stats::Gauge::ImportMode::NeverImport)) { + envoy::config::bootstrap::v3::LayeredRuntime config; + config.add_layers()->mutable_admin_layer(); + loader_ = std::make_unique( + Runtime::LoaderPtr{new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, store_, + generator_, validation_visitor_, *api_)}); + } + Event::MockDispatcher dispatcher_; + NiceMock tls_; + Stats::TestUtil::TestStore store_; + Random::MockRandomGenerator generator_; Api::ApiPtr api_; + std::unique_ptr loader_; + Stats::Counter& runtime_deprecated_feature_use_; + Stats::Gauge& deprecated_feature_seen_since_process_start_; + NiceMock local_info_; + NiceMock validation_visitor_; }; +class ProtobufUtilityTest : public testing::Test, protected RuntimeStatsHelper {}; + TEST_F(ProtobufUtilityTest, ConvertPercentNaNDouble) { envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; common_config_.mutable_healthy_panic_threshold()->set_value( @@ -239,9 +263,26 @@ TEST_F(ProtobufUtilityTest, LoadBinaryProtoFromFile) { envoy::config::bootstrap::v3::Bootstrap proto_from_file; TestUtility::loadFromFile(filename, proto_from_file, *api_); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); } +TEST_F(ProtobufUtilityTest, DEPRECATED_FEATURE_TEST(LoadBinaryV2ProtoFromFile)) { + // Allow the use of v2.Bootstrap.runtime. + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.deprecated_features:envoy.config.bootstrap.v2.Bootstrap.runtime", "True "}}); + envoy::config::bootstrap::v2::Bootstrap bootstrap; + bootstrap.mutable_runtime()->set_symlink_root("/"); + + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.pb", bootstrap.SerializeAsString()); + + envoy::config::bootstrap::v3::Bootstrap proto_from_file; + TestUtility::loadFromFile(filename, proto_from_file, *api_); + EXPECT_EQ("/", proto_from_file.hidden_envoy_deprecated_runtime().symlink_root()); + EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); +} + // An unknown field (or with wrong type) in a message is rejected. 
TEST_F(ProtobufUtilityTest, LoadBinaryProtoUnknownFieldFromFile) { ProtobufWkt::Duration source_duration; @@ -290,6 +331,7 @@ TEST_F(ProtobufUtilityTest, LoadTextProtoFromFile) { envoy::config::bootstrap::v3::Bootstrap proto_from_file; TestUtility::loadFromFile(filename, proto_from_file, *api_); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); } @@ -307,6 +349,7 @@ TEST_F(ProtobufUtilityTest, LoadJsonFromFileNoBoosting) { envoy::config::bootstrap::v3::Bootstrap proto_from_file; TestUtility::loadFromFile(filename, proto_from_file, *api_); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); } @@ -321,6 +364,7 @@ TEST_F(ProtobufUtilityTest, DEPRECATED_FEATURE_TEST(LoadV2TextProtoFromFile)) { API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_from_file; TestUtility::loadFromFile(filename, proto_from_file, *api_); + EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); EXPECT_EQ("foo", proto_from_file.node().hidden_envoy_deprecated_build_version()); } @@ -1214,9 +1258,34 @@ TEST_F(ProtobufUtilityTest, UnpackToNextVersion) { source_any.PackFrom(source); API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; MessageUtil::unpackTo(source_any, dst); + EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); EXPECT_TRUE(dst.ignore_health_on_host_removal()); } +// Validate warning messages on v2 upgrades. +TEST_F(ProtobufUtilityTest, V2UpgradeWarningLogs) { + API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; + // First attempt works. + EXPECT_LOG_CONTAINS("warn", "Configuration does not parse cleanly as v3", + MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, + ProtobufMessage::getNullValidationVisitor())); + // Second attempt immediately after fails. + EXPECT_LOG_NOT_CONTAINS("warn", "Configuration does not parse cleanly as v3", + MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", + dst, + ProtobufMessage::getNullValidationVisitor())); + // Third attempt works, since this is a different log message. + EXPECT_LOG_CONTAINS("warn", "Configuration does not parse cleanly as v3", + MessageUtil::loadFromJson("{drain_connections_on_host_removal: false}", dst, + ProtobufMessage::getNullValidationVisitor())); + // This is kind of terrible, but it's hard to do dependency injection at onVersionUpgradeWarn(). + std::this_thread::sleep_for(5s); // NOLINT + // We can log the original warning again. + EXPECT_LOG_CONTAINS("warn", "Configuration does not parse cleanly as v3", + MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, + ProtobufMessage::getNullValidationVisitor())); +} + // MessageUtility::loadFromJson() throws on garbage JSON. 
TEST_F(ProtobufUtilityTest, LoadFromJsonGarbage) { envoy::config::cluster::v3::Cluster dst; @@ -1231,24 +1300,28 @@ TEST_F(ProtobufUtilityTest, LoadFromJsonSameVersion) { API_NO_BOOST(envoy::api::v2::Cluster) dst; MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, ProtobufMessage::getNullValidationVisitor()); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(dst.drain_connections_on_host_removal()); } { API_NO_BOOST(envoy::api::v2::Cluster) dst; MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, ProtobufMessage::getStrictValidationVisitor()); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(dst.drain_connections_on_host_removal()); } { API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; MessageUtil::loadFromJson("{ignore_health_on_host_removal: true}", dst, ProtobufMessage::getNullValidationVisitor()); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(dst.ignore_health_on_host_removal()); } { API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; MessageUtil::loadFromJson("{ignore_health_on_host_removal: true}", dst, ProtobufMessage::getStrictValidationVisitor()); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(dst.ignore_health_on_host_removal()); } } @@ -1268,24 +1341,28 @@ TEST_F(ProtobufUtilityTest, LoadFromJsonNextVersion) { API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; MessageUtil::loadFromJson("{use_tcp_for_dns_lookups: true}", dst, ProtobufMessage::getNullValidationVisitor()); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(dst.use_tcp_for_dns_lookups()); } { API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; MessageUtil::loadFromJson("{use_tcp_for_dns_lookups: true}", dst, ProtobufMessage::getStrictValidationVisitor()); + EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); EXPECT_TRUE(dst.use_tcp_for_dns_lookups()); } { API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, ProtobufMessage::getNullValidationVisitor()); + EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); EXPECT_TRUE(dst.ignore_health_on_host_removal()); } { API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, ProtobufMessage::getStrictValidationVisitor()); + EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); EXPECT_TRUE(dst.ignore_health_on_host_removal()); } } @@ -1428,20 +1505,9 @@ TEST(DurationUtilTest, OutOfRange) { } } -class DeprecatedFieldsTest : public testing::TestWithParam { +class DeprecatedFieldsTest : public testing::TestWithParam, protected RuntimeStatsHelper { protected: - DeprecatedFieldsTest() - : with_upgrade_(GetParam()), api_(Api::createApiForTest(store_)), - runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")), - deprecated_feature_seen_since_process_start_( - store_.gauge("runtime.deprecated_feature_seen_since_process_start", - Stats::Gauge::ImportMode::NeverImport)) { - envoy::config::bootstrap::v3::LayeredRuntime config; - config.add_layers()->mutable_admin_layer(); - loader_ = std::make_unique( - Runtime::LoaderPtr{new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, store_, - generator_, validation_visitor_, *api_)}); - } + DeprecatedFieldsTest() : with_upgrade_(GetParam()) {} void checkForDeprecation(const Protobuf::Message& message) { if (with_upgrade_) { @@ -1455,17 +1521,6 @@ class DeprecatedFieldsTest : public 
testing::TestWithParam { } const bool with_upgrade_; - Event::MockDispatcher dispatcher_; - NiceMock tls_; - Stats::TestUtil::TestStore store_; - Random::MockRandomGenerator generator_; - Api::ApiPtr api_; - Random::MockRandomGenerator rand_; - std::unique_ptr loader_; - Stats::Counter& runtime_deprecated_feature_use_; - Stats::Gauge& deprecated_feature_seen_since_process_start_; - NiceMock local_info_; - NiceMock validation_visitor_; }; INSTANTIATE_TEST_SUITE_P(Versions, DeprecatedFieldsTest, testing::Values(false, true)); @@ -1523,7 +1578,7 @@ TEST_P(DeprecatedFieldsTest, // Now create a new snapshot with this feature allowed. Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.deprecated_features:envoy.test.deprecation_test.Base.is_deprecated_fatal", - "TrUe "}}); + "True "}}); // Now the same deprecation check should only trigger a warning. EXPECT_LOG_CONTAINS( @@ -1772,10 +1827,4 @@ TEST(StatusCode, Strings) { ASSERT_EQ("OK", MessageUtil::CodeEnumToString(ProtobufUtil::error::OK)); } -TEST(TypeUtilTest, TypeUrlToDescriptorFullName) { - EXPECT_EQ("envoy.config.filter.http.ip_tagging.v2.IPTagging", - TypeUtil::typeUrlToDescriptorFullName( - "type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging")); -} - } // namespace Envoy diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 9892165a1ed4..51121e73b8a4 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -1,5 +1,7 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_binary", @@ -41,6 +43,21 @@ envoy_cc_test_library( ], ) +envoy_cc_benchmark_binary( + name = "config_impl_headermap_benchmark_test", + srcs = ["config_impl_headermap_benchmark_test.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + "//source/common/http:header_map_lib", + "//source/common/router:config_lib", + "//test/mocks/server:server_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + ], +) + envoy_proto_library( name = "header_parser_fuzz_proto", srcs = ["header_parser_fuzz.proto"], @@ -386,3 +403,25 @@ envoy_proto_library( "@envoy_api//envoy/extensions/filters/http/router/v3:pkg", ], ) + +envoy_cc_benchmark_binary( + name = "config_impl_speed_test", + srcs = ["config_impl_speed_test.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + "//source/common/common:assert_lib", + "//source/common/router:config_lib", + "//test/mocks/server:instance_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + ], +) + +envoy_benchmark_test( + name = "config_impl_benchmark_test", + benchmark_binary = "config_impl_speed_test", +) diff --git a/test/common/router/config_impl_headermap_benchmark_test.cc b/test/common/router/config_impl_headermap_benchmark_test.cc new file mode 100644 index 000000000000..80fa3ae007ef --- /dev/null +++ b/test/common/router/config_impl_headermap_benchmark_test.cc @@ -0,0 +1,88 @@ +#include "envoy/config/route/v3/route.pb.h" +#include "envoy/config/route/v3/route.pb.validate.h" +#include "envoy/config/route/v3/route_components.pb.h" + +#include "common/http/header_map_impl.h" +#include "common/router/config_impl.h" + +#include "test/mocks/server/mocks.h" +#include "test/test_common/utility.h" + +#include "benchmark/benchmark.h" + +using testing::ReturnRef; + +namespace Envoy { +namespace Router { + +/** + * 
Measure the time it takes to iterate over country route configurations until + * the default route is taken. This emulates a case where the router has 250 + * different configurations (for 250 countries), and the benchmarked requests match none of + * the country routes, so each lookup falls through to the default route. The test allows + * comparing the performance of different header map implementations. + * + * Note: the benchmark includes the time to setup the config routes and add all + * the request headers once. + * */ +static void manyCountryRoutesLongHeaders(benchmark::State& state) { + // Add a route configuration with multiple routes, each requiring a different + // x-country header value. + const size_t countries_num = 250; + const Http::LowerCaseString country_header_name("x-country"); + envoy::config::route::v3::RouteConfiguration proto_config; + auto main_virtual_host = proto_config.mutable_virtual_hosts()->Add(); + main_virtual_host->set_name("default"); + main_virtual_host->mutable_domains()->Add("*"); + // Add the country routes. + std::vector<std::string> countries; + for (size_t i = 0; i < countries_num; i++) { + auto country_name = absl::StrCat("country", i); + countries.push_back(country_name); + // Add the country route. + auto new_routes = main_virtual_host->mutable_routes()->Add(); + new_routes->mutable_match()->set_prefix("/"); + new_routes->mutable_route()->set_cluster(country_name); + auto headers_matcher = new_routes->mutable_match()->mutable_headers()->Add(); + headers_matcher->set_name(country_header_name.get()); + headers_matcher->set_exact_match(country_name); + } + // Add the default route. + auto new_routes = main_virtual_host->mutable_routes()->Add(); + new_routes->mutable_match()->set_prefix("/"); + new_routes->mutable_route()->set_cluster("default"); + + // Setup the config parsing. + Api::ApiPtr api(Api::createApiForTest()); + NiceMock factory_context; + ON_CALL(factory_context, api()).WillByDefault(ReturnRef(*api)); + ConfigImpl config(proto_config, factory_context, ProtobufMessage::getNullValidationVisitor(), + true); + + const auto stream_info = NiceMock(); + auto req_headers = Http::TestRequestHeaderMapImpl{{":authority", "www.lyft.com"}, + {":path", "/"}, + {":method", "GET"}, + {"x-forwarded-proto", "http"}}; + // Add dummy headers to reach ~100 headers (limit per request).
+ for (int i = 0; i < 90; i++) { + req_headers.addCopy(Http::LowerCaseString(absl::StrCat("dummyheader", i)), "some_value"); + } + req_headers.addReferenceKey(country_header_name, absl::StrCat("country", countries_num)); + for (auto _ : state) { // NOLINT + auto& result = config.route(req_headers, stream_info, 0)->routeEntry()->clusterName(); + benchmark::DoNotOptimize(result); + } +} +BENCHMARK(manyCountryRoutesLongHeaders) + ->Arg(0) + ->Arg(1) + ->Arg(5) + ->Arg(10) + ->Arg(100) + ->Arg(1000) + ->Arg(5000) + ->Arg(10000); + +} // namespace Router +} // namespace Envoy diff --git a/test/common/router/config_impl_speed_test.cc b/test/common/router/config_impl_speed_test.cc new file mode 100644 index 000000000000..a4192eb487f2 --- /dev/null +++ b/test/common/router/config_impl_speed_test.cc @@ -0,0 +1,149 @@ +#include "envoy/config/route/v3/route.pb.h" +#include "envoy/config/route/v3/route.pb.validate.h" + +#include "common/common/assert.h" +#include "common/router/config_impl.h" + +#include "test/mocks/server/instance.h" +#include "test/mocks/stream_info/mocks.h" +#include "test/test_common/test_runtime.h" +#include "test/test_common/utility.h" + +#include "benchmark/benchmark.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Router { +namespace { + +using envoy::config::route::v3::DirectResponseAction; +using envoy::config::route::v3::Route; +using envoy::config::route::v3::RouteConfiguration; +using envoy::config::route::v3::RouteMatch; +using envoy::config::route::v3::VirtualHost; +using testing::NiceMock; +using testing::ReturnRef; + +/** + * Generates a request with the path: + * - /shelves/shelf_x/route_x + */ +static Http::TestRequestHeaderMapImpl genRequestHeaders(int route_num) { + return Http::TestRequestHeaderMapImpl{ + {":authority", "www.google.com"}, + {":method", "GET"}, + {":path", absl::StrCat("/shelves/shelf_", route_num, "/route_", route_num)}, + {"x-forwarded-proto", "http"}}; +} + +/** + * Generates the route config for the type of matcher being tested. + */ +static RouteConfiguration genRouteConfig(benchmark::State& state, + RouteMatch::PathSpecifierCase match_type) { + // Create the base route config. + RouteConfiguration route_config; + VirtualHost* v_host = route_config.add_virtual_hosts(); + v_host->set_name("default"); + v_host->add_domains("*"); + + // Create `n` routes of the matcher type under test. The last route will be the only one matched. + for (int i = 0; i < state.range(0); ++i) { + Route* route = v_host->add_routes(); + DirectResponseAction* direct_response = route->mutable_direct_response(); + direct_response->set_status(200); + RouteMatch* match = route->mutable_match(); + + switch (match_type) { + case RouteMatch::PathSpecifierCase::kPrefix: { + match->set_prefix(absl::StrCat("/shelves/shelf_", i, "/")); + break; + } + case RouteMatch::PathSpecifierCase::kPath: { + match->set_path(absl::StrCat("/shelves/shelf_", i, "/route_", i)); + break; + } + case RouteMatch::PathSpecifierCase::kSafeRegex: { + envoy::type::matcher::v3::RegexMatcher* regex = match->mutable_safe_regex(); + regex->mutable_google_re2(); + regex->set_regex(absl::StrCat("^/shelves/[^\\\\/]+/route_", i, "$")); + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + + return route_config; +} + +/** + * Measure the speed of doing a route match against route tables of varying sizes. + * Why? Currently, route matching is linear in first-to-win ordering. + * + * We construct the first `n - 1` items in the route table so they are not + * matched by the incoming request.
Only the last route will be matched. + * We then time how long it takes for the request to be matched against the + * last route. + */ +static void bmRouteTableSize(benchmark::State& state, RouteMatch::PathSpecifierCase match_type) { + // Setup router for benchmarking. + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_query_string_in_path_redirects", "false"}}); + Api::ApiPtr api = Api::createApiForTest(); + NiceMock factory_context; + NiceMock stream_info; + ON_CALL(factory_context, api()).WillByDefault(ReturnRef(*api)); + + // Create router config. + ConfigImpl config(genRouteConfig(state, match_type), factory_context, + ProtobufMessage::getNullValidationVisitor(), true); + + for (auto _ : state) { // NOLINT + // Do the actual timing here. + // Single request that will match the last route in the config. + int last_route_num = state.range(0) - 1; + config.route(genRequestHeaders(last_route_num), stream_info, 0); + } +} + +/** + * Benchmark a route table with path prefix matchers in the form of: + * - /shelves/shelf_1/... + * - /shelves/shelf_2/... + * - etc. + */ +static void bmRouteTableSizeWithPathPrefixMatch(benchmark::State& state) { + bmRouteTableSize(state, RouteMatch::PathSpecifierCase::kPrefix); +} + +/** + * Benchmark a route table with exact path matchers in the form of: + * - /shelves/shelf_1/route_1 + * - /shelves/shelf_2/route_2 + * - etc. + */ +static void bmRouteTableSizeWithExactPathMatch(benchmark::State& state) { + bmRouteTableSize(state, RouteMatch::PathSpecifierCase::kPath); +} + +/** + * Benchmark a route table with regex path matchers in the form of: + * - /shelves/{shelf_id}/route_1 + * - /shelves/{shelf_id}/route_2 + * - etc. + * + * This represents common OpenAPI path templating. 
+ */ +static void bmRouteTableSizeWithRegexMatch(benchmark::State& state) { + bmRouteTableSize(state, RouteMatch::PathSpecifierCase::kSafeRegex); +} + +BENCHMARK(bmRouteTableSizeWithPathPrefixMatch)->RangeMultiplier(2)->Ranges({{1, 2 << 13}}); +BENCHMARK(bmRouteTableSizeWithExactPathMatch)->RangeMultiplier(2)->Ranges({{1, 2 << 13}}); +BENCHMARK(bmRouteTableSizeWithRegexMatch)->RangeMultiplier(2)->Ranges({{1, 2 << 13}}); + +} // namespace +} // namespace Router +} // namespace Envoy diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 7e5cb92cc983..3b147b46477c 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -491,8 +491,8 @@ TEST_F(RouteMatcherTest, TestConnectRoutes) { route: cluster: connect_break - match: - connect_matcher: - {} + connect_matcher: + {} route: cluster: connect_match prefix_rewrite: "/rewrote" @@ -507,9 +507,21 @@ TEST_F(RouteMatcherTest, TestConnectRoutes) { - bat4.com routes: - match: - connect_matcher: - {} + connect_matcher: + {} redirect: { path_redirect: /new_path } +- name: connect3 + domains: + - bat5.com + routes: + - match: + connect_matcher: + {} + headers: + - name: x-safe + exact_match: "safe" + route: + cluster: connect_header_match - name: default domains: - "*" @@ -557,6 +569,15 @@ TEST_F(RouteMatcherTest, TestConnectRoutes) { redirect->rewritePathHeader(headers, true); EXPECT_EQ("http://bat4.com/new_path", redirect->newPath(headers)); } + + // Header matching (for HTTP/1.1) + EXPECT_EQ( + "connect_header_match", + config.route(genPathlessHeaders("bat5.com", "CONNECT"), 0)->routeEntry()->clusterName()); + + // Header matching (for HTTP/2) + EXPECT_EQ("connect_header_match", + config.route(genHeaders("bat5.com", " ", "CONNECT"), 0)->routeEntry()->clusterName()); } TEST_F(RouteMatcherTest, TestRoutes) { @@ -2831,7 +2852,38 @@ TEST_F(RouteMatcherTest, ContentType) { } } -TEST_F(RouteMatcherTest, GrpcTimeoutOffset) { +TEST_F(RouteMatcherTest, DurationTimeouts) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/foo" + route: + cluster: local_service_grpc + - match: + prefix: "/" + route: + max_stream_duration: + max_stream_duration: 0.01s + grpc_timeout_header_max: 0.02s + grpc_timeout_header_offset: 0.03s + cluster: local_service_grpc + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + + { + auto entry = config.route(genHeaders("www.lyft.com", "/", "GET"), 0)->routeEntry(); + EXPECT_EQ(std::chrono::milliseconds(10), entry->maxStreamDuration()); + EXPECT_EQ(std::chrono::milliseconds(20), entry->grpcTimeoutHeaderMax()); + EXPECT_EQ(std::chrono::milliseconds(30), entry->grpcTimeoutHeaderOffset()); + } +} + +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(GrpcTimeoutOffset)) { const std::string yaml = R"EOF( virtual_hosts: - name: local_service @@ -2861,7 +2913,7 @@ TEST_F(RouteMatcherTest, GrpcTimeoutOffset) { ->grpcTimeoutOffset()); } -TEST_F(RouteMatcherTest, GrpcTimeoutOffsetOfDynamicRoute) { +TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(GrpcTimeoutOffsetOfDynamicRoute)) { // A DynamicRouteEntry will be created when 'cluster_header' is set. 
const std::string yaml = R"EOF( virtual_hosts: @@ -4804,7 +4856,9 @@ TEST_F(RouteMatcherTest, WeightedClusters) { EXPECT_EQ(nullptr, route_entry->hashPolicy()); EXPECT_TRUE(route_entry->opaqueConfig().empty()); EXPECT_FALSE(route_entry->autoHostRewrite()); - EXPECT_TRUE(route_entry->includeVirtualHostRateLimits()); + // Default behavior when include_vh_rate_limits is not set, similar to + // VhRateLimitOptions::Override + EXPECT_FALSE(route_entry->includeVirtualHostRateLimits()); EXPECT_EQ(Http::Code::ServiceUnavailable, route_entry->clusterNotFoundResponseCode()); EXPECT_EQ(nullptr, route_entry->corsPolicy()); EXPECT_EQ("test_value", @@ -5395,7 +5449,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestOpaqueConfigUsingDeprecated class RoutePropertyTest : public testing::Test, public ConfigImplTestBase {}; -TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { +TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(ExcludeVHRateLimits)) { std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -5413,7 +5467,9 @@ TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { config_ptr = std::make_unique(parseRouteConfigurationFromYaml(yaml), factory_context_, true); - EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); + // Default behavior when include_vh_rate_limits is not set, similar to + // VhRateLimitOptions::Override + EXPECT_FALSE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); yaml = R"EOF( virtual_hosts: diff --git a/test/common/router/corpus_from_config_impl.sh b/test/common/router/corpus_from_config_impl.sh index 91b7963cb033..8885d2958e05 100755 --- a/test/common/router/corpus_from_config_impl.sh +++ b/test/common/router/corpus_from_config_impl.sh @@ -5,7 +5,7 @@ set -e # Set NORUNFILES so test/main doesn't fail when runfiles manifest is not found. 
-NORUNFILES=1 $* +NORUNFILES=1 "$@" # Verify at least one entry is actually generated [ -e "${GENRULE_OUTPUT_DIR}"/generated_corpus_0 ] diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 93f626fc86e2..f045b12d4fda 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -982,6 +982,69 @@ match: { prefix: "/new_endpoint" } EXPECT_TRUE(header_map.has("x-client-ip-port")); } +TEST(HeaderParserTest, EvaluateHeadersWithNullStreamInfo) { + const std::string yaml = R"EOF( +match: { prefix: "/new_endpoint" } +route: + cluster: "www2" + prefix_rewrite: "/api/new_endpoint" +request_headers_to_add: + - header: + key: "x-client-ip" + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + append: true + - header: + key: "x-client-ip-port" + value: "%DOWNSTREAM_REMOTE_ADDRESS%" + append: true +)EOF"; + + HeaderParserPtr req_header_parser = + HeaderParser::configure(parseRouteFromV3Yaml(yaml).request_headers_to_add()); + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; + req_header_parser->evaluateHeaders(header_map, nullptr); + EXPECT_TRUE(header_map.has("x-client-ip")); + EXPECT_TRUE(header_map.has("x-client-ip-port")); + EXPECT_EQ("%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%", header_map.get_("x-client-ip")); + EXPECT_EQ("%DOWNSTREAM_REMOTE_ADDRESS%", header_map.get_("x-client-ip-port")); +} + +TEST(HeaderParserTest, EvaluateHeaderValuesWithNullStreamInfo) { + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; + Protobuf::RepeatedPtrField headers_values; + + auto& first_entry = *headers_values.Add(); + first_entry.set_key("key"); + + // This tests when we have "StreamInfoHeaderFormatter", but stream info is null. + first_entry.set_value("%DOWNSTREAM_REMOTE_ADDRESS%"); + + HeaderParserPtr req_header_parser_add = HeaderParser::configure(headers_values, /*append=*/true); + req_header_parser_add->evaluateHeaders(header_map, nullptr); + EXPECT_TRUE(header_map.has("key")); + EXPECT_EQ("%DOWNSTREAM_REMOTE_ADDRESS%", header_map.get_("key")); + + headers_values.Clear(); + auto& set_entry = *headers_values.Add(); + set_entry.set_key("key"); + set_entry.set_value("great"); + + HeaderParserPtr req_header_parser_set = HeaderParser::configure(headers_values, /*append=*/false); + req_header_parser_set->evaluateHeaders(header_map, nullptr); + EXPECT_TRUE(header_map.has("key")); + EXPECT_EQ("great", header_map.get_("key")); + + headers_values.Clear(); + auto& empty_entry = *headers_values.Add(); + empty_entry.set_key("empty"); + empty_entry.set_value(""); + + HeaderParserPtr req_header_parser_empty = + HeaderParser::configure(headers_values, /*append=*/false); + req_header_parser_empty->evaluateHeaders(header_map, nullptr); + EXPECT_FALSE(header_map.has("empty")); +} + TEST(HeaderParserTest, EvaluateEmptyHeaders) { const std::string yaml = R"EOF( match: { prefix: "/new_endpoint" } @@ -1283,11 +1346,10 @@ response_headers_to_remove: ["x-nope"] // Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't // combine set-cookie headers - std::vector out; - Http::HeaderUtility::getAllOfHeader(header_map, "set-cookie", out); + const auto out = header_map.get(Http::LowerCaseString("set-cookie")); ASSERT_EQ(out.size(), 2); - ASSERT_EQ(out[0], "foo"); - ASSERT_EQ(out[1], "bar"); + ASSERT_EQ(out[0]->value().getStringView(), "foo"); + ASSERT_EQ(out[1]->value().getStringView(), "bar"); } TEST(HeaderParserTest, EvaluateRequestHeadersRemoveBeforeAdd) { diff --git 
a/test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-4709439954485248 b/test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-4709439954485248 new file mode 100644 index 000000000000..be4e080e72f0 --- /dev/null +++ b/test/common/router/header_parser_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-4709439954485248 @@ -0,0 +1,18 @@ +headers_to_add { + header { + key: "A" + value: "%START_TIME(%f%f%f%E92E1%f%f%E4/1f%E1%f%f%E4/1f%E46E%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768E4/1f%E47E768f%\305\2231%f%f%E461f%Eff%f%E916%0f%6f%)%" + } +} +headers_to_add { + header { + key: "A" + value: "%START_TIME(%f%f%f%E92E1%f%f%E4/1f%E1%f%f%E4/1f%E3E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E46E768f%\305\2231%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E%%Eff%f%E0f%6f%)%" + } +} +headers_to_add { + header { + key: "A" + value: "%START_TIME(%f%f%f%E92E1%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%EE768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f461f%Enf%f%f%E768-5517521057234699755%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%E4/1f%E47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E1%f%f%E4/1f%E46E768f%\305\2231%f%f%E461f%Eff%f%E922f%E0f%E46116%f%f%E461f%Eff%f%f%E4/1f%E1%f%f%EE47E768f%E1%f%f%E461f%Eff%f%f%E4/0f%E11f%E1%f%f%E%%Eff%f%E0f%6f%)%" + } +} diff --git a/test/common/router/header_parser_corpus/invalid_10 b/test/common/router/header_parser_corpus/invalid_10 new file mode 100644 index 000000000000..917ea8f3390e --- /dev/null +++ b/test/common/router/header_parser_corpus/invalid_10 @@ -0,0 +1,6 @@ +headers_to_add { + header { + key: "A" + value: "%START_TIME(%4On%)%" + } +} diff --git a/test/common/router/header_parser_corpus/invalid_11 b/test/common/router/header_parser_corpus/invalid_11 new file mode 100644 index 000000000000..d28fe316e10d --- /dev/null +++ b/test/common/router/header_parser_corpus/invalid_11 @@ -0,0 +1,6 @@ +headers_to_add { + header { + key: "A" + value: "%START_TIME(%4En%)%" + } +} diff --git a/test/common/router/header_parser_corpus/invalid_9 b/test/common/router/header_parser_corpus/invalid_9 new file mode 100644 index 000000000000..dbd1276a4de1 --- /dev/null +++ b/test/common/router/header_parser_corpus/invalid_9 @@ -0,0 +1,6 @@ +headers_to_add { + header { + key: "A" + value: "%START_TIME(%En%)%" + } +} diff --git a/test/common/router/header_parser_corpus/valid b/test/common/router/header_parser_corpus/valid new file mode 100644 index 000000000000..615051c50f67 --- /dev/null +++ b/test/common/router/header_parser_corpus/valid @@ -0,0 +1,6 @@ +headers_to_add { + header { + key: "A" + value: "%START_TIME(%E4n%)%" + } +} diff --git a/test/common/router/route_corpus/Response_headers_to_remove b/test/common/router/route_corpus/Response_headers_to_remove new file mode 100644 index 000000000000..eec0704a8eb3 --- /dev/null +++ b/test/common/router/route_corpus/Response_headers_to_remove @@ -0,0 
+1,140 @@ +config { + virtual_hosts { + name: "j" + domains: "$" + routes { + match { + path: ")" + } + route { + weighted_clusters { + clusters { + name: "$$" + weight { + value: 870 + } + metadata_match { + filter_metadata { + key: "envoy.lb" + value { + } + } + } + request_headers_to_remove: "&" + } + clusters { + name: "$" + weight { + value: 1868759072 + } + metadata_match { + filter_metadata { + key: "envoy.lb" + value { + fields { + key: "" + value { + } + } + fields { + key: " " + value { + } + } + fields { + key: ")" + value { + } + } + fields { + key: "-209" + value { + } + } + fields { + key: "1" + value { + string_value: "z" + } + } + fields { + key: "2" + value { + } + } + fields { + key: "5" + value { + } + } + fields { + key: "8" + value { + } + } + fields { + key: "@" + value { + } + } + fields { + key: "Q" + value { + } + } + fields { + key: "]" + value { + } + } + fields { + key: "^" + value { + } + } + fields { + key: "i" + value { + } + } + fields { + key: "j" + value { + } + } + fields { + key: "p" + value { + } + } + fields { + key: "x" + value { + } + } + } + } + } + } + runtime_key_prefix: "\177" + } + metadata_match { + filter_metadata { + key: "envoy.lb" + value { + fields { + key: "9" + value { + } + } + } + } + } + upgrade_configs { + } + } + } + response_headers_to_remove: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + include_attempt_count_in_response: true + } +} diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5671270751141888 b/test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5671270751141888 new file mode 100644 index 000000000000..f7d0d92584bc --- /dev/null +++ b/test/common/router/route_corpus/clusterfuzz-testcase-route_fuzz_test-5671270751141888 @@ -0,0 +1,140 @@ +config { + virtual_hosts { + name: "j" + domains: "$" + routes { + match { + path: ")" + } + route { + weighted_clusters { + clusters { + name: "$$" + weight { + value: 870 + } + metadata_match { + filter_metadata { + key: "envoy.lb" + value { + } + } + } + request_headers_to_remove: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + clusters { + name: "$" + weight { + value: 1868759072 + } + metadata_match { + filter_metadata { + key: "envoy.lb" + value { + fields { + key: "" + value { + } + } + fields { + key: " " + value { + } + } + fields { + key: ")" + value { + } + } + fields { + key: "-209" + value { + } + } + fields { + key: "1" + value { + string_value: "z" + } + } + fields { + key: "2" + value { + } + } + fields { + key: "5" + value { + } + } + fields { + key: "8" + value { + } + } + fields { + key: "@" + value { + } + } + fields { + key: "Q" + value { + } + } + fields { + key: "]" + value { + } + } + fields { + key: "^" + value { + } + } + fields { + key: "i" + value { + } + } + fields { + key: "j" + value { + } + } + fields { + key: "p" + value { + } + } + fields { + key: "x" + value { + } + } + } + } + } + } + runtime_key_prefix: "\177" + } + metadata_match { + filter_metadata { + key: "envoy.lb" + value { + fields { + key: "9" + value { + } + } + } + } + } + upgrade_configs { + } + } + } + response_headers_to_remove: "&" + include_attempt_count_in_response: true + } +} diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 7d5fba68835f..bad535cbf100 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -481,7 +481,7 @@ TEST_F(RateLimitPolicyEntryTest, 
GenericKeyWithEmptyDescriptorKey) { testing::ContainerEq(descriptors_)); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { +TEST_F(RateLimitPolicyEntryTest, DEPRECATED_FEATURE_TEST(DynamicMetaDataMatch)) { const std::string yaml = R"EOF( actions: - dynamic_metadata: @@ -513,11 +513,108 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { testing::ContainerEq(descriptors_)); } +TEST_F(RateLimitPolicyEntryTest, MetaDataMatchDynamicSourceByDefault) { + const std::string yaml = R"EOF( +actions: +- metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, MetaDataMatchDynamicSource) { + const std::string yaml = R"EOF( +actions: +- metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + source: DYNAMIC + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, MetaDataMatchRouteEntrySource) { + const std::string yaml = R"EOF( +actions: +- metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + source: ROUTE_ENTRY + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + TestUtility::loadFromYaml(metadata_yaml, route_.metadata_); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + dynamic_metadata_); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + // Tests that the default_value is used in the descriptor when the metadata_key is empty. 
-TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatchWithDefaultValue) { +TEST_F(RateLimitPolicyEntryTest, MetaDataNoMatchWithDefaultValue) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key default_value: fake_value metadata_key: @@ -546,10 +643,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatchWithDefaultValue) { testing::ContainerEq(descriptors_)); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) { +TEST_F(RateLimitPolicyEntryTest, MetaDataNoMatch) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key metadata_key: key: 'envoy.xxx' @@ -576,10 +673,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) { EXPECT_TRUE(descriptors_.empty()); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) { +TEST_F(RateLimitPolicyEntryTest, MetaDataEmptyValue) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key metadata_key: key: 'envoy.xxx' @@ -606,10 +703,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) { EXPECT_TRUE(descriptors_.empty()); } // Tests that no descriptor is generated when both the metadata_key and default_value are empty. -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataAndDefaultValueEmpty) { +TEST_F(RateLimitPolicyEntryTest, MetaDataAndDefaultValueEmpty) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key default_value: "" metadata_key: @@ -637,10 +734,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataAndDefaultValueEmpty) { EXPECT_TRUE(descriptors_.empty()); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNonStringMatch) { +TEST_F(RateLimitPolicyEntryTest, MetaDataNonStringNoMatch) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key metadata_key: key: 'envoy.xxx' diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 50dee8922fe3..752bedba83ea 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -459,7 +459,7 @@ TEST_F(RouterTest, RouteNotFound) { EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details_, "route_not_found"); + EXPECT_EQ(callbacks_.details(), "route_not_found"); } TEST_F(RouterTest, ClusterNotFound) { @@ -473,7 +473,7 @@ TEST_F(RouterTest, ClusterNotFound) { EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details_, "cluster_not_found"); + EXPECT_EQ(callbacks_.details(), "cluster_not_found"); } TEST_F(RouterTest, PoolFailureWithPriority) { @@ -506,7 +506,7 @@ TEST_F(RouterTest, PoolFailureWithPriority) { // Pool failure, so upstream request was not initiated. 
EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details_, + EXPECT_EQ(callbacks_.details(), "upstream_reset_before_response_started{connection failure,tls version mismatch}"); } @@ -663,8 +663,9 @@ TEST_F(RouterTest, AddCookie) { EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void { - EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().getStringView()}, - "foo=\"" + cookie_value + "\"; Max-Age=1337; HttpOnly"); + EXPECT_EQ( + std::string{headers.get(Http::Headers::get().SetCookie)[0]->value().getStringView()}, + "foo=\"" + cookie_value + "\"; Max-Age=1337; HttpOnly"); })); expectResponseTimerCreate(); @@ -672,11 +673,10 @@ TEST_F(RouterTest, AddCookie) { HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); - absl::string_view rc_details2 = "via_upstream"; - EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails(rc_details2)); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder->decodeHeaders(std::move(response_headers), true); + EXPECT_EQ(callbacks_.details(), "via_upstream"); // When the router filter gets reset we should cancel the pool request. router_.onDestroy(); } @@ -715,8 +715,9 @@ TEST_F(RouterTest, AddCookieNoDuplicate) { EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void { - EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().getStringView()}, - "foo=baz"); + EXPECT_EQ( + std::string{headers.get(Http::Headers::get().SetCookie)[0]->value().getStringView()}, + "foo=baz"); })); expectResponseTimerCreate(); @@ -878,7 +879,7 @@ TEST_F(RouterTest, NoHost) { EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details_, "no_healthy_upstream"); + EXPECT_EQ(callbacks_.details(), "no_healthy_upstream"); } TEST_F(RouterTest, MaintenanceMode) { @@ -905,7 +906,7 @@ TEST_F(RouterTest, MaintenanceMode) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->load_report_stats_store_ .counter("upstream_rq_dropped") .value()); - EXPECT_EQ(callbacks_.details_, "maintenance_mode"); + EXPECT_EQ(callbacks_.details(), "maintenance_mode"); } // Validate that we don't set x-envoy-overloaded when Envoy header suppression @@ -943,8 +944,6 @@ TEST_F(RouterTest, ResponseCodeDetailsSetByUpstream) { Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); - absl::string_view rc_details = StreamInfo::ResponseCodeDetails::get().ViaUpstream; - EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails(rc_details)); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); } @@ -973,7 +972,7 @@ TEST_F(RouterTest, EnvoyUpstreamServiceTime) { EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([](Http::HeaderMap& headers, bool) { - EXPECT_NE(nullptr, headers.get(Http::Headers::get().EnvoyUpstreamServiceTime)); + EXPECT_FALSE(headers.get(Http::Headers::get().EnvoyUpstreamServiceTime).empty()); })); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); @@ -1141,7 +1140,7 @@ TEST_F(RouterTest, 
EnvoyAttemptCountInResponsePresentWithLocalReply) { EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); - EXPECT_EQ(callbacks_.details_, "upstream_reset_before_response_started{connection failure}"); + EXPECT_EQ(callbacks_.details(), "upstream_reset_before_response_started{connection failure}"); } // Validate that the x-envoy-attempt-count header in the downstream response reflects the number of @@ -1242,10 +1241,10 @@ void RouterTestBase::testAppendCluster(absl::optional clu EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([&cluster_header_name](Http::HeaderMap& headers, bool) { - const Http::HeaderEntry* cluster_header = + const auto cluster_header = headers.get(cluster_header_name.value_or(Http::Headers::get().EnvoyCluster)); - EXPECT_NE(nullptr, cluster_header); - EXPECT_EQ("fake_cluster", cluster_header->value().getStringView()); + EXPECT_FALSE(cluster_header.empty()); + EXPECT_EQ("fake_cluster", cluster_header[0]->value().getStringView()); })); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); @@ -1300,15 +1299,15 @@ void RouterTestBase::testAppendUpstreamHost( EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([&hostname_header_name, &host_address_header_name](Http::HeaderMap& headers, bool) { - const Http::HeaderEntry* hostname_header = + const auto hostname_header = headers.get(hostname_header_name.value_or(Http::Headers::get().EnvoyUpstreamHostname)); - EXPECT_NE(nullptr, hostname_header); - EXPECT_EQ("scooby.doo", hostname_header->value().getStringView()); + EXPECT_FALSE(hostname_header.empty()); + EXPECT_EQ("scooby.doo", hostname_header[0]->value().getStringView()); - const Http::HeaderEntry* host_address_header = headers.get( + const auto host_address_header = headers.get( host_address_header_name.value_or(Http::Headers::get().EnvoyUpstreamHostAddress)); - EXPECT_NE(nullptr, host_address_header); - EXPECT_EQ("10.0.0.5:9211", host_address_header->value().getStringView()); + EXPECT_FALSE(host_address_header.empty()); + EXPECT_EQ("10.0.0.5:9211", host_address_header[0]->value().getStringView()); })); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); @@ -1433,7 +1432,7 @@ TEST_F(RouterTestSuppressEnvoyHeaders, EnvoyUpstreamServiceTime) { {":status", "200"}, {"x-envoy-upstream-service-time", "0"}}; EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([](Http::HeaderMap& headers, bool) { - EXPECT_EQ(nullptr, headers.get(Http::Headers::get().EnvoyUpstreamServiceTime)); + EXPECT_TRUE(headers.get(Http::Headers::get().EnvoyUpstreamServiceTime).empty()); })); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); @@ -2331,9 +2330,9 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) { EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResult(Upstream::Outlier::Result::LocalOriginTimeout, _)); EXPECT_CALL(*per_try_timeout_, disableTimer()); - EXPECT_CALL(*response_timeout_, disableTimer()); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); + EXPECT_CALL(*response_timeout_, disableTimer()); Http::TestResponseHeaderMapImpl response_headers{ {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; 
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); @@ -3119,7 +3118,7 @@ TEST_F(RouterTest, RetryRequestDuringBodyBufferLimitExceeded) { Buffer::OwnedImpl buf2(body2); router_.decodeData(buf2, false); - EXPECT_EQ(callbacks_.details_, "request_payload_exceeded_retry_buffer_limit"); + EXPECT_EQ(callbacks_.details(), "request_payload_exceeded_retry_buffer_limit"); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("retry_or_shadow_abandoned") .value()); @@ -4733,7 +4732,7 @@ TEST_F(RouterTest, Shadow) { EXPECT_CALL(*shadow_writer_, shadow_("foo", _, _)) .WillOnce(Invoke([](const std::string&, Http::RequestMessagePtr& request, const Http::AsyncClient::RequestOptions& options) -> void { - EXPECT_NE(nullptr, request->body()); + EXPECT_NE(request->body().length(), 0); EXPECT_NE(nullptr, request->trailers()); EXPECT_EQ(absl::optional(10), options.timeout); EXPECT_TRUE(options.sampled_); @@ -4741,7 +4740,7 @@ TEST_F(RouterTest, Shadow) { EXPECT_CALL(*shadow_writer_, shadow_("fizz", _, _)) .WillOnce(Invoke([](const std::string&, Http::RequestMessagePtr& request, const Http::AsyncClient::RequestOptions& options) -> void { - EXPECT_NE(nullptr, request->body()); + EXPECT_NE(request->body().length(), 0); EXPECT_NE(nullptr, request->trailers()); EXPECT_EQ(absl::optional(10), options.timeout); EXPECT_FALSE(options.sampled_); @@ -6221,7 +6220,7 @@ TEST_F(WatermarkTest, RetryRequestNotComplete) { EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); - EXPECT_EQ(callbacks_.details_, "upstream_reset_before_response_started{remote reset}"); + EXPECT_EQ(callbacks_.details(), "upstream_reset_before_response_started{remote reset}"); } class RouterTestChildSpan : public RouterTestBase { @@ -6498,7 +6497,7 @@ TEST_P(RouterTestStrictCheckOneHeader, SingleInvalidHeader) { })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, router_.decodeHeaders(req_headers, true)); - EXPECT_EQ(callbacks_.details_, + EXPECT_EQ(callbacks_.details(), fmt::format("request_headers_failed_strict_check{{{}}}", checked_header)); } @@ -6566,7 +6565,7 @@ TEST_P(RouterTestStrictCheckAllHeaders, MultipleInvalidHeaders) { })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, router_.decodeHeaders(headers, true)); - EXPECT_THAT(callbacks_.details_, + EXPECT_THAT(callbacks_.details(), StartsWith(fmt::format("request_headers_failed_strict_check{{"))); router_.onDestroy(); } diff --git a/test/common/runtime/filesystem_setup.sh b/test/common/runtime/filesystem_setup.sh index ef27243da854..60c4f6fdd0b5 100755 --- a/test/common/runtime/filesystem_setup.sh +++ b/test/common/runtime/filesystem_setup.sh @@ -6,7 +6,7 @@ TEST_DATA=test/common/runtime/test_data # Regular runtime tests. 
cd "${TEST_SRCDIR}/envoy" -rm -rf "${TEST_TMPDIR}/${TEST_DATA}" +rm -rf "${TEST_TMPDIR:?}/${TEST_DATA}" mkdir -p "${TEST_TMPDIR}/${TEST_DATA}" cp -RfL "${TEST_DATA}"/* "${TEST_TMPDIR}/${TEST_DATA}" chmod -R u+rwX "${TEST_TMPDIR}/${TEST_DATA}" @@ -25,9 +25,13 @@ if [[ -z "${WINDIR}" ]]; then ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/subdir" "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/badlink" ln -sf "${LOOP_PATH}" "${LOOP_PATH}"/loop else - win_test_root="$(echo $TEST_TMPDIR/$TEST_DATA | tr '/' '\\')" + # see https://github.com/koalaman/shellcheck/issues/861 + # shellcheck disable=SC1003 + win_test_root="$(echo "${TEST_TMPDIR}/${TEST_DATA}" | tr '/' '\\')" cmd.exe /C "mklink /D ${win_test_root}\\current ${win_test_root}\\root" cmd.exe /C "mklink /D ${win_test_root}\\root\\envoy\\badlink ${win_test_root}\\root\\envoy\\subdir" - win_loop_path="$(echo $LOOP_PATH | tr '/' '\\')" + # see https://github.com/koalaman/shellcheck/issues/861 + # shellcheck disable=SC1003 + win_loop_path="$(echo "$LOOP_PATH" | tr '/' '\\')" cmd.exe /C "mklink /D ${win_loop_path}\\loop ${win_loop_path}" fi diff --git a/test/common/runtime/runtime_protos_test.cc b/test/common/runtime/runtime_protos_test.cc index e8a61f27c9c3..97eb4b89ec1e 100644 --- a/test/common/runtime/runtime_protos_test.cc +++ b/test/common/runtime/runtime_protos_test.cc @@ -24,6 +24,58 @@ class RuntimeProtosTest : public testing::Test { NiceMock runtime_; }; +TEST_F(RuntimeProtosTest, PercentBasicTest) { + envoy::config::core::v3::RuntimePercent percent_proto; + std::string yaml(R"EOF( +runtime_key: "foo.bar" +default_value: + value: 4.2 +)EOF"); + TestUtility::loadFromYamlAndValidate(yaml, percent_proto); + Percentage test_percent(percent_proto, runtime_); + + // Basic double values and overrides. + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 4.2)); + EXPECT_EQ(4.2, test_percent.value()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 4.2)).WillOnce(Return(1.337)); + EXPECT_EQ(1.337, test_percent.value()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 4.2)).WillOnce(Return(1)); + EXPECT_EQ(1.0, test_percent.value()); + + // Verify handling of bogus percentages (outside [0,100]). + yaml = R"EOF( +runtime_key: "foo.bar" +default_value: + value: -20 +)EOF"; + EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, percent_proto), EnvoyException); + + yaml = R"EOF( +runtime_key: "foo.bar" +default_value: + value: 400 +)EOF"; + EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, percent_proto), EnvoyException); + + yaml = R"EOF( +runtime_key: "foo.bar" +default_value: + value: 23.0 +)EOF"; + TestUtility::loadFromYamlAndValidate(yaml, percent_proto); + Percentage test_percent2(percent_proto, runtime_); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 23.0)); + EXPECT_EQ(23.0, test_percent2.value()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 23.0)).WillOnce(Return(1.337)); + EXPECT_EQ(1.337, test_percent2.value()); + + // Return default value if bogus runtime values given. 
+ EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 23.0)).WillOnce(Return(-10.0)); + EXPECT_EQ(23.0, test_percent2.value()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 23.0)).WillOnce(Return(160.0)); + EXPECT_EQ(23.0, test_percent2.value()); +} + TEST_F(RuntimeProtosTest, DoubleBasicTest) { envoy::config::core::v3::RuntimeDouble double_proto; std::string yaml(R"EOF( diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 8e04b273263f..0493f5127b17 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -18,7 +18,6 @@ envoy_cc_test( srcs = ["allocator_impl_test.cc"], deps = [ "//source/common/stats:allocator_lib", - "//source/common/stats:symbol_table_creator_lib", "//test/test_common:logging_lib", "//test/test_common:thread_factory_for_test_lib", ], @@ -29,7 +28,6 @@ envoy_cc_test( srcs = ["isolated_store_impl_test.cc"], deps = [ "//source/common/stats:isolated_store_lib", - "//source/common/stats:symbol_table_creator_lib", ], ) @@ -47,7 +45,6 @@ envoy_cc_test( srcs = ["metric_impl_test.cc"], deps = [ "//source/common/stats:allocator_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:utility_lib", "//test/test_common:logging_lib", ], @@ -85,7 +82,6 @@ envoy_cc_test( ":stat_test_utility_lib", "//source/common/stats:isolated_store_lib", "//source/common/stats:stat_merger_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:thread_local_store_lib", "//test/test_common:utility_lib", ], @@ -103,7 +99,6 @@ envoy_cc_test_library( "//source/common/common:assert_lib", "//source/common/memory:stats_lib", "//source/common/stats:isolated_store_lib", - "//source/common/stats:symbol_table_creator_lib", ], ) @@ -142,7 +137,6 @@ envoy_cc_test( ":stat_test_utility_lib", "//source/common/common:mutex_tracer_lib", "//source/common/memory:stats_lib", - "//source/common/stats:fake_symbol_table_lib", "//source/common/stats:symbol_table_lib", "//test/mocks/stats:stats_mocks", "//test/test_common:logging_lib", diff --git a/test/common/stats/allocator_impl_test.cc b/test/common/stats/allocator_impl_test.cc index b6579fdae696..571e53fd6810 100644 --- a/test/common/stats/allocator_impl_test.cc +++ b/test/common/stats/allocator_impl_test.cc @@ -1,7 +1,6 @@ #include #include "common/stats/allocator_impl.h" -#include "common/stats/symbol_table_creator.h" #include "test/test_common/logging.h" #include "test/test_common/thread_factory_for_test.h" @@ -15,23 +14,21 @@ namespace { class AllocatorImplTest : public testing::Test { protected: - AllocatorImplTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - pool_(*symbol_table_) {} + AllocatorImplTest() : alloc_(symbol_table_), pool_(symbol_table_) {} ~AllocatorImplTest() override { clearStorage(); } StatNameStorage makeStatStorage(absl::string_view name) { - return StatNameStorage(name, *symbol_table_); + return StatNameStorage(name, symbol_table_); } StatName makeStat(absl::string_view name) { return pool_.add(name); } void clearStorage() { pool_.clear(); - EXPECT_EQ(0, symbol_table_->numSymbols()); + EXPECT_EQ(0, symbol_table_.numSymbols()); } - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; AllocatorImpl alloc_; StatNamePool pool_; }; diff --git a/test/common/stats/isolated_store_impl_test.cc b/test/common/stats/isolated_store_impl_test.cc index ee618669cb2c..b3a82ad8773e 100644 --- a/test/common/stats/isolated_store_impl_test.cc +++ b/test/common/stats/isolated_store_impl_test.cc @@ -5,7 +5,6 @@ #include 
"common/stats/isolated_store_impl.h" #include "common/stats/null_counter.h" #include "common/stats/null_gauge.h" -#include "common/stats/symbol_table_creator.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" @@ -17,17 +16,16 @@ namespace Stats { class StatsIsolatedStoreImplTest : public testing::Test { protected: StatsIsolatedStoreImplTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), - store_(std::make_unique(*symbol_table_)), pool_(*symbol_table_) {} + : store_(std::make_unique(symbol_table_)), pool_(symbol_table_) {} ~StatsIsolatedStoreImplTest() override { pool_.clear(); store_.reset(); - EXPECT_EQ(0, symbol_table_->numSymbols()); + EXPECT_EQ(0, symbol_table_.numSymbols()); } StatName makeStatName(absl::string_view name) { return pool_.add(name); } - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; std::unique_ptr store_; StatNamePool pool_; }; diff --git a/test/common/stats/metric_impl_test.cc b/test/common/stats/metric_impl_test.cc index 75652e21f989..30f20a61ee3a 100644 --- a/test/common/stats/metric_impl_test.cc +++ b/test/common/stats/metric_impl_test.cc @@ -1,7 +1,6 @@ #include #include "common/stats/allocator_impl.h" -#include "common/stats/symbol_table_creator.h" #include "common/stats/utility.h" #include "test/test_common/logging.h" @@ -14,19 +13,17 @@ namespace { class MetricImplTest : public testing::Test { protected: - MetricImplTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - pool_(*symbol_table_) {} + MetricImplTest() : alloc_(symbol_table_), pool_(symbol_table_) {} ~MetricImplTest() override { clearStorage(); } StatName makeStat(absl::string_view name) { return pool_.add(name); } void clearStorage() { pool_.clear(); - EXPECT_EQ(0, symbol_table_->numSymbols()); + EXPECT_EQ(0, symbol_table_.numSymbols()); } - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; AllocatorImpl alloc_; StatNamePool pool_; }; diff --git a/test/common/stats/stat_merger_fuzz_test.cc b/test/common/stats/stat_merger_fuzz_test.cc index 70579f378676..c35c7fd9ea35 100644 --- a/test/common/stats/stat_merger_fuzz_test.cc +++ b/test/common/stats/stat_merger_fuzz_test.cc @@ -69,11 +69,9 @@ void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { // Fuzzer for symbol tables. 
DEFINE_FUZZER(const uint8_t* buf, size_t len) { - FakeSymbolTableImpl fake_symbol_table; SymbolTableImpl symbol_table; absl::string_view data(reinterpret_cast(buf), len); - testDynamicEncoding(data, fake_symbol_table); testDynamicEncoding(data, symbol_table); } diff --git a/test/common/stats/stat_merger_test.cc b/test/common/stats/stat_merger_test.cc index ee8b5bf65ae8..092fc531597b 100644 --- a/test/common/stats/stat_merger_test.cc +++ b/test/common/stats/stat_merger_test.cc @@ -2,7 +2,6 @@ #include "common/stats/isolated_store_impl.h" #include "common/stats/stat_merger.h" -#include "common/stats/symbol_table_creator.h" #include "common/stats/thread_local_store.h" #include "test/test_common/utility.h" @@ -303,43 +302,10 @@ TEST_F(StatMergerDynamicTest, DynamicsWithRealSymbolTable) { EXPECT_EQ(1, dynamicEncodeDecodeTest("D:hello,,,world")); } -TEST_F(StatMergerDynamicTest, DynamicsWithFakeSymbolTable) { - init(std::make_unique()); - - for (uint32_t i = 1; i < 256; ++i) { - char ch = static_cast(i); - absl::string_view one_char(&ch, 1); - EXPECT_EQ(0, dynamicEncodeDecodeTest(absl::StrCat("D:", one_char))) << "dynamic=" << one_char; - EXPECT_EQ(0, dynamicEncodeDecodeTest(one_char)) << "symbolic=" << one_char; - } - EXPECT_EQ(0, dynamicEncodeDecodeTest("normal")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:dynamic")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("hello.world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("hello..world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("hello...world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello.world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("hello.D:world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello.D:world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello,world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("one.D:two.three.D:four.D:five.six.D:seven,eight.nine")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:one,two,three")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("hello..world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello..world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("hello..D:world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello..D:world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello.D:.D:world")); - EXPECT_EQ(0, dynamicEncodeDecodeTest("aV.D:,b")); - - // TODO(#10008): these tests fail because fake/real symbol tables - // deal with empty components differently. - // EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello,,world")); - // EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello,,,world")); -} - class StatMergerThreadLocalTest : public testing::Test { protected: - SymbolTablePtr symbol_table_{SymbolTableCreator::makeSymbolTable()}; - AllocatorImpl alloc_{*symbol_table_}; + SymbolTableImpl symbol_table_; + AllocatorImpl alloc_{symbol_table_}; ThreadLocalStoreImpl store_{alloc_}; }; diff --git a/test/common/stats/stat_test_utility.cc b/test/common/stats/stat_test_utility.cc index 7cdbc08ab4dc..4b8b678a115e 100644 --- a/test/common/stats/stat_test_utility.cc +++ b/test/common/stats/stat_test_utility.cc @@ -101,7 +101,7 @@ void forEachSampleStat(int num_clusters, std::function } MemoryTest::Mode MemoryTest::mode() { -#if !defined(TCMALLOC) || defined(ENVOY_MEMORY_DEBUG_ENABLED) +#if !(defined(TCMALLOC) || defined(GPERFTOOLS_TCMALLOC)) || defined(ENVOY_MEMORY_DEBUG_ENABLED) // We can only test absolute memory usage if the malloc library is a known // quantity. This decision is centralized here. As the preferred malloc // library for Envoy is TCMALLOC that's what we test for here. 
If we switch @@ -119,12 +119,18 @@ MemoryTest::Mode MemoryTest::mode() { const size_t end_mem = Memory::Stats::totalCurrentlyAllocated(); bool can_measure_memory = end_mem > start_mem; + // As of Oct 8, 2020, tcmalloc has changed such that Memory::Stats::totalCurrentlyAllocated + // is not deterministic, even with single-threaded tests. When possible, this should be fixed, + // and the following block of code uncommented. This affects approximate comparisons, not + // just exact ones. +#if 0 if (getenv("ENVOY_MEMORY_TEST_EXACT") != nullptr) { // Set in "ci/do_ci.sh" for 'release' tests. RELEASE_ASSERT(can_measure_memory, "$ENVOY_MEMORY_TEST_EXACT is set for canonical memory measurements, " "but memory measurement looks broken"); return Mode::Canonical; } +#endif // Different versions of STL and other compiler/architecture differences may // also impact memory usage, so when not compiling with MEMORY_TEST_EXACT, diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index 6b46a0f05aea..1d0eaa3c66cf 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -5,7 +5,6 @@ #include "common/common/logger.h" #include "common/memory/stats.h" #include "common/stats/isolated_store_impl.h" -#include "common/stats/symbol_table_creator.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" @@ -164,18 +163,6 @@ class TestStore : public IsolatedStoreImpl { } \ } while (false) -class SymbolTableCreatorTestPeer { -public: - ~SymbolTableCreatorTestPeer() { SymbolTableCreator::setUseFakeSymbolTables(save_use_fakes_); } - - void setUseFakeSymbolTables(bool use_fakes) { - SymbolTableCreator::setUseFakeSymbolTables(use_fakes); - } - -private: - const bool save_use_fakes_{SymbolTableCreator::useFakeSymbolTables()}; -}; - // Serializes a number into a uint8_t array, and check that it de-serializes to // the same number. The serialized number is also returned, which can be // checked in unit tests, but ignored in fuzz tests. diff --git a/test/common/stats/stat_test_utility_test.cc b/test/common/stats/stat_test_utility_test.cc index a395adff8847..7a526d4d6f28 100644 --- a/test/common/stats/stat_test_utility_test.cc +++ b/test/common/stats/stat_test_utility_test.cc @@ -11,13 +11,9 @@ namespace { class StatTestUtilityTest : public testing::Test { protected: StatTestUtilityTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), test_store_(*symbol_table_), - dynamic_(*symbol_table_), symbolic_(*symbol_table_) { - symbol_table_creator_test_peer_.setUseFakeSymbolTables(false); - } + : test_store_(symbol_table_), dynamic_(symbol_table_), symbolic_(symbol_table_) {} - TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; TestUtil::TestStore test_store_; StatNameDynamicPool dynamic_; StatNamePool symbolic_; diff --git a/test/common/stats/symbol_table_fuzz_test.cc b/test/common/stats/symbol_table_fuzz_test.cc index 4284f05c7359..3d5ac4df99bc 100644 --- a/test/common/stats/symbol_table_fuzz_test.cc +++ b/test/common/stats/symbol_table_fuzz_test.cc @@ -15,19 +15,14 @@ namespace Fuzz { // Fuzzer for symbol tables. 
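// Illustrative sketch (editorial, not part of the patch): the stats test fixtures above drop
// SymbolTableCreator::makeSymbolTable() and the FakeSymbolTableImpl variant in favor of a
// concrete SymbolTableImpl member. Both shapes appear verbatim in the hunks; side by side:
//
//   // Before: heap-allocated table chosen at runtime, dereferenced at every use.
//   SymbolTablePtr symbol_table_{SymbolTableCreator::makeSymbolTable()};
//   AllocatorImpl alloc_{*symbol_table_};
//
//   // After: the real implementation held by value; call sites drop the '*'.
//   SymbolTableImpl symbol_table_;
//   AllocatorImpl alloc_{symbol_table_};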
DEFINE_FUZZER(const uint8_t* buf, size_t len) { FuzzedDataProvider provider(buf, len); - FakeSymbolTableImpl fake_symbol_table; SymbolTableImpl symbol_table; StatNamePool pool(symbol_table); - StatNamePool fake_pool(fake_symbol_table); StatNameDynamicPool dynamic_pool(symbol_table); - StatNameDynamicPool fake_dynamic_pool(fake_symbol_table); while (provider.remaining_bytes() != 0) { std::string next_data = provider.ConsumeRandomLengthString(provider.remaining_bytes()); StatName stat_name = pool.add(next_data); - StatName fake_stat_name = fake_pool.add(next_data); StatName dynamic_stat_name = dynamic_pool.add(next_data); - StatName fake_dynamic_stat_name = fake_dynamic_pool.add(next_data); // Encode the string directly first. TestUtil::serializeDeserializeString(next_data); @@ -49,9 +44,7 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { // string before comparing. absl::string_view trimmed_fuzz_data = StringUtil::removeTrailingCharacters(next_data, '.'); FUZZ_ASSERT(trimmed_fuzz_data == symbol_table.toString(stat_name)); - FUZZ_ASSERT(trimmed_fuzz_data == fake_symbol_table.toString(fake_stat_name)); FUZZ_ASSERT(trimmed_fuzz_data == symbol_table.toString(dynamic_stat_name)); - FUZZ_ASSERT(trimmed_fuzz_data == fake_symbol_table.toString(fake_dynamic_stat_name)); // The 'join' tests only work if the trimmed fuzz data is not empty. if (trimmed_fuzz_data.empty()) { @@ -84,10 +77,6 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { FUZZ_ASSERT(join(symbol_table, stat_name, dynamic_stat_name)); FUZZ_ASSERT(join(symbol_table, dynamic_stat_name, dynamic_stat_name)); FUZZ_ASSERT(join(symbol_table, dynamic_stat_name, stat_name)); - FUZZ_ASSERT(join(fake_symbol_table, fake_stat_name, fake_stat_name)); - FUZZ_ASSERT(join(fake_symbol_table, fake_stat_name, fake_dynamic_stat_name)); - FUZZ_ASSERT(join(fake_symbol_table, fake_dynamic_stat_name, fake_dynamic_stat_name)); - FUZZ_ASSERT(join(fake_symbol_table, fake_dynamic_stat_name, fake_stat_name)); } } diff --git a/test/common/stats/symbol_table_impl_test.cc b/test/common/stats/symbol_table_impl_test.cc index 5913b47b4be6..5ac09db06a4b 100644 --- a/test/common/stats/symbol_table_impl_test.cc +++ b/test/common/stats/symbol_table_impl_test.cc @@ -3,7 +3,6 @@ #include "common/common/macros.h" #include "common/common/mutex_tracer_impl.h" #include "common/memory/stats.h" -#include "common/stats/fake_symbol_table_impl.h" #include "common/stats/symbol_table_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -18,69 +17,35 @@ namespace Envoy { namespace Stats { -// See comments in fake_symbol_table_impl.h: we need to test two implementations -// of SymbolTable, which we'll do with a test parameterized on this enum. -// -// Note that some of the tests cover behavior that is specific to the real -// SymbolTableImpl, and thus early-exit when the param is Fake. -// -// TODO(jmarantz): un-parameterize this test once SymbolTable is fully deployed -// and FakeSymbolTableImpl can be deleted. 
-enum class SymbolTableType { - Real, - Fake, -}; - -class StatNameTest : public testing::TestWithParam<SymbolTableType> { +class StatNameTest : public testing::Test { protected: - StatNameTest() { - switch (GetParam()) { - case SymbolTableType::Real: { - auto table = std::make_unique<SymbolTableImpl>(); - real_symbol_table_ = table.get(); - table_ = std::move(table); - break; - } - case SymbolTableType::Fake: - auto table = std::make_unique<FakeSymbolTableImpl>(); - fake_symbol_table_ = table.get(); - table_ = std::move(table); - break; - } - pool_ = std::make_unique<StatNamePool>(*table_); - } - + StatNameTest() : pool_(table_) {} ~StatNameTest() override { clearStorage(); } void clearStorage() { - pool_->clear(); - EXPECT_EQ(0, table_->numSymbols()); + pool_.clear(); + EXPECT_EQ(0, table_.numSymbols()); } SymbolVec getSymbols(StatName stat_name) { return SymbolTableImpl::Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); } - Symbol monotonicCounter() { return real_symbol_table_->monotonicCounter(); } + Symbol monotonicCounter() { return table_.monotonicCounter(); } std::string encodeDecode(absl::string_view stat_name) { - return table_->toString(makeStat(stat_name)); + return table_.toString(makeStat(stat_name)); } - StatName makeStat(absl::string_view name) { return pool_->add(name); } + StatName makeStat(absl::string_view name) { return pool_.add(name); } std::vector<uint8_t> serializeDeserialize(uint64_t number) { return TestUtil::serializeDeserializeNumber(number); } - FakeSymbolTableImpl* fake_symbol_table_{nullptr}; - SymbolTableImpl* real_symbol_table_{nullptr}; - std::unique_ptr<SymbolTable> table_; - std::unique_ptr<StatNamePool> pool_; + SymbolTableImpl table_; + StatNamePool pool_; }; -INSTANTIATE_TEST_SUITE_P(StatNameTest, StatNameTest, - testing::ValuesIn({SymbolTableType::Real, SymbolTableType::Fake})); - -TEST_P(StatNameTest, SerializeBytes) { +TEST_F(StatNameTest, SerializeBytes) { EXPECT_EQ(std::vector<uint8_t>{1}, serializeDeserialize(1)); EXPECT_EQ(std::vector<uint8_t>{127}, serializeDeserialize(127)); EXPECT_EQ((std::vector<uint8_t>{128, 1}), serializeDeserialize(128)); @@ -114,7 +79,7 @@ TEST_P(StatNameTest, SerializeBytes) { } } -TEST_P(StatNameTest, SerializeStrings) { +TEST_F(StatNameTest, SerializeStrings) { TestUtil::serializeDeserializeString(""); TestUtil::serializeDeserializeString("Hello, world!"); TestUtil::serializeDeserializeString("embedded\0\nul"); @@ -126,22 +91,22 @@ TEST_P(StatNameTest, SerializeStrings) { TestUtil::serializeDeserializeString(std::string(20000000, 'a')); } -TEST_P(StatNameTest, AllocFree) { encodeDecode("hello.world"); } +TEST_F(StatNameTest, AllocFree) { encodeDecode("hello.world"); } -TEST_P(StatNameTest, TestArbitrarySymbolRoundtrip) { +TEST_F(StatNameTest, TestArbitrarySymbolRoundtrip) { const std::vector<std::string> stat_names = {"", " ", " ", ",", "\t", "$", "%", "`", ".x"}; for (auto& stat_name : stat_names) { EXPECT_EQ(stat_name, encodeDecode(stat_name)); } } -TEST_P(StatNameTest, TestEmpty) { +TEST_F(StatNameTest, TestEmpty) { EXPECT_TRUE(makeStat("").empty()); EXPECT_FALSE(makeStat("x").empty()); EXPECT_TRUE(StatName().empty()); } -TEST_P(StatNameTest, TestDynamic100k) { +TEST_F(StatNameTest, TestDynamic100k) { // Tests a variety different sizes of dynamic stat ranging to 500k, covering // potential corner cases of spilling over into multi-byte lengths. std::string stat_str("dyn.x"); @@ -154,11 +119,11 @@ TEST_P(StatNameTest, TestDynamic100k) { for (uint32_t i = stat_str.size(); i < size; ++i, ++ch) { stat_str += (ch == '.') ?
'x' : ch; } - StatNameDynamicStorage storage(stat_str, *table_); + StatNameDynamicStorage storage(stat_str, table_); StatName dynamic = storage.statName(); - EXPECT_EQ(stat_str, table_->toString(dynamic)); - SymbolTable::StoragePtr joined = table_->join({ab, dynamic, cd}); - EXPECT_EQ(absl::StrCat("a.b.", stat_str, ".c.d"), table_->toString(StatName(joined.get()))); + EXPECT_EQ(stat_str, table_.toString(dynamic)); + SymbolTable::StoragePtr joined = table_.join({ab, dynamic, cd}); + EXPECT_EQ(absl::StrCat("a.b.", stat_str, ".c.d"), table_.toString(StatName(joined.get()))); } }; @@ -174,21 +139,21 @@ TEST_P(StatNameTest, TestDynamic100k) { } } -TEST_P(StatNameTest, TestDynamicPools) { +TEST_F(StatNameTest, TestDynamicPools) { // Same test for a dynamically allocated name. The only difference between // the behavior with a remembered vs dynamic name is that when looking // up a remembered name, a mutex is not taken. But we have no easy way // to test for that. So we'll at least cover the code. - StatNameDynamicPool d1(*table_); + StatNameDynamicPool d1(table_); const StatName dynamic = d1.add("dynamic"); - EXPECT_EQ("dynamic", table_->toString(dynamic)); + EXPECT_EQ("dynamic", table_.toString(dynamic)); // The nature of the StatNameDynamicPool is that there is no sharing (and also no locks). EXPECT_NE(dynamic.data(), d1.add("dynamic").data()); // Make sure blanks are always the same. const StatName blank = d1.add(""); - EXPECT_EQ("", table_->toString(blank)); + EXPECT_EQ("", table_.toString(blank)); EXPECT_NE(blank.data(), d1.add("").data()); EXPECT_NE(blank.data(), d1.add("").data()); EXPECT_NE(blank.data(), d1.add(absl::string_view()).data()); @@ -197,29 +162,29 @@ TEST_P(StatNameTest, TestDynamicPools) { // different set. Here we will get a different StatName object // out of the second set, though it will share the same underlying // symbol-table symbol. - StatNameDynamicPool d2(*table_); + StatNameDynamicPool d2(table_); const StatName dynamic2 = d2.add("dynamic"); - EXPECT_EQ("dynamic", table_->toString(dynamic2)); + EXPECT_EQ("dynamic", table_.toString(dynamic2)); EXPECT_NE(dynamic2.data(), d2.add("dynamic").data()); // No storage sharing. 
EXPECT_NE(dynamic2.data(), dynamic.data()); } -TEST_P(StatNameTest, TestDynamicHash) { - StatNameDynamicPool dynamic(*table_); +TEST_F(StatNameTest, TestDynamicHash) { + StatNameDynamicPool dynamic(table_); const StatName d1 = dynamic.add("dynamic"); const StatName d2 = dynamic.add("dynamic"); EXPECT_EQ(d1, d2); EXPECT_EQ(d1.hash(), d2.hash()); } -TEST_P(StatNameTest, Test100KSymbolsRoundtrip) { +TEST_F(StatNameTest, Test100KSymbolsRoundtrip) { for (int i = 0; i < 100 * 1000; ++i) { const std::string stat_name = absl::StrCat("symbol_", i); EXPECT_EQ(stat_name, encodeDecode(stat_name)); } } -TEST_P(StatNameTest, TwoHundredTwoLevel) { +TEST_F(StatNameTest, TwoHundredTwoLevel) { for (int i = 0; i < 200; ++i) { const std::string stat_name = absl::StrCat("symbol_", i); EXPECT_EQ(stat_name, encodeDecode(stat_name)); @@ -227,12 +192,12 @@ TEST_P(StatNameTest, TwoHundredTwoLevel) { EXPECT_EQ("http.foo", encodeDecode("http.foo")); } -TEST_P(StatNameTest, TestLongSymbolName) { +TEST_F(StatNameTest, TestLongSymbolName) { std::string long_name(100000, 'a'); EXPECT_EQ(long_name, encodeDecode(long_name)); } -TEST_P(StatNameTest, TestLongSequence) { +TEST_F(StatNameTest, TestLongSequence) { std::string long_name("a"); for (int i = 0; i < 100000; ++i) { absl::StrAppend(&long_name, ".a"); @@ -241,7 +206,7 @@ TEST_P(StatNameTest, TestLongSequence) { EXPECT_EQ(long_name, encodeDecode(long_name)); } -TEST_P(StatNameTest, TestUnusualDelimitersRoundtrip) { +TEST_F(StatNameTest, TestUnusualDelimitersRoundtrip) { const std::vector stat_names = {".x", "..x", "...x", "foo", "foo.x", ".foo", ".foo.x", ".foo..x", "..foo.x", "..foo..x"}; for (auto& stat_name : stat_names) { @@ -249,37 +214,31 @@ TEST_P(StatNameTest, TestUnusualDelimitersRoundtrip) { } } -TEST_P(StatNameTest, TestSuccessfulDoubleLookup) { +TEST_F(StatNameTest, TestSuccessfulDoubleLookup) { StatName stat_name_1(makeStat("foo.bar.baz")); StatName stat_name_2(makeStat("foo.bar.baz")); EXPECT_EQ(stat_name_1, stat_name_2); } -TEST_P(StatNameTest, TestSuccessfulDecode) { +TEST_F(StatNameTest, TestSuccessfulDecode) { std::string stat_name = "foo.bar.baz"; StatName stat_name_1(makeStat(stat_name)); StatName stat_name_2(makeStat(stat_name)); - EXPECT_EQ(table_->toString(stat_name_1), table_->toString(stat_name_2)); - EXPECT_EQ(table_->toString(stat_name_1), stat_name); + EXPECT_EQ(table_.toString(stat_name_1), table_.toString(stat_name_2)); + EXPECT_EQ(table_.toString(stat_name_1), stat_name); } class StatNameDeathTest : public StatNameTest { public: void decodeSymbolVec(const SymbolVec& symbol_vec) { - Thread::LockGuard lock(real_symbol_table_->lock_); + Thread::LockGuard lock(table_.lock_); for (Symbol symbol : symbol_vec) { - real_symbol_table_->fromSymbol(symbol); + table_.fromSymbol(symbol); } } }; -INSTANTIATE_TEST_SUITE_P(StatNameDeathTest, StatNameDeathTest, - testing::ValuesIn({SymbolTableType::Real})); - -TEST_P(StatNameDeathTest, TestBadDecodes) { - if (GetParam() == SymbolTableType::Fake) { - return; - } +TEST_F(StatNameDeathTest, TestBadDecodes) { { // If a symbol doesn't exist, decoding it should trigger an ASSERT() and crash. SymbolVec bad_symbol_vec = {1}; // symbol 0 is the empty symbol. 
@@ -297,17 +256,14 @@ TEST_P(StatNameDeathTest, TestBadDecodes) { } } -TEST_P(StatNameTest, TestDifferentStats) { +TEST_F(StatNameTest, TestDifferentStats) { StatName stat_name_1(makeStat("foo.bar")); StatName stat_name_2(makeStat("bar.foo")); - EXPECT_NE(table_->toString(stat_name_1), table_->toString(stat_name_2)); + EXPECT_NE(table_.toString(stat_name_1), table_.toString(stat_name_2)); EXPECT_NE(stat_name_1, stat_name_2); } -TEST_P(StatNameTest, TestSymbolConsistency) { - if (GetParam() == SymbolTableType::Fake) { - return; - } +TEST_F(StatNameTest, TestSymbolConsistency) { StatName stat_name_1(makeStat("foo.bar")); StatName stat_name_2(makeStat("bar.foo")); // We expect the encoding of "foo" in one context to be the same as another. @@ -317,20 +273,20 @@ TEST_P(StatNameTest, TestSymbolConsistency) { EXPECT_EQ(vec_2[0], vec_1[1]); } -TEST_P(StatNameTest, TestIgnoreTrailingDots) { +TEST_F(StatNameTest, TestIgnoreTrailingDots) { EXPECT_EQ("foo.bar", encodeDecode("foo.bar.")); EXPECT_EQ("foo.bar", encodeDecode("foo.bar...")); EXPECT_EQ("", encodeDecode(".")); EXPECT_EQ("", encodeDecode("..")); } -TEST_P(StatNameTest, TestSameValueOnPartialFree) { +TEST_F(StatNameTest, TestSameValueOnPartialFree) { // This should hold true for components as well. Since "foo" persists even when "foo.bar" is // freed, we expect both instances of "foo" to have the same symbol. makeStat("foo"); - StatNameStorage stat_foobar_1("foo.bar", *table_); + StatNameStorage stat_foobar_1("foo.bar", table_); SymbolVec stat_foobar_1_symbols = getSymbols(stat_foobar_1.statName()); - stat_foobar_1.free(*table_); + stat_foobar_1.free(table_); StatName stat_foobar_2(makeStat("foo.bar")); SymbolVec stat_foobar_2_symbols = getSymbols(stat_foobar_2); @@ -339,11 +295,7 @@ TEST_P(StatNameTest, TestSameValueOnPartialFree) { // And we have no expectation for the "bar" components, because of the free pool. } -TEST_P(StatNameTest, FreePoolTest) { - if (GetParam() == SymbolTableType::Fake) { - return; - } - +TEST_F(StatNameTest, FreePoolTest) { // To ensure that the free pool is being used, we should be able to cycle through a large number // of stats while validating that: // a) the size of the table has not increased, and @@ -357,11 +309,11 @@ TEST_P(StatNameTest, FreePoolTest) { makeStat("4a"); makeStat("5a"); EXPECT_EQ(monotonicCounter(), 6); - EXPECT_EQ(table_->numSymbols(), 5); + EXPECT_EQ(table_.numSymbols(), 5); clearStorage(); } EXPECT_EQ(monotonicCounter(), 6); - EXPECT_EQ(table_->numSymbols(), 0); + EXPECT_EQ(table_.numSymbols(), 0); // These are different strings being encoded, but they should recycle through the same symbols as // the stats above. @@ -371,59 +323,59 @@ TEST_P(StatNameTest, FreePoolTest) { makeStat("4b"); makeStat("5b"); EXPECT_EQ(monotonicCounter(), 6); - EXPECT_EQ(table_->numSymbols(), 5); + EXPECT_EQ(table_.numSymbols(), 5); makeStat("6"); EXPECT_EQ(monotonicCounter(), 7); - EXPECT_EQ(table_->numSymbols(), 6); + EXPECT_EQ(table_.numSymbols(), 6); } -TEST_P(StatNameTest, TestShrinkingExpectation) { +TEST_F(StatNameTest, TestShrinkingExpectation) { // We expect that as we free stat names, the memory used to store those underlying symbols will // be freed. // ::size() is a public function, but should only be used for testing. 
- size_t table_size_0 = table_->numSymbols(); + size_t table_size_0 = table_.numSymbols(); auto make_stat_storage = [this](absl::string_view name) -> StatNameStorage { - return StatNameStorage(name, *table_); + return StatNameStorage(name, table_); }; StatNameStorage stat_a(make_stat_storage("a")); - size_t table_size_1 = table_->numSymbols(); + size_t table_size_1 = table_.numSymbols(); StatNameStorage stat_aa(make_stat_storage("a.a")); - EXPECT_EQ(table_size_1, table_->numSymbols()); + EXPECT_EQ(table_size_1, table_.numSymbols()); StatNameStorage stat_ab(make_stat_storage("a.b")); - size_t table_size_2 = table_->numSymbols(); + size_t table_size_2 = table_.numSymbols(); StatNameStorage stat_ac(make_stat_storage("a.c")); - size_t table_size_3 = table_->numSymbols(); + size_t table_size_3 = table_.numSymbols(); StatNameStorage stat_acd(make_stat_storage("a.c.d")); - size_t table_size_4 = table_->numSymbols(); + size_t table_size_4 = table_.numSymbols(); StatNameStorage stat_ace(make_stat_storage("a.c.e")); - size_t table_size_5 = table_->numSymbols(); + size_t table_size_5 = table_.numSymbols(); EXPECT_GE(table_size_5, table_size_4); - stat_ace.free(*table_); - EXPECT_EQ(table_size_4, table_->numSymbols()); + stat_ace.free(table_); + EXPECT_EQ(table_size_4, table_.numSymbols()); - stat_acd.free(*table_); - EXPECT_EQ(table_size_3, table_->numSymbols()); + stat_acd.free(table_); + EXPECT_EQ(table_size_3, table_.numSymbols()); - stat_ac.free(*table_); - EXPECT_EQ(table_size_2, table_->numSymbols()); + stat_ac.free(table_); + EXPECT_EQ(table_size_2, table_.numSymbols()); - stat_ab.free(*table_); - EXPECT_EQ(table_size_1, table_->numSymbols()); + stat_ab.free(table_); + EXPECT_EQ(table_size_1, table_.numSymbols()); - stat_aa.free(*table_); - EXPECT_EQ(table_size_1, table_->numSymbols()); + stat_aa.free(table_); + EXPECT_EQ(table_size_1, table_.numSymbols()); - stat_a.free(*table_); - EXPECT_EQ(table_size_0, table_->numSymbols()); + stat_a.free(table_); + EXPECT_EQ(table_size_0, table_.numSymbols()); } // In the tests above we use the StatNameStorage abstraction which is not the @@ -433,33 +385,33 @@ TEST_P(StatNameTest, TestShrinkingExpectation) { // safety-net here in terms of leaks is that SymbolTable will assert-fail if // you don't free all the StatNames you've allocated bytes for. StatNameList // provides this capability. -TEST_P(StatNameTest, List) { +TEST_F(StatNameTest, List) { StatName names[] = {makeStat("hello.world"), makeStat("goodbye.world")}; StatNameList name_list; EXPECT_FALSE(name_list.populated()); - table_->populateList(names, ARRAY_SIZE(names), name_list); + table_.populateList(names, ARRAY_SIZE(names), name_list); EXPECT_TRUE(name_list.populated()); // First, decode only the first name. name_list.iterate([this](StatName stat_name) -> bool { - EXPECT_EQ("hello.world", table_->toString(stat_name)); + EXPECT_EQ("hello.world", table_.toString(stat_name)); return false; }); // Decode all the names. 
std::vector decoded_strings; name_list.iterate([this, &decoded_strings](StatName stat_name) -> bool { - decoded_strings.push_back(table_->toString(stat_name)); + decoded_strings.push_back(table_.toString(stat_name)); return true; }); ASSERT_EQ(2, decoded_strings.size()); EXPECT_EQ("hello.world", decoded_strings[0]); EXPECT_EQ("goodbye.world", decoded_strings[1]); - name_list.clear(*table_); + name_list.clear(table_); EXPECT_FALSE(name_list.populated()); } -TEST_P(StatNameTest, HashTable) { +TEST_F(StatNameTest, HashTable) { StatName ac = makeStat("a.c"); StatName ab = makeStat("a.b"); StatName de = makeStat("d.e"); @@ -477,65 +429,64 @@ TEST_P(StatNameTest, HashTable) { EXPECT_EQ(3, name_int_map[de]); } -TEST_P(StatNameTest, Sort) { +TEST_F(StatNameTest, Sort) { StatNameVec names{makeStat("a.c"), makeStat("a.b"), makeStat("d.e"), makeStat("d.a.a"), makeStat("d.a"), makeStat("a.c")}; const StatNameVec sorted_names{makeStat("a.b"), makeStat("a.c"), makeStat("a.c"), makeStat("d.a"), makeStat("d.a.a"), makeStat("d.e")}; EXPECT_NE(names, sorted_names); - std::sort(names.begin(), names.end(), StatNameLessThan(*table_)); + std::sort(names.begin(), names.end(), StatNameLessThan(table_)); EXPECT_EQ(names, sorted_names); } -TEST_P(StatNameTest, Concat2) { - SymbolTable::StoragePtr joined = table_->join({makeStat("a.b"), makeStat("c.d")}); - EXPECT_EQ("a.b.c.d", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, Concat2) { + SymbolTable::StoragePtr joined = table_.join({makeStat("a.b"), makeStat("c.d")}); + EXPECT_EQ("a.b.c.d", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, ConcatFirstEmpty) { - SymbolTable::StoragePtr joined = table_->join({makeStat(""), makeStat("c.d")}); - EXPECT_EQ("c.d", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, ConcatFirstEmpty) { + SymbolTable::StoragePtr joined = table_.join({makeStat(""), makeStat("c.d")}); + EXPECT_EQ("c.d", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, ConcatSecondEmpty) { - SymbolTable::StoragePtr joined = table_->join({makeStat("a.b"), makeStat("")}); - EXPECT_EQ("a.b", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, ConcatSecondEmpty) { + SymbolTable::StoragePtr joined = table_.join({makeStat("a.b"), makeStat("")}); + EXPECT_EQ("a.b", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, ConcatAllEmpty) { - SymbolTable::StoragePtr joined = table_->join({makeStat(""), makeStat("")}); - EXPECT_EQ("", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, ConcatAllEmpty) { + SymbolTable::StoragePtr joined = table_.join({makeStat(""), makeStat("")}); + EXPECT_EQ("", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, Join3) { - SymbolTable::StoragePtr joined = - table_->join({makeStat("a.b"), makeStat("c.d"), makeStat("e.f")}); - EXPECT_EQ("a.b.c.d.e.f", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, Join3) { + SymbolTable::StoragePtr joined = table_.join({makeStat("a.b"), makeStat("c.d"), makeStat("e.f")}); + EXPECT_EQ("a.b.c.d.e.f", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, Join3FirstEmpty) { - SymbolTable::StoragePtr joined = table_->join({makeStat(""), makeStat("c.d"), makeStat("e.f")}); - EXPECT_EQ("c.d.e.f", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, Join3FirstEmpty) { + SymbolTable::StoragePtr joined = table_.join({makeStat(""), makeStat("c.d"), makeStat("e.f")}); + EXPECT_EQ("c.d.e.f", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, 
Join3SecondEmpty) { - SymbolTable::StoragePtr joined = table_->join({makeStat("a.b"), makeStat(""), makeStat("e.f")}); - EXPECT_EQ("a.b.e.f", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, Join3SecondEmpty) { + SymbolTable::StoragePtr joined = table_.join({makeStat("a.b"), makeStat(""), makeStat("e.f")}); + EXPECT_EQ("a.b.e.f", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, Join3ThirdEmpty) { - SymbolTable::StoragePtr joined = table_->join({makeStat("a.b"), makeStat("c.d"), makeStat("")}); - EXPECT_EQ("a.b.c.d", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, Join3ThirdEmpty) { + SymbolTable::StoragePtr joined = table_.join({makeStat("a.b"), makeStat("c.d"), makeStat("")}); + EXPECT_EQ("a.b.c.d", table_.toString(StatName(joined.get()))); } -TEST_P(StatNameTest, JoinAllEmpty) { - SymbolTable::StoragePtr joined = table_->join({makeStat(""), makeStat(""), makeStat("")}); - EXPECT_EQ("", table_->toString(StatName(joined.get()))); +TEST_F(StatNameTest, JoinAllEmpty) { + SymbolTable::StoragePtr joined = table_.join({makeStat(""), makeStat(""), makeStat("")}); + EXPECT_EQ("", table_.toString(StatName(joined.get()))); } // Validates that we don't get tsan or other errors when concurrently creating // a large number of stats. -TEST_P(StatNameTest, RacingSymbolCreation) { +TEST_F(StatNameTest, RacingSymbolCreation) { Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest(); MutexTracerImpl& mutex_tracer = MutexTracerImpl::getOrCreateTracer(); @@ -558,11 +509,11 @@ TEST_P(StatNameTest, RacingSymbolCreation) { // Block each thread on waking up a common condition variable, // so we make it likely to race on creation. creation.wait(); - StatNameManagedStorage initial(stat_name_string, *table_); + StatNameManagedStorage initial(stat_name_string, table_); creates.DecrementCount(); access.wait(); - StatNameManagedStorage second(stat_name_string, *table_); + StatNameManagedStorage second(stat_name_string, table_); accesses.DecrementCount(); wait.wait(); @@ -601,7 +552,7 @@ TEST_P(StatNameTest, RacingSymbolCreation) { } } -TEST_P(StatNameTest, MutexContentionOnExistingSymbols) { +TEST_F(StatNameTest, MutexContentionOnExistingSymbols) { Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest(); MutexTracerImpl& mutex_tracer = MutexTracerImpl::getOrCreateTracer(); @@ -624,11 +575,11 @@ TEST_P(StatNameTest, MutexContentionOnExistingSymbols) { // Block each thread on waking up a common condition variable, // so we make it likely to race on creation. 
creation.wait(); - StatNameManagedStorage initial(stat_name_string, *table_); + StatNameManagedStorage initial(stat_name_string, table_); creates.DecrementCount(); access.wait(); - StatNameManagedStorage second(stat_name_string, *table_); + StatNameManagedStorage second(stat_name_string, table_); accesses.DecrementCount(); wait.wait(); @@ -669,62 +620,55 @@ TEST_P(StatNameTest, MutexContentionOnExistingSymbols) { } } -TEST_P(StatNameTest, SharedStatNameStorageSetInsertAndFind) { +TEST_F(StatNameTest, SharedStatNameStorageSetInsertAndFind) { StatNameStorageSet set; const int iters = 10; for (int i = 0; i < iters; ++i) { std::string foo = absl::StrCat("foo", i); - auto insertion = set.insert(StatNameStorage(foo, *table_)); - StatNameManagedStorage temp_foo(foo, *table_); + auto insertion = set.insert(StatNameStorage(foo, table_)); + StatNameManagedStorage temp_foo(foo, table_); auto found = set.find(temp_foo.statName()); EXPECT_EQ(found->statName().data(), insertion.first->statName().data()); } - StatNameManagedStorage bar("bar", *table_); + StatNameManagedStorage bar("bar", table_); EXPECT_EQ(set.end(), set.find(bar.statName())); EXPECT_EQ(iters, set.size()); - set.free(*table_); + set.free(table_); } -TEST_P(StatNameTest, StatNameSet) { - StatNameSetPtr set(table_->makeSet("set")); +TEST_F(StatNameTest, StatNameSet) { + StatNameSetPtr set(table_.makeSet("set")); // Test that we get a consistent StatName object from a remembered name. set->rememberBuiltin("remembered"); const StatName fallback = set->add("fallback"); const Stats::StatName remembered = set->getBuiltin("remembered", fallback); - EXPECT_EQ("remembered", table_->toString(remembered)); + EXPECT_EQ("remembered", table_.toString(remembered)); EXPECT_EQ(remembered.data(), set->getBuiltin("remembered", fallback).data()); EXPECT_EQ(fallback.data(), set->getBuiltin("not_remembered", fallback).data()); } -TEST_P(StatNameTest, StorageCopy) { - StatName a = pool_->add("stat.name"); - StatNameStorage b_storage(a, *table_); +TEST_F(StatNameTest, StorageCopy) { + StatName a = pool_.add("stat.name"); + StatNameStorage b_storage(a, table_); StatName b = b_storage.statName(); EXPECT_EQ(a, b); EXPECT_NE(a.data(), b.data()); - b_storage.free(*table_); + b_storage.free(table_); } -TEST_P(StatNameTest, RecentLookups) { - if (GetParam() == SymbolTableType::Fake) { - // Touch these for coverage of fake symbol tables, but they'll have no effect. - table_->clearRecentLookups(); - table_->setRecentLookupCapacity(0); - return; - } - - StatNameSetPtr set1(table_->makeSet("set1")); - table_->setRecentLookupCapacity(10); - StatNameSetPtr set2(table_->makeSet("set2")); - StatNameDynamicPool d1(*table_); +TEST_F(StatNameTest, RecentLookups) { + StatNameSetPtr set1(table_.makeSet("set1")); + table_.setRecentLookupCapacity(10); + StatNameSetPtr set2(table_.makeSet("set2")); + StatNameDynamicPool d1(table_); d1.add("dynamic.stat1"); - StatNameDynamicPool d2(*table_); + StatNameDynamicPool d2(table_); d2.add("dynamic.stat2"); encodeDecode("direct.stat"); std::vector accum; - uint64_t total = table_->getRecentLookups([&accum](absl::string_view name, uint64_t count) { + uint64_t total = table_.getRecentLookups([&accum](absl::string_view name, uint64_t count) { accum.emplace_back(absl::StrCat(count, ": ", name)); }); EXPECT_EQ(1, total); // Dynamic pool adds don't count as recent lookups. @@ -732,14 +676,13 @@ TEST_P(StatNameTest, RecentLookups) { EXPECT_EQ("1: direct.stat", recent_lookups_str); // No dynamic-pool lookups take locks. 
- table_->clearRecentLookups(); + table_.clearRecentLookups(); uint32_t num_calls = 0; - EXPECT_EQ(0, - table_->getRecentLookups([&num_calls](absl::string_view, uint64_t) { ++num_calls; })); + EXPECT_EQ(0, table_.getRecentLookups([&num_calls](absl::string_view, uint64_t) { ++num_calls; })); EXPECT_EQ(0, num_calls); } -TEST_P(StatNameTest, StatNameEmptyEquivalent) { +TEST_F(StatNameTest, StatNameEmptyEquivalent) { StatName empty1; StatName empty2 = makeStat(""); StatName non_empty = makeStat("a"); @@ -751,7 +694,7 @@ TEST_P(StatNameTest, StatNameEmptyEquivalent) { EXPECT_NE(empty2.hash(), non_empty.hash()); } -TEST_P(StatNameTest, SupportsAbslHash) { +TEST_F(StatNameTest, SupportsAbslHash) { EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ StatName(), makeStat(""), @@ -789,11 +732,9 @@ TEST(SymbolTableTest, Memory) { } } - // Make sure we don't regress. Data as of 2019/05/29: - // - // string_mem_used: 6710912 (libc++), 7759488 (libstdc++). + // Make sure we don't regress. + // Data as of 2019/05/29: // symbol_table_mem_used: 1726056 (3.9x) -- does not seem to depend on STL sizes. - EXPECT_MEMORY_LE(string_mem_used, 7759488); EXPECT_MEMORY_LE(symbol_table_mem_used, string_mem_used / 3); EXPECT_MEMORY_EQ(symbol_table_mem_used, 1726056); } diff --git a/test/common/stats/tag_extractor_impl_test.cc b/test/common/stats/tag_extractor_impl_test.cc index 5ca2bb933b73..18ff4d6ec88c 100644 --- a/test/common/stats/tag_extractor_impl_test.cc +++ b/test/common/stats/tag_extractor_impl_test.cc @@ -317,6 +317,14 @@ TEST(TagExtractorTest, DefaultTagExtractors) { regex_tester.testRegex("tcp.tcp_prefix.downstream_flow_control_resumed_reading_total", "tcp.downstream_flow_control_resumed_reading_total", {tcp_prefix}); + // UDP Prefix + Tag udp_prefix; + udp_prefix.name_ = tag_names.UDP_PREFIX; + udp_prefix.value_ = "udp_prefix"; + + regex_tester.testRegex("udp.udp_prefix.downstream_flow_control_resumed_reading_total", + "udp.downstream_flow_control_resumed_reading_total", {udp_prefix}); + // Fault Downstream Cluster Tag fault_connection_manager; fault_connection_manager.name_ = tag_names.HTTP_CONN_MANAGER_PREFIX; diff --git a/test/common/stats/thread_local_store_speed_test.cc b/test/common/stats/thread_local_store_speed_test.cc index 6e2c62ace9ef..3eff52a78d97 100644 --- a/test/common/stats/thread_local_store_speed_test.cc +++ b/test/common/stats/thread_local_store_speed_test.cc @@ -7,7 +7,7 @@ #include "common/common/thread.h" #include "common/event/dispatcher_impl.h" #include "common/stats/allocator_impl.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/stats/tag_producer_impl.h" #include "common/stats/thread_local_store.h" #include "common/thread_local/thread_local_impl.h" @@ -24,18 +24,18 @@ namespace Envoy { class ThreadLocalStorePerf { public: ThreadLocalStorePerf() - : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), heap_alloc_(*symbol_table_), - store_(heap_alloc_), api_(Api::createApiForTest(store_, time_system_)) { + : heap_alloc_(symbol_table_), store_(heap_alloc_), + api_(Api::createApiForTest(store_, time_system_)) { store_.setTagProducer(std::make_unique(stats_config_)); Stats::TestUtil::forEachSampleStat(1000, [this](absl::string_view name) { - stat_names_.push_back(std::make_unique(name, *symbol_table_)); + stat_names_.push_back(std::make_unique(name, symbol_table_)); }); } ~ThreadLocalStorePerf() { for (auto& stat_name_storage : stat_names_) { - stat_name_storage->free(*symbol_table_); + 
stat_name_storage->free(symbol_table_); } store_.shutdownThreading(); if (tls_) { @@ -64,7 +64,7 @@ class ThreadLocalStorePerf { } private: - Stats::SymbolTablePtr symbol_table_; + Stats::SymbolTableImpl symbol_table_; Event::SimulatedTimeSystem time_system_; Stats::AllocatorImpl heap_alloc_; Event::DispatcherPtr dispatcher_; diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 135c6b424097..395a84cf32e6 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -55,10 +55,11 @@ class ThreadLocalStoreTestingPeer { const std::function& num_tls_hist_cb) { auto num_tls_histograms = std::make_shared>(0); thread_local_store_impl.tls_->runOnAllThreads( - [&thread_local_store_impl, num_tls_histograms]() { - auto& tls_cache = - thread_local_store_impl.tls_->getTyped(); + [num_tls_histograms](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + auto& tls_cache = object->asType(); *num_tls_histograms += tls_cache.tls_histogram_cache_.size(); + return object; }, [num_tls_hist_cb, num_tls_histograms]() { num_tls_hist_cb(*num_tls_histograms); }); } @@ -67,8 +68,7 @@ class ThreadLocalStoreTestingPeer { class StatsThreadLocalStoreTest : public testing::Test { public: StatsThreadLocalStoreTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - store_(std::make_unique(alloc_)) { + : alloc_(symbol_table_), store_(std::make_unique(alloc_)) { store_->addSink(sink_); } @@ -92,7 +92,7 @@ class StatsThreadLocalStoreTest : public testing::Test { return num_tls_histograms; } - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; NiceMock main_thread_dispatcher_; NiceMock tls_; AllocatorImpl alloc_; @@ -122,7 +122,7 @@ class HistogramTest : public testing::Test { public: using NameHistogramMap = std::map; - HistogramTest() : symbol_table_(SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_) {} + HistogramTest() : alloc_(symbol_table_) {} void SetUp() override { store_ = std::make_unique(alloc_); @@ -210,7 +210,7 @@ class HistogramTest : public testing::Test { } } - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; NiceMock main_thread_dispatcher_; NiceMock tls_; AllocatorImpl alloc_; @@ -226,7 +226,7 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { Counter& c1 = store_->counterFromString("c1"); EXPECT_EQ(&c1, &store_->counterFromString("c1")); - StatNameManagedStorage c1_name("c1", *symbol_table_); + StatNameManagedStorage c1_name("c1", symbol_table_); c1.add(100); auto found_counter = store_->findCounter(c1_name.statName()); ASSERT_TRUE(found_counter.has_value()); @@ -237,7 +237,7 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { Gauge& g1 = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); EXPECT_EQ(&g1, &store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate)); - StatNameManagedStorage g1_name("g1", *symbol_table_); + StatNameManagedStorage g1_name("g1", symbol_table_); g1.set(100); auto found_gauge = store_->findGauge(g1_name.statName()); ASSERT_TRUE(found_gauge.has_value()); @@ -248,7 +248,7 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(&h1, &store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified)); - StatNameManagedStorage h1_name("h1", *symbol_table_); + StatNameManagedStorage h1_name("h1", symbol_table_); auto found_histogram = 
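// Illustrative sketch (editorial, not part of the patch): the ThreadLocalStoreTestingPeer hunk
// above reflects a changed runOnAllThreads() contract: the per-worker callback now receives the
// slot's ThreadLocalObjectSharedPtr and returns the (possibly replaced) object, instead of
// reading it through getTyped<>(). With a hypothetical slot type MyTlsObject (the concrete type
// name is an assumption; the extraction dropped the template argument), the new shape is roughly:
//
//   tls.runOnAllThreads(
//       [](ThreadLocal::ThreadLocalObjectSharedPtr obj)
//           -> ThreadLocal::ThreadLocalObjectSharedPtr {
//         obj->asType<MyTlsObject>().doWork();  // MyTlsObject is hypothetical.
//         return obj;
//       },
//       [] { /* completion callback, run on the main thread */ });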
store_->findHistogram(h1_name.statName()); ASSERT_TRUE(found_histogram.has_value()); EXPECT_EQ(&h1, &found_histogram->get()); @@ -280,7 +280,7 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { Counter& c1 = store_->counterFromString("c1"); EXPECT_EQ(&c1, &store_->counterFromString("c1")); - StatNameManagedStorage c1_name("c1", *symbol_table_); + StatNameManagedStorage c1_name("c1", symbol_table_); c1.add(100); auto found_counter = store_->findCounter(c1_name.statName()); ASSERT_TRUE(found_counter.has_value()); @@ -291,7 +291,7 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { Gauge& g1 = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); EXPECT_EQ(&g1, &store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate)); - StatNameManagedStorage g1_name("g1", *symbol_table_); + StatNameManagedStorage g1_name("g1", symbol_table_); g1.set(100); auto found_gauge = store_->findGauge(g1_name.statName()); ASSERT_TRUE(found_gauge.has_value()); @@ -302,7 +302,7 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(&h1, &store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified)); - StatNameManagedStorage h1_name("h1", *symbol_table_); + StatNameManagedStorage h1_name("h1", symbol_table_); auto found_histogram = store_->findHistogram(h1_name.statName()); ASSERT_TRUE(found_histogram.has_value()); EXPECT_EQ(&h1, &found_histogram->get()); @@ -344,11 +344,11 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { Counter& c2 = scope1->counterFromString("c2"); EXPECT_EQ("c1", c1.name()); EXPECT_EQ("scope1.c2", c2.name()); - StatNameManagedStorage c1_name("c1", *symbol_table_); + StatNameManagedStorage c1_name("c1", symbol_table_); auto found_counter = store_->findCounter(c1_name.statName()); ASSERT_TRUE(found_counter.has_value()); EXPECT_EQ(&c1, &found_counter->get()); - StatNameManagedStorage c2_name("scope1.c2", *symbol_table_); + StatNameManagedStorage c2_name("scope1.c2", symbol_table_); auto found_counter2 = store_->findCounter(c2_name.statName()); ASSERT_TRUE(found_counter2.has_value()); EXPECT_EQ(&c2, &found_counter2->get()); @@ -357,11 +357,11 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { Gauge& g2 = scope1->gaugeFromString("g2", Gauge::ImportMode::Accumulate); EXPECT_EQ("g1", g1.name()); EXPECT_EQ("scope1.g2", g2.name()); - StatNameManagedStorage g1_name("g1", *symbol_table_); + StatNameManagedStorage g1_name("g1", symbol_table_); auto found_gauge = store_->findGauge(g1_name.statName()); ASSERT_TRUE(found_gauge.has_value()); EXPECT_EQ(&g1, &found_gauge->get()); - StatNameManagedStorage g2_name("scope1.g2", *symbol_table_); + StatNameManagedStorage g2_name("scope1.g2", symbol_table_); auto found_gauge2 = store_->findGauge(g2_name.statName()); ASSERT_TRUE(found_gauge2.has_value()); EXPECT_EQ(&g2, &found_gauge2->get()); @@ -374,11 +374,11 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { h1.recordValue(100); EXPECT_CALL(sink_, onHistogramComplete(Ref(h2), 200)); h2.recordValue(200); - StatNameManagedStorage h1_name("h1", *symbol_table_); + StatNameManagedStorage h1_name("h1", symbol_table_); auto found_histogram = store_->findHistogram(h1_name.statName()); ASSERT_TRUE(found_histogram.has_value()); EXPECT_EQ(&h1, &found_histogram->get()); - StatNameManagedStorage h2_name("scope1.h2", *symbol_table_); + StatNameManagedStorage h2_name("scope1.h2", symbol_table_); auto found_histogram2 = store_->findHistogram(h2_name.statName()); ASSERT_TRUE(found_histogram2.has_value()); EXPECT_EQ(&h2, 
&found_histogram2->get()); @@ -388,20 +388,20 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { EXPECT_EQ("t1", t1.name()); EXPECT_EQ("scope1.t2", t2.name()); - StatNameManagedStorage tag_key("a", *symbol_table_); - StatNameManagedStorage tag_value("b", *symbol_table_); + StatNameManagedStorage tag_key("a", symbol_table_); + StatNameManagedStorage tag_value("b", symbol_table_); StatNameTagVector tags{{StatName(tag_key.statName()), StatName(tag_value.statName())}}; const TagVector expectedTags = {Tag{"a", "b"}}; { - StatNameManagedStorage storage("c3", *symbol_table_); + StatNameManagedStorage storage("c3", symbol_table_); Counter& counter = scope1->counterFromStatNameWithTags(StatName(storage.statName()), tags); EXPECT_EQ(expectedTags, counter.tags()); EXPECT_EQ(&counter, &scope1->counterFromStatNameWithTags(StatName(storage.statName()), tags)); } { - StatNameManagedStorage storage("g3", *symbol_table_); + StatNameManagedStorage storage("g3", symbol_table_); Gauge& gauge = scope1->gaugeFromStatNameWithTags(StatName(storage.statName()), tags, Gauge::ImportMode::Accumulate); EXPECT_EQ(expectedTags, gauge.tags()); @@ -409,7 +409,7 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { Gauge::ImportMode::Accumulate)); } { - StatNameManagedStorage storage("h3", *symbol_table_); + StatNameManagedStorage storage("h3", symbol_table_); Histogram& histogram = scope1->histogramFromStatNameWithTags( StatName(storage.statName()), tags, Stats::Histogram::Unit::Unspecified); EXPECT_EQ(expectedTags, histogram.tags()); @@ -520,7 +520,7 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { ScopePtr scope1 = store_->createScope("scope1."); Counter& c1 = scope1->counterFromString("foo.bar"); EXPECT_EQ("scope1.foo.bar", c1.name()); - StatNameManagedStorage c1_name("scope1.foo.bar", *symbol_table_); + StatNameManagedStorage c1_name("scope1.foo.bar", symbol_table_); auto found_counter = store_->findCounter(c1_name.statName()); ASSERT_TRUE(found_counter.has_value()); EXPECT_EQ(&c1, &found_counter->get()); @@ -529,7 +529,7 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { Counter& c2 = scope2->counterFromString("bar"); EXPECT_EQ(&c1, &c2); EXPECT_EQ("scope1.foo.bar", c2.name()); - StatNameManagedStorage c2_name("scope1.foo.bar", *symbol_table_); + StatNameManagedStorage c2_name("scope1.foo.bar", symbol_table_); auto found_counter2 = store_->findCounter(c2_name.statName()); ASSERT_TRUE(found_counter2.has_value()); @@ -660,8 +660,8 @@ TEST_F(StatsThreadLocalStoreTest, TextReadoutAllLengths) { class ThreadLocalStoreNoMocksTestBase : public testing::Test { public: ThreadLocalStoreNoMocksTestBase() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - store_(std::make_unique(alloc_)), pool_(*symbol_table_) {} + : alloc_(symbol_table_), store_(std::make_unique(alloc_)), + pool_(symbol_table_) {} ~ThreadLocalStoreNoMocksTestBase() override { if (store_ != nullptr) { store_->shutdownThreading(); @@ -670,7 +670,7 @@ class ThreadLocalStoreNoMocksTestBase : public testing::Test { StatName makeStatName(absl::string_view name) { return pool_.add(name); } - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; AllocatorImpl alloc_; ThreadLocalStoreImplPtr store_; StatNamePool pool_; @@ -933,8 +933,7 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { class RememberStatsMatcherTest : public testing::TestWithParam { public: RememberStatsMatcherTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), heap_alloc_(*symbol_table_), - store_(heap_alloc_), 
scope_(store_.createScope("scope.")) { + : heap_alloc_(symbol_table_), store_(heap_alloc_), scope_(store_.createScope("scope.")) { if (GetParam()) { store_.initializeThreading(main_thread_dispatcher_, tls_); } @@ -1031,7 +1030,7 @@ class RememberStatsMatcherTest : public testing::TestWithParam { }; } - Stats::SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; NiceMock main_thread_dispatcher_; NiceMock tls_; AllocatorImpl heap_alloc_; @@ -1136,79 +1135,52 @@ TEST_F(StatsThreadLocalStoreTest, NonHotRestartNoTruncation) { class StatsThreadLocalStoreTestNoFixture : public testing::Test { protected: + StatsThreadLocalStoreTestNoFixture() : alloc_(symbol_table_), store_(alloc_) { + store_.addSink(sink_); + + // Use a tag producer that will produce tags. + envoy::config::metrics::v3::StatsConfig stats_config; + store_.setTagProducer(std::make_unique(stats_config)); + } + ~StatsThreadLocalStoreTestNoFixture() override { if (threading_enabled_) { - store_->shutdownThreading(); + store_.shutdownThreading(); tls_.shutdownThread(); } } - void init(bool use_fakes) { - symbol_table_creator_test_peer_.setUseFakeSymbolTables(use_fakes); - symbol_table_ = SymbolTableCreator::makeSymbolTable(); - alloc_ = std::make_unique(*symbol_table_); - store_ = std::make_unique(*alloc_); - store_->addSink(sink_); - - // Use a tag producer that will produce tags. - envoy::config::metrics::v3::StatsConfig stats_config; - store_->setTagProducer(std::make_unique(stats_config)); - } - void initThreading() { threading_enabled_ = true; - store_->initializeThreading(main_thread_dispatcher_, tls_); + store_.initializeThreading(main_thread_dispatcher_, tls_); } static constexpr size_t million_ = 1000 * 1000; MockSink sink_; - SymbolTablePtr symbol_table_; - std::unique_ptr alloc_; - ThreadLocalStoreImplPtr store_; + SymbolTableImpl symbol_table_; + AllocatorImpl alloc_; + ThreadLocalStoreImpl store_; NiceMock main_thread_dispatcher_; NiceMock tls_; - TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; bool threading_enabled_{false}; }; -// Tests how much memory is consumed allocating 100k stats. -TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithoutTlsFakeSymbolTable) { - init(true); - TestUtil::MemoryTest memory_test; - TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1358576); // Jan 23, 2020 - EXPECT_MEMORY_LE(memory_test.consumedBytes(), 1.4 * million_); -} - -TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsFakeSymbolTable) { - init(true); - initThreading(); - TestUtil::MemoryTest memory_test; - TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1498128); // July 30, 2020 - EXPECT_MEMORY_LE(memory_test.consumedBytes(), 1.6 * million_); -} - // Tests how much memory is consumed allocating 100k stats. 
TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithoutTlsRealSymbolTable) { - init(false); TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); + 100, [this](absl::string_view name) { store_.counterFromString(std::string(name)); }); EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 688080); // July 2, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.75 * million_); } TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsRealSymbolTable) { - init(false); initThreading(); TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 827632); // July 20, 2020 + 100, [this](absl::string_view name) { store_.counterFromString(std::string(name)); }); + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 827616); // Sep 25, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.9 * million_); } @@ -1264,11 +1236,11 @@ TEST_F(StatsThreadLocalStoreTest, MergeDuringShutDown) { } TEST(ThreadLocalStoreThreadTest, ConstructDestruct) { - SymbolTablePtr symbol_table(SymbolTableCreator::makeSymbolTable()); + SymbolTableImpl symbol_table; Api::ApiPtr api = Api::createApiForTest(); Event::DispatcherPtr dispatcher = api->allocateDispatcher("test_thread"); NiceMock tls; - AllocatorImpl alloc(*symbol_table); + AllocatorImpl alloc(symbol_table); ThreadLocalStoreImpl store(alloc); store.initializeThreading(*dispatcher, tls); diff --git a/test/common/stats/utility_fuzz_test.cc b/test/common/stats/utility_fuzz_test.cc index e61257e2d191..eebdceab396d 100644 --- a/test/common/stats/utility_fuzz_test.cc +++ b/test/common/stats/utility_fuzz_test.cc @@ -2,7 +2,6 @@ #include #include "common/stats/isolated_store_impl.h" -#include "common/stats/symbol_table_creator.h" #include "common/stats/utility.h" #include "test/fuzz/fuzz_runner.h" @@ -41,15 +40,10 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { // model common/stats/utility_test.cc, initialize those objects to create random elements as // input - Stats::SymbolTablePtr symbol_table; - if (provider.ConsumeBool()) { - symbol_table = std::make_unique(); - } else { - symbol_table = std::make_unique(); - } + Stats::SymbolTableImpl symbol_table; std::unique_ptr store = - std::make_unique(*symbol_table); - Stats::StatNamePool pool(*symbol_table); + std::make_unique(symbol_table); + Stats::StatNamePool pool(symbol_table); Stats::ScopePtr scope = store->createScope(provider.ConsumeRandomLengthString(max_len)); Stats::ElementVec ele_vec; Stats::StatNameVec sn_vec; diff --git a/test/common/stats/utility_test.cc b/test/common/stats/utility_test.cc index bf1643ff4ed5..3c4bda7d122d 100644 --- a/test/common/stats/utility_test.cc +++ b/test/common/stats/utility_test.cc @@ -5,7 +5,6 @@ #include "common/stats/isolated_store_impl.h" #include "common/stats/null_counter.h" #include "common/stats/null_gauge.h" -#include "common/stats/symbol_table_creator.h" #include "common/stats/thread_local_store.h" #include "absl/strings/str_cat.h" @@ -32,7 +31,7 @@ class StatsUtilityTest : public testing::TestWithParam { using MakeStatFn = std::function; StatsUtilityTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), pool_(*symbol_table_), + : symbol_table_(std::make_unique()), pool_(*symbol_table_), tags_( {{pool_.add("tag1"), pool_.add("value1")}, {pool_.add("tag2"), pool_.add("value2")}}) { switch (GetParam()) { diff --git 
a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index 771dc9860541..dcb62885e56f 100644 --- a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -153,6 +153,11 @@ TEST_F(StreamInfoImplTest, MiscSettersAndGetters) { ASSERT_TRUE(stream_info.responseCodeDetails().has_value()); EXPECT_EQ(ResponseCodeDetails::get().ViaUpstream, stream_info.responseCodeDetails().value()); + EXPECT_FALSE(stream_info.connectionTerminationDetails().has_value()); + stream_info.setConnectionTerminationDetails("access_denied"); + ASSERT_TRUE(stream_info.connectionTerminationDetails().has_value()); + EXPECT_EQ("access_denied", stream_info.connectionTerminationDetails().value()); + EXPECT_EQ(nullptr, stream_info.upstreamHost()); Upstream::HostDescriptionConstSharedPtr host(new NiceMock()); stream_info.onUpstreamHostSelected(host); @@ -259,6 +264,22 @@ TEST_F(StreamInfoImplTest, DefaultRequestIDExtensionTest) { EXPECT_EQ(rid_extension->getTraceStatus(request_headers), Http::TraceStatus::NoTrace); } +TEST_F(StreamInfoImplTest, ConnectionID) { + StreamInfoImpl stream_info(test_time_.timeSystem()); + EXPECT_FALSE(stream_info.connectionID().has_value()); + uint64_t id = 123; + stream_info.setConnectionID(id); + EXPECT_EQ(id, stream_info.connectionID()); +} + +TEST_F(StreamInfoImplTest, Details) { + StreamInfoImpl stream_info(test_time_.timeSystem()); + EXPECT_FALSE(stream_info.responseCodeDetails().has_value()); + stream_info.setResponseCodeDetails("two words"); + ASSERT_TRUE(stream_info.responseCodeDetails().has_value()); + EXPECT_EQ(stream_info.responseCodeDetails().value(), "two_words"); +} + } // namespace } // namespace StreamInfo } // namespace Envoy diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index 5767592c7406..560b485e18c0 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -41,6 +41,12 @@ class TestStreamInfo : public StreamInfo::StreamInfo { void setResponseCodeDetails(absl::string_view rc_details) override { response_code_details_.emplace(rc_details); } + const absl::optional& connectionTerminationDetails() const override { + return connection_termination_details_; + } + void setConnectionTerminationDetails(absl::string_view details) override { + connection_termination_details_.emplace(details); + } void addBytesSent(uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } uint64_t bytesSent() const override { return 2; } bool intersectResponseFlags(uint64_t response_flags) const override { @@ -227,6 +233,10 @@ class TestStreamInfo : public StreamInfo::StreamInfo { return upstream_cluster_info_; } + void setConnectionID(uint64_t id) override { connection_id_ = id; } + + absl::optional connectionID() const override { return connection_id_; } + Random::RandomGeneratorImpl random_; SystemTime start_time_; MonotonicTime start_time_monotonic_; @@ -243,6 +253,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { absl::optional protocol_{Http::Protocol::Http11}; absl::optional response_code_; absl::optional response_code_details_; + absl::optional connection_termination_details_; uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; bool health_check_request_{}; @@ -266,6 +277,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { Envoy::Event::SimulatedTimeSystem test_time_; absl::optional upstream_cluster_info_{}; Http::RequestIDExtensionSharedPtr request_id_extension_; + 
absl::optional connection_id_; }; } // namespace Envoy diff --git a/test/common/stream_info/utility_test.cc b/test/common/stream_info/utility_test.cc index f74faa902220..9bc8096ee490 100644 --- a/test/common/stream_info/utility_test.cc +++ b/test/common/stream_info/utility_test.cc @@ -15,7 +15,7 @@ namespace StreamInfo { namespace { TEST(ResponseFlagUtilsTest, toShortStringConversion) { - static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x400000, "A flag has been added. Fix this code."); std::vector> expected = { std::make_pair(ResponseFlag::FailedLocalHealthCheck, "LH"), @@ -39,7 +39,8 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { std::make_pair(ResponseFlag::DownstreamProtocolError, "DPE"), std::make_pair(ResponseFlag::UpstreamMaxStreamDurationReached, "UMSDR"), std::make_pair(ResponseFlag::ResponseFromCacheFilter, "RFCF"), - std::make_pair(ResponseFlag::NoFilterConfigFound, "NFCF")}; + std::make_pair(ResponseFlag::NoFilterConfigFound, "NFCF"), + std::make_pair(ResponseFlag::DurationTimeout, "DT")}; for (const auto& test_case : expected) { NiceMock stream_info; @@ -67,7 +68,7 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { } TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { - static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x400000, "A flag has been added. Fix this code."); std::vector> expected = { std::make_pair("LH", ResponseFlag::FailedLocalHealthCheck), @@ -91,7 +92,8 @@ TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { std::make_pair("DPE", ResponseFlag::DownstreamProtocolError), std::make_pair("UMSDR", ResponseFlag::UpstreamMaxStreamDurationReached), std::make_pair("RFCF", ResponseFlag::ResponseFromCacheFilter), - std::make_pair("NFCF", ResponseFlag::NoFilterConfigFound)}; + std::make_pair("NFCF", ResponseFlag::NoFilterConfigFound), + std::make_pair("DT", ResponseFlag::DurationTimeout)}; EXPECT_FALSE(ResponseFlagUtils::toResponseFlag("NonExistentFlag").has_value()); diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index ff5a678d93e4..838cc9a64035 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -87,6 +87,13 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { MOCK_METHOD(void, onConnReleasedForTest, ()); MOCK_METHOD(void, onConnDestroyedForTest, ()); + bool maybePrefetch(float ratio) override { + if (!test_new_connection_pool_) { + return false; + } + ASSERT(dynamic_cast(conn_pool_.get()) != nullptr); + return dynamic_cast(conn_pool_.get())->maybePrefetch(ratio); + } struct TestConnection { Network::MockClientConnection* connection_; @@ -1041,6 +1048,19 @@ TEST_P(TcpConnPoolImplTest, RequestCapacity) { conn_pool_.test_conns_[2].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); } +// Test that maybePrefetch is passed up to the base class implementation. +TEST_P(TcpConnPoolImplTest, TestPrefetch) { + if (!test_new_connection_pool_) { + return; + } + EXPECT_FALSE(conn_pool_.maybePrefetch(0)); + + conn_pool_.expectConnCreate(); + ASSERT_TRUE(conn_pool_.maybePrefetch(2)); + + conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); +} + /** * Test that pending connections are closed when the connection pool is destroyed. 
*/ @@ -1084,6 +1104,7 @@ TEST_P(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { EXPECT_CALL(dispatcher_, clearDeferredDeleteList()); conn_pool_.reset(); } + INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplTest, testing::Bool()); INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplDestructorTest, testing::Bool()); diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 20961c086b3d..803381c4192c 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -105,6 +105,18 @@ idle_timeout: 1s EXPECT_EQ(std::chrono::seconds(1), config_obj.sharedConfig()->idleTimeout().value()); } +TEST(ConfigTest, MaxDownstreamConnectionDuration) { + const std::string yaml = R"EOF( +stat_prefix: name +cluster: foo +max_downstream_connection_duration: 10s +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + EXPECT_EQ(std::chrono::seconds(10), config_obj.maxDownstreamConnectionDuration().value()); +} + TEST(ConfigTest, NoRouteConfig) { const std::string yaml = R"EOF( stat_prefix: name @@ -423,10 +435,10 @@ TEST(ConfigTest, WeightedClustersConfig) { Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); NiceMock connection; - EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); + EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(0)); EXPECT_EQ(std::string("cluster1"), config_obj.getRouteFromEntries(connection)->clusterName()); - EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(2)); + EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(2)); EXPECT_EQ(std::string("cluster2"), config_obj.getRouteFromEntries(connection)->clusterName()); } @@ -463,7 +475,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchConfig) { HashedValue hv1(v1), hv2(v2); NiceMock connection; - EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); + EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(0)); const auto route = config_obj.getRouteFromEntries(connection); EXPECT_NE(nullptr, route); @@ -490,7 +502,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchConfig) { HashedValue hv3(v3), hv4(v4); NiceMock connection; - EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(2)); + EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(2)); const auto route = config_obj.getRouteFromEntries(connection); EXPECT_NE(nullptr, route); @@ -556,7 +568,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchAndTopLevelMetadataMatchConfig HashedValue hv1(v1), hv2(v2); NiceMock connection; - EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); + EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(0)); const auto route = config_obj.getRouteFromEntries(connection); EXPECT_NE(nullptr, route); @@ -589,7 +601,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchAndTopLevelMetadataMatchConfig HashedValue hv3(v3), hv4(v4); NiceMock connection; - EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(2)); + EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(2)); const auto route = config_obj.getRouteFromEntries(connection); EXPECT_NE(nullptr, route); @@ -1324,7 +1336,7 @@ TEST_F(TcpProxyTest, WeightedClusterWithMetadataMatch) { { Upstream::LoadBalancerContext* context; - EXPECT_CALL(factory_context_.random_, random()).WillOnce(Return(0)); + EXPECT_CALL(factory_context_.api_.random_, 
random()).WillOnce(Return(0)); EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster("cluster1", _, _)) .WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); @@ -1348,7 +1360,7 @@ TEST_F(TcpProxyTest, WeightedClusterWithMetadataMatch) { { Upstream::LoadBalancerContext* context; - EXPECT_CALL(factory_context_.random_, random()).WillOnce(Return(2)); + EXPECT_CALL(factory_context_.api_.random_, random()).WillOnce(Return(2)); EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster("cluster2", _, _)) .WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); diff --git a/test/common/thread_local/thread_local_impl_test.cc b/test/common/thread_local/thread_local_impl_test.cc index 6747fa2eae99..a625d57002a7 100644 --- a/test/common/thread_local/thread_local_impl_test.cc +++ b/test/common/thread_local/thread_local_impl_test.cc @@ -45,7 +45,6 @@ class ThreadLocalInstanceImplTest : public testing::Test { object.reset(); return object_ref; } - int deferredDeletesMapSize() { return tls_.deferred_deletes_.size(); } int freeSlotIndexesListSize() { return tls_.free_slot_indexes_.size(); } InstanceImpl tls_; @@ -60,7 +59,6 @@ TEST_F(ThreadLocalInstanceImplTest, All) { EXPECT_CALL(thread_dispatcher_, post(_)); SlotPtr slot1 = tls_.allocateSlot(); slot1.reset(); - EXPECT_EQ(deferredDeletesMapSize(), 0); EXPECT_EQ(freeSlotIndexesListSize(), 1); // Create a new slot which should take the place of the old slot. ReturnPointee() is used to @@ -86,48 +84,67 @@ TEST_F(ThreadLocalInstanceImplTest, All) { slot3.reset(); slot4.reset(); EXPECT_EQ(freeSlotIndexesListSize(), 0); - EXPECT_EQ(deferredDeletesMapSize(), 2); EXPECT_CALL(object_ref4, onDestroy()); EXPECT_CALL(object_ref3, onDestroy()); tls_.shutdownThread(); } -TEST_F(ThreadLocalInstanceImplTest, DeferredRecycle) { +struct ThreadStatus { + uint64_t thread_local_calls_{0}; + bool all_threads_complete_ = false; +}; + +TEST_F(ThreadLocalInstanceImplTest, CallbackNotInvokedAfterDeletion) { InSequence s; - // Free a slot without ever calling set. - EXPECT_CALL(thread_dispatcher_, post(_)); - SlotPtr slot1 = tls_.allocateSlot(); - slot1.reset(); - // Slot destructed directly, as there is no out-going callbacks. - EXPECT_EQ(deferredDeletesMapSize(), 0); + // Allocate a slot and invoke all callback variants. Hold all callbacks and destroy the slot. + // Make sure that recycling happens appropriately. + SlotPtr slot = tls_.allocateSlot(); + + std::list holder; + EXPECT_CALL(thread_dispatcher_, post(_)).Times(4).WillRepeatedly(Invoke([&](Event::PostCb cb) { + // Holds the posted callback. + holder.push_back(cb); + })); + + uint32_t total_callbacks = 0; + slot->set([&total_callbacks](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + // Callbacks happen on the main thread but not the workers, so track the total. + total_callbacks++; + return nullptr; + }); + slot->runOnAllThreads([&total_callbacks](ThreadLocal::ThreadLocalObjectSharedPtr) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + // Callbacks happen on the main thread but not the workers, so track the total. 
+ total_callbacks++; + return nullptr; + }); + ThreadStatus thread_status; + slot->runOnAllThreads( + [&thread_status]( + ThreadLocal::ThreadLocalObjectSharedPtr) -> ThreadLocal::ThreadLocalObjectSharedPtr { + ++thread_status.thread_local_calls_; + return nullptr; + }, + [&thread_status]() -> void { + // Callbacks happen on the main thread but not the workers. + EXPECT_EQ(thread_status.thread_local_calls_, 1); + thread_status.all_threads_complete_ = true; + }); + EXPECT_FALSE(thread_status.all_threads_complete_); + + EXPECT_EQ(2, total_callbacks); + slot.reset(); EXPECT_EQ(freeSlotIndexesListSize(), 1); - // Allocate a slot and set value, hold the posted callback and the slot will only be returned - // after the held callback is destructed. - { - SlotPtr slot2 = tls_.allocateSlot(); - EXPECT_EQ(freeSlotIndexesListSize(), 0); - { - Event::PostCb holder; - EXPECT_CALL(thread_dispatcher_, post(_)).WillOnce(Invoke([&](Event::PostCb cb) { - // Holds the posted callback. - holder = cb; - })); - slot2->set( - [](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return nullptr; }); - slot2.reset(); - // Not released yet, as holder has a copy of the ref_count_. - EXPECT_EQ(freeSlotIndexesListSize(), 0); - EXPECT_EQ(deferredDeletesMapSize(), 1); - // This post is called when the holder dies. - EXPECT_CALL(thread_dispatcher_, post(_)); - } - // Slot is deleted now that there holder destructs. - EXPECT_EQ(deferredDeletesMapSize(), 0); - EXPECT_EQ(freeSlotIndexesListSize(), 1); + EXPECT_CALL(main_dispatcher_, post(_)); + while (!holder.empty()) { + holder.front()(); + holder.pop_front(); } + EXPECT_EQ(2, total_callbacks); + EXPECT_TRUE(thread_status.all_threads_complete_); tls_.shutdownGlobalThreading(); } @@ -172,25 +189,29 @@ TEST_F(ThreadLocalInstanceImplTest, UpdateCallback) { // Validate ThreadLocal::runOnAllThreads behavior with all_thread_complete call back. TEST_F(ThreadLocalInstanceImplTest, RunOnAllThreads) { SlotPtr tlsptr = tls_.allocateSlot(); + TestThreadLocalObject& object_ref = setObject(*tlsptr); EXPECT_CALL(thread_dispatcher_, post(_)); EXPECT_CALL(main_dispatcher_, post(_)); // Ensure that the thread local call back and all_thread_complete call back are called. 
- struct { - uint64_t thread_local_calls_{0}; - bool all_threads_complete_ = false; - } thread_status; - - tlsptr->runOnAllThreads([&thread_status]() -> void { ++thread_status.thread_local_calls_; }, - [&thread_status]() -> void { - EXPECT_EQ(thread_status.thread_local_calls_, 2); - thread_status.all_threads_complete_ = true; - }); - + ThreadStatus thread_status; + tlsptr->runOnAllThreads( + [&thread_status](ThreadLocal::ThreadLocalObjectSharedPtr object) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + ++thread_status.thread_local_calls_; + return object; + }, + [&thread_status]() -> void { + EXPECT_EQ(thread_status.thread_local_calls_, 2); + thread_status.all_threads_complete_ = true; + }); EXPECT_TRUE(thread_status.all_threads_complete_); tls_.shutdownGlobalThreading(); + tlsptr.reset(); + EXPECT_EQ(freeSlotIndexesListSize(), 0); + EXPECT_CALL(object_ref, onDestroy()); tls_.shutdownThread(); } diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 7a65b1341f94..54bc7d6a5375 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -2,9 +2,11 @@ load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", "envoy_cc_benchmark_binary", + "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_library", "envoy_package", + "envoy_proto_library", ) licenses(["notice"]) # Apache 2 @@ -653,3 +655,35 @@ envoy_cc_test( "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) + +envoy_cc_test_library( + name = "health_check_fuzz_lib", + srcs = ["health_check_fuzz.cc"], + hdrs = ["health_check_fuzz.h"], + deps = [ + ":health_check_fuzz_proto_cc_proto", + ":health_checker_impl_test_lib", + ":utility_lib", + "//test/fuzz:utility_lib", + ], +) + +envoy_proto_library( + name = "health_check_fuzz_proto", + srcs = ["health_check_fuzz.proto"], + deps = [ + "//test/fuzz:common_proto", + "@envoy_api//envoy/config/core/v3:pkg", + ], +) + +envoy_cc_fuzz_test( + name = "health_check_fuzz_test", + srcs = ["health_check_fuzz_test.cc"], + corpus = "health_check_corpus", + deps = [ + ":health_check_fuzz_lib", + ":health_check_fuzz_proto_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index bf6032ca4f6f..7196fe2c99be 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -62,7 +62,6 @@ class ClusterFactoryTestBase { const NiceMock local_info_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; Stats::IsolatedStoreImpl stats_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; @@ -97,9 +96,9 @@ TEST_F(TestStaticClusterImplTest, CreateWithoutConfig) { const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( - cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, - dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, - std::move(outlier_event_logger_), false, validation_visitor_, *api_); + cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, dispatcher_, + log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_), + false, validation_visitor_, *api_); auto cluster = create_result.first; cluster->initialize([] {}); @@ -142,9 +141,9 @@ TEST_F(TestStaticClusterImplTest, CreateWithStructConfig) { const envoy::config::cluster::v3::Cluster 
cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( - cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, - dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, - std::move(outlier_event_logger_), false, validation_visitor_, *api_); + cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, dispatcher_, + log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_), + false, validation_visitor_, *api_); auto cluster = create_result.first; cluster->initialize([] {}); @@ -185,9 +184,9 @@ TEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) { const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( - cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, - dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, - std::move(outlier_event_logger_), false, validation_visitor_, *api_); + cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, dispatcher_, + log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_), + false, validation_visitor_, *api_); auto cluster = create_result.first; cluster->initialize([] {}); @@ -229,7 +228,7 @@ TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, - random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_), false, validation_visitor_, *api_); }, EnvoyException, @@ -262,7 +261,7 @@ TEST_F(TestStaticClusterImplTest, HostnameWithoutDNS) { const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, - random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger_), false, validation_visitor_, *api_); }, EnvoyException, diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 442a4cb5edb1..34c31a7f4f03 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -43,14 +43,14 @@ std::string clustersJson(const std::vector& clusters) { class ClusterManagerImplTest : public testing::Test { public: ClusterManagerImplTest() - : api_(Api::createApiForTest()), http_context_(factory_.stats_.symbolTable()), - grpc_context_(factory_.stats_.symbolTable()) {} + : http_context_(factory_.stats_.symbolTable()), grpc_context_(factory_.stats_.symbolTable()) { + } void create(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { cluster_manager_ = std::make_unique( - bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, + bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, - *api_, http_context_, grpc_context_); + *factory_.api_, http_context_, grpc_context_); 
cluster_manager_->setPrimaryClustersInitializedCb( [this, bootstrap]() { cluster_manager_->initializeSecondaryClusters(bootstrap); }); } @@ -92,9 +92,9 @@ class ClusterManagerImplTest : public testing::Test { const auto& bootstrap = parseBootstrapFromV3Yaml(yaml); cluster_manager_ = std::make_unique( - bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, + bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, - *api_, local_cluster_update_, local_hosts_removed_, http_context_, grpc_context_); + *factory_.api_, local_cluster_update_, local_hosts_removed_, http_context_, grpc_context_); } void checkStats(uint64_t added, uint64_t modified, uint64_t removed, uint64_t active, @@ -135,7 +135,6 @@ class ClusterManagerImplTest : public testing::Test { } Event::SimulatedTimeSystem time_system_; - Api::ApiPtr api_; NiceMock factory_; NiceMock validation_context_; std::unique_ptr cluster_manager_; @@ -1056,7 +1055,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { last_updated: seconds: 1234567891 nanos: 234000000 - dynamic_active_clusters: + dynamic_warming_clusters: - version_info: "version1" cluster: "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster @@ -1108,7 +1107,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { last_updated: seconds: 1234567891 nanos: 234000000 - dynamic_warming_clusters: + dynamic_active_clusters: )EOF"); EXPECT_CALL(*cluster3, initialize(_)); @@ -3959,7 +3958,93 @@ TEST_F(ClusterManagerImplTest, ConnectionPoolPerDownstreamConnection) { EXPECT_EQ(conn_pool_vector.front(), cluster_manager_->httpConnPoolForCluster("cluster_1", ResourcePriority::Default, Http::Protocol::Http11, &lb_context)); -} // namespace +} + +class PrefetchTest : public ClusterManagerImplTest { +public: + void initialize(float ratio) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + lb_policy: ROUND_ROBIN + type: STATIC + )EOF"; + + ReadyWatcher initialized; + EXPECT_CALL(initialized, ready()); + envoy::config::bootstrap::v3::Bootstrap config = parseBootstrapFromV3Yaml(yaml); + if (ratio != 0) { + config.mutable_static_resources() + ->mutable_clusters(0) + ->mutable_prefetch_policy() + ->mutable_predictive_prefetch_ratio() + ->set_value(ratio); + } + create(config); + + // Set up for an initialize callback. + cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); + + std::unique_ptr callbacks( + new NiceMock()); + ClusterUpdateCallbacksHandlePtr cb = + cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks); + + cluster_ = &cluster_manager_->activeClusters().begin()->second.get(); + + // Set up the HostSet. + host1_ = makeTestHost(cluster_->info(), "tcp://127.0.0.1:80"); + host2_ = makeTestHost(cluster_->info(), "tcp://127.0.0.1:80"); + + HostVector hosts{host1_, host2_}; + auto hosts_ptr = std::make_shared(hosts); + + // Sending non-mergeable updates. + cluster_->prioritySet().updateHosts( + 0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, + {}, 100); + } + + Cluster* cluster_{}; + HostSharedPtr host1_; + HostSharedPtr host2_; +}; + +TEST_F(PrefetchTest, PrefetchOff) { + // With prefetch set to 0, each request for a connection pool will only + // allocate that conn pool. 
+ initialize(0); + EXPECT_CALL(factory_, allocateConnPool_(_, _, _)) + .Times(1) + .WillRepeatedly(ReturnNew()); + cluster_manager_->httpConnPoolForCluster("cluster_1", ResourcePriority::Default, + Http::Protocol::Http11, nullptr); + + EXPECT_CALL(factory_, allocateTcpConnPool_(_)) + .Times(1) + .WillRepeatedly(ReturnNew()); + cluster_manager_->tcpConnPoolForCluster("cluster_1", ResourcePriority::Default, nullptr); +} + +TEST_F(PrefetchTest, PrefetchOn) { + // With prefetch set to 1.1, each request for a connection pool will kick off + // prefetching, so create the pool for both the current connection and the + // anticipated one. + initialize(1.1); + EXPECT_CALL(factory_, allocateConnPool_(_, _, _)) + .Times(2) + .WillRepeatedly(ReturnNew>()); + cluster_manager_->httpConnPoolForCluster("cluster_1", ResourcePriority::Default, + Http::Protocol::Http11, nullptr); + + EXPECT_CALL(factory_, allocateTcpConnPool_(_)) + .Times(2) + .WillRepeatedly(ReturnNew>()); + cluster_manager_->tcpConnPoolForCluster("cluster_1", ResourcePriority::Default, nullptr); +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/common/upstream/edf_scheduler_test.cc b/test/common/upstream/edf_scheduler_test.cc index ddb39bf847a0..a0f0b92168c8 100644 --- a/test/common/upstream/edf_scheduler_test.cc +++ b/test/common/upstream/edf_scheduler_test.cc @@ -8,7 +8,8 @@ namespace { TEST(EdfSchedulerTest, Empty) { EdfScheduler sched; - EXPECT_EQ(nullptr, sched.pick()); + EXPECT_EQ(nullptr, sched.peekAgain([](const double&) { return 0; })); + EXPECT_EQ(nullptr, sched.pickAndAdd([](const double&) { return 0; })); } // Validate we get regular RR behavior when all weights are the same. @@ -24,9 +25,10 @@ TEST(EdfSchedulerTest, Unweighted) { for (uint32_t rounds = 0; rounds < 128; ++rounds) { for (uint32_t i = 0; i < num_entries; ++i) { - auto p = sched.pick(); + auto peek = sched.peekAgain([](const double&) { return 1; }); + auto p = sched.pickAndAdd([](const double&) { return 1; }); EXPECT_EQ(i, *p); - sched.add(1, p); + EXPECT_EQ(*peek, *p); } } } @@ -45,9 +47,10 @@ TEST(EdfSchedulerTest, Weighted) { } for (uint32_t i = 0; i < (num_entries * (1 + num_entries)) / 2; ++i) { - auto p = sched.pick(); + auto peek = sched.peekAgain([](const double& orig) { return orig + 1; }); + auto p = sched.pickAndAdd([](const double& orig) { return orig + 1; }); + EXPECT_EQ(*p, *peek); ++pick_count[*p]; - sched.add(*p + 1, p); } for (uint32_t i = 0; i < num_entries; ++i) { @@ -66,9 +69,69 @@ TEST(EdfSchedulerTest, Expired) { sched.add(1, second_entry); } - auto p = sched.pick(); + auto peek = sched.peekAgain([](const double&) { return 1; }); + auto p = sched.pickAndAdd([](const double&) { return 1; }); + EXPECT_EQ(*peek, *p); EXPECT_EQ(*second_entry, *p); - EXPECT_EQ(nullptr, sched.pick()); + EXPECT_EQ(*second_entry, *p); +} + +// Validate that expired entries are not peeked. +TEST(EdfSchedulerTest, ExpiredPeek) { + EdfScheduler sched; + + { + auto second_entry = std::make_shared(42); + auto first_entry = std::make_shared(37); + sched.add(2, first_entry); + sched.add(1, second_entry); + } + auto third_entry = std::make_shared(37); + sched.add(3, third_entry); + + EXPECT_EQ(37, *sched.peekAgain([](const double&) { return 1; })); +} + +// Validate that expired entries are ignored. 
+TEST(EdfSchedulerTest, ExpiredPeekedIsNotPicked) { + EdfScheduler sched; + + { + auto second_entry = std::make_shared(42); + auto first_entry = std::make_shared(37); + sched.add(2, first_entry); + sched.add(1, second_entry); + for (int i = 0; i < 3; ++i) { + EXPECT_TRUE(sched.peekAgain([](const double&) { return 1; }) != nullptr); + } + } + + EXPECT_TRUE(sched.peekAgain([](const double&) { return 1; }) == nullptr); + EXPECT_TRUE(sched.pickAndAdd([](const double&) { return 1; }) == nullptr); +} + +TEST(EdfSchedulerTest, ManyPeekahead) { + EdfScheduler sched1; + EdfScheduler sched2; + constexpr uint32_t num_entries = 128; + std::shared_ptr entries[num_entries]; + + for (uint32_t i = 0; i < num_entries; ++i) { + entries[i] = std::make_shared(i); + sched1.add(1, entries[i]); + sched2.add(1, entries[i]); + } + + std::vector picks; + for (uint32_t rounds = 0; rounds < 10; ++rounds) { + picks.push_back(*sched1.peekAgain([](const double&) { return 1; })); + } + for (uint32_t rounds = 0; rounds < 10; ++rounds) { + auto p1 = sched1.pickAndAdd([](const double&) { return 1; }); + auto p2 = sched2.pickAndAdd([](const double&) { return 1; }); + EXPECT_EQ(picks[rounds], *p1); + EXPECT_EQ(*p2, *p1); + } } } // namespace diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index 20b9a5a9fae7..41b56317653a 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -76,7 +76,7 @@ class EdsSpeedTest { "cluster.{}.", eds_cluster_.alt_stat_name().empty() ? eds_cluster_.name() : eds_cluster_.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); cluster_ = std::make_shared(eds_cluster_, runtime_, factory_context, std::move(scope), false); diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index e52b807c1d71..d2c330c14153 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -95,7 +95,7 @@ class EdsTest : public testing::Test { "cluster.{}.", eds_cluster_.alt_stat_name().empty() ? 
eds_cluster_.name() : eds_cluster_.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); cluster_ = std::make_shared(eds_cluster_, runtime_, factory_context, std::move(scope), false); diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index 5bc411ff4425..09804ccbb3ab 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -61,11 +61,21 @@ class HdsTest : public testing::Test { protected: HdsTest() : retry_timer_(new Event::MockTimer()), server_response_timer_(new Event::MockTimer()), - async_client_(new Grpc::MockAsyncClient()), api_(Api::createApiForTest(stats_store_)), + async_client_(new Grpc::MockAsyncClient()), + api_(Api::createApiForTest(stats_store_, random_)), ssl_context_manager_(api_->timeSource()) { node_.set_id("hds-node"); } + // Checks if the cluster counters are correct + void checkHdsCounters(int requests, int responses, int errors, int updates) { + auto stats = hds_delegate_friend_.getStats(*hds_delegate_); + EXPECT_EQ(requests, stats.requests_.value()); + EXPECT_LE(responses, stats.responses_.value()); + EXPECT_EQ(errors, stats.errors_.value()); + EXPECT_EQ(updates, stats.updates_.value()); + } + // Creates an HdsDelegate void createHdsDelegate() { InSequence s; @@ -86,10 +96,27 @@ class HdsTest : public testing::Test { hds_delegate_ = std::make_unique( stats_store_, Grpc::RawAsyncClientPtr(async_client_), envoy::config::core::v3::ApiVersion::AUTO, dispatcher_, runtime_, stats_store_, - ssl_context_manager_, random_, test_factory_, log_manager_, cm_, local_info_, admin_, + ssl_context_manager_, test_factory_, log_manager_, cm_, local_info_, admin_, singleton_manager_, tls_, validation_visitor_, *api_); } + void expectCreateClientConnection() { + // Create a new mock connection for each call to createClientConnection. + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) + .WillRepeatedly(Invoke( + [](Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr, + Network::TransportSocketPtr&, const Network::ConnectionSocket::OptionsSharedPtr&) { + Network::MockClientConnection* connection = + new NiceMock(); + + // pretend our endpoint was connected to. + connection->raiseEvent(Network::ConnectionEvent::Connected); + + // return this new, connected endpoint. + return connection; + })); + } + // Creates a HealthCheckSpecifier message that contains one endpoint and one // healthcheck envoy::service::health::v3::HealthCheckSpecifier* createSimpleMessage() { @@ -174,6 +201,34 @@ class HdsTest : public testing::Test { return msg; } + void + addTransportSocketMatches(envoy::service::health::v3::ClusterHealthCheck* cluster_health_check, + std::string match, std::string criteria) { + // Add transport socket matches to specified cluster and its first health check. + const std::string match_yaml = absl::StrFormat( + R"EOF( +transport_socket_matches: +- name: "test_socket" + match: + %s: "true" + transport_socket: + name: "envoy.transport_sockets.raw_buffer" +)EOF", + match); + cluster_health_check->MergeFrom( + TestUtility::parseYaml(match_yaml)); + + // Add transport socket match criteria to our health check, for filtering matches. 
+ const std::string criteria_yaml = absl::StrFormat( + R"EOF( +transport_socket_match_criteria: + %s: "true" +)EOF", + criteria); + cluster_health_check->mutable_health_checks(0)->MergeFrom( + TestUtility::parseYaml(criteria_yaml)); + } + Event::SimulatedTimeSystem time_system_; envoy::config::core::v3::Node node_; Event::MockDispatcher dispatcher_; @@ -401,19 +456,7 @@ TEST_F(HdsTest, TestSendResponseMultipleEndpoints) { // Create a new active connection on request, setting its status to connected // to mock a found endpoint. - EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) - .WillRepeatedly(Invoke( - [](Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr, - Network::TransportSocketPtr&, const Network::ConnectionSocket::OptionsSharedPtr&) { - Network::MockClientConnection* connection = - new NiceMock(); - - // pretend our endpoint was connected to. - connection->raiseEvent(Network::ConnectionEvent::Connected); - - // return this new, connected endpoint. - return connection; - })); + expectCreateClientConnection(); EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(2); EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); @@ -493,31 +536,9 @@ TEST_F(HdsTest, TestSocketContext) { EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); createHdsDelegate(); - // Create Message. + // Create Message with transport sockets. message.reset(createSimpleMessage()); - - // Add transport socket matches to message. - const std::string match_yaml = absl::StrFormat( - R"EOF( -transport_socket_matches: -- name: "test_socket" - match: - test_match: "true" - transport_socket: - name: "envoy.transport_sockets.raw_buffer" -)EOF"); - auto* cluster_health_check = message->mutable_cluster_health_checks(0); - cluster_health_check->MergeFrom( - TestUtility::parseYaml(match_yaml)); - - // Add transport socket match criteria to our health check, for filtering matches. - const std::string criteria_yaml = absl::StrFormat( - R"EOF( -transport_socket_match_criteria: - test_match: "true" -)EOF"); - cluster_health_check->mutable_health_checks(0)->MergeFrom( - TestUtility::parseYaml(criteria_yaml)); + addTransportSocketMatches(message->mutable_cluster_health_checks(0), "test_match", "test_match"); Network::MockClientConnection* connection = new NiceMock(); EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillRepeatedly(Return(connection)); @@ -532,8 +553,8 @@ TEST_F(HdsTest, TestSocketContext) { params.stats_.createScope(fmt::format("cluster.{}.", params.cluster_.name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( params.admin_, params.ssl_context_manager_, *scope, params.cm_, params.local_info_, - params.dispatcher_, params.random_, params.stats_, params.singleton_manager_, - params.tls_, params.validation_visitor_, params.api_); + params.dispatcher_, params.stats_, params.singleton_manager_, params.tls_, + params.validation_visitor_, params.api_); // Create a mock socket_factory for the scope of this unit test. std::unique_ptr socket_factory = @@ -678,5 +699,401 @@ TEST_F(HdsTest, TestSendResponseOneEndpointTimeout) { 1234); } +// Check to see if two of the same specifier does not get parsed twice in a row. 
+TEST_F(HdsTest, TestSameSpecifier) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message.reset(createSimpleMessage()); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Try to change the specifier, but it is the same. + message.reset(createSimpleMessage()); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check to see that HDS got two requests, but only used the specifier one time. + checkHdsCounters(2, 0, 0, 1); + + // Try to change the specifier, but use a new specifier this time. + message = createComplexSpecifier(1, 1, 2); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check that both requests and updates increased, meaning we did an update. + checkHdsCounters(3, 0, 0, 2); +} + +// Test to see that if a cluster is added or removed, the ones that did not change are reused. +TEST_F(HdsTest, TestClusterChange) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message = createComplexSpecifier(2, 1, 1); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Get cluster shared pointers to make sure they are the same memory addresses, that we reused + // them. + auto original_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(original_clusters.size(), 2); + + // Add a third cluster to the specifier. The first two should reuse pointers. + message = createComplexSpecifier(3, 1, 1); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get the new clusters list from HDS. + auto new_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(new_clusters.size(), 3); + + // Make sure our first two clusters are at the same address in memory as before. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(new_clusters[i], original_clusters[i]); + } + + message = createComplexSpecifier(3, 1, 1); + + // Remove the first element, change the order of the last two elements. + message->mutable_cluster_health_checks()->SwapElements(0, 2); + message->mutable_cluster_health_checks()->RemoveLast(); + // Sanity check. + ASSERT_EQ(message->cluster_health_checks_size(), 2); + + // Send this new specifier. + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check to see that even if we changed the order, we get the expected pointers. 
+ auto final_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(final_clusters.size(), 2); + + // Check that the first cluster in the new list is the same as the last in the previous list, + // and that the second cluster in the new list is the same as the second in the previous. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(final_clusters[i], new_clusters[2 - i]); + } + + // Check to see that HDS got three requests, and updated three times with it. + checkHdsCounters(3, 0, 0, 3); +} + +// Edit one of two cluster's endpoints by adding and removing. +TEST_F(HdsTest, TestUpdateEndpoints) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message, and later add/remove endpoints from the second cluster. + message.reset(createSimpleMessage()); + message->MergeFrom(*createComplexSpecifier(1, 1, 2)); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Save list of hosts/endpoints for comparison later. + auto original_hosts = hds_delegate_->hdsClusters()[1]->hosts(); + ASSERT_EQ(original_hosts.size(), 2); + + // Add 3 endpoints to the specifier's second cluster. The first two in the list should reuse pointers. + message.reset(createSimpleMessage()); + message->MergeFrom(*createComplexSpecifier(1, 1, 5)); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get the new host list from HDS. + auto new_hosts = hds_delegate_->hdsClusters()[1]->hosts(); + ASSERT_EQ(new_hosts.size(), 5); + + // Make sure our first two endpoints are at the same address in memory as before. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(original_hosts[i], new_hosts[i]); + } + EXPECT_TRUE(original_hosts[0] != new_hosts[2]); + + // This time, have 4 endpoints, 2 each under 2 localities. + // The first locality will be reused, so its 2 endpoints will be as well. + // The second locality is new so we should be getting 2 new endpoints. + // Since the first locality had 5 but now has 2, we are removing 3. + // 2 ADDED, 3 REMOVED, 2 REUSED. + message.reset(createSimpleMessage()); + message->MergeFrom(*createComplexSpecifier(1, 2, 2)); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get this new list of hosts. + auto final_hosts = hds_delegate_->hdsClusters()[1]->hosts(); + ASSERT_EQ(final_hosts.size(), 4); + + // Ensure the first two elements in the new list are reused. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(new_hosts[i], final_hosts[i]); + } + + // Ensure the last two elements in the new list are different from those in the previous list. + for (int i = 2; i < 4; i++) { + EXPECT_TRUE(new_hosts[i] != final_hosts[i]); + } + + // Check to see that HDS got three requests, and updated three times with it. + checkHdsCounters(3, 0, 0, 3); +} + +// Test adding, reusing, and removing health checks.
+TEST_F(HdsTest, TestUpdateHealthCheckers) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message with two different health checkers. + message.reset(createSimpleMessage()); + auto new_hc = message->mutable_cluster_health_checks(0)->add_health_checks(); + new_hc->MergeFrom(message->mutable_cluster_health_checks(0)->health_checks(0)); + new_hc->mutable_http_health_check()->set_path("/different_path"); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Save list of health checkers for use later. + auto original_hcs = hds_delegate_->hdsClusters()[0]->healthCheckers(); + ASSERT_EQ(original_hcs.size(), 2); + + // Create a new specifier, but make the second health checker different and add a third. + // Then reverse the order so the first one is at the end, testing the hashing works as expected. + message.reset(createSimpleMessage()); + auto new_hc0 = message->mutable_cluster_health_checks(0)->add_health_checks(); + new_hc0->MergeFrom(message->mutable_cluster_health_checks(0)->health_checks(0)); + new_hc0->mutable_http_health_check()->set_path("/path0"); + auto new_hc1 = message->mutable_cluster_health_checks(0)->add_health_checks(); + new_hc1->MergeFrom(message->mutable_cluster_health_checks(0)->health_checks(0)); + new_hc1->mutable_http_health_check()->set_path("/path1"); + message->mutable_cluster_health_checks(0)->mutable_health_checks()->SwapElements(0, 2); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get the new health check list from HDS. + auto new_hcs = hds_delegate_->hdsClusters()[0]->healthCheckers(); + ASSERT_EQ(new_hcs.size(), 3); + + // Make sure our first hc from the original list is the same as the third in the new list. + EXPECT_EQ(original_hcs[0], new_hcs[2]); + EXPECT_TRUE(original_hcs[1] != new_hcs[1]); + + // Check to see that HDS got two requests, and updated two times with it. + checkHdsCounters(2, 0, 0, 2); +} + +// Test to see that if clusters with an empty name get used, there are two clusters. +// Also test to see that if two clusters with the same non-empty name are used, only have +// One cluster. +TEST_F(HdsTest, TestClusterSameName) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message = createComplexSpecifier(2, 1, 1); + // Set both clusters to have an empty name. + message->mutable_cluster_health_checks(0)->set_cluster_name(""); + message->mutable_cluster_health_checks(1)->set_cluster_name(""); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. 
+ expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Get the clusters from HDS + auto original_clusters = hds_delegate_->hdsClusters(); + + // Make sure that even though they have the same name, since they are empty there are two and they + // do not point to the same thing. + ASSERT_EQ(original_clusters.size(), 2); + ASSERT_TRUE(original_clusters[0] != original_clusters[1]); + + // Create message with 3 clusters this time so we force an update. + message = createComplexSpecifier(3, 1, 1); + // Set both clusters to have empty names empty name. + message->mutable_cluster_health_checks(0)->set_cluster_name(""); + message->mutable_cluster_health_checks(1)->set_cluster_name(""); + + // Test that we still get requested number of clusters, even with repeated names on update since + // they are empty. + hds_delegate_->onReceiveMessage(std::move(message)); + auto new_clusters = hds_delegate_->hdsClusters(); + + // Check that since the names are empty, we do not reuse and just reconstruct. + ASSERT_EQ(new_clusters.size(), 3); + ASSERT_TRUE(original_clusters[0] != new_clusters[0]); + ASSERT_TRUE(original_clusters[1] != new_clusters[1]); + + // Create a new message. + message = createComplexSpecifier(2, 1, 1); + // Set both clusters to have the same, non-empty name. + message->mutable_cluster_health_checks(0)->set_cluster_name("anna"); + message->mutable_cluster_health_checks(1)->set_cluster_name("anna"); + + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check that since they both have the same name, only one of them gets used. + auto final_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(final_clusters.size(), 1); + + // Check to see that HDS got three requests, and updated three times with it. + checkHdsCounters(3, 0, 0, 3); +} + +// Test that a transport_socket_matches and transport_socket_match_criteria filter fail when not +// matching, and then after an update the same cluster is used but now matches. +TEST_F(HdsTest, TestUpdateSocketContext) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + // Pull out socket_matcher object normally internal to createClusterInfo, to test that a matcher + // would match the expected socket. + std::vector> socket_matchers; + EXPECT_CALL(test_factory_, createClusterInfo(_)) + .WillRepeatedly(Invoke([&](const ClusterInfoFactory::CreateClusterInfoParams& params) { + // Build scope, factory_context as does ProdClusterInfoFactory. + Envoy::Stats::ScopePtr scope = + params.stats_.createScope(fmt::format("cluster.{}.", params.cluster_.name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + params.admin_, params.ssl_context_manager_, *scope, params.cm_, params.local_info_, + params.dispatcher_, params.stats_, params.singleton_manager_, params.tls_, + params.validation_visitor_, params.api_); + + // Create a mock socket_factory for the scope of this unit test. 
+        std::unique_ptr<Network::TransportSocketFactory> socket_factory =
+            std::make_unique<Network::MockTransportSocketFactory>();
+
+        // Set the socket_matcher object in test scope.
+        socket_matchers.push_back(std::make_unique<TransportSocketMatcherImpl>(
+            params.cluster_.transport_socket_matches(), factory_context, socket_factory, *scope));
+
+        // But still use the fake cluster_info_.
+        return cluster_info_;
+      }));
+  EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1));
+  EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1));
+
+  // Create a message with a non-valid match and process it.
+  message.reset(createSimpleMessage());
+  addTransportSocketMatches(message->mutable_cluster_health_checks(0), "bad_match", "test_match");
+  hds_delegate_->onReceiveMessage(std::move(message));
+
+  // Get our health checker to match against.
+  const auto first_clusters = hds_delegate_->hdsClusters();
+  ASSERT_EQ(first_clusters.size(), 1);
+  const auto first_hcs = first_clusters[0]->healthCheckers();
+  ASSERT_EQ(first_hcs.size(), 1);
+
+  // Check that our match fails, so the default socket factory is used.
+  HealthCheckerImplBase* first_health_checker_base =
+      dynamic_cast<HealthCheckerImplBase*>(first_hcs[0].get());
+  const auto first_match =
+      socket_matchers[0]->resolve(first_health_checker_base->transportSocketMatchMetadata().get());
+  EXPECT_EQ(first_match.name_, "default");
+
+  // Create a new message, this time with a good match.
+  message.reset(createSimpleMessage());
+  addTransportSocketMatches(message->mutable_cluster_health_checks(0), "test_match", "test_match");
+  hds_delegate_->onReceiveMessage(std::move(message));
+
+  // Get our new health checker to match against.
+  const auto second_clusters = hds_delegate_->hdsClusters();
+  ASSERT_EQ(second_clusters.size(), 1);
+  // Check that this new pointer is actually the same pointer as the first cluster.
+  ASSERT_EQ(second_clusters[0], first_clusters[0]);
+  const auto second_hcs = second_clusters[0]->healthCheckers();
+  ASSERT_EQ(second_hcs.size(), 1);
+
+  // Check that since we made no change to our health checkers, the pointer was reused.
+  EXPECT_EQ(first_hcs[0], second_hcs[0]);
+
+  // Check that our match hits.
+  HealthCheckerImplBase* second_health_checker_base =
+      dynamic_cast<HealthCheckerImplBase*>(second_hcs[0].get());
+  ASSERT_EQ(socket_matchers.size(), 2);
+  const auto second_match =
+      socket_matchers[1]->resolve(second_health_checker_base->transportSocketMatchMetadata().get());
+  EXPECT_EQ(second_match.name_, "test_socket");
+
+  // Create a new message; this time we leave the transport socket the same but change the health
+  // check's filter. This means that the health checker changes but the transport_socket_matches in
+  // the ClusterHealthCheck does not.
+  message.reset(createSimpleMessage());
+  addTransportSocketMatches(message->mutable_cluster_health_checks(0), "test_match",
+                            "something_new");
+
+  hds_delegate_->onReceiveMessage(std::move(message));
+  // Get our new health checker to match against.
+  const auto third_clusters = hds_delegate_->hdsClusters();
+  ASSERT_EQ(third_clusters.size(), 1);
+  // Check that this new pointer is actually the same pointer as the first cluster.
+  ASSERT_EQ(third_clusters[0], first_clusters[0]);
+  const auto third_hcs = third_clusters[0]->healthCheckers();
+  ASSERT_EQ(third_hcs.size(), 1);
+
+  // Check that since we made a change to our HC, it is a new pointer.
+  EXPECT_TRUE(first_hcs[0] != third_hcs[0]);
+
+  HealthCheckerImplBase* third_health_checker_base =
+      dynamic_cast<HealthCheckerImplBase*>(third_hcs[0].get());
+
+  // Check that our socket_matchers list still has size 2.
This is because createClusterInfo(_) is never + // called again since there was no update to transportSocketMatches. + ASSERT_EQ(socket_matchers.size(), 2); + const auto third_match = + socket_matchers[1]->resolve(third_health_checker_base->transportSocketMatchMetadata().get()); + // Since this again does not match, it uses default. + EXPECT_EQ(third_match.name_, "default"); +} + } // namespace Upstream } // namespace Envoy diff --git a/test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5678121129607168 b/test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5678121129607168 new file mode 100644 index 000000000000..b1530f2bc1d2 --- /dev/null +++ b/test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5678121129607168 @@ -0,0 +1,28 @@ +health_check_config { + timeout { + seconds: 8960 + } + interval { + seconds: 26624 + } + unhealthy_threshold { + value: 524288 + } + healthy_threshold { + value: 2147483652 + } + http_health_check { + path: "\003" + } + event_log_path: "(" + interval_jitter_percent: 654311422 +} +actions { + respond { + http_respond { + status: 11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111 + } + } +} +http_verify_cluster: true +start_failed: true diff --git a/test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5748071634567168 b/test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5748071634567168 new file mode 100644 index 000000000000..05ce7426fbec --- /dev/null +++ b/test/common/upstream/health_check_corpus/clusterfuzz-testcase-minimized-health_check_fuzz_test-5748071634567168 @@ -0,0 +1,24 @@ +health_check_config { + timeout { + nanos: 9 + } + interval { + seconds: 32768 + nanos: 426 + } + unhealthy_threshold { + value: 491516 + } + healthy_threshold { + value: 524284 + } + http_health_check { + path: "(" + } + event_log_path: "(" +} +actions { + raise_event: 355888746 +} +http_verify_cluster: true +start_failed: true diff --git a/test/common/upstream/health_check_corpus/custom_health_check b/test/common/upstream/health_check_corpus/custom_health_check new file mode 100644 index 000000000000..d994dc6033bf --- /dev/null +++ b/test/common/upstream/health_check_corpus/custom_health_check @@ -0,0 +1,62 @@ +health_check_config { + timeout { + seconds: 26624 + } + interval { + seconds: 8960 + nanos: 65530 + } + interval_jitter { + seconds: 8960 + nanos: 7 + } + unhealthy_threshold { + value: 641007614 + } + healthy_threshold { + value: 1024 + } + alt_port { + value: 16777216 + } + reuse_connection { + value: true + } + no_traffic_interval { + nanos: 2097152 + } + custom_health_check { + name: "ssssssssssssssssssssssssssssssssssssssssss" + } + unhealthy_edge_interval { + seconds: 131072 + nanos: 4104 + } + healthy_edge_interval { + seconds: 131072 + nanos: 128 + } + event_log_path: "A(" + interval_jitter_percent: 641007544 + initial_jitter { + seconds: 8960 + nanos: 7 + } + tls_options { + } +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + trigger_interval_timer { + } +} +actions { + raise_event: CONNECTED +} +http_verify_cluster: true +start_failed: true diff --git a/test/common/upstream/health_check_corpus/grpc_Success b/test/common/upstream/health_check_corpus/grpc_Success new file mode 100644 index 000000000000..c33d7837d660 --- /dev/null +++ 
b/test/common/upstream/health_check_corpus/grpc_Success @@ -0,0 +1,60 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + grpc_health_check { + service_name: "service" + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + status: 200 + } + grpc_respond_bytes { + status: SERVING + chunk_size_for_structured_response: 3 + } + grpc_respond_trailers { + trailers { + headers { + key: "grpc-status" + value: "0" + } + } + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/grpc_SuccessWithAuthority b/test/common/upstream/health_check_corpus/grpc_SuccessWithAuthority new file mode 100644 index 000000000000..714a6e97034d --- /dev/null +++ b/test/common/upstream/health_check_corpus/grpc_SuccessWithAuthority @@ -0,0 +1,61 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + grpc_health_check { + service_name: "service" + authority: "www.envoyproxy.io" + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + status: 200 + chunk_size_for_structured_response: 3 + } + grpc_respond_bytes { + status: SERVING + } + grpc_respond_trailers { + trailers { + headers { + key: "grpc-status" + value: "0" + } + } + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/grpc_crash-33da964bf71e02e3324ceee47fbb204532817e61 b/test/common/upstream/health_check_corpus/grpc_crash-33da964bf71e02e3324ceee47fbb204532817e61 new file mode 100644 index 000000000000..b328fc1e1775 --- /dev/null +++ b/test/common/upstream/health_check_corpus/grpc_crash-33da964bf71e02e3324ceee47fbb204532817e61 @@ -0,0 +1,52 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + grpc_health_check { + service_name: "service" + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + status: 200 + } + grpc_respond_bytes { + status: SERVING + chunk_size_for_structured_response: 3 + } + } + } + } diff --git a/test/common/upstream/health_check_corpus/grpc_crash-50b2ffbcf518e8f078ad8ed1f9801feb89a4d158 b/test/common/upstream/health_check_corpus/grpc_crash-50b2ffbcf518e8f078ad8ed1f9801feb89a4d158 new file mode 100644 index 000000000000..af3320a901f5 --- /dev/null +++ b/test/common/upstream/health_check_corpus/grpc_crash-50b2ffbcf518e8f078ad8ed1f9801feb89a4d158 @@ -0,0 +1,32 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 1 + } + healthy_threshold { + value: 2 + } + alt_port { + value: 544435713 + } + 
grpc_health_check { + } + no_traffic_interval { + seconds: 5 + } +} +actions { + trigger_timeout_timer { + } +} +actions { + raise_event: REMOTE_CLOSE +} diff --git a/test/common/upstream/health_check_corpus/grpc_crash-5747b3523c44ce0a228a8d8884ed7aeea2608341 b/test/common/upstream/health_check_corpus/grpc_crash-5747b3523c44ce0a228a8d8884ed7aeea2608341 new file mode 100644 index 000000000000..4f2a4e1c1d8b --- /dev/null +++ b/test/common/upstream/health_check_corpus/grpc_crash-5747b3523c44ce0a228a8d8884ed7aeea2608341 @@ -0,0 +1,43 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + grpc_health_check { + service_name: "service" + authority: "www.envoyproxy.io" + } +} +actions { + trigger_interval_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_bytes { + } + grpc_respond_trailers { + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/grpc_crash-5d27a3a5fc4fa384c9cbd76f0e7a3d841083396a b/test/common/upstream/health_check_corpus/grpc_crash-5d27a3a5fc4fa384c9cbd76f0e7a3d841083396a new file mode 100644 index 000000000000..3b199c4a997e --- /dev/null +++ b/test/common/upstream/health_check_corpus/grpc_crash-5d27a3a5fc4fa384c9cbd76f0e7a3d841083396a @@ -0,0 +1,59 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + alt_port { + value: 2 + } + grpc_health_check { + service_name: "service" + } + initial_jitter { + seconds: 1 + } +} +actions { + respond { + http_respond { + headers { + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_headers { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + grpc_respond_bytes { + grpc_respond_unstructured_bytes { + data: "\005\000\000\000" + } + chunk_size_for_structured_response: 3 + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/grpc_crash-d9287189542575619bdf21886dd396334fded9c6 b/test/common/upstream/health_check_corpus/grpc_crash-d9287189542575619bdf21886dd396334fded9c6 new file mode 100644 index 000000000000..75062adbbd01 --- /dev/null +++ b/test/common/upstream/health_check_corpus/grpc_crash-d9287189542575619bdf21886dd396334fded9c6 @@ -0,0 +1,63 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + grpc_health_check { + service_name: "service" + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_headers { + headers { + headers { + key: "content-type" + value: "application/grpc" + } + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + status: 200 + chunk_size_for_structured_response: 3 + } + grpc_respond_bytes { + status: SERVICE_UNKNOWN + } + grpc_respond_trailers { + trailers { + headers { + key: "grpc-status" + value: "0" + } + } + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/grpc_no-trailers b/test/common/upstream/health_check_corpus/grpc_no-trailers new file mode 100644 index 
000000000000..b328fc1e1775 --- /dev/null +++ b/test/common/upstream/health_check_corpus/grpc_no-trailers @@ -0,0 +1,52 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + grpc_health_check { + service_name: "service" + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + status: 200 + } + grpc_respond_bytes { + status: SERVING + chunk_size_for_structured_response: 3 + } + } + } + } diff --git a/test/common/upstream/health_check_corpus/http_ConnectionClose b/test/common/upstream/health_check_corpus/http_ConnectionClose new file mode 100644 index 000000000000..665f2ae3a61c --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_ConnectionClose @@ -0,0 +1,52 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +actions { + trigger_interval_timer { + + } +} diff --git a/test/common/upstream/health_check_corpus/http_Degraded b/test/common/upstream/health_check_corpus/http_Degraded new file mode 100644 index 000000000000..60c0b7b64d1f --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_Degraded @@ -0,0 +1,70 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "x-envoy-degraded" + value: "true" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + + } + } +} +actions { + trigger_interval_timer { + + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_Disconnect b/test/common/upstream/health_check_corpus/http_Disconnect new file mode 100644 index 000000000000..80d991d8145c --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_Disconnect @@ -0,0 +1,32 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 5 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + raise_event: REMOTE_CLOSE +} diff --git a/test/common/upstream/health_check_corpus/http_LargeNanos b/test/common/upstream/health_check_corpus/http_LargeNanos new file mode 100644 index 
000000000000..e03b52a26d4e --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_LargeNanos @@ -0,0 +1,52 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + nanos: 1929379840 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } + no_traffic_interval { + seconds: 1 + } +} +actions { + trigger_timeout_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_RemoteCloseBetweenChecks b/test/common/upstream/health_check_corpus/http_RemoteCloseBetweenChecks new file mode 100644 index 000000000000..e7126f1864fd --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_RemoteCloseBetweenChecks @@ -0,0 +1,71 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 5 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_Success b/test/common/upstream/health_check_corpus/http_Success new file mode 100644 index 000000000000..56ac0b7128d2 --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_Success @@ -0,0 +1,47 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_SuccessStartFailedSuccessFirst b/test/common/upstream/health_check_corpus/http_SuccessStartFailedSuccessFirst new file mode 100644 index 000000000000..eff935aa0c9d --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_SuccessStartFailedSuccessFirst @@ -0,0 +1,48 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 5 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +start_failed: true diff 
--git a/test/common/upstream/health_check_corpus/http_Timeout b/test/common/upstream/health_check_corpus/http_Timeout new file mode 100644 index 000000000000..1994e89cc842 --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_Timeout @@ -0,0 +1,31 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + no_traffic_interval { + seconds: 5 + } + unhealthy_threshold { + value: 1 + } + healthy_threshold: { + value: 2 + } + http_health_check { + service_name_matcher { + prefix: "locations" + } + path: "/healthcheck" + } +} +actions { + trigger_timeout_timer { + + } +} diff --git a/test/common/upstream/health_check_corpus/http_TimeoutThenRemoteClose b/test/common/upstream/health_check_corpus/http_TimeoutThenRemoteClose new file mode 100644 index 000000000000..4ba620fd98cd --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_TimeoutThenRemoteClose @@ -0,0 +1,34 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + trigger_timeout_timer { + + } +} +actions { + raise_event: REMOTE_CLOSE +} diff --git a/test/common/upstream/health_check_corpus/http_TimeoutThenSuccess b/test/common/upstream/health_check_corpus/http_TimeoutThenSuccess new file mode 100644 index 000000000000..0f14de621a81 --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_TimeoutThenSuccess @@ -0,0 +1,52 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + trigger_timeout_timer { + + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_ZeroRetryInterval b/test/common/upstream/health_check_corpus/http_ZeroRetryInterval new file mode 100644 index 000000000000..464a5e9f8ce9 --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_ZeroRetryInterval @@ -0,0 +1,49 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + service_name_matcher { + prefix: "locations" + } + path: "/healthcheck" + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "-upstream-healthchecked-cluster" + value: "locations-production-iad" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +http_verify_cluster : true diff --git a/test/common/upstream/health_check_corpus/http_crash-daebc8c8bcb985b777d6fa462a265ba5cdd8b06e b/test/common/upstream/health_check_corpus/http_crash-daebc8c8bcb985b777d6fa462a265ba5cdd8b06e new file mode 100644 index 000000000000..b280f04d488a --- /dev/null +++ 
b/test/common/upstream/health_check_corpus/http_crash-daebc8c8bcb985b777d6fa462a265ba5cdd8b06e @@ -0,0 +1,55 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } + no_traffic_interval { + seconds: 5 + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + trigger_interval_timer { + + } +} diff --git a/test/common/upstream/health_check_corpus/http_crash-test b/test/common/upstream/health_check_corpus/http_crash-test new file mode 100644 index 000000000000..45eca28c749a --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_crash-test @@ -0,0 +1,31 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } + no_traffic_interval { + seconds: 5 + } +} +actions { + trigger_interval_timer { + + } +} diff --git a/test/common/upstream/health_check_corpus/http_crash_1 b/test/common/upstream/health_check_corpus/http_crash_1 new file mode 100644 index 000000000000..8c050d6f57d4 --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_crash_1 @@ -0,0 +1,52 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } + initial_jitter { + seconds: 1 + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "-upstream-healthchecked-cluster" + value: "locations-production-iad" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +http_verify_cluster: true diff --git a/test/common/upstream/health_check_corpus/http_crash_2 b/test/common/upstream/health_check_corpus/http_crash_2 new file mode 100644 index 000000000000..634eb87dcec9 --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_crash_2 @@ -0,0 +1,28 @@ +health_check_config { + timeout { + seconds: 1 + nanos: 7544832 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + respond { + } +} diff --git a/test/common/upstream/health_check_corpus/http_crash_3 b/test/common/upstream/health_check_corpus/http_crash_3 new file mode 100644 index 000000000000..9ea6993b5c38 --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_crash_3 @@ -0,0 +1,84 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + } + alt_port { + value: 2 + } + reuse_connection { + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } + 
no_traffic_interval { + seconds: 1 + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: ":status" + value: "200" + } + headers { + key: "x-envoy-degraded" + value: "true" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +actions { + trigger_interval_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: "200" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_crash_4 b/test/common/upstream/health_check_corpus/http_crash_4 new file mode 100644 index 000000000000..c104204814af --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_crash_4 @@ -0,0 +1,52 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + service_name_matcher { + prefix: "locations" + } + path: "/healthcheck" + } + initial_jitter { + seconds: 1 + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "-upstream-healthchecked-cluster" + value: "locations-production-iad" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +http_verify_cluster : true diff --git a/test/common/upstream/health_check_corpus/http_crash_5 b/test/common/upstream/health_check_corpus/http_crash_5 new file mode 100644 index 000000000000..a89622fe932d --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_crash_5 @@ -0,0 +1,58 @@ +health_check_config { + timeout { + seconds: 1 + nanos: 42496 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + nanos: 620756992 + } + unhealthy_threshold { + value: 2752514 + } + healthy_threshold { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "200" + ignore_case: true + } + } + no_traffic_interval { + seconds: 1 + } + always_log_health_check_failures: true + initial_jitter { + nanos: 16 + } +} +actions { + trigger_timeout_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_out_of_range_status b/test/common/upstream/health_check_corpus/http_out_of_range_status new file mode 100644 index 000000000000..9a5c4eb5b38a --- /dev/null +++ b/test/common/upstream/health_check_corpus/http_out_of_range_status @@ -0,0 +1,47 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + no_traffic_interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + http_health_check { + path: "/healthcheck" + service_name_matcher { + prefix: "locations" + } + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 1500 + } + tcp_respond { + + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/http_test-something b/test/common/upstream/health_check_corpus/http_test-something new file mode 100644 index 000000000000..5666af820321 --- 
/dev/null +++ b/test/common/upstream/health_check_corpus/http_test-something @@ -0,0 +1,32 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + no_traffic_interval { + seconds: 5 + } + unhealthy_threshold { + value: 1 + } + healthy_threshold: { + value: 2 + } + http_health_check { + service_name_matcher { + prefix: "locations" + } + path: "/healthcheck" + } +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + raise_event: REMOTE_CLOSE +} diff --git a/test/common/upstream/health_check_corpus/tcp-expect_close_test b/test/common/upstream/health_check_corpus/tcp-expect_close_test new file mode 100644 index 000000000000..a69759b35f9d --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp-expect_close_test @@ -0,0 +1,53 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + reuse_connection { + value: false + } + tcp_health_check { + send { + text: "01" + } + receive [{ + text: "02" + }] + } +} +actions { + respond { + http_respond { + status: "1" + } + tcp_respond { + data: "\x02" + } + } +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + respond { + http_respond { + status: "1" + } + tcp_respond { + data: "\x02" + } + } +} +actions { + trigger_interval_timer { + + } +} diff --git a/test/common/upstream/health_check_corpus/tcp_DataWithoutReusingConnection b/test/common/upstream/health_check_corpus/tcp_DataWithoutReusingConnection new file mode 100644 index 000000000000..6be3e4462820 --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_DataWithoutReusingConnection @@ -0,0 +1,40 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + reuse_connection { + value: false + } + tcp_health_check { + send { + text: "01" + } + receive [{ + text: "02" + }] + } +} +actions { + respond { + http_respond { + status: 1 + } + tcp_respond { + data: "\x02" + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/tcp_Success b/test/common/upstream/health_check_corpus/tcp_Success new file mode 100644 index 000000000000..3ea16b92943c --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_Success @@ -0,0 +1,40 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + tcp_health_check { + send { + text: "01" + } + receive [{ + text: "02" + }] + } +} +actions { + raise_event: CONNECTED +} +actions { + respond { + http_respond { + status: 1 + } + tcp_respond { + data: "\x02" + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/tcp_Timeout b/test/common/upstream/health_check_corpus/tcp_Timeout new file mode 100644 index 000000000000..219dbf809544 --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_Timeout @@ -0,0 +1,43 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 1 + } + healthy_threshold: { + value: 2 + } + tcp_health_check { + send { + text: "01" + } + receive [{ + text: "02" + }] + } +} +actions { + raise_event: CONNECTED +} +actions { + respond { + http_respond { + status: 1 + } + tcp_respond { + data: "\x01" + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +actions { 
+ raise_event: REMOTE_CLOSE +} diff --git a/test/common/upstream/health_check_corpus/tcp_TimeoutThenRemoteClose b/test/common/upstream/health_check_corpus/tcp_TimeoutThenRemoteClose new file mode 100644 index 000000000000..91fed69e1e61 --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_TimeoutThenRemoteClose @@ -0,0 +1,54 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + tcp_health_check { + send { + text: "01" + } + receive [{ + text: "02" + }] + } +} +actions { + raise_event: CONNECTED +} +actions { + respond { + http_respond { + status: 1 + } + tcp_respond { + data: "\x01" + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +actions { + trigger_timeout_timer { + + } +} +actions { + raise_event: CONNECTED +} +actions { + raise_event: REMOTE_CLOSE +} +actions { + raise_event: CONNECTED +} diff --git a/test/common/upstream/health_check_corpus/tcp_WrongData b/test/common/upstream/health_check_corpus/tcp_WrongData new file mode 100644 index 000000000000..8ac4145f6ded --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_WrongData @@ -0,0 +1,43 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold: { + value: 2 + } + reuse_connection { + value: false + } + tcp_health_check { + send { + text: "01" + } + receive [{ + text: "02" + }] + } +} +actions { + raise_event: CONNECTED +} +actions { + respond { + http_respond { + status: 1 + } + tcp_respond { + data: "\x03" + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/tcp_crash-3596e4a310a1c131312ba869578be28a86a0439b b/test/common/upstream/health_check_corpus/tcp_crash-3596e4a310a1c131312ba869578be28a86a0439b new file mode 100644 index 000000000000..2e7de8d02852 --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_crash-3596e4a310a1c131312ba869578be28a86a0439b @@ -0,0 +1,69 @@ +health_check_config { + timeout { + seconds: 1 + nanos: 2097152 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + reuse_connection { + } + tcp_health_check { + send { + binary: "\001\000\000\r" + } + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "x-envoy-degraded" + value: "true" + } + } + status: 200 + } + tcp_respond { + data: "0" + } + } +} +actions { + trigger_interval_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/tcp_crash-449c4bf2d000d6e56b782fdd26a86e20a7f87b4f b/test/common/upstream/health_check_corpus/tcp_crash-449c4bf2d000d6e56b782fdd26a86e20a7f87b4f new file mode 100644 index 000000000000..77bff0079e8d --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_crash-449c4bf2d000d6e56b782fdd26a86e20a7f87b4f @@ -0,0 +1,63 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + tcp_health_check { + } + no_traffic_interval { + seconds: 1 + } + event_log_path: "200" + initial_jitter { + seconds: 1 + } + 
transport_socket_match_criteria { + fields { + key: "" + value { + bool_value: true + } + } + } +} +actions { + trigger_interval_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "-upstream-healthchecked-cluster" + value: "locations-production-iad" + } + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +http_verify_cluster: true diff --git a/test/common/upstream/health_check_corpus/tcp_crash-e899b54d3e39838939bdde4000acbe8bcc8c37b9 b/test/common/upstream/health_check_corpus/tcp_crash-e899b54d3e39838939bdde4000acbe8bcc8c37b9 new file mode 100644 index 000000000000..5ce1ef11499e --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_crash-e899b54d3e39838939bdde4000acbe8bcc8c37b9 @@ -0,0 +1,45 @@ +health_check_config { + timeout { + seconds: 1 + nanos: 196608 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 16580610 + } + alt_port { + value: 16580610 + } + tcp_health_check { + } + no_traffic_interval { + seconds: 5 + } + initial_jitter { + nanos: 1701314560 + } +} +actions { + respond { + http_respond { + headers { + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} diff --git a/test/common/upstream/health_check_corpus/tcp_crash-test b/test/common/upstream/health_check_corpus/tcp_crash-test new file mode 100644 index 000000000000..77bff0079e8d --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_crash-test @@ -0,0 +1,63 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + tcp_health_check { + } + no_traffic_interval { + seconds: 1 + } + event_log_path: "200" + initial_jitter { + seconds: 1 + } + transport_socket_match_criteria { + fields { + key: "" + value { + bool_value: true + } + } + } +} +actions { + trigger_interval_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "-upstream-healthchecked-cluster" + value: "locations-production-iad" + } + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +http_verify_cluster: true diff --git a/test/common/upstream/health_check_corpus/tcp_crash-test-1 b/test/common/upstream/health_check_corpus/tcp_crash-test-1 new file mode 100644 index 000000000000..5f7c0bdf7bc3 --- /dev/null +++ b/test/common/upstream/health_check_corpus/tcp_crash-test-1 @@ -0,0 +1,69 @@ +health_check_config { + timeout { + seconds: 1 + } + interval { + seconds: 1 + } + interval_jitter { + seconds: 1 + } + unhealthy_threshold { + value: 2 + } + healthy_threshold { + value: 2 + } + tcp_health_check { + send { + text: "01" + } + receive [{ + text: "02" + }] + } + no_traffic_interval { + seconds: 1 + } + event_log_path: "200" + initial_jitter { + seconds: 1 + } + transport_socket_match_criteria { + fields { + key: "" + value { + bool_value: true + } + } + } +} +actions { + trigger_interval_timer { + } +} +actions { + respond { + http_respond { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "-upstream-healthchecked-cluster" + value: "locations-production-iad" + } + } + status: 200 + } + tcp_respond { + } + grpc_respond { + grpc_respond_headers { + + } + } + } +} +http_verify_cluster: true 
diff --git a/test/common/upstream/health_check_corpus/tcp_crash_test b/test/common/upstream/health_check_corpus/tcp_crash_test
new file mode 100644
index 000000000000..994330ae12e7
--- /dev/null
+++ b/test/common/upstream/health_check_corpus/tcp_crash_test
@@ -0,0 +1,27 @@
+health_check_config {
+  timeout {
+    seconds: 1
+  }
+  interval {
+    seconds: 1
+  }
+  unhealthy_threshold {
+    value: 2
+  }
+  healthy_threshold: {
+    value: 2
+  }
+  tcp_health_check {
+    send {
+      text: "01"
+    }
+    receive [{
+      text: "02"
+    }]
+  }
+}
+actions {
+  trigger_interval_timer {
+
+  }
+}
diff --git a/test/common/upstream/health_check_corpus/tcp_expect_close_test b/test/common/upstream/health_check_corpus/tcp_expect_close_test
new file mode 100644
index 000000000000..5bad8719eff0
--- /dev/null
+++ b/test/common/upstream/health_check_corpus/tcp_expect_close_test
@@ -0,0 +1,63 @@
+health_check_config {
+  timeout {
+    seconds: 1
+  }
+  interval {
+    seconds: 1
+  }
+  unhealthy_threshold {
+    value: 2
+  }
+  healthy_threshold: {
+    value: 2
+  }
+  reuse_connection {
+    value: false
+  }
+  tcp_health_check {
+    send {
+      text: "01"
+    }
+    receive [{
+      text: "02"
+    }]
+  }
+}
+actions {
+  respond {
+    http_respond {
+      status: "1"
+    }
+    tcp_respond {
+      data: "\x02"
+    }
+    grpc_respond {
+      grpc_respond_headers {
+
+      }
+    }
+  }
+}
+actions {
+  raise_event: REMOTE_CLOSE
+}
+actions {
+  respond {
+    http_respond {
+      status: 1
+    }
+    tcp_respond {
+      data: "\x02"
+    }
+    grpc_respond {
+      grpc_respond_headers {
+
+      }
+    }
+  }
+}
+actions {
+  trigger_interval_timer {
+
+  }
+}
diff --git a/test/common/upstream/health_check_corpus/tcp_expect_close_test_2 b/test/common/upstream/health_check_corpus/tcp_expect_close_test_2
new file mode 100644
index 000000000000..c33daf7f9323
--- /dev/null
+++ b/test/common/upstream/health_check_corpus/tcp_expect_close_test_2
@@ -0,0 +1,45 @@
+health_check_config {
+  timeout {
+    seconds: 1
+  }
+  interval {
+    seconds: 1
+  }
+  unhealthy_threshold {
+    value: 2
+  }
+  healthy_threshold: {
+    value: 2
+  }
+  reuse_connection {
+    value: false
+  }
+  tcp_health_check {
+    send {
+      text: "01"
+    }
+  }
+}
+actions {
+  raise_event: CONNECTED
+}
+actions {
+  respond {
+    http_respond {
+      status: 1
+    }
+    tcp_respond {
+      data: "\x02"
+    }
+    grpc_respond {
+      grpc_respond_headers {
+
+      }
+    }
+  }
+}
+actions {
+  trigger_interval_timer {
+
+  }
+}
diff --git a/test/common/upstream/health_check_fuzz.cc b/test/common/upstream/health_check_fuzz.cc
new file mode 100644
index 000000000000..fa21636cb335
--- /dev/null
+++ b/test/common/upstream/health_check_fuzz.cc
@@ -0,0 +1,526 @@
+#include "test/common/upstream/health_check_fuzz.h"
+
+#include <memory>
+#include <vector>
+
+#include "common/grpc/common.h"
+
+#include "test/common/upstream/utility.h"
+#include "test/fuzz/utility.h"
+
+namespace Envoy {
+namespace Upstream {
+namespace { // gRPC helper methods
+// From unit tests
+std::vector<std::vector<uint8_t>>
+serializeResponseToBufferList(grpc::health::v1::HealthCheckResponse::ServingStatus status,
+                              uint64_t chunk_size_from_fuzzer) {
+  grpc::health::v1::HealthCheckResponse response;
+  response.set_status(status);
+  const auto data = Grpc::Common::serializeToGrpcFrame(response);
+  uint64_t chunk_size = chunk_size_from_fuzzer % data->length();
+  if (chunk_size == 0) {
+    ++chunk_size;
+  }
+  std::vector<std::vector<uint8_t>> bufferList;
+  for (size_t i = 0; i < data->length(); i += chunk_size) {
+    if (i >= data->length() - chunk_size) {
+      // The length of the last chunk
+      chunk_size = data->length() - i;
+    }
+    auto buffer = std::vector<uint8_t>(chunk_size, 0);
+    data->copyOut(i, chunk_size, &buffer[0]);
+    bufferList.push_back(buffer);
+  }
+  return bufferList;
+}
+
+grpc::health::v1::HealthCheckResponse::ServingStatus
+convertToGrpcServingStatus(test::common::upstream::ServingStatus status) {
+  switch (status) {
+  case test::common::upstream::ServingStatus::UNKNOWN: {
+    return grpc::health::v1::HealthCheckResponse::UNKNOWN;
+  }
+  case test::common::upstream::ServingStatus::SERVING: {
+    return grpc::health::v1::HealthCheckResponse::SERVING;
+  }
+  case test::common::upstream::ServingStatus::NOT_SERVING: {
+    return grpc::health::v1::HealthCheckResponse::NOT_SERVING;
+  }
+  case test::common::upstream::ServingStatus::SERVICE_UNKNOWN: {
+    return grpc::health::v1::HealthCheckResponse::SERVICE_UNKNOWN;
+  }
+  default: // shouldn't hit
+    NOT_REACHED_GCOVR_EXCL_LINE;
+  }
+}
+
+std::vector<std::vector<uint8_t>>
+makeBufferListToRespondWith(test::common::upstream::GrpcRespondBytes grpc_respond_bytes) {
+  switch (grpc_respond_bytes.grpc_respond_bytes_selector_case()) {
+  case test::common::upstream::GrpcRespondBytes::kStatus: {
+    // Structured Response
+    grpc::health::v1::HealthCheckResponse::ServingStatus servingStatus =
+        convertToGrpcServingStatus(grpc_respond_bytes.status());
+    ENVOY_LOG_MISC(trace, "Will respond with a serialized frame with status: {}",
+                   grpc_respond_bytes.status());
+    return serializeResponseToBufferList(servingStatus,
+                                         grpc_respond_bytes.chunk_size_for_structured_response());
+  }
+  case test::common::upstream::GrpcRespondBytes::kGrpcRespondUnstructuredBytes: {
+    std::vector<std::vector<uint8_t>> bufferList;
+    // Arbitrarily Generated Bytes
+    constexpr auto max_chunks = 128;
+    for (int i = 0;
+         i <
+         std::min(max_chunks, grpc_respond_bytes.grpc_respond_unstructured_bytes().data().size());
+         ++i) {
+      std::vector<uint8_t> chunk(
+          grpc_respond_bytes.grpc_respond_unstructured_bytes().data(i).begin(),
+          grpc_respond_bytes.grpc_respond_unstructured_bytes().data(i).end());
+      bufferList.push_back(chunk);
+    }
+    ENVOY_LOG_MISC(trace, "Will respond with arbitrarily generated bytes which have no structure.");
+    return bufferList;
+  }
+  default: // shouldn't hit
+    NOT_REACHED_GCOVR_EXCL_LINE;
+  }
+}
+
+} // namespace
+
+void HttpHealthCheckFuzz::allocHttpHealthCheckerFromProto(
+    const envoy::config::core::v3::HealthCheck& config) {
+  health_checker_ = std::make_shared<TestHttpHealthCheckerImpl>(
+      *cluster_, config, dispatcher_, runtime_, random_,
+      HealthCheckEventLoggerPtr(event_logger_storage_.release()));
+  ENVOY_LOG_MISC(trace, "Created Test Http Health Checker");
+}
+
+void HttpHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) {
+  allocHttpHealthCheckerFromProto(input.health_check_config());
+  ON_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100))
+      .WillByDefault(testing::Return(input.http_verify_cluster()));
+  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {
+      makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")};
+  expectSessionCreate();
+  expectStreamCreate(0);
+  // This sets up the possibility of testing hosts that never become healthy.
+  if (input.start_failed()) {
+    cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet(
+        Host::HealthFlag::FAILED_ACTIVE_HC);
+  }
+  health_checker_->start();
+  ON_CALL(runtime_.snapshot_, getInteger("health_check.min_interval", _))
+      .WillByDefault(testing::Return(45000));
+  // If there is an initial jitter, this calls onIntervalBase and finishes startup.
+  if (DurationUtil::durationToMilliseconds(input.health_check_config().initial_jitter()) != 0) {
+    test_sessions_[0]->interval_timer_->invokeCallback();
+  }
+  reuse_connection_ =
+      PROTOBUF_GET_WRAPPED_OR_DEFAULT(input.health_check_config(), reuse_connection, true);
+}
+
+void HttpHealthCheckFuzz::respond(test::common::upstream::Respond respond, bool last_action) {
+  // Timeout timer needs to be explicitly enabled, usually by onIntervalBase() (callback on the
+  // interval timer).
+  if (!test_sessions_[0]->timeout_timer_->enabled_) {
+    ENVOY_LOG_MISC(trace, "Timeout timer is disabled. Skipping response.");
+    return;
+  }
+
+  const test::fuzz::Headers& headers = respond.http_respond().headers();
+  uint64_t status = respond.http_respond().status();
+
+  std::unique_ptr<Http::TestResponseHeaderMapImpl> response_headers =
+      std::make_unique<Http::TestResponseHeaderMapImpl>(
+          Fuzz::fromHeaders<Http::TestResponseHeaderMapImpl>(headers, {}, {}));
+
+  response_headers->setStatus(status);
+
+  // Responding with http can cause the client to close; if so, create a new one.
+  bool client_will_close = false;
+  if (response_headers->Connection()) {
+    client_will_close =
+        absl::EqualsIgnoreCase(response_headers->Connection()->value().getStringView(),
+                               Http::Headers::get().ConnectionValues.Close);
+  } else if (response_headers->ProxyConnection()) {
+    client_will_close =
+        absl::EqualsIgnoreCase(response_headers->ProxyConnection()->value().getStringView(),
+                               Http::Headers::get().ConnectionValues.Close);
+  }
+
+  ENVOY_LOG_MISC(trace, "Responded headers {}", *response_headers.get());
+  test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), true);
+
+  // Interval timer gets turned on from decodeHeaders().
+  if ((!reuse_connection_ || client_will_close) && !last_action) {
+    ENVOY_LOG_MISC(trace, "Creating client and stream because shouldClose() is true");
+    triggerIntervalTimer(true);
+  }
+}
+
+void HttpHealthCheckFuzz::triggerIntervalTimer(bool expect_client_create) {
+  // Interval timer needs to be explicitly enabled, usually by decodeHeaders.
+  if (!test_sessions_[0]->interval_timer_->enabled_) {
+    ENVOY_LOG_MISC(trace, "Interval timer is disabled. Skipping trigger interval timer.");
+    return;
+  }
+  if (expect_client_create) {
+    expectClientCreate(0);
+  }
+  expectStreamCreate(0);
+  ENVOY_LOG_MISC(trace, "Triggered interval timer");
+  test_sessions_[0]->interval_timer_->invokeCallback();
+}
+
+void HttpHealthCheckFuzz::triggerTimeoutTimer(bool last_action) {
+  // Timeout timer needs to be explicitly enabled, usually by a call to onIntervalBase().
+  if (!test_sessions_[0]->timeout_timer_->enabled_) {
+    ENVOY_LOG_MISC(trace, "Timeout timer is disabled. Skipping trigger timeout timer.");
+    return;
+  }
+  ENVOY_LOG_MISC(trace, "Triggered timeout timer");
+  test_sessions_[0]->timeout_timer_->invokeCallback(); // This closes the client, turns off timeout
+                                                       // and enables interval
+  if (!last_action) {
+    ENVOY_LOG_MISC(trace, "Creating client and stream from network timeout");
+    triggerIntervalTimer(true);
+  }
+}
+
+void HttpHealthCheckFuzz::raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) {
+  test_sessions_[0]->client_connection_->raiseEvent(event_type);
+  if (!last_action && event_type != Network::ConnectionEvent::Connected) {
+    ENVOY_LOG_MISC(trace, "Creating client and stream from close event");
+    triggerIntervalTimer(
+        true); // Interval timer is guaranteed to be enabled from a close event - calls
+               // onResetStream which handles failure, turning interval timer on and timeout off
+  }
+}
+
+void TcpHealthCheckFuzz::allocTcpHealthCheckerFromProto(
+    const envoy::config::core::v3::HealthCheck& config) {
+  health_checker_ = std::make_shared<TcpHealthCheckerImpl>(
+      *cluster_, config, dispatcher_, runtime_, random_,
+      HealthCheckEventLoggerPtr(event_logger_storage_.release()));
+  ENVOY_LOG_MISC(trace, "Created Tcp Health Checker");
+}
+
+void TcpHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) {
+  allocTcpHealthCheckerFromProto(input.health_check_config());
+  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {
+      makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")};
+  expectSessionCreate();
+  expectClientCreate();
+  health_checker_->start();
+  reuse_connection_ =
+      PROTOBUF_GET_WRAPPED_OR_DEFAULT(input.health_check_config(), reuse_connection, true);
+  // The Receive proto message has a validation that if there is a receive field, its text field (a
+  // string representing the hex encoded payload) has at least one byte.
+  if (input.health_check_config().tcp_health_check().receive_size() != 0) {
+    ENVOY_LOG_MISC(trace, "Health Checker is only testing to connect");
+    empty_response_ = false;
+  }
+  // Clang tidy throws an error here in regards to a potential leak. It seems to have something to
+  // do with shared_ptr and possible cycles in regards to the cluster's host objects. Since this
+  // test class directly uses the unit test class that has been in master for a long time, this is
+  // likely a false positive.
+  if (DurationUtil::durationToMilliseconds(input.health_check_config().initial_jitter()) != 0) {
+    interval_timer_->invokeCallback();
+  }
+} // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks)
+
+void TcpHealthCheckFuzz::respond(test::common::upstream::Respond respond, bool last_action) {
+  std::string data = respond.tcp_respond().data();
+  if (!timeout_timer_->enabled_) {
+    ENVOY_LOG_MISC(trace, "Timeout timer is disabled. Skipping response.");
+    return;
+  }
+  Buffer::OwnedImpl response;
+  response.add(data);
+
+  ENVOY_LOG_MISC(trace, "Responded with {}. Length (in bytes) = {}. This is the string passed in.",
+                 data, data.length());
+  read_filter_->onData(response, true);
+
+  // The interval timer may not be on. If it's not on, return. An http response will automatically
+  // turn on interval and turn off timeout, but for tcp it doesn't if the data doesn't match. If the
+  // response doesn't match, it only sets the host to unhealthy. If it does match, it will turn
+  // timeout off and interval on.
+  if (!reuse_connection_ && interval_timer_->enabled_ && !last_action) {
+    triggerIntervalTimer(true);
+  }
+}
+
+void TcpHealthCheckFuzz::triggerIntervalTimer(bool expect_client_create) {
+  if (!interval_timer_->enabled_) {
+    ENVOY_LOG_MISC(trace, "Interval timer is disabled. Skipping trigger interval timer.");
+    return;
+  }
+  if (expect_client_create) {
+    ENVOY_LOG_MISC(trace, "Creating client");
+    expectClientCreate();
+  }
+  ENVOY_LOG_MISC(trace, "Triggered interval timer");
+  interval_timer_->invokeCallback();
+}
+
+void TcpHealthCheckFuzz::triggerTimeoutTimer(bool last_action) {
+  if (!timeout_timer_->enabled_) {
+    ENVOY_LOG_MISC(trace, "Timeout timer is disabled. Skipping trigger timeout timer.");
+    return;
+  }
+  ENVOY_LOG_MISC(trace, "Triggered timeout timer");
+  timeout_timer_->invokeCallback(); // This closes the client, turns off timeout
+                                    // and enables interval
+  if (!last_action) {
+    ENVOY_LOG_MISC(trace, "Will create client and stream from network timeout");
+    triggerIntervalTimer(true);
+  }
+}
+
+void TcpHealthCheckFuzz::raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) {
+  // On a close event, the health checker will call handleFailure if expect_close_ is false. This is
+  // set by multiple code paths. handleFailure() turns on interval and turns off timeout. However,
+  // other actions of the fuzzer account for this by explicitly creating a new client after
+  // expect_close_ gets set to true, turning expect_close_ back to false.
+  connection_->raiseEvent(event_type);
+  if (!last_action && event_type != Network::ConnectionEvent::Connected) {
+    if (!interval_timer_->enabled_) {
+      return;
+    }
+    ENVOY_LOG_MISC(trace, "Will create client from close event");
+    triggerIntervalTimer(true);
+  }
+
+  // In the specific case of:
+  // https://github.com/envoyproxy/envoy/blob/master/source/common/upstream/health_checker_impl.cc#L489
+  // this blows away the client, so we should create a new one.
+  if (event_type == Network::ConnectionEvent::Connected && empty_response_) {
+    ENVOY_LOG_MISC(trace, "Will create client from connected event and empty response.");
+    triggerIntervalTimer(true);
+  }
+}
+
+void GrpcHealthCheckFuzz::allocGrpcHealthCheckerFromProto(
+    const envoy::config::core::v3::HealthCheck& config) {
+  health_checker_ = std::make_shared<TestGrpcHealthCheckerImpl>(
+      *cluster_, config, dispatcher_, runtime_, random_,
+      HealthCheckEventLoggerPtr(event_logger_storage_.release()));
+  ENVOY_LOG_MISC(trace, "Created Test Grpc Health Checker");
+}
+
+void GrpcHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) {
+  allocGrpcHealthCheckerFromProto(input.health_check_config());
+  cluster_->prioritySet().getMockHostSet(0)->hosts_ = {
+      makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")};
+  expectSessionCreate();
+  expectStreamCreate(0);
+  health_checker_->start();
+  ON_CALL(runtime_.snapshot_, getInteger("health_check.min_interval", _))
+      .WillByDefault(testing::Return(45000));
+
+  if (DurationUtil::durationToMilliseconds(input.health_check_config().initial_jitter()) != 0) {
+    test_sessions_[0]->interval_timer_->invokeCallback();
+  }
+
+  reuse_connection_ =
+      PROTOBUF_GET_WRAPPED_OR_DEFAULT(input.health_check_config(), reuse_connection, true);
+}
+
+// Logic from respondResponseSpec() in the unit tests.
+void GrpcHealthCheckFuzz::respond(test::common::upstream::Respond respond, bool last_action) {
+  const test::common::upstream::GrpcRespond& grpc_respond = respond.grpc_respond();
+  if (!test_sessions_[0]->timeout_timer_->enabled_) {
+    ENVOY_LOG_MISC(trace, "Timeout timer is disabled. Skipping response.");
disabled. Skipping response."); + return; + } + // These booleans help figure out when to end the stream + const bool has_data = grpc_respond.has_grpc_respond_bytes(); + // The grpc-status is intentionally not hard coded, in order to fully explore the search space provided by the codecs. + + // If the fuzzing engine generates a grpc_respond_trailers message, there is a validation + // that trailers (test.fuzz.Headers) must be present. If it is present, that means there are + // trailers that will be passed to decodeTrailers(). An empty trailer map counts as having + // trailers. + const bool has_trailers = grpc_respond.has_grpc_respond_trailers(); + + ENVOY_LOG_MISC(trace, "Has data: {}. Has trailers: {}.", has_data, has_trailers); + + const bool end_stream_on_headers = !has_data && !has_trailers; + + std::unique_ptr response_headers = + std::make_unique( + Fuzz::fromHeaders( + grpc_respond.grpc_respond_headers().headers(), {}, {})); + + response_headers->setStatus(grpc_respond.grpc_respond_headers().status()); + + ENVOY_LOG_MISC(trace, "Responded headers {}", *response_headers.get()); + test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), + end_stream_on_headers); + + // If the interval timer is enabled, that means that the rpc is complete, as decodeHeaders hit a + // certain branch that called onRpcComplete(), logically representing a completed rpc call. Thus, + // skip the next responses until explicitly invoking interval timer as cleanup. + if (has_data && !test_sessions_[0]->interval_timer_->enabled_) { + std::vector> bufferList = + makeBufferListToRespondWith(grpc_respond.grpc_respond_bytes()); + // If the interval timer is enabled, that means that the rpc is complete, as decodeData hit a + // certain branch that called onRpcComplete(), logically representing a completed rpc call. + // Thus, skip the next responses until explicitly invoking interval timer as cleanup. + for (size_t i = 0; i < bufferList.size() && !test_sessions_[0]->interval_timer_->enabled_; + ++i) { + const bool end_stream_on_data = !has_trailers && i == bufferList.size() - 1; + const auto data = + std::make_unique(bufferList[i].data(), bufferList[i].size()); + ENVOY_LOG_MISC(trace, "Responded with data"); + test_sessions_[0]->stream_response_callbacks_->decodeData(*data, end_stream_on_data); + } + } + + // If the interval timer is enabled, that means that the rpc is complete, as decodeData hit a + // certain branch that called onRpcComplete(), logically representing a completed rpc call. Thus, + // skip responding with trailers until explicitly invoking interval timer as cleanup. + if (has_trailers && !test_sessions_[0]->interval_timer_->enabled_) { + std::unique_ptr response_trailers = + std::make_unique( + Fuzz::fromHeaders( + grpc_respond.grpc_respond_trailers().trailers(), {}, {})); + + ENVOY_LOG_MISC(trace, "Responded trailers {}", *response_trailers.get()); + + test_sessions_[0]->stream_response_callbacks_->decodeTrailers(std::move(response_trailers)); + } + + // This means that the response did not represent a full rpc response. + if (!test_sessions_[0]->interval_timer_->enabled_) { + return; + } + + // Once it gets here the health checker will have called onRpcComplete(), logically representing a + // completed rpc call, which blows away the client if reuse connection is set to false or the health + // checker had a goaway event with no error flag.
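+ // Unless this was the last action, trigger the interval timer so the next action has a live
+ // stream; a new client is only expected when reuse is disabled or a graceful GOAWAY was received.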
+ if (!last_action) { + ENVOY_LOG_MISC(trace, "Triggering interval timer after response"); + triggerIntervalTimer(!reuse_connection_ || received_no_error_goaway_); + received_no_error_goaway_ = false; // from resetState() + } +} + +void GrpcHealthCheckFuzz::triggerIntervalTimer(bool expect_client_create) { + if (!test_sessions_[0]->interval_timer_->enabled_) { + ENVOY_LOG_MISC(trace, "Interval timer is disabled. Skipping trigger interval timer."); + return; + } + if (expect_client_create) { + expectClientCreate(0); + ENVOY_LOG_MISC(trace, "Created client"); + } + expectStreamCreate(0); + ENVOY_LOG_MISC(trace, "Created stream"); + test_sessions_[0]->interval_timer_->invokeCallback(); +} + +void GrpcHealthCheckFuzz::triggerTimeoutTimer(bool last_action) { + if (!test_sessions_[0]->timeout_timer_->enabled_) { + ENVOY_LOG_MISC(trace, "Timeout timer is disabled. Skipping trigger timeout timer."); + return; + } + ENVOY_LOG_MISC(trace, "Triggered timeout timer"); + test_sessions_[0]->timeout_timer_->invokeCallback(); // This closes the client, turns off + // timeout and enables interval + + if ((!reuse_connection_ || received_no_error_goaway_) && !last_action) { + ENVOY_LOG_MISC(trace, "Triggering interval timer after timeout."); + triggerIntervalTimer(true); + } else { + received_no_error_goaway_ = false; // from resetState() + } +} + +void GrpcHealthCheckFuzz::raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) { + test_sessions_[0]->client_connection_->raiseEvent(event_type); + if (!last_action && event_type != Network::ConnectionEvent::Connected) { + // Close events will always blow away the client + ENVOY_LOG_MISC(trace, "Triggering interval timer after close event"); + // Interval timer is guaranteed to be enabled from a close event - calls + // onResetStream which handles failure, turning interval timer on and timeout off + triggerIntervalTimer(true); + } +} + +void GrpcHealthCheckFuzz::raiseGoAway(bool no_error) { + if (no_error) { + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + // Will cause subsequent events to blow away the client, because this is a "graceful" go away + received_no_error_goaway_ = true; + } else { + // Go away events without the no error flag explicitly blow away the client + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::Other); + triggerIntervalTimer(true); + } +} + +Network::ConnectionEvent +HealthCheckFuzz::getEventTypeFromProto(const test::common::upstream::RaiseEvent& event) { + switch (event) { + case test::common::upstream::RaiseEvent::CONNECTED: { + return Network::ConnectionEvent::Connected; + } + case test::common::upstream::RaiseEvent::REMOTE_CLOSE: { + return Network::ConnectionEvent::RemoteClose; + } + case test::common::upstream::RaiseEvent::LOCAL_CLOSE: { + return Network::ConnectionEvent::LocalClose; + } + default: // shouldn't hit + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +void HealthCheckFuzz::initializeAndReplay(test::common::upstream::HealthCheckTestCase input) { + try { + initialize(input); + } catch (EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + return; + } + replay(input); +} + +void HealthCheckFuzz::replay(const test::common::upstream::HealthCheckTestCase& input) { + constexpr auto max_actions = 64; + for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) { + const auto& event = input.actions(i); + // The last_action boolean prevents final actions from creating a client and stream that will + // never be used.
+ const bool last_action = i == std::min(max_actions, input.actions().size()) - 1; + ENVOY_LOG_MISC(trace, "Action: {}", event.DebugString()); + switch (event.action_selector_case()) { + case test::common::upstream::Action::kRespond: { + respond(event.respond(), last_action); + break; + } + case test::common::upstream::Action::kTriggerIntervalTimer: { + triggerIntervalTimer(false); + break; + } + case test::common::upstream::Action::kTriggerTimeoutTimer: { + triggerTimeoutTimer(last_action); + break; + } + case test::common::upstream::Action::kRaiseEvent: { + raiseEvent(getEventTypeFromProto(event.raise_event()), last_action); + break; + } + default: + break; + } + } +} + +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/health_check_fuzz.h b/test/common/upstream/health_check_fuzz.h new file mode 100644 index 000000000000..ea17615e1270 --- /dev/null +++ b/test/common/upstream/health_check_fuzz.h @@ -0,0 +1,93 @@ +#pragma once + +#include + +#include "test/common/upstream/health_check_fuzz.pb.validate.h" +#include "test/common/upstream/health_checker_impl_test_utils.h" +#include "test/fuzz/common.pb.h" + +namespace Envoy { +namespace Upstream { + +class HealthCheckFuzz { +public: + HealthCheckFuzz() = default; + // This will delegate to the specific classes + void initializeAndReplay(test::common::upstream::HealthCheckTestCase input); + enum class Type { + HTTP, + TCP, + GRPC, + }; + + // The specific implementations of respond look into the respond proto, which has all three types + // of response + virtual void respond(test::common::upstream::Respond respond, bool last_action) PURE; + + virtual void initialize(test::common::upstream::HealthCheckTestCase input) PURE; + virtual void triggerIntervalTimer(bool expect_client_create) PURE; + virtual void triggerTimeoutTimer(bool last_action) PURE; + virtual void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) PURE; + + virtual ~HealthCheckFuzz() = default; + +private: + Network::ConnectionEvent getEventTypeFromProto(const test::common::upstream::RaiseEvent& event); + + void replay(const test::common::upstream::HealthCheckTestCase& input); +}; + +class HttpHealthCheckFuzz : public HealthCheckFuzz, HttpHealthCheckerImplTestBase { +public: + void allocHttpHealthCheckerFromProto(const envoy::config::core::v3::HealthCheck& config); + void initialize(test::common::upstream::HealthCheckTestCase input) override; + void respond(test::common::upstream::Respond respond, bool last_action) override; + void triggerIntervalTimer(bool expect_client_create) override; + void triggerTimeoutTimer(bool last_action) override; + void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) override; + ~HttpHealthCheckFuzz() override = default; + + // Determines whether the client gets reused or not after response + bool reuse_connection_ = true; +}; + +class TcpHealthCheckFuzz : public HealthCheckFuzz, TcpHealthCheckerImplTestBase { +public: + void allocTcpHealthCheckerFromProto(const envoy::config::core::v3::HealthCheck& config); + void initialize(test::common::upstream::HealthCheckTestCase input) override; + void respond(test::common::upstream::Respond respond, bool last_action) override; + void triggerIntervalTimer(bool expect_client_create) override; + void triggerTimeoutTimer(bool last_action) override; + void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) override; + ~TcpHealthCheckFuzz() override = default; + + // Determines whether the client gets reused or not 
after response + bool reuse_connection_ = true; + + // An empty response induces a specific codepath in raiseEvent in the connected case; it ignores + // the binary field and only uses text. + bool empty_response_ = true; +}; + +class GrpcHealthCheckFuzz : public HealthCheckFuzz, GrpcHealthCheckerImplTestBaseUtils { +public: + void allocGrpcHealthCheckerFromProto(const envoy::config::core::v3::HealthCheck& config); + void initialize(test::common::upstream::HealthCheckTestCase input) override; + // This has three components: headers, raw bytes, and trailers + void respond(test::common::upstream::Respond respond, bool last_action) override; + void triggerIntervalTimer(bool expect_client_create) override; + void triggerTimeoutTimer(bool last_action) override; + void raiseEvent(const Network::ConnectionEvent& event_type, bool last_action) override; + void raiseGoAway(bool no_error); + ~GrpcHealthCheckFuzz() override = default; + + // Determines whether the client gets reused or not after response + bool reuse_connection_ = true; + + // Determines whether a client closes after responses and timeouts. Exactly maps to + // received_no_error_goaway_ in source code. + bool received_no_error_goaway_ = false; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/health_check_fuzz.proto b/test/common/upstream/health_check_fuzz.proto new file mode 100644 index 000000000000..f4d0e26dcacb --- /dev/null +++ b/test/common/upstream/health_check_fuzz.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; + +package test.common.upstream; + +import "validate/validate.proto"; + +import "test/fuzz/common.proto"; + +import "envoy/config/core/v3/health_check.proto"; +import "google/protobuf/empty.proto"; + +message HttpRespond { + test.fuzz.Headers headers = 1; + uint64 status = 2 [(validate.rules).uint64.lt = 1000]; +} + +message TcpRespond { + bytes data = 1; +} + +enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + SERVICE_UNKNOWN = 3; // Used only by the Watch method. +} + +message GrpcRespondHeaders { + test.fuzz.Headers headers = 1; + uint64 status = 2 [(validate.rules).uint64.lt = 1000]; +} + +message GrpcRespondUnstructuredBytes { + repeated bytes data = 1; +} + +message GrpcRespondBytes { + oneof grpc_respond_bytes_selector { + option (validate.required) = true; + // Structured response, which will get converted to raw bytes + ServingStatus status = 1 [(validate.rules).enum.defined_only = true]; + GrpcRespondUnstructuredBytes grpc_respond_unstructured_bytes = 2; + } + // This value determines how many fixed bytes will make up each chunk of a structured response. + // It will be taken modulo the byte size of the serialized response. + uint64 chunk_size_for_structured_response = 3; +} + +message GrpcRespondTrailers { + test.fuzz.Headers trailers = 1 [(validate.rules).message.required = true]; +} + +message GrpcRespond { + GrpcRespondHeaders grpc_respond_headers = 1 [(validate.rules).message.required = true]; + GrpcRespondBytes grpc_respond_bytes = 2; + // Having this as a message allows the scenario with no trailers + GrpcRespondTrailers grpc_respond_trailers = 3; +} + +/* +The three types of health checkers (HTTP, TCP, and gRPC) share a lot of logic, thus allowing the fuzzer to use a single action +stream across all three. However, the main difference comes from the type of data parsed as a response. Switching on the +health checker type allows the fuzzer to choose the correct action sequence.
+*/ +message Respond { + HttpRespond http_respond = 1 [(validate.rules).message.required = true]; + TcpRespond tcp_respond = 2 [(validate.rules).message.required = true]; + GrpcRespond grpc_respond = 3 [(validate.rules).message.required = true]; +} + +enum RaiseEvent { + CONNECTED = 0; + REMOTE_CLOSE = 1; + LOCAL_CLOSE = 2; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + Respond respond = 1; + google.protobuf.Empty trigger_interval_timer = 2; + //TODO: respondBody, respondTrailers + google.protobuf.Empty trigger_timeout_timer = 3; + RaiseEvent raise_event = 4 [(validate.rules).enum.defined_only = true]; + } +} + +message HealthCheckTestCase { + envoy.config.core.v3.HealthCheck health_check_config = 1 + [(validate.rules).message.required = true]; + repeated Action actions = 2; + bool http_verify_cluster = 3; //Determines if verify cluster setting is on + bool start_failed = 4; +} diff --git a/test/common/upstream/health_check_fuzz_test.cc b/test/common/upstream/health_check_fuzz_test.cc new file mode 100644 index 000000000000..d5834144e293 --- /dev/null +++ b/test/common/upstream/health_check_fuzz_test.cc @@ -0,0 +1,42 @@ +#include "envoy/config/core/v3/health_check.pb.validate.h" + +#include "test/common/upstream/health_check_fuzz.h" +#include "test/common/upstream/health_check_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Upstream { + +DEFINE_PROTO_FUZZER(const test::common::upstream::HealthCheckTestCase input) { + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + + std::unique_ptr health_check_fuzz; + + switch (input.health_check_config().health_checker_case()) { + case envoy::config::core::v3::HealthCheck::kHttpHealthCheck: { + health_check_fuzz = std::make_unique(); + break; + } + case envoy::config::core::v3::HealthCheck::kTcpHealthCheck: { + health_check_fuzz = std::make_unique(); + break; + } + case envoy::config::core::v3::HealthCheck::kGrpcHealthCheck: { + health_check_fuzz = std::make_unique(); + break; + } + default: // Handles custom health checker + ENVOY_LOG_MISC(trace, "Custom Health Checker currently unsupported, skipping"); + return; + } + + health_check_fuzz->initializeAndReplay(input); +} + +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 769f041e2ae2..4a1ed5a7ae35 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -68,15 +68,14 @@ TEST(HealthCheckerFactoryTest, GrpcHealthCheckHTTP2NotConfiguredException) { EXPECT_CALL(*cluster.info_, features()).WillRepeatedly(Return(0)); Runtime::MockLoader runtime; - Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager log_manager; NiceMock validation_visitor; Api::MockApi api; EXPECT_THROW_WITH_MESSAGE( - HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, runtime, random, - dispatcher, log_manager, validation_visitor, api), + HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, runtime, dispatcher, + log_manager, validation_visitor, api), EnvoyException, "fake_cluster cluster must support HTTP/2 for gRPC healthchecking"); } @@ -87,19 +86,483 @@ TEST(HealthCheckerFactoryTest, CreateGrpc) { .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP2)); Runtime::MockLoader 
runtime; - Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager log_manager; NiceMock validation_visitor; - Api::MockApi api; + NiceMock api; - EXPECT_NE(nullptr, dynamic_cast( - HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, - runtime, random, dispatcher, log_manager, - validation_visitor, api) - .get())); + EXPECT_NE(nullptr, + dynamic_cast( + HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, runtime, + dispatcher, log_manager, validation_visitor, api) + .get())); } +class HttpHealthCheckerImplTest : public testing::Test, public HttpHealthCheckerImplTestBase { +public: + void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } + + void addCompletionCallback() { + health_checker_->addHostCheckCompleteCb( + [this](HostSharedPtr host, HealthTransition changed_state) -> void { + onHostStatus(host, changed_state); + }); + } + + void setupNoServiceValidationHCWithHttp2() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + codec_client_type: Http2 + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupInitialJitter() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + initial_jitter: 5s + interval_jitter_percent: 40 + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupIntervalJitterPercent() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter_percent: 40 + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupNoServiceValidationHC() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupNoTrafficHealthyValidationHC() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + no_traffic_healthy_interval: 10s + interval_jitter: 1s + unhealthy_threshold: 1 + healthy_threshold: 1 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupNoServiceValidationHCOneUnhealthy() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter: 1s + unhealthy_threshold: 1 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupNoServiceValidationHCAlwaysLogFailure() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + 
interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + always_log_health_check_failures: true + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupNoServiceValidationNoReuseConnectionHC() { + std::string yaml = R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + reuse_connection: false + http_health_check: + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupHealthCheckIntervalOverridesHC() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_interval: 2s + unhealthy_edge_interval: 3s + healthy_edge_interval: 4s + no_traffic_interval: 5s + interval_jitter: 0s + unhealthy_threshold: 3 + healthy_threshold: 3 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupServiceValidationHC() { + std::string yaml = R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupDeprecatedServiceNameValidationHC(const std::string& prefix) { + std::string yaml = fmt::format(R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: {0} + path: /healthcheck + )EOF", + prefix); + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupServicePrefixPatternValidationHC() { + std::string yaml = R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupServiceExactPatternValidationHC() { + std::string yaml = R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + exact: locations-production-iad + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupServiceRegexPatternValidationHC() { + std::string yaml = R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + safe_regex: + google_re2: {} + regex: 'locations-.*-.*$' + path: /healthcheck + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupServiceValidationWithCustomHostValueHC(const std::string& host) { + std::string yaml = fmt::format(R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + host: {0} + )EOF", + host); + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig + makeHealthCheckConfig(const uint32_t port_value) { + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config; + config.set_port_value(port_value); + return config; + } + + void appendTestHosts(std::shared_ptr cluster, + const HostWithHealthCheckMap& hosts, const std::string& protocol = "tcp://", 
+ const uint32_t priority = 0) { + for (const auto& host : hosts) { + cluster->prioritySet().getMockHostSet(priority)->hosts_.emplace_back( + makeTestHost(cluster->info_, fmt::format("{}{}", protocol, host.first), host.second)); + } + } + + void setupServiceValidationWithAdditionalHeaders() { + std::string yaml = R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + host: "www.envoyproxy.io" + request_headers_to_add: + - header: + key: x-envoy-ok + value: ok + - header: + key: x-envoy-cool + value: cool + - header: + key: x-envoy-awesome + value: awesome + # The following entry replaces the current user-agent. + - header: + key: user-agent + value: CoolEnvoy/HC + append: false + - header: + key: x-protocol + value: "%PROTOCOL%" + - header: + key: x-upstream-metadata + value: "%UPSTREAM_METADATA([\"namespace\", \"key\"])%" + - header: + key: x-downstream-remote-address + value: "%DOWNSTREAM_REMOTE_ADDRESS%" + - header: + key: x-downstream-remote-address-without-port + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + - header: + key: x-downstream-local-address + value: "%DOWNSTREAM_LOCAL_ADDRESS%" + - header: + key: x-downstream-local-address-without-port + value: "%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%" + - header: + key: x-start-time + value: "%START_TIME(%s.%9f)%" + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void setupServiceValidationWithoutUserAgent() { + std::string yaml = R"EOF( + timeout: 1s + interval: 1s + interval_jitter: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + host: "www.envoyproxy.io" + # The following entry removes the default "user-agent" header. 
+ request_headers_to_remove: ["user-agent"] + )EOF"; + + allocHealthChecker(yaml); + addCompletionCallback(); + } + + void respond(size_t index, const std::string& code, bool conn_close, bool proxy_close = false, + bool body = false, bool trailers = false, + const absl::optional& service_cluster = absl::optional(), + bool degraded = false) { + std::unique_ptr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", code}}); + + if (degraded) { + response_headers->setEnvoyDegraded(1); + } + + if (service_cluster) { + response_headers->addCopy(Http::Headers::get().EnvoyUpstreamHealthCheckedCluster, + service_cluster.value()); + } + if (conn_close) { + response_headers->addCopy("connection", "close"); + } + if (proxy_close) { + response_headers->addCopy("proxy-connection", "close"); + } + + test_sessions_[index]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), + !body && !trailers); + if (body) { + Buffer::OwnedImpl response_data; + test_sessions_[index]->stream_response_callbacks_->decodeData(response_data, !trailers); + } + + if (trailers) { + test_sessions_[index]->stream_response_callbacks_->decodeTrailers( + Http::ResponseTrailerMapPtr{new Http::TestResponseTrailerMapImpl{{"some", "trailer"}}}); + } + } + + void expectSuccessStartFailedFailFirst( + const absl::optional& health_checked_cluster = absl::optional()) { + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( + Host::HealthFlag::FAILED_ACTIVE_HC); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + + // Test that failing first disables fast success. 
+ EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + respond(0, "503", false, false, false, false, health_checked_cluster); + EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( + Host::HealthFlag::FAILED_ACTIVE_HC)); + EXPECT_EQ(Host::Health::Unhealthy, + cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + expectStreamCreate(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + respond(0, "200", false, false, false, false, health_checked_cluster); + EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( + Host::HealthFlag::FAILED_ACTIVE_HC)); + EXPECT_EQ(Host::Health::Unhealthy, + cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + expectStreamCreate(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + respond(0, "200", false, false, false, false, health_checked_cluster); + EXPECT_EQ(Host::Health::Healthy, + cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + } + + MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); +}; + TEST_F(HttpHealthCheckerImplTest, Success) { setupNoServiceValidationHC(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); @@ -793,27 +1256,28 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillRepeatedly(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.get(header_ok)->value().getStringView(), value_ok); - EXPECT_EQ(headers.get(header_cool)->value().getStringView(), value_cool); - EXPECT_EQ(headers.get(header_awesome)->value().getStringView(), value_awesome); + EXPECT_EQ(headers.get(header_ok)[0]->value().getStringView(), value_ok); + EXPECT_EQ(headers.get(header_cool)[0]->value().getStringView(), value_cool); + EXPECT_EQ(headers.get(header_awesome)[0]->value().getStringView(), value_awesome); EXPECT_EQ(headers.getUserAgentValue(), value_user_agent); - EXPECT_EQ(headers.get(upstream_metadata)->value().getStringView(), value_upstream_metadata); + EXPECT_EQ(headers.get(upstream_metadata)[0]->value().getStringView(), + value_upstream_metadata); - EXPECT_EQ(headers.get(protocol)->value().getStringView(), value_protocol); - EXPECT_EQ(headers.get(downstream_remote_address)->value().getStringView(), + EXPECT_EQ(headers.get(protocol)[0]->value().getStringView(), value_protocol); + EXPECT_EQ(headers.get(downstream_remote_address)[0]->value().getStringView(), value_downstream_remote_address); - EXPECT_EQ(headers.get(downstream_remote_address_without_port)->value().getStringView(), + 
EXPECT_EQ(headers.get(downstream_remote_address_without_port)[0]->value().getStringView(), value_downstream_remote_address_without_port); - EXPECT_EQ(headers.get(downstream_local_address)->value().getStringView(), + EXPECT_EQ(headers.get(downstream_local_address)[0]->value().getStringView(), value_downstream_local_address); - EXPECT_EQ(headers.get(downstream_local_address_without_port)->value().getStringView(), + EXPECT_EQ(headers.get(downstream_local_address_without_port)[0]->value().getStringView(), value_downstream_local_address_without_port); Envoy::DateFormatter date_formatter("%s.%9f"); std::string current_start_time = date_formatter.fromTime(dispatcher_.timeSource().systemTime()); - EXPECT_EQ(headers.get(start_time)->value().getStringView(), current_start_time); + EXPECT_EQ(headers.get(start_time)[0]->value().getStringView(), current_start_time); })); health_checker_->start(); @@ -1043,6 +1507,29 @@ TEST_F(HttpHealthCheckerImplTest, SuccessNoTraffic) { EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } +// First start with an unhealthy cluster that moves to +// no_traffic_healthy_interval. +TEST_F(HttpHealthCheckerImplTest, UnhealthyTransitionNoTrafficHealthy) { + setupNoTrafficHealthyValidationHC(); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( + Host::HealthFlag::FAILED_ACTIVE_HC); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + + // Successful health check should now trigger the no_traffic_healthy_interval 10000ms. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, _)); + EXPECT_CALL(*test_sessions_[0]->interval_timer_, + enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + respond(0, "200", false, false, false, false); + EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); +} + TEST_F(HttpHealthCheckerImplTest, SuccessStartFailedSuccessFirst) { setupNoServiceValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { @@ -2963,36 +3450,8 @@ TEST_F(TcpHealthCheckerImplTest, ConnectionLocalFailure) { EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter("health_check.passive_failure").value()); } -class TestGrpcHealthCheckerImpl : public GrpcHealthCheckerImpl { +class GrpcHealthCheckerImplTestBase : public GrpcHealthCheckerImplTestBaseUtils { public: - using GrpcHealthCheckerImpl::GrpcHealthCheckerImpl; - - Http::CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& conn_data) override { - auto codec_client = createCodecClient_(conn_data); - return Http::CodecClientPtr(codec_client); - }; - - // GrpcHealthCheckerImpl - MOCK_METHOD(Http::CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData&)); -}; - -class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { -public: - struct TestSession { - TestSession() = default; - - Event::MockTimer* interval_timer_{}; - Event::MockTimer* timeout_timer_{}; - Http::MockClientConnection* codec_{}; - Stats::IsolatedStoreImpl stats_store_; - Network::MockClientConnection* client_connection_{}; - NiceMock request_encoder_; - Http::ResponseDecoder* stream_response_callbacks_{}; - CodecClientForTest* codec_client_{}; - }; - - using 
TestSessionPtr = std::unique_ptr; - struct ResponseSpec { struct ChunkSpec { bool valid; @@ -3042,16 +3501,12 @@ class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { return ret; } - std::vector> response_headers; + std::vector> + response_headers; // Encapsulates all three types of responses std::vector body_chunks; std::vector> trailers; }; - GrpcHealthCheckerImplTestBase() { - EXPECT_CALL(*cluster_->info_, features()) - .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP2)); - } - void allocHealthChecker(const envoy::config::core::v3::HealthCheck& config) { health_checker_ = std::make_shared( *cluster_, config, dispatcher_, runtime_, random_, @@ -3109,56 +3564,6 @@ class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { addCompletionCallback(); } - void expectSessionCreate() { - // Expectations are in LIFO order. - TestSessionPtr new_test_session(new TestSession()); - test_sessions_.emplace_back(std::move(new_test_session)); - TestSession& test_session = *test_sessions_.back(); - test_session.timeout_timer_ = new Event::MockTimer(&dispatcher_); - test_session.interval_timer_ = new Event::MockTimer(&dispatcher_); - expectClientCreate(test_sessions_.size() - 1); - } - - void expectClientCreate(size_t index) { - TestSession& test_session = *test_sessions_[index]; - test_session.codec_ = new NiceMock(); - test_session.client_connection_ = new NiceMock(); - connection_index_.push_back(index); - codec_index_.push_back(index); - - EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) - .Times(testing::AnyNumber()) - .WillRepeatedly(InvokeWithoutArgs([&]() -> Network::ClientConnection* { - uint32_t index = connection_index_.front(); - connection_index_.pop_front(); - return test_sessions_[index]->client_connection_; - })); - - EXPECT_CALL(*health_checker_, createCodecClient_(_)) - .WillRepeatedly( - Invoke([&](Upstream::Host::CreateConnectionData& conn_data) -> Http::CodecClient* { - uint32_t index = codec_index_.front(); - codec_index_.pop_front(); - TestSession& test_session = *test_sessions_[index]; - std::shared_ptr cluster{ - new NiceMock()}; - Event::MockDispatcher dispatcher_; - - test_session.codec_client_ = new CodecClientForTest( - Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_), - test_session.codec_, nullptr, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), dispatcher_); - return test_session.codec_client_; - })); - } - - void expectStreamCreate(size_t index) { - test_sessions_[index]->request_encoder_.stream_.callbacks_.clear(); - EXPECT_CALL(*test_sessions_[index]->codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&test_sessions_[index]->stream_response_callbacks_), - ReturnRef(test_sessions_[index]->request_encoder_))); - } - // Starts healthchecker and sets up timer expectations, leaving up future specification of // healthcheck response for the caller. Useful when there is only one healthcheck attempt // performed during test case (but possibly on many hosts). 
@@ -3280,7 +3685,7 @@ class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { EXPECT_NE(nullptr, headers.Method()); EXPECT_EQ(expected_host, headers.getHostValue()); EXPECT_EQ(std::chrono::milliseconds(1000).count(), - Envoy::Grpc::Common::getGrpcTimeout(headers).count()); + Envoy::Grpc::Common::getGrpcTimeout(headers).value().count()); })); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeData(_, true)) .WillOnce(Invoke([&](Buffer::Instance& data, bool) { @@ -3309,11 +3714,6 @@ class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { } MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); - - std::vector test_sessions_; - std::shared_ptr health_checker_; - std::list connection_index_{}; - std::list codec_index_{}; }; class GrpcHealthCheckerImplTest : public testing::Test, public GrpcHealthCheckerImplTestBase {}; diff --git a/test/common/upstream/health_checker_impl_test_utils.cc b/test/common/upstream/health_checker_impl_test_utils.cc index 763ab236a7ad..3bdd709d43e0 100644 --- a/test/common/upstream/health_checker_impl_test_utils.cc +++ b/test/common/upstream/health_checker_impl_test_utils.cc @@ -1,6 +1,5 @@ #include "test/common/upstream/health_checker_impl_test_utils.h" -#include "test/common/http/common.h" #include "test/common/upstream/utility.h" #include "gmock/gmock.h" @@ -9,367 +8,7 @@ namespace Envoy { namespace Upstream { -void HttpHealthCheckerImplTest::allocHealthChecker(const std::string& yaml, bool avoid_boosting) { - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_storage_.release())); -} - -void HttpHealthCheckerImplTest::addCompletionCallback() { - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); -} - -void HttpHealthCheckerImplTest::setupNoServiceValidationHCWithHttp2() { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - codec_client_type: Http2 - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupInitialJitter() { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - initial_jitter: 5s - interval_jitter_percent: 40 - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupIntervalJitterPercent() { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter_percent: 40 - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupNoServiceValidationHC() { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void 
HttpHealthCheckerImplTest::setupNoServiceValidationHCOneUnhealthy() { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter: 1s - unhealthy_threshold: 1 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupNoServiceValidationHCAlwaysLogFailure() { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - always_log_health_check_failures: true - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupNoServiceValidationNoReuseConnectionHC() { - std::string yaml = R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - reuse_connection: false - http_health_check: - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupHealthCheckIntervalOverridesHC() { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - unhealthy_interval: 2s - unhealthy_edge_interval: 3s - healthy_edge_interval: 4s - no_traffic_interval: 5s - interval_jitter: 0s - unhealthy_threshold: 3 - healthy_threshold: 3 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupServiceValidationHC() { - std::string yaml = R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupDeprecatedServiceNameValidationHC(const std::string& prefix) { - std::string yaml = fmt::format(R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: {0} - path: /healthcheck - )EOF", - prefix); - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupServicePrefixPatternValidationHC() { - std::string yaml = R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupServiceExactPatternValidationHC() { - std::string yaml = R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - exact: locations-production-iad - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupServiceRegexPatternValidationHC() { - std::string yaml = R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - safe_regex: - google_re2: {} - regex: 'locations-.*-.*$' - path: /healthcheck - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void 
HttpHealthCheckerImplTest::setupServiceValidationWithCustomHostValueHC( - const std::string& host) { - std::string yaml = fmt::format(R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - host: {0} - )EOF", - host); - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig -HttpHealthCheckerImplTest::makeHealthCheckConfig(const uint32_t port_value) { - envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config; - config.set_port_value(port_value); - return config; -} - -void HttpHealthCheckerImplTest::appendTestHosts(std::shared_ptr cluster, - const HostWithHealthCheckMap& hosts, - const std::string& protocol, - const uint32_t priority) { - for (const auto& host : hosts) { - cluster->prioritySet().getMockHostSet(priority)->hosts_.emplace_back( - makeTestHost(cluster->info_, fmt::format("{}{}", protocol, host.first), host.second)); - } -} - -void HttpHealthCheckerImplTest::setupServiceValidationWithAdditionalHeaders() { - std::string yaml = R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - host: "www.envoyproxy.io" - request_headers_to_add: - - header: - key: x-envoy-ok - value: ok - - header: - key: x-envoy-cool - value: cool - - header: - key: x-envoy-awesome - value: awesome - # The following entry replaces the current user-agent. - - header: - key: user-agent - value: CoolEnvoy/HC - append: false - - header: - key: x-protocol - value: "%PROTOCOL%" - - header: - key: x-upstream-metadata - value: "%UPSTREAM_METADATA([\"namespace\", \"key\"])%" - - header: - key: x-downstream-remote-address - value: "%DOWNSTREAM_REMOTE_ADDRESS%" - - header: - key: x-downstream-remote-address-without-port - value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" - - header: - key: x-downstream-local-address - value: "%DOWNSTREAM_LOCAL_ADDRESS%" - - header: - key: x-downstream-local-address-without-port - value: "%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%" - - header: - key: x-start-time - value: "%START_TIME(%s.%9f)%" - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::setupServiceValidationWithoutUserAgent() { - std::string yaml = R"EOF( - timeout: 1s - interval: 1s - interval_jitter: 1s - unhealthy_threshold: 2 - healthy_threshold: 2 - http_health_check: - service_name_matcher: - prefix: locations - path: /healthcheck - host: "www.envoyproxy.io" - # The following entry removes the default "user-agent" header. - request_headers_to_remove: ["user-agent"] - )EOF"; - - allocHealthChecker(yaml); - addCompletionCallback(); -} - -void HttpHealthCheckerImplTest::expectSessionCreate( +void HttpHealthCheckerImplTestBase::expectSessionCreate( const HostWithHealthCheckMap& health_check_map) { // Expectations are in LIFO order. 
TestSessionPtr new_test_session(new TestSession()); @@ -380,8 +19,8 @@ void HttpHealthCheckerImplTest::expectSessionCreate( expectClientCreate(test_sessions_.size() - 1, health_check_map); } -void HttpHealthCheckerImplTest::expectClientCreate(size_t index, - const HostWithHealthCheckMap& health_check_map) { +void HttpHealthCheckerImplTestBase::expectClientCreate( + size_t index, const HostWithHealthCheckMap& health_check_map) { TestSession& test_session = *test_sessions_[index]; test_session.codec_ = new NiceMock(); ON_CALL(*test_session.codec_, protocol()).WillByDefault(testing::Return(Http::Protocol::Http11)); @@ -419,98 +58,87 @@ void HttpHealthCheckerImplTest::expectClientCreate(size_t index, })); } -void HttpHealthCheckerImplTest::expectStreamCreate(size_t index) { +void HttpHealthCheckerImplTestBase::expectStreamCreate(size_t index) { test_sessions_[index]->request_encoder_.stream_.callbacks_.clear(); EXPECT_CALL(*test_sessions_[index]->codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&test_sessions_[index]->stream_response_callbacks_), ReturnRef(test_sessions_[index]->request_encoder_))); } -void HttpHealthCheckerImplTest::respond(size_t index, const std::string& code, bool conn_close, - bool proxy_close, bool body, bool trailers, - const absl::optional& service_cluster, - bool degraded) { - std::unique_ptr response_headers( - new Http::TestResponseHeaderMapImpl{{":status", code}}); - - if (degraded) { - response_headers->setEnvoyDegraded(1); - } - - if (service_cluster) { - response_headers->addCopy(Http::Headers::get().EnvoyUpstreamHealthCheckedCluster, - service_cluster.value()); - } - if (conn_close) { - response_headers->addCopy("connection", "close"); - } - if (proxy_close) { - response_headers->addCopy("proxy-connection", "close"); - } +void HttpHealthCheckerImplTestBase::expectSessionCreate() { + expectSessionCreate(health_checker_map_); +} +void HttpHealthCheckerImplTestBase::expectClientCreate(size_t index) { + expectClientCreate(index, health_checker_map_); +} - test_sessions_[index]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), - !body && !trailers); - if (body) { - Buffer::OwnedImpl response_data; - test_sessions_[index]->stream_response_callbacks_->decodeData(response_data, !trailers); - } +// This is needed to put expectations in LIFO order. The unit tests use inSequence, which makes +// expectations FIFO. 
+void TcpHealthCheckerImplTestBase::expectSessionCreate() { + timeout_timer_ = new Event::MockTimer(&dispatcher_); + interval_timer_ = new Event::MockTimer(&dispatcher_); +} - if (trailers) { - test_sessions_[index]->stream_response_callbacks_->decodeTrailers( - Http::ResponseTrailerMapPtr{new Http::TestResponseTrailerMapImpl{{"some", "trailer"}}}); - } +void TcpHealthCheckerImplTestBase::expectClientCreate() { + connection_ = new NiceMock(); + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce(testing::Return(connection_)); + EXPECT_CALL(*connection_, addReadFilter(_)).WillOnce(testing::SaveArg<0>(&read_filter_)); } -void HttpHealthCheckerImplTest::expectSessionCreate() { expectSessionCreate(health_checker_map_); } -void HttpHealthCheckerImplTest::expectClientCreate(size_t index) { - expectClientCreate(index, health_checker_map_); +GrpcHealthCheckerImplTestBaseUtils::GrpcHealthCheckerImplTestBaseUtils() { + EXPECT_CALL(*cluster_->info_, features()) + .WillRepeatedly(testing::Return(Upstream::ClusterInfo::Features::HTTP2)); } -void HttpHealthCheckerImplTest::expectSuccessStartFailedFailFirst( - const absl::optional& health_checked_cluster) { - cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; - cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( - Host::HealthFlag::FAILED_ACTIVE_HC); - expectSessionCreate(); - expectStreamCreate(0); - EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); - health_checker_->start(); +void GrpcHealthCheckerImplTestBaseUtils::expectSessionCreate() { + // Expectations are in LIFO order. + TestSessionPtr new_test_session(new TestSession()); + test_sessions_.emplace_back(std::move(new_test_session)); + TestSession& test_session = *test_sessions_.back(); + test_session.timeout_timer_ = new Event::MockTimer(&dispatcher_); + test_session.interval_timer_ = new Event::MockTimer(&dispatcher_); + expectClientCreate(test_sessions_.size() - 1); +} - // Test that failing first disables fast success. 
- EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); - EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); - EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); - respond(0, "503", false, false, false, false, health_checked_cluster); - EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( - Host::HealthFlag::FAILED_ACTIVE_HC)); - EXPECT_EQ(Host::Health::Unhealthy, - cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); +void GrpcHealthCheckerImplTestBaseUtils::expectClientCreate(size_t index) { + TestSession& test_session = *test_sessions_[index]; + test_session.codec_ = new NiceMock(); + test_session.client_connection_ = new NiceMock(); + connection_index_.push_back(index); + codec_index_.push_back(index); - EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); - expectStreamCreate(0); - test_sessions_[0]->interval_timer_->invokeCallback(); + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) + .Times(testing::AnyNumber()) + .WillRepeatedly(testing::InvokeWithoutArgs([&]() -> Network::ClientConnection* { + uint32_t index = connection_index_.front(); + connection_index_.pop_front(); + return test_sessions_[index]->client_connection_; + })); - EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); - EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); - EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - respond(0, "200", false, false, false, false, health_checked_cluster); - EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( - Host::HealthFlag::FAILED_ACTIVE_HC)); - EXPECT_EQ(Host::Health::Unhealthy, - cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + EXPECT_CALL(*health_checker_, createCodecClient_(_)) + .WillRepeatedly( + Invoke([&](Upstream::Host::CreateConnectionData& conn_data) -> Http::CodecClient* { + uint32_t index = codec_index_.front(); + codec_index_.pop_front(); + TestSession& test_session = *test_sessions_[index]; + std::shared_ptr cluster{ + new NiceMock()}; + Event::MockDispatcher dispatcher_; - EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); - expectStreamCreate(0); - test_sessions_[0]->interval_timer_->invokeCallback(); + test_session.codec_client_ = new CodecClientForTest( + Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_), + test_session.codec_, nullptr, + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), dispatcher_); + return test_session.codec_client_; + })); +} - EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); - EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); - EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - respond(0, "200", false, false, false, false, health_checked_cluster); - EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); +void GrpcHealthCheckerImplTestBaseUtils::expectStreamCreate(size_t index) { + test_sessions_[index]->request_encoder_.stream_.callbacks_.clear(); + EXPECT_CALL(*test_sessions_[index]->codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&test_sessions_[index]->stream_response_callbacks_), + ReturnRef(test_sessions_[index]->request_encoder_))); } } // namespace Upstream diff --git a/test/common/upstream/health_checker_impl_test_utils.h 
b/test/common/upstream/health_checker_impl_test_utils.h index bb6213ae2fec..5c67c7371909 100644 --- a/test/common/upstream/health_checker_impl_test_utils.h +++ b/test/common/upstream/health_checker_impl_test_utils.h @@ -2,6 +2,7 @@ #include "common/upstream/health_checker_impl.h" +#include "test/common/http/common.h" #include "test/mocks/common.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" @@ -37,7 +38,7 @@ class TestHttpHealthCheckerImpl : public HttpHealthCheckerImpl { Http::CodecClient::Type codecClientType() { return codec_client_type_; } }; -class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { +class HttpHealthCheckerImplTestBase : public HealthCheckerTestBase { public: struct TestSession { Event::MockTimer* interval_timer_{}; @@ -54,73 +55,79 @@ class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTest absl::node_hash_map; - void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true); - - void addCompletionCallback(); - - void setupNoServiceValidationHCWithHttp2(); - - void setupInitialJitter(); - - void setupIntervalJitterPercent(); - - void setupNoServiceValidationHC(); - - void setupNoServiceValidationHCOneUnhealthy(); - - void setupNoServiceValidationHCAlwaysLogFailure(); - - void setupNoServiceValidationNoReuseConnectionHC(); - - void setupHealthCheckIntervalOverridesHC(); - - void setupServiceValidationHC(); + void expectSessionCreate(const HostWithHealthCheckMap& health_check_map); - void setupDeprecatedServiceNameValidationHC(const std::string& prefix); + void expectClientCreate(size_t index, const HostWithHealthCheckMap& health_check_map); - void setupServicePrefixPatternValidationHC(); + void expectStreamCreate(size_t index); - void setupServiceExactPatternValidationHC(); + void expectSessionCreate(); + void expectClientCreate(size_t index); - void setupServiceRegexPatternValidationHC(); + std::vector test_sessions_; + std::shared_ptr health_checker_; + std::list connection_index_{}; + std::list codec_index_{}; + const HostWithHealthCheckMap health_checker_map_{}; +}; - void setupServiceValidationWithCustomHostValueHC(const std::string& host); +// TODO(zasweq): This class here isn't currently being used in the unit test class. +// The class here expects the timeout to be created first, then the interval. This is because +// the normal expectation calls are in the opposite, or LIFO (Last In, First Out), order. The InSequence +// object makes the tcp health checker unit tests FIFO (First In, First Out). We should standardize +// this amongst the three unit test classes.
+class TcpHealthCheckerImplTestBase : public HealthCheckerTestBase { +public: + void expectSessionCreate(); + void expectClientCreate(); - const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig - makeHealthCheckConfig(const uint32_t port_value); + std::shared_ptr health_checker_; + Network::MockClientConnection* connection_{}; + Event::MockTimer* timeout_timer_{}; + Event::MockTimer* interval_timer_{}; + Network::ReadFilterSharedPtr read_filter_; +}; - void appendTestHosts(std::shared_ptr cluster, - const HostWithHealthCheckMap& hosts, const std::string& protocol = "tcp://", - const uint32_t priority = 0); +class TestGrpcHealthCheckerImpl : public GrpcHealthCheckerImpl { +public: + using GrpcHealthCheckerImpl::GrpcHealthCheckerImpl; - void setupServiceValidationWithAdditionalHeaders(); + Http::CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& conn_data) override { + auto codec_client = createCodecClient_(conn_data); + return Http::CodecClientPtr(codec_client); + }; - void setupServiceValidationWithoutUserAgent(); + // GrpcHealthCheckerImpl + MOCK_METHOD(Http::CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData&)); +}; - void expectSessionCreate(const HostWithHealthCheckMap& health_check_map); +class GrpcHealthCheckerImplTestBaseUtils : public HealthCheckerTestBase { +public: + struct TestSession { + TestSession() = default; - void expectClientCreate(size_t index, const HostWithHealthCheckMap& health_check_map); + Event::MockTimer* interval_timer_{}; + Event::MockTimer* timeout_timer_{}; + Http::MockClientConnection* codec_{}; + Stats::IsolatedStoreImpl stats_store_; + Network::MockClientConnection* client_connection_{}; + NiceMock request_encoder_; + Http::ResponseDecoder* stream_response_callbacks_{}; + CodecClientForTest* codec_client_{}; + }; - void expectStreamCreate(size_t index); + using TestSessionPtr = std::unique_ptr; - void respond(size_t index, const std::string& code, bool conn_close, bool proxy_close = false, - bool body = false, bool trailers = false, - const absl::optional& service_cluster = absl::optional(), - bool degraded = false); + GrpcHealthCheckerImplTestBaseUtils(); void expectSessionCreate(); void expectClientCreate(size_t index); - - void expectSuccessStartFailedFailFirst( - const absl::optional& health_checked_cluster = absl::optional()); - - MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); + void expectStreamCreate(size_t index); std::vector test_sessions_; - std::shared_ptr health_checker_; + std::shared_ptr health_checker_; std::list connection_index_{}; std::list codec_index_{}; - const HostWithHealthCheckMap health_checker_map_{}; }; } // namespace Upstream diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index 3604dfabaecb..3d13b15edb0e 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -11,6 +11,7 @@ #include "common/upstream/subset_lb.h" #include "common/upstream/upstream_impl.h" +#include "test/benchmark/main.h" #include "test/common/upstream/utility.h" #include "test/mocks/upstream/cluster_info.h" @@ -97,13 +98,18 @@ class LeastRequestTester : public BaseTester { std::unique_ptr lb_; }; -void benchmarkRoundRobinLoadBalancerBuild(benchmark::State& state) { +void benchmarkRoundRobinLoadBalancerBuild(::benchmark::State& state) { + const uint64_t num_hosts = state.range(0); + const uint64_t weighted_subset_percent = state.range(1); + const 
uint64_t weight = state.range(2); + + if (benchmark::skipExpensiveBenchmarks() && num_hosts > 10000) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + for (auto _ : state) { // NOLINT: Silences warning about dead store state.PauseTiming(); - const uint64_t num_hosts = state.range(0); - const uint64_t weighted_subset_percent = state.range(1); - const uint64_t weight = state.range(2); - const size_t start_tester_mem = Memory::Stats::totalCurrentlyAllocated(); RoundRobinTester tester(num_hosts, weighted_subset_percent, weight); const size_t end_tester_mem = Memory::Stats::totalCurrentlyAllocated(); @@ -137,7 +143,7 @@ BENCHMARK(benchmarkRoundRobinLoadBalancerBuild) ->Args({50000, 0, 1}) ->Args({50000, 50, 50}) ->Args({50000, 100, 50}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); class RingHashTester : public BaseTester { public: @@ -169,7 +175,7 @@ uint64_t hashInt(uint64_t i) { return HashUtil::xxHash64(absl::string_view(reinterpret_cast(&i), sizeof(i))); } -void benchmarkRingHashLoadBalancerBuildRing(benchmark::State& state) { +void benchmarkRingHashLoadBalancerBuildRing(::benchmark::State& state) { for (auto _ : state) { // NOLINT: Silences warning about dead store state.PauseTiming(); const uint64_t num_hosts = state.range(0); @@ -195,9 +201,9 @@ BENCHMARK(benchmarkRingHashLoadBalancerBuildRing) ->Args({100, 256000}) ->Args({200, 256000}) ->Args({500, 256000}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); -void benchmarkMaglevLoadBalancerBuildTable(benchmark::State& state) { +void benchmarkMaglevLoadBalancerBuildTable(::benchmark::State& state) { for (auto _ : state) { // NOLINT: Silences warning about dead store state.PauseTiming(); const uint64_t num_hosts = state.range(0); @@ -219,7 +225,7 @@ BENCHMARK(benchmarkMaglevLoadBalancerBuildTable) ->Arg(100) ->Arg(200) ->Arg(500) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); class TestLoadBalancerContext : public LoadBalancerContextBase { public: @@ -229,7 +235,7 @@ class TestLoadBalancerContext : public LoadBalancerContextBase { absl::optional hash_key_; }; -void computeHitStats(benchmark::State& state, +void computeHitStats(::benchmark::State& state, const absl::node_hash_map& hit_counter) { double mean = 0; for (const auto& pair : hit_counter) { @@ -249,12 +255,18 @@ void computeHitStats(benchmark::State& state, state.counters["relative_stddev_hits"] = (stddev / mean); } -void benchmarkLeastRequestLoadBalancerChooseHost(benchmark::State& state) { +void benchmarkLeastRequestLoadBalancerChooseHost(::benchmark::State& state) { + const uint64_t num_hosts = state.range(0); + const uint64_t choice_count = state.range(1); + const uint64_t keys_to_simulate = state.range(2); + + if (benchmark::skipExpensiveBenchmarks() && keys_to_simulate > 1000) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + for (auto _ : state) { // NOLINT: Silences warning about dead store state.PauseTiming(); - const uint64_t num_hosts = state.range(0); - const uint64_t choice_count = state.range(1); - const uint64_t keys_to_simulate = state.range(2); LeastRequestTester tester(num_hosts, choice_count); absl::node_hash_map hit_counter; TestLoadBalancerContext context; @@ -271,15 +283,21 @@ void benchmarkLeastRequestLoadBalancerChooseHost(benchmark::State& state) { } } BENCHMARK(benchmarkLeastRequestLoadBalancerChooseHost) + ->Args({100, 1, 1000}) + ->Args({100, 2, 1000}) + ->Args({100, 3, 1000}) + ->Args({100, 10, 1000}) + ->Args({100, 50, 1000}) + 
->Args({100, 100, 1000}) ->Args({100, 1, 1000000}) ->Args({100, 2, 1000000}) ->Args({100, 3, 1000000}) ->Args({100, 10, 1000000}) ->Args({100, 50, 1000000}) ->Args({100, 100, 1000000}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); -void benchmarkRingHashLoadBalancerChooseHost(benchmark::State& state) { +void benchmarkRingHashLoadBalancerChooseHost(::benchmark::State& state) { for (auto _ : state) { // NOLINT: Silences warning about dead store // Do not time the creation of the ring. state.PauseTiming(); @@ -316,9 +334,9 @@ BENCHMARK(benchmarkRingHashLoadBalancerChooseHost) ->Args({100, 256000, 100000}) ->Args({200, 256000, 100000}) ->Args({500, 256000, 100000}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); -void benchmarkMaglevLoadBalancerChooseHost(benchmark::State& state) { +void benchmarkMaglevLoadBalancerChooseHost(::benchmark::State& state) { for (auto _ : state) { // NOLINT: Silences warning about dead store // Do not time the creation of the table. state.PauseTiming(); @@ -349,15 +367,20 @@ BENCHMARK(benchmarkMaglevLoadBalancerChooseHost) ->Args({100, 100000}) ->Args({200, 100000}) ->Args({500, 100000}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); -void benchmarkRingHashLoadBalancerHostLoss(benchmark::State& state) { - for (auto _ : state) { // NOLINT: Silences warning about dead store - const uint64_t num_hosts = state.range(0); - const uint64_t min_ring_size = state.range(1); - const uint64_t hosts_to_lose = state.range(2); - const uint64_t keys_to_simulate = state.range(3); +void benchmarkRingHashLoadBalancerHostLoss(::benchmark::State& state) { + const uint64_t num_hosts = state.range(0); + const uint64_t min_ring_size = state.range(1); + const uint64_t hosts_to_lose = state.range(2); + const uint64_t keys_to_simulate = state.range(3); + + if (benchmark::skipExpensiveBenchmarks() && min_ring_size > 65536) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + for (auto _ : state) { // NOLINT: Silences warning about dead store RingHashTester tester(num_hosts, min_ring_size); tester.ring_hash_lb_->initialize(); LoadBalancerPtr lb = tester.ring_hash_lb_->factory()->create(); @@ -392,12 +415,15 @@ void benchmarkRingHashLoadBalancerHostLoss(benchmark::State& state) { } } BENCHMARK(benchmarkRingHashLoadBalancerHostLoss) + ->Args({500, 65536, 1, 10000}) + ->Args({500, 65536, 2, 10000}) + ->Args({500, 65536, 3, 10000}) ->Args({500, 256000, 1, 10000}) ->Args({500, 256000, 2, 10000}) ->Args({500, 256000, 3, 10000}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); -void benchmarkMaglevLoadBalancerHostLoss(benchmark::State& state) { +void benchmarkMaglevLoadBalancerHostLoss(::benchmark::State& state) { for (auto _ : state) { // NOLINT: Silences warning about dead store const uint64_t num_hosts = state.range(0); const uint64_t hosts_to_lose = state.range(1); @@ -440,9 +466,9 @@ BENCHMARK(benchmarkMaglevLoadBalancerHostLoss) ->Args({500, 1, 10000}) ->Args({500, 2, 10000}) ->Args({500, 3, 10000}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); -void benchmarkMaglevLoadBalancerWeighted(benchmark::State& state) { +void benchmarkMaglevLoadBalancerWeighted(::benchmark::State& state) { for (auto _ : state) { // NOLINT: Silences warning about dead store const uint64_t num_hosts = state.range(0); const uint64_t weighted_subset_percent = state.range(1); @@ -499,7 +525,7 @@ BENCHMARK(benchmarkMaglevLoadBalancerWeighted) ->Args({500, 95, 127, 1, 10000}) 
->Args({500, 95, 25, 75, 1000}) ->Args({500, 95, 75, 25, 10000}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); class SubsetLbTester : public BaseTester { public: @@ -545,9 +571,15 @@ class SubsetLbTester : public BaseTester { HostVector host_moved_; }; -void benchmarkSubsetLoadBalancerCreate(benchmark::State& state) { +void benchmarkSubsetLoadBalancerCreate(::benchmark::State& state) { const bool single_host_per_subset = state.range(0); const uint64_t num_hosts = state.range(1); + + if (benchmark::skipExpensiveBenchmarks() && num_hosts > 100) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + for (auto _ : state) { // NOLINT: Silences warning about dead store SubsetLbTester tester(num_hosts, single_host_per_subset); } @@ -555,13 +587,17 @@ void benchmarkSubsetLoadBalancerCreate(benchmark::State& state) { BENCHMARK(benchmarkSubsetLoadBalancerCreate) ->Ranges({{false, true}, {50, 2500}}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); -void benchmarkSubsetLoadBalancerUpdate(benchmark::State& state) { +void benchmarkSubsetLoadBalancerUpdate(::benchmark::State& state) { const bool single_host_per_subset = state.range(0); const uint64_t num_hosts = state.range(1); - SubsetLbTester tester(num_hosts, single_host_per_subset); + if (benchmark::skipExpensiveBenchmarks() && num_hosts > 100) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + SubsetLbTester tester(num_hosts, single_host_per_subset); for (auto _ : state) { // NOLINT: Silences warning about dead store tester.update(); } @@ -569,7 +605,7 @@ void benchmarkSubsetLoadBalancerUpdate(benchmark::State& state) { BENCHMARK(benchmarkSubsetLoadBalancerUpdate) ->Ranges({{false, true}, {50, 2500}}) - ->Unit(benchmark::kMillisecond); + ->Unit(::benchmark::kMillisecond); } // namespace } // namespace Upstream diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 422cdd8710a6..81603a608a65 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -68,6 +68,9 @@ class TestLb : public LoadBalancerBase { HostConstSharedPtr chooseHostOnce(LoadBalancerContext*) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } }; class LoadBalancerBaseTest : public LoadBalancerTestBase { @@ -139,7 +142,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelection) { // on the number of hosts regardless of their health. 
EXPECT_EQ(50, lb_.percentageLoad(0)); EXPECT_EQ(50, lb_.percentageLoad(1)); - EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context).first); + EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context, 0).first); // Modify number of hosts in failover, but leave them in the unhealthy state // primary and secondary are in panic mode, so load distribution is @@ -147,7 +150,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelection) { updateHostSet(failover_host_set_, 2, 0); EXPECT_EQ(34, lb_.percentageLoad(0)); EXPECT_EQ(66, lb_.percentageLoad(1)); - EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context).first); + EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context, 0).first); // Update the priority set with a new priority level P=2 and ensure the host // is chosen @@ -157,7 +160,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelection) { EXPECT_EQ(0, lb_.percentageLoad(1)); EXPECT_EQ(100, lb_.percentageLoad(2)); priority_load.healthy_priority_load_ = HealthyLoad({0u, 0u, 100}); - EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet(&context).first); + EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet(&context, 0).first); // Now add a healthy host in P=0 and make sure it is immediately selected. updateHostSet(host_set_, 1 /* num_hosts */, 1 /* num_healthy_hosts */); @@ -166,14 +169,14 @@ TEST_P(LoadBalancerBaseTest, PrioritySelection) { EXPECT_EQ(100, lb_.percentageLoad(0)); EXPECT_EQ(0, lb_.percentageLoad(2)); priority_load.healthy_priority_load_ = HealthyLoad({100u, 0u, 0u}); - EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context).first); + EXPECT_EQ(&host_set_, &lb_.chooseHostSet(&context, 0).first); // Remove the healthy host and ensure we fail back over to tertiary_host_set_ updateHostSet(host_set_, 1 /* num_hosts */, 0 /* num_healthy_hosts */); EXPECT_EQ(0, lb_.percentageLoad(0)); EXPECT_EQ(100, lb_.percentageLoad(2)); priority_load.healthy_priority_load_ = HealthyLoad({0u, 0u, 100}); - EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet(&context).first); + EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet(&context, 0).first); } // Tests host selection with a randomized number of healthy, degraded and unhealthy hosts. 
@@ -217,7 +220,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionFuzz) { const auto&) -> const HealthyAndDegradedLoad& { return original_load; })); for (uint64_t i = 0; i < total_hosts; ++i) { - const auto hs = lb_.chooseHostSet(&context); + const auto hs = lb_.chooseHostSet(&context, 0); switch (hs.second) { case LoadBalancerBase::HostAvailability::Healthy: // Either we selected one of the healthy hosts or we failed to select anything and defaulted @@ -245,7 +248,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionWithFilter) { updateHostSet(failover_host_set_, 1, 1); // Since we've excluded P0, we should pick the failover host set - EXPECT_EQ(failover_host_set_.priority(), lb_.chooseHostSet(&context).first.priority()); + EXPECT_EQ(failover_host_set_.priority(), lb_.chooseHostSet(&context, 0).first.priority()); updateHostSet(host_set_, 1 /* num_hosts */, 0 /* num_healthy_hosts */, 1 /* num_degraded_hosts */); @@ -256,7 +259,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionWithFilter) { priority_load.degraded_priority_load_ = Upstream::DegradedLoad({0, 100}); // Since we've excluded P0, we should pick the failover host set - EXPECT_EQ(failover_host_set_.priority(), lb_.chooseHostSet(&context).first.priority()); + EXPECT_EQ(failover_host_set_.priority(), lb_.chooseHostSet(&context, 0).first.priority()); } TEST_P(LoadBalancerBaseTest, OverProvisioningFactor) { @@ -553,6 +556,15 @@ class RoundRobinLoadBalancerTest : public LoadBalancerTestBase { {}, empty_host_vector_, empty_host_vector_, absl::nullopt); } + void peekThenPick(std::vector picks) { + for (auto i : picks) { + EXPECT_EQ(hostSet().healthy_hosts_[i], lb_->peekAnotherHost(nullptr)); + } + for (auto i : picks) { + EXPECT_EQ(hostSet().healthy_hosts_[i], lb_->chooseHost(nullptr)); + } + } + std::shared_ptr local_priority_set_; std::shared_ptr lb_; HostsPerLocalityConstSharedPtr empty_locality_; @@ -569,6 +581,7 @@ TEST_P(FailoverTest, BasicFailover) { failover_host_set_.healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82")}; failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_; init(false); + EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb_->peekAnotherHost(nullptr)); EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb_->chooseHost(nullptr)); } @@ -578,6 +591,7 @@ TEST_P(FailoverTest, BasicDegradedHosts) { host_set_.degraded_hosts_ = host_set_.hosts_; failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_; init(false); + EXPECT_EQ(host_set_.degraded_hosts_[0], lb_->peekAnotherHost(nullptr)); EXPECT_EQ(host_set_.degraded_hosts_[0], lb_->chooseHost(nullptr)); } @@ -763,10 +777,36 @@ TEST_P(RoundRobinLoadBalancerTest, Normal) { makeTestHost(info_, "tcp://127.0.0.1:81")}; hostSet().hosts_ = hostSet().healthy_hosts_; init(false); + + // Make sure the round robin pattern works for peeking. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->peekAnotherHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + // Make sure that if picks get ahead of peeks, peeks resume at the next pick. 
+ EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr)); EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + // Change host set with no peeks in progress + hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:82")); + hostSet().hosts_.push_back(hostSet().healthy_hosts_.back()); + hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {}); + peekThenPick({2, 0, 1, 2}); + + // Now peek a few extra to push the index forward, alter the host set, and + // make sure the index is restored to 0. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->peekAnotherHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr)); + + hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:83")); + hostSet().hosts_.push_back(hostSet().healthy_hosts_.back()); + hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {hostSet().healthy_hosts_.front()}); + peekThenPick({1, 2, 3}); } // Validate that the RNG seed influences pick order. @@ -1658,19 +1698,26 @@ class RandomLoadBalancerTest : public LoadBalancerTestBase { TEST_P(RandomLoadBalancerTest, NoHosts) { init(); + + EXPECT_EQ(nullptr, lb_->peekAnotherHost(nullptr)); EXPECT_EQ(nullptr, lb_->chooseHost(nullptr)); } TEST_P(RandomLoadBalancerTest, Normal) { init(); - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81")}; hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. - EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(2)); + + EXPECT_CALL(random_, random()).WillOnce(Return(2)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->peekAnotherHost(nullptr)); + + EXPECT_CALL(random_, random()).WillOnce(Return(3)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr)); + + EXPECT_CALL(random_, random()).Times(0); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); - EXPECT_CALL(random_, random()).WillOnce(Return(0)).WillOnce(Return(3)); EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); } diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index 349697415c98..c348e5132148 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -41,7 +41,7 @@ namespace { class LogicalDnsClusterTest : public testing::Test { protected: - LogicalDnsClusterTest() : api_(Api::createApiForTest(stats_store_)) {} + LogicalDnsClusterTest() : api_(Api::createApiForTest(stats_store_, random_)) {} void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { resolve_timer_ = new Event::MockTimer(&dispatcher_); @@ -52,7 +52,7 @@ class LogicalDnsClusterTest : public testing::Test { "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, random_, stats_store_, + admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); cluster_ = std::make_shared(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 87e408d521da..4e95e2303882 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -80,7 +80,7 @@ class OriginalDstClusterTest : public testing::Test { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, random_, stats_store_, + admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); cluster_ = std::make_shared(cluster_config, runtime_, factory_context, std::move(scope), false); diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index 0ddd30765b1b..d76c5b06b0f1 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/config_source.pb.h" @@ -61,14 +62,14 @@ namespace Upstream { // the expectations when needed. class TestClusterManagerFactory : public ClusterManagerFactory { public: - TestClusterManagerFactory() : api_(Api::createApiForTest(stats_)) { + TestClusterManagerFactory() : api_(Api::createApiForTest(stats_, random_)) { ON_CALL(*this, clusterFromProto_(_, _, _, _)) .WillByDefault(Invoke( [&](const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) -> std::pair { auto result = ClusterFactoryImplBase::create( - cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, + cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, outlier_event_logger, added_via_api, validation_visitor_, *api_); // Convert from load balancer unique_ptr -> raw pointer -> unique_ptr. 
@@ -126,7 +127,6 @@ class TestClusterManagerFactory : public ClusterManagerFactory { std::shared_ptr> dns_resolver_{ new NiceMock}; NiceMock runtime_; - NiceMock random_; NiceMock dispatcher_; Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_{ dispatcher_.timeSource()}; @@ -136,6 +136,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { NiceMock log_manager_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock validation_visitor_; + NiceMock random_; Api::ApiPtr api_; }; @@ -160,12 +161,12 @@ class TestClusterManagerImpl : public ClusterManagerImpl { TestClusterManagerImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context) - : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, random, local_info, log_manager, + : ClusterManagerImpl(bootstrap, factory, stats, tls, runtime, local_info, log_manager, main_thread_dispatcher, admin, validation_context, api, http_context, grpc_context) {} @@ -182,17 +183,19 @@ class TestClusterManagerImpl : public ClusterManagerImpl { // it with the right values at the right times. class MockedUpdatedClusterManagerImpl : public TestClusterManagerImpl { public: - MockedUpdatedClusterManagerImpl( - const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, - Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, - AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, - Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, - MockLocalClusterUpdate& local_cluster_update, MockLocalHostsRemoved& local_hosts_removed, - Http::Context& http_context, Grpc::Context& grpc_context) - : TestClusterManagerImpl(bootstrap, factory, stats, tls, runtime, random, local_info, - log_manager, main_thread_dispatcher, admin, validation_context, api, - http_context, grpc_context), + MockedUpdatedClusterManagerImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, + ClusterManagerFactory& factory, Stats::Store& stats, + ThreadLocal::Instance& tls, Runtime::Loader& runtime, + const LocalInfo::LocalInfo& local_info, + AccessLog::AccessLogManager& log_manager, + Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, + ProtobufMessage::ValidationContext& validation_context, + Api::Api& api, MockLocalClusterUpdate& local_cluster_update, + MockLocalHostsRemoved& local_hosts_removed, + Http::Context& http_context, Grpc::Context& grpc_context) + : TestClusterManagerImpl(bootstrap, factory, stats, tls, runtime, local_info, log_manager, + main_thread_dispatcher, admin, validation_context, api, http_context, + grpc_context), local_cluster_update_(local_cluster_update), local_hosts_removed_(local_hosts_removed) {} protected: diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index e61ae42ebc4e..336f40d86d91 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ 
b/test/common/upstream/upstream_impl_test.cc @@ -55,7 +55,7 @@ namespace { class UpstreamImplTestBase { protected: - UpstreamImplTestBase() : api_(Api::createApiForTest(stats_)) {} + UpstreamImplTestBase() : api_(Api::createApiForTest(stats_, random_)) {} NiceMock admin_; Ssl::MockContextManager ssl_context_manager_; @@ -187,7 +187,7 @@ TEST_P(StrictDnsParamTest, ImmediateResolve) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver, factory_context, @@ -221,7 +221,7 @@ TEST_F(StrictDnsClusterImplTest, ZeroHostsIsInializedImmediately) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -256,7 +256,7 @@ TEST_F(StrictDnsClusterImplTest, ZeroHostsHealthChecker) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -327,7 +327,7 @@ TEST_F(StrictDnsClusterImplTest, Basic) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -479,7 +479,7 @@ TEST_F(StrictDnsClusterImplTest, HostRemovalActiveHealthSkipped) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -540,7 +540,7 @@ TEST_F(StrictDnsClusterImplTest, HostRemovalAfterHcFail) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -667,7 +667,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasic) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -902,7 +902,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasicMultiplePriorities) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -1014,7 +1014,7 @@ TEST_F(StrictDnsClusterImplTest, CustomResolverFails) { Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); EXPECT_THROW_WITH_MESSAGE( @@ -1050,7 +1050,7 @@ TEST_F(StrictDnsClusterImplTest, FailureRefreshRateBackoffResetsWhenSuccessHappe "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -1099,7 +1099,7 @@ TEST_F(StrictDnsClusterImplTest, TtlAsDnsRefreshRate) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver_, factory_context, std::move(scope), false); @@ -1179,7 +1179,7 @@ TEST_F(StrictDnsClusterImplTest, Http2UserDefinedSettingsParametersValidation) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); EXPECT_THROW_WITH_REGEX( StrictDnsClusterImpl(cluster_config, runtime_, dns_resolver_, factory_context, @@ -1322,7 +1322,7 @@ TEST_F(StaticClusterImplTest, InitialHosts) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1357,7 +1357,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentEmptyHostname) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1392,7 +1392,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostname) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1427,7 +1427,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostnameWithHealthChecks) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1480,7 +1480,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentMultiplePriorities) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1525,7 +1525,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentLocality) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1571,7 +1571,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentEdsHealth) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1603,7 +1603,7 @@ TEST_F(StaticClusterImplTest, AltStatName) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1633,7 +1633,7 @@ TEST_F(StaticClusterImplTest, RingHash) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true); cluster.initialize([] {}); @@ -1669,7 +1669,7 @@ TEST_F(StaticClusterImplTest, OutlierDetector) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); @@ -1727,7 +1727,7 @@ TEST_F(StaticClusterImplTest, HealthyStat) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); @@ -1868,7 +1868,7 @@ TEST_F(StaticClusterImplTest, UrlConfig) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1920,7 +1920,7 @@ TEST_F(StaticClusterImplTest, UnsupportedLBType) { ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); @@ -1951,7 +1951,7 @@ TEST_F(StaticClusterImplTest, MalformedHostIP) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); EXPECT_THROW_WITH_MESSAGE( StaticClusterImpl(cluster_config, runtime_, factory_context, std::move(scope), false), @@ -1978,7 +1978,7 @@ TEST_F(StaticClusterImplTest, NoHostsTest) { Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); cluster.initialize([] {}); @@ -1997,7 +1997,7 @@ TEST_F(StaticClusterImplTest, SourceAddressPriority) { Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", config.alt_stat_name().empty() ? config.name() : config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(config, runtime_, factory_context, std::move(scope), false); EXPECT_EQ("1.2.3.5:0", cluster.info()->sourceAddress()->asString()); @@ -2010,7 +2010,7 @@ TEST_F(StaticClusterImplTest, SourceAddressPriority) { Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", config.alt_stat_name().empty() ? config.name() : config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(config, runtime_, factory_context, std::move(scope), false); EXPECT_EQ(cluster_address, cluster.info()->sourceAddress()->ip()->addressAsString()); @@ -2022,7 +2022,7 @@ TEST_F(StaticClusterImplTest, SourceAddressPriority) { Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", config.alt_stat_name().empty() ? config.name() : config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StaticClusterImpl cluster(config, runtime_, factory_context, std::move(scope), false); EXPECT_EQ(cluster_address, cluster.info()->sourceAddress()->ip()->addressAsString()); @@ -2057,7 +2057,7 @@ TEST_F(ClusterImplTest, CloseConnectionsOnHostHealthFailure) { "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); StrictDnsClusterImpl cluster(cluster_config, runtime_, dns_resolver, factory_context, @@ -2180,7 +2180,7 @@ TEST(PrioritySet, Extend) { class ClusterInfoImplTest : public testing::Test { public: - ClusterInfoImplTest() : api_(Api::createApiForTest(stats_)) {} + ClusterInfoImplTest() : api_(Api::createApiForTest(stats_, random_)) {} std::unique_ptr makeCluster(const std::string& yaml, bool avoid_boosting = true) { @@ -2189,7 +2189,7 @@ class ClusterInfoImplTest : public testing::Test { ? cluster_config_.name() : cluster_config_.alt_stat_name())); factory_context_ = std::make_unique( - admin_, ssl_context_manager_, *scope_, cm_, local_info_, dispatcher_, random_, stats_, + admin_, ssl_context_manager_, *scope_, cm_, local_info_, dispatcher_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); return std::make_unique(cluster_config_, runtime_, dns_resolver_, diff --git a/test/common/watchdog/BUILD b/test/common/watchdog/BUILD new file mode 100644 index 000000000000..85edbe118222 --- /dev/null +++ b/test/common/watchdog/BUILD @@ -0,0 +1,43 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "abort_action_test", + srcs = ["abort_action_test.cc"], + external_deps = [ + "abseil_synchronization", + ], + deps = [ + "//include/envoy/common:time_interface", + "//include/envoy/registry", + "//include/envoy/server:guarddog_config_interface", + "//source/common/watchdog:abort_action_config", + "//source/common/watchdog:abort_action_lib", + "//test/common/stats:stat_test_utility_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "abort_action_config_test", + srcs = ["abort_action_config_test.cc"], + deps = [ + "//include/envoy/registry", + "//include/envoy/server:guarddog_config_interface", + "//source/common/watchdog:abort_action_config", + "//source/common/watchdog:abort_action_lib", + "//test/common/stats:stat_test_utility_lib", + "//test/mocks/event:event_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + ], +) diff --git a/test/common/watchdog/abort_action_config_test.cc b/test/common/watchdog/abort_action_config_test.cc new file mode 100644 index 000000000000..6c3f729fe1a4 --- /dev/null +++ b/test/common/watchdog/abort_action_config_test.cc @@ -0,0 +1,52 @@ +#include "envoy/registry/registry.h" +#include "envoy/server/guarddog_config.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" + +#include "common/watchdog/abort_action_config.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/mocks/event/mocks.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Watchdog { +namespace { + +TEST(AbortActionFactoryTest, CanCreateAction) { + auto factory = + Registry::FactoryRegistry::getFactory( + "envoy.watchdog.abort_action"); + ASSERT_NE(factory, nullptr); + + // Create config and mock context + envoy::config::bootstrap::v3::Watchdog::WatchdogAction config; + 
TestUtility::loadFromJson( + R"EOF( + { + "config": { + "name": "envoy.watchdog.abort_action", + "typed_config": { + "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", + "type_url": "type.googleapis.com/envoy.watchdog.abort_action.v3alpha.AbortActionConfig", + "value": { + "wait_duration": "2s", + } + } + }, + } + )EOF", + config); + + Stats::TestUtil::TestStore stats_; + Event::MockDispatcher dispatcher; + Api::ApiPtr api = Api::createApiForTest(); + Server::Configuration::GuardDogActionFactoryContext context{*api, dispatcher, stats_, "test"}; + + EXPECT_NE(factory->createGuardDogActionFromProto(config, context), nullptr); +} + +} // namespace +} // namespace Watchdog +} // namespace Envoy diff --git a/test/common/watchdog/abort_action_test.cc b/test/common/watchdog/abort_action_test.cc new file mode 100644 index 000000000000..7f2f1bf4606e --- /dev/null +++ b/test/common/watchdog/abort_action_test.cc @@ -0,0 +1,128 @@ +#include +#include + +#include "envoy/common/time.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/event/dispatcher.h" +#include "envoy/server/guarddog_config.h" +#include "envoy/thread/thread.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" + +#include "common/watchdog/abort_action.h" +#include "common/watchdog/abort_action_config.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/test_common/utility.h" + +#include "absl/synchronization/notification.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Watchdog { +namespace { + +using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; + +class AbortActionTest : public testing::Test { +protected: + AbortActionTest() + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test")), + context_({*api_, *dispatcher_, stats_, "test"}) {} + + Stats::TestUtil::TestStore stats_; + Api::ApiPtr api_; + Event::DispatcherPtr dispatcher_; + Server::Configuration::GuardDogActionFactoryContext context_; + std::unique_ptr action_; + + // Used to synchronize with the main thread + absl::Notification child_ready_; +}; + +TEST_F(AbortActionTest, ShouldNotAbortIfNoTids) { + AbortActionConfig config; + config.mutable_wait_duration()->set_nanos(1000000); + action_ = std::make_unique(config, context_); + + // Create empty vector and run the action. + const auto now = api_->timeSource().monotonicTime(); + const std::vector> tid_ltt_pairs = {}; + + // Should not signal or panic since there are no TIDs. + action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::KILL, tid_ltt_pairs, now); +} + +TEST_F(AbortActionTest, ShouldKillTheProcess) { + AbortActionConfig config; + config.mutable_wait_duration()->set_seconds(1); + action_ = std::make_unique(config, context_); + + auto die_function = [this]() -> void { + // Create a thread that we'll kill + Thread::ThreadId tid; + Thread::ThreadPtr thread = api_->threadFactory().createThread([this, &tid]() -> void { + tid = api_->threadFactory().currentThreadId(); + + child_ready_.Notify(); + + dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); + }); + + child_ready_.WaitForNotification(); + + // Create vector with child tid and run the action. + const auto now = api_->timeSource().monotonicTime(); + const std::vector> tid_ltt_pairs = {{tid, now}}; + + action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::KILL, tid_ltt_pairs, now); + }; + + EXPECT_DEATH(die_function(), ""); +} + +#ifndef WIN32 +// insufficient signal support on Windows. 
+void handler(int sig, siginfo_t* /*siginfo*/, void* /*context*/) {
+  std::cout << "Eating signal: " << std::to_string(sig) << ". Will ignore it." << std::endl;
+  signal(SIGABRT, SIG_IGN);
+}
+
+TEST_F(AbortActionTest, PanicsIfThreadDoesNotDie) {
+  AbortActionConfig config;
+  config.mutable_wait_duration()->set_seconds(1);
+  action_ = std::make_unique<AbortAction>(config, context_);
+
+  auto die_function = [this]() -> void {
+    // Create a thread that we try to kill
+    Thread::ThreadId tid;
+    Thread::ThreadPtr thread = api_->threadFactory().createThread([this, &tid]() -> void {
+      tid = api_->threadFactory().currentThreadId();
+
+      // Prepare signal handler to eat SIGABRT for the child thread.
+      struct sigaction saction;
+      std::memset(&saction, 0, sizeof(saction));
+      saction.sa_flags = SA_SIGINFO;
+      saction.sa_sigaction = &handler;
+      sigaction(SIGABRT, &saction, nullptr);
+
+      child_ready_.Notify();
+
+      dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit);
+    });
+
+    child_ready_.WaitForNotification();
+
+    // Create vector with child tid and run the action.
+    const auto now = api_->timeSource().monotonicTime();
+    const std::vector<std::pair<Thread::ThreadId, MonotonicTime>> tid_ltt_pairs = {{tid, now}};
+
+    action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::KILL, tid_ltt_pairs, now);
+  };
+
+  EXPECT_DEATH(die_function(), "aborting from Watchdog AbortAction instead");
+}
+#endif
+
+} // namespace
+} // namespace Watchdog
+} // namespace Envoy
diff --git a/test/config/integration/BUILD b/test/config/integration/BUILD
index 684312b35b20..1fbb9f235da1 100644
--- a/test/config/integration/BUILD
+++ b/test/config/integration/BUILD
@@ -15,6 +15,7 @@ exports_files([
 filegroup(
     name = "server_xds_files",
     srcs = [
+        "server_xds.bootstrap.udpa.yaml",
         "server_xds.bootstrap.yaml",
         "server_xds.cds.with_unknown_field.yaml",
         "server_xds.cds.yaml",
@@ -22,6 +23,7 @@ filegroup(
         "server_xds.eds.with_unknown_field.yaml",
         "server_xds.eds.yaml",
         "server_xds.lds.typed_struct.yaml",
+        "server_xds.lds.udpa.list_collection.yaml",
         "server_xds.lds.with_unknown_field.typed_struct.yaml",
         "server_xds.lds.with_unknown_field.yaml",
         "server_xds.lds.yaml",
@@ -40,5 +42,5 @@ filegroup(
 
 filegroup(
     name = "google_com_proxy_port_0",
-    srcs = ["google_com_proxy_port_0.v2.yaml"],
+    srcs = ["google_com_proxy_port_0.yaml"],
 )
diff --git a/test/config/integration/certs/BUILD b/test/config/integration/certs/BUILD
index 8e80a2f1d2f7..3b1fefdc71f2 100644
--- a/test/config/integration/certs/BUILD
+++ b/test/config/integration/certs/BUILD
@@ -10,7 +10,10 @@ envoy_package()
 
 filegroup(
     name = "certs",
-    srcs = glob(["*.pem"]),
+    srcs = glob([
+        "*.pem",
+        "*.der",
+    ]),
 )
 
 envoy_cc_test_library(
diff --git a/test/config/integration/certs/certs.sh b/test/config/integration/certs/certs.sh
index d67da84352da..2519dd08e486 100755
--- a/test/config/integration/certs/certs.sh
+++ b/test/config/integration/certs/certs.sh
@@ -4,28 +4,49 @@ set -e
 
 # $1=
 generate_ca() {
-  openssl genrsa -out $1key.pem 2048
-  openssl req -new -key $1key.pem -out $1cert.csr -config $1cert.cfg -batch -sha256
-  openssl x509 -req -days 730 -in $1cert.csr -signkey $1key.pem -out $1cert.pem \
-    -extensions v3_ca -extfile $1cert.cfg
+  openssl genrsa -out "${1}key.pem" 2048
+  openssl req -new -key "${1}key.pem" -out "${1}cert.csr" -config "${1}cert.cfg" -batch -sha256
+  openssl x509 -req -days 730 -in "${1}cert.csr" -signkey "${1}key.pem" -out "${1}cert.pem" \
+    -extensions v3_ca -extfile "${1}cert.cfg"
 }
 
 # $1=
 generate_rsa_key() {
-  openssl genrsa -out $1key.pem 2048
+  openssl genrsa -out "${1}key.pem" 2048
 }
 
 # $1=
 generate_ecdsa_key() {
-  openssl ecparam -name secp256r1 -genkey -out $1key.pem
+  openssl ecparam -name secp256r1 -genkey -out "${1}key.pem"
 }
 
 # $1= $2=
 generate_x509_cert() {
-  openssl req -new -key $1key.pem -out $1cert.csr -config $1cert.cfg -batch -sha256
-  openssl x509 -req -days 730 -in $1cert.csr -sha256 -CA $2cert.pem -CAkey \
-    $2key.pem -CAcreateserial -out $1cert.pem -extensions v3_ca -extfile $1cert.cfg
-  echo -e "// NOLINT(namespace-envoy)\nconstexpr char TEST_$(echo $1 | tr a-z A-Z)_CERT_HASH[] = \"$(openssl x509 -in $1cert.pem -noout -fingerprint -sha256 | cut -d"=" -f2)\";" > $1cert_hash.h
+  openssl req -new -key "${1}key.pem" -out "${1}cert.csr" -config "${1}cert.cfg" -batch -sha256
+  openssl x509 -req -days 730 -in "${1}cert.csr" -sha256 -CA "${2}cert.pem" -CAkey \
+    "${2}key.pem" -CAcreateserial -out "${1}cert.pem" -extensions v3_ca -extfile "${1}cert.cfg"
+  echo -e "// NOLINT(namespace-envoy)\nconstexpr char TEST_$(echo "$1" | tr "[:lower:]" "[:upper:]")_CERT_HASH[] = \"$(openssl x509 -in "${1}cert.pem" -noout -fingerprint -sha256 | cut -d"=" -f2)\";" > "${1}cert_hash.h"
+}
+
+# $1= $2=
+generate_ocsp_response() {
+  # Generate an OCSP request
+  openssl ocsp -CAfile "${2}cert.pem" -issuer "${2}cert.pem" \
+    -cert "${1}cert.pem" -reqout "${1}_ocsp_req.der"
+
+  # Generate the OCSP response
+  # Note: A database of certs is necessary to generate OCSP
+  # responses with `openssl ocsp`. `generate_x509_cert` does not use one,
+  # so we must create an empty one here. Since generated certs are not
+  # tracked in this index, all OCSP responses will have the cert status
+  # "unknown", but they are still valid responses and the cert status should
+  # not matter for integration tests.
+  touch "${2}_index.txt"
+  openssl ocsp -CA "${2}cert.pem" \
+    -rkey "${2}key.pem" -rsigner "${2}cert.pem" -index "${2}_index.txt" \
+    -reqin "${1}_ocsp_req.der" -respout "${1}_ocsp_resp.der" -ndays 730
+
+  rm "${1}_ocsp_req.der" "${2}_index.txt"
 }
 
 # Generate cert for the CA.
@@ -33,10 +54,12 @@ generate_ca ca
 
 # Generate RSA cert for the server.
 generate_rsa_key server ca
 generate_x509_cert server ca
+generate_ocsp_response server ca
 
 # Generate ECDSA cert for the server.
 cp -f servercert.cfg server_ecdsacert.cfg
 generate_ecdsa_key server_ecdsa ca
 generate_x509_cert server_ecdsa ca
+generate_ocsp_response server_ecdsa ca
 rm -f server_ecdsacert.cfg
 
 # Generate cert for the client.
generate_rsa_key client ca @@ -55,5 +78,5 @@ generate_x509_cert upstream upstreamca generate_rsa_key upstreamlocalhost upstreamca generate_x509_cert upstreamlocalhost upstreamca -rm *.csr -rm *.srl +rm ./*.csr +rm ./*.srl diff --git a/test/config/integration/certs/server_ecdsa_ocsp_resp.der b/test/config/integration/certs/server_ecdsa_ocsp_resp.der new file mode 100644 index 000000000000..c07eb7a0e317 Binary files /dev/null and b/test/config/integration/certs/server_ecdsa_ocsp_resp.der differ diff --git a/test/config/integration/certs/server_ocsp_resp.der b/test/config/integration/certs/server_ocsp_resp.der new file mode 100644 index 000000000000..c53d80a4650c Binary files /dev/null and b/test/config/integration/certs/server_ocsp_resp.der differ diff --git a/test/config/integration/google_com_proxy_port_0.v2.yaml b/test/config/integration/google_com_proxy_port_0.yaml similarity index 83% rename from test/config/integration/google_com_proxy_port_0.v2.yaml rename to test/config/integration/google_com_proxy_port_0.yaml index c67b6845960d..236942d3379d 100644 --- a/test/config/integration/google_com_proxy_port_0.v2.yaml +++ b/test/config/integration/google_com_proxy_port_0.yaml @@ -16,7 +16,7 @@ static_resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -26,7 +26,7 @@ static_resources: domains: ["*"] routes: - match: { prefix: "/" } - route: { host_rewrite: www.google.com, cluster: service_google } + route: { host_rewrite_literal: www.google.com, cluster: service_google } clusters: - name: service_google connect_timeout: 0.25s diff --git a/test/config/integration/server.yaml b/test/config/integration/server.yaml index 78d8f24fd8f5..fd642eba0af6 100644 --- a/test/config/integration/server.yaml +++ b/test/config/integration/server.yaml @@ -9,7 +9,7 @@ static_resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager drain_timeout: 5s route_config: virtual_hosts: @@ -44,15 +44,15 @@ static_resources: http_filters: - name: health typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: false - name: envoy.filters.http.router typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.router.v2.Router + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router access_log: - name: accesslog typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: {{ null_device_path }} filter: or_filter: @@ -142,14 +142,14 @@ flags_path: "/invalid_flags" stats_sinks: - name: local_stats typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink address: socket_address: address: {{ ip_loopback_address }} port_value: 8125 - name: tcp_stats typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink + "@type": 
type.googleapis.com/envoy.config.metrics.v3.StatsdSink tcp_cluster_name: statsd watchdog: {} layered_runtime: diff --git a/test/config/integration/server_unix_listener.yaml b/test/config/integration/server_unix_listener.yaml index 2c3328cd1026..6d7222a4c1a9 100644 --- a/test/config/integration/server_unix_listener.yaml +++ b/test/config/integration/server_unix_listener.yaml @@ -7,11 +7,11 @@ static_resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager http_filters: - name: envoy.filters.http.router typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.router.v2.Router + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router codec_type: auto stat_prefix: router drain_timeout: 5s diff --git a/test/config/integration/server_xds.bootstrap.udpa.yaml b/test/config/integration/server_xds.bootstrap.udpa.yaml new file mode 100644 index 000000000000..faf36fe7a3d7 --- /dev/null +++ b/test/config/integration/server_xds.bootstrap.udpa.yaml @@ -0,0 +1,13 @@ +dynamic_resources: + lds_resources_locator: + scheme: FILE + id: {{ lds_json_path }} + resource_type: envoy.config.listener.v3.ListenerCollection + cds_config: + path: {{ cds_json_path }} +admin: + access_log_path: {{ null_device_path }} + address: + socket_address: + address: {{ ntop_ip_loopback_address }} + port_value: 0 diff --git a/test/config/integration/server_xds.cds.with_unknown_field.yaml b/test/config/integration/server_xds.cds.with_unknown_field.yaml index 0b07a4597e1f..24eac4cd715e 100644 --- a/test/config/integration/server_xds.cds.with_unknown_field.yaml +++ b/test/config/integration/server_xds.cds.with_unknown_field.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster_1 connect_timeout: { seconds: 5 } type: EDS diff --git a/test/config/integration/server_xds.cds.yaml b/test/config/integration/server_xds.cds.yaml index 144818c11c83..22e9df42b1be 100644 --- a/test/config/integration/server_xds.cds.yaml +++ b/test/config/integration/server_xds.cds.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster_1 connect_timeout: { seconds: 5 } type: EDS diff --git a/test/config/integration/server_xds.eds.ads_cluster.yaml b/test/config/integration/server_xds.eds.ads_cluster.yaml index 0f52efde0e7e..238cf06945ac 100644 --- a/test/config/integration/server_xds.eds.ads_cluster.yaml +++ b/test/config/integration/server_xds.eds.ads_cluster.yaml @@ -1,6 +1,6 @@ version_info: "123" resources: -- "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment +- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment cluster_name: ads_cluster endpoints: - lb_endpoints: diff --git a/test/config/integration/server_xds.eds.with_unknown_field.yaml b/test/config/integration/server_xds.eds.with_unknown_field.yaml index 912be177993d..982e5851671a 100644 --- a/test/config/integration/server_xds.eds.with_unknown_field.yaml +++ b/test/config/integration/server_xds.eds.with_unknown_field.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment +- "@type": 
type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment cluster_name: cluster_1 foo: bar endpoints: diff --git a/test/config/integration/server_xds.eds.yaml b/test/config/integration/server_xds.eds.yaml index 0833784c26c7..0499334e60c6 100644 --- a/test/config/integration/server_xds.eds.yaml +++ b/test/config/integration/server_xds.eds.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment +- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment cluster_name: cluster_1 endpoints: - lb_endpoints: diff --git a/test/config/integration/server_xds.lds.typed_struct.yaml b/test/config/integration/server_xds.lds.typed_struct.yaml index 2c31d19cf9bb..a071845edd81 100644 --- a/test/config/integration/server_xds.lds.typed_struct.yaml +++ b/test/config/integration/server_xds.lds.typed_struct.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.Listener +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: listener_0 address: socket_address: @@ -11,7 +11,7 @@ resources: - name: http typed_config: "@type": type.googleapis.com/udpa.type.v1.TypedStruct - type_url: "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" + type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" value: codec_type: HTTP2 drain_timeout: 5s diff --git a/test/config/integration/server_xds.lds.udpa.list_collection.yaml b/test/config/integration/server_xds.lds.udpa.list_collection.yaml new file mode 100644 index 000000000000..8b139c62b8f3 --- /dev/null +++ b/test/config/integration/server_xds.lds.udpa.list_collection.yaml @@ -0,0 +1,26 @@ +version: "0" +resource: + "@type": type.googleapis.com/envoy.config.listener.v3.ListenerCollection + entries: + - inline_entry: + name: listener_0 + version: "0" + resource: + "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: listener_0 + address: + socket_address: + address: {{ ntop_ip_loopback_address }} + port_value: 0 + filter_chains: + - filters: + - name: http + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: HTTP2 + drain_timeout: 5s + stat_prefix: router + rds: + route_config_name: route_config_0 + config_source: { path: {{ rds_json_path }} } + http_filters: [{ name: envoy.filters.http.router }] diff --git a/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml b/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml index 7960762a1d7a..bbde037eb8ae 100644 --- a/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml +++ b/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.Listener +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: listener_0 address: socket_address: @@ -11,7 +11,7 @@ resources: - name: http typed_config: "@type": type.googleapis.com/udpa.type.v1.TypedStruct - type_url: "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" + type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" value: codec_type: HTTP2 drain_timeout: 5s diff --git a/test/config/integration/server_xds.lds.with_unknown_field.yaml 
b/test/config/integration/server_xds.lds.with_unknown_field.yaml index 22b7aac8e597..e5bc253395dc 100644 --- a/test/config/integration/server_xds.lds.with_unknown_field.yaml +++ b/test/config/integration/server_xds.lds.with_unknown_field.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.Listener +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: listener_0 address: socket_address: @@ -10,7 +10,7 @@ resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: HTTP2 drain_timeout: 5s stat_prefix: router diff --git a/test/config/integration/server_xds.lds.yaml b/test/config/integration/server_xds.lds.yaml index e3e9af8f28d4..29d31ecd3e84 100644 --- a/test/config/integration/server_xds.lds.yaml +++ b/test/config/integration/server_xds.lds.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.Listener +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: listener_0 address: socket_address: @@ -10,7 +10,7 @@ resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: HTTP2 drain_timeout: 5s stat_prefix: router diff --git a/test/config/integration/server_xds.rds.with_unknown_field.yaml b/test/config/integration/server_xds.rds.with_unknown_field.yaml index 0fb40bd6a74a..f0f5c84e87d2 100644 --- a/test/config/integration/server_xds.rds.with_unknown_field.yaml +++ b/test/config/integration/server_xds.rds.with_unknown_field.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration +- "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: route_config_0 virtual_hosts: - name: integration diff --git a/test/config/integration/server_xds.rds.yaml b/test/config/integration/server_xds.rds.yaml index 35040a388629..ca849add15b3 100644 --- a/test/config/integration/server_xds.rds.yaml +++ b/test/config/integration/server_xds.rds.yaml @@ -1,6 +1,6 @@ version_info: "0" resources: -- "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration +- "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: route_config_0 virtual_hosts: - name: integration diff --git a/test/config/utility.cc b/test/config/utility.cc index 680f62005950..b248c80d5600 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -904,6 +904,10 @@ void ConfigHelper::addSslConfig(const ServerSslOptions& options) { bootstrap_.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; initializeTls(options, *tls_context.mutable_common_tls_context()); + if (options.ocsp_staple_required_) { + tls_context.set_ocsp_staple_policy( + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext::MUST_STAPLE); + } filter_chain->mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); filter_chain->mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); } @@ -967,6 +971,10 @@ void ConfigHelper::initializeTls( 
TestEnvironment::runfilesPath("test/config/integration/certs/servercert.pem")); tls_certificate->mutable_private_key()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/serverkey.pem")); + if (options.rsa_cert_ocsp_staple_) { + tls_certificate->mutable_ocsp_staple()->set_filename( + TestEnvironment::runfilesPath("test/config/integration/certs/server_ocsp_resp.der")); + } } if (options.ecdsa_cert_) { auto* tls_certificate = common_tls_context.add_tls_certificates(); @@ -974,6 +982,10 @@ void ConfigHelper::initializeTls( TestEnvironment::runfilesPath("test/config/integration/certs/server_ecdsacert.pem")); tls_certificate->mutable_private_key()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/server_ecdsakey.pem")); + if (options.ecdsa_cert_ocsp_staple_) { + tls_certificate->mutable_ocsp_staple()->set_filename(TestEnvironment::runfilesPath( + "test/config/integration/certs/server_ecdsa_ocsp_resp.der")); + } } } diff --git a/test/config/utility.h b/test/config/utility.h index ff338722630e..af87144d03b1 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -36,11 +36,26 @@ class ConfigHelper { return *this; } + ServerSslOptions& setRsaCertOcspStaple(bool rsa_cert_ocsp_staple) { + rsa_cert_ocsp_staple_ = rsa_cert_ocsp_staple; + return *this; + } + ServerSslOptions& setEcdsaCert(bool ecdsa_cert) { ecdsa_cert_ = ecdsa_cert; return *this; } + ServerSslOptions& setEcdsaCertOcspStaple(bool ecdsa_cert_ocsp_staple) { + ecdsa_cert_ocsp_staple_ = ecdsa_cert_ocsp_staple; + return *this; + } + + ServerSslOptions& setOcspStapleRequired(bool ocsp_staple_required) { + ocsp_staple_required_ = ocsp_staple_required; + return *this; + } + ServerSslOptions& setTlsV13(bool tlsv1_3) { tlsv1_3_ = tlsv1_3; return *this; @@ -52,7 +67,10 @@ class ConfigHelper { } bool rsa_cert_{true}; + bool rsa_cert_ocsp_staple_{true}; bool ecdsa_cert_{false}; + bool ecdsa_cert_ocsp_staple_{false}; + bool ocsp_staple_required_{false}; bool tlsv1_3_{false}; bool expect_client_ecdsa_cert_{false}; }; diff --git a/test/config_test/BUILD b/test/config_test/BUILD index 496cac0e82f0..792ba3d60320 100644 --- a/test/config_test/BUILD +++ b/test/config_test/BUILD @@ -24,6 +24,7 @@ envoy_cc_test( ], deps = [ ":config_test_lib", + "//source/common/filesystem:filesystem_lib", "//test/test_common:environment_lib", "//test/test_common:utility_lib", ], diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 47f7e88037ba..162c09c2f0ac 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -61,9 +61,9 @@ class ConfigTest { ConfigTest(const OptionsImpl& options) : api_(Api::createApiForTest(time_system_)), options_(options) { ON_CALL(server_, options()).WillByDefault(ReturnRef(options_)); - ON_CALL(server_, random()).WillByDefault(ReturnRef(random_)); ON_CALL(server_, sslContextManager()).WillByDefault(ReturnRef(ssl_context_manager_)); ON_CALL(server_.api_, fileSystem()).WillByDefault(ReturnRef(file_system_)); + ON_CALL(server_.api_, randomGenerator()).WillByDefault(ReturnRef(random_)); ON_CALL(file_system_, fileReadToEnd(StrEq("/etc/envoy/lightstep_access_token"))) .WillByDefault(Return("access_token")); ON_CALL(file_system_, fileReadToEnd(StrNe("/etc/envoy/lightstep_access_token"))) @@ -91,10 +91,10 @@ class ConfigTest { cluster_manager_factory_ = std::make_unique( server_.admin(), server_.runtime(), server_.stats(), server_.threadLocal(), - server_.random(), server_.dnsResolver(), ssl_context_manager_, server_.dispatcher(), - 
server_.localInfo(), server_.secretManager(), server_.messageValidationContext(), *api_, - server_.httpContext(), server_.grpcContext(), server_.accessLogManager(), - server_.singletonManager(), time_system_); + server_.dnsResolver(), ssl_context_manager_, server_.dispatcher(), server_.localInfo(), + server_.secretManager(), server_.messageValidationContext(), *api_, server_.httpContext(), + server_.grpcContext(), server_.accessLogManager(), server_.singletonManager(), + time_system_); ON_CALL(server_, clusterManager()).WillByDefault(Invoke([&]() -> Upstream::ClusterManager& { return *main_config.clusterManager(); @@ -160,7 +160,7 @@ void testMerge() { Api::ApiPtr api = Api::createApiForTest(); const std::string overlay = "static_resources: { clusters: [{name: 'foo'}]}"; - OptionsImpl options(Server::createTestOptionsImpl("google_com_proxy.v2.yaml", overlay, + OptionsImpl options(Server::createTestOptionsImpl("google_com_proxy.yaml", overlay, Network::Address::IpVersion::v6)); envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig(bootstrap, options, diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index c823f4ad8ac8..939b8df53e9c 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -1,3 +1,5 @@ +#include "common/filesystem/filesystem_impl.h" + #include "test/config_test/config_test.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" @@ -5,10 +7,20 @@ #include "gtest/gtest.h" namespace Envoy { + TEST(ExampleConfigsTest, All) { TestEnvironment::exec( {TestEnvironment::runfilesPath("test/config_test/example_configs_test_setup.sh")}); +#ifdef WIN32 + Filesystem::InstanceImplWin32 file_system; +#else + Filesystem::InstanceImplPosix file_system; +#endif + + const auto config_file_count = std::stoi( + file_system.fileReadToEnd(TestEnvironment::temporaryDirectory() + "/config-file-count.txt")); + // Change working directory, otherwise we won't be able to read files using relative paths. #ifdef PATH_MAX char cwd[PATH_MAX]; @@ -19,12 +31,7 @@ TEST(ExampleConfigsTest, All) { RELEASE_ASSERT(::getcwd(cwd, sizeof(cwd)) != nullptr, ""); RELEASE_ASSERT(::chdir(directory.c_str()) == 0, ""); -#if defined(__APPLE__) || defined(WIN32) - // freebind/freebind.yaml is not supported on macOS or Windows and is disabled via Bazel. - EXPECT_EQ(35UL, ConfigTest::run(directory)); -#else - EXPECT_EQ(36UL, ConfigTest::run(directory)); -#endif + EXPECT_EQ(config_file_count, ConfigTest::run(directory)); ConfigTest::testMerge(); diff --git a/test/config_test/example_configs_test_setup.sh b/test/config_test/example_configs_test_setup.sh index 876438e7bea1..49c2066add89 100755 --- a/test/config_test/example_configs_test_setup.sh +++ b/test/config_test/example_configs_test_setup.sh @@ -5,3 +5,6 @@ set -e DIR="$TEST_TMPDIR"/test/config_test mkdir -p "$DIR" tar -xvf "$TEST_SRCDIR"/envoy/configs/example_configs.tar -C "$DIR" + +# find uses full path to prevent using Windows find on Windows. 
+/usr/bin/find "$DIR" -type f | grep -c .yaml > "$TEST_TMPDIR"/config-file-count.txt diff --git a/test/dependencies/curl_test.cc b/test/dependencies/curl_test.cc index 859e68c90d9c..36bf8e149ed2 100644 --- a/test/dependencies/curl_test.cc +++ b/test/dependencies/curl_test.cc @@ -27,7 +27,15 @@ TEST(CurlTest, BuiltWithExpectedFeatures) { EXPECT_NE(0, info->features & CURL_VERSION_HTTP2); EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI); EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5); +#ifndef WIN32 EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS); +#else + // TODO(wrowe): correct to expected, when curl 7.72 and later is patched + // or fixed upstream to include `afunix.h` in place of `sys/un.h` on recent + // Windows SDKs (it may be necessary to be more specific because older + // SDKs did not provide `afunix.h`) + EXPECT_EQ(0, info->features & CURL_VERSION_UNIX_SOCKETS); +#endif EXPECT_EQ(0, info->features & CURL_VERSION_PSL); EXPECT_EQ(0, info->features & CURL_VERSION_HTTPS_PROXY); EXPECT_EQ(0, info->features & CURL_VERSION_MULTI_SSL); diff --git a/test/exe/BUILD b/test/exe/BUILD index 283086ab4799..869b568b25bc 100644 --- a/test/exe/BUILD +++ b/test/exe/BUILD @@ -63,6 +63,8 @@ envoy_cc_test( name = "main_common_test", srcs = ["main_common_test.cc"], data = ["//test/config/integration:google_com_proxy_port_0"], + # TODO(envoyproxy/windows-dev): diagnose clang-cl fastbuild TIMEOUT flake + tags = ["flaky_on_windows"], deps = [ "//source/common/api:api_lib", "//source/exe:main_common_lib", diff --git a/test/exe/build_id_test.sh b/test/exe/build_id_test.sh index 6c892cca8bcf..660eb24d3601 100755 --- a/test/exe/build_id_test.sh +++ b/test/exe/build_id_test.sh @@ -4,15 +4,15 @@ set -e -o pipefail ENVOY_BIN=${TEST_SRCDIR}/envoy/source/exe/envoy-static -if [[ `uname` == "Darwin" ]]; then - BUILDID=$(otool -X -s __TEXT __build_id ${ENVOY_BIN} | grep -v section | cut -f2 | xxd -r -p) +if [[ $(uname) == "Darwin" ]]; then + BUILDID=$(otool -X -s __TEXT __build_id "${ENVOY_BIN}" | grep -v section | cut -f2 | xxd -r -p) else - BUILDID=$(file -L ${ENVOY_BIN} | sed -n -E 's/.*BuildID\[sha1\]=([0-9a-f]{40}).*/\1/p') + BUILDID=$(file -L "${ENVOY_BIN}" | sed -n -E 's/.*BuildID\[sha1\]=([0-9a-f]{40}).*/\1/p') fi -EXPECTED=$(cat ${TEST_SRCDIR}/envoy/bazel/raw_build_id.ldscript) +EXPECTED=$(cat "${TEST_SRCDIR}/envoy/bazel/raw_build_id.ldscript") -if [[ ${BUILDID} != ${EXPECTED} ]]; then +if [[ "${BUILDID}" != "${EXPECTED}" ]]; then echo "Build ID mismatch, got: ${BUILDID}, expected: ${EXPECTED}". exit 1 fi diff --git a/test/exe/envoy_static_test.sh b/test/exe/envoy_static_test.sh index 4f2b3f06476a..c18849e8d948 100755 --- a/test/exe/envoy_static_test.sh +++ b/test/exe/envoy_static_test.sh @@ -2,25 +2,24 @@ ENVOY_BIN=${TEST_SRCDIR}/envoy/source/exe/envoy-static -if [[ `uname` == "Darwin" ]]; then +if [[ $(uname) == "Darwin" ]]; then echo "macOS doesn't support statically linked binaries, skipping." exit 0 fi -# We can't rely on the exit code alone, since lld fails for statically linked binaries. -DYNLIBS=$(ldd ${ENVOY_BIN} 2>&1) -if [[ $? != 0 && ! "${DYNLIBS}" =~ "not a dynamic executable" ]]; then - echo "${DYNLIBS}" - exit 1 -fi +# We can't rely on the exit code alone, since ldd fails for statically linked binaries. +DYNLIBS=$(ldd "${ENVOY_BIN}" 2>&1) || { + if [[ ! 
"${DYNLIBS}" =~ 'not a dynamic executable' ]]; then + echo "${DYNLIBS}" + exit 1 + fi +} -if [[ ${DYNLIBS} =~ "libc++" ]]; then +if [[ "${DYNLIBS}" =~ libc\+\+ ]]; then echo "libc++ is dynamically linked:" echo "${DYNLIBS}" exit 1 -fi - -if [[ ${DYNLIBS} =~ "libstdc++" || ${DYNLIBS} =~ "libgcc" ]]; then +elif [[ "${DYNLIBS}" =~ libstdc\+\+ || "${DYNLIBS}" =~ libgcc ]]; then echo "libstdc++ and/or libgcc are dynamically linked:" echo "${DYNLIBS}" exit 1 diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 39d0486683d7..8428cf6b43fe 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -31,6 +31,23 @@ using testing::Return; namespace Envoy { +namespace { + +#if !(defined(__clang_analyzer__) || \ + (defined(__has_feature) && \ + (__has_feature(thread_sanitizer) || __has_feature(address_sanitizer) || \ + __has_feature(memory_sanitizer)))) +const std::string& outOfMemoryPattern() { +#if defined(TCMALLOC) + CONSTRUCT_ON_FIRST_USE(std::string, ".*Unable to allocate.*"); +#else + CONSTRUCT_ON_FIRST_USE(std::string, ".*panic: out of memory.*"); +#endif +} +#endif + +} // namespace + /** * Captures common functions needed for invoking MainCommon.Maintains * an argv array that is terminated with nullptr. Identifies the config @@ -40,7 +57,7 @@ class MainCommonTest : public testing::TestWithParam(p)); } }(), - ".*panic: out of memory.*"); + outOfMemoryPattern()); #endif } diff --git a/test/exe/pie_test.sh b/test/exe/pie_test.sh index 2f4fb4adf3c4..e77bc901cbad 100755 --- a/test/exe/pie_test.sh +++ b/test/exe/pie_test.sh @@ -4,7 +4,7 @@ set -e ENVOY_BIN="${TEST_SRCDIR}/envoy/source/exe/envoy-static" -if [[ `uname` == "Darwin" ]]; then +if [[ $(uname) == "Darwin" ]]; then echo "Skipping on macOS." exit 0 fi diff --git a/test/exe/version_out_test.sh b/test/exe/version_out_test.sh index e21b6a8ff02d..6308287a2ce0 100755 --- a/test/exe/version_out_test.sh +++ b/test/exe/version_out_test.sh @@ -2,14 +2,14 @@ set -e -o pipefail -ENVOY_BIN=${TEST_SRCDIR}/envoy/source/exe/envoy-static +ENVOY_BIN="${TEST_SRCDIR}/envoy/source/exe/envoy-static" COMMIT=$(${ENVOY_BIN} --version | \ sed -n -E 's/.*version: ([0-9a-f]{40})\/([0-9]+\.[0-9]+\.[0-9]+)(-[a-zA-Z0-9\-_]+)?\/(Clean|Modified)\/(RELEASE|DEBUG)\/([a-zA-Z-]+)$/\1/p') -EXPECTED=$(cat ${TEST_SRCDIR}/envoy/bazel/raw_build_id.ldscript) +EXPECTED=$(cat "${TEST_SRCDIR}/envoy/bazel/raw_build_id.ldscript") -if [[ ${COMMIT} != ${EXPECTED} ]]; then +if [[ "${COMMIT}" != "${EXPECTED}" ]]; then echo "Commit mismatch, got: ${COMMIT}, expected: ${EXPECTED}". exit 1 fi @@ -17,9 +17,9 @@ fi VERSION=$(${ENVOY_BIN} --version | \ sed -n -E 's/.*version: ([0-9a-f]{40})\/([0-9]+\.[0-9]+\.[0-9]+)(-[a-zA-Z0-9\-_]+)?\/(Clean|Modified)\/(RELEASE|DEBUG)\/([a-zA-Z-]+)$/\2\3/p') -EXPECTED=$(cat ${TEST_SRCDIR}/envoy/VERSION) +EXPECTED=$(cat "${TEST_SRCDIR}/envoy/VERSION") -if [[ ${VERSION} != ${EXPECTED} ]]; then +if [[ "${VERSION}" != "${EXPECTED}" ]]; then echo "Version mismatch, got: ${VERSION}, expected: ${EXPECTED}". 
exit 1 fi diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc index 5e3a4460e6bf..fe55fe843ecd 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc @@ -44,6 +44,7 @@ TEST(UtilityResponseFlagsToAccessLogResponseFlagsTest, All) { true); common_access_log_expected.mutable_response_flags()->set_response_from_cache_filter(true); common_access_log_expected.mutable_response_flags()->set_no_filter_config_found(true); + common_access_log_expected.mutable_response_flags()->set_duration_timeout(true); EXPECT_EQ(common_access_log_expected.DebugString(), common_access_log.DebugString()); } diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc index 98096bb2386e..b635e7084fd3 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc @@ -26,8 +26,7 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara void createUpstreams() override { HttpIntegrationTest::createUpstreams(); - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } void initialize() override { diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index e79fb234eaa1..852f9867c7c2 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -34,8 +34,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrat void createUpstreams() override { BaseIntegrationTest::createUpstreams(); - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } void initialize() override { diff --git a/test/extensions/access_loggers/wasm/BUILD b/test/extensions/access_loggers/wasm/BUILD new file mode 100644 index 000000000000..54ab90482a91 --- /dev/null +++ b/test/extensions/access_loggers/wasm/BUILD @@ -0,0 +1,33 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//bazel:envoy_select.bzl", + "envoy_select_wasm", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/access_loggers/wasm/test_data:test_cpp.wasm", + ]), + extension_name = "envoy.access_loggers.wasm", + deps = [ + "//source/extensions/access_loggers/wasm:config", + "//test/extensions/access_loggers/wasm/test_data:test_cpp_plugin", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/access_loggers/wasm/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/access_loggers/wasm/config_test.cc b/test/extensions/access_loggers/wasm/config_test.cc new file mode 100644 index 000000000000..02a71c9132b7 --- /dev/null +++ 
b/test/extensions/access_loggers/wasm/config_test.cc @@ -0,0 +1,118 @@ +#include "envoy/extensions/access_loggers/wasm/v3/wasm.pb.h" +#include "envoy/registry/registry.h" + +#include "common/access_log/access_log_impl.h" +#include "common/protobuf/protobuf.h" + +#include "extensions/access_loggers/wasm/config.h" +#include "extensions/access_loggers/wasm/wasm_access_log_impl.h" +#include "extensions/access_loggers/well_known_names.h" +#include "extensions/common/wasm/wasm.h" + +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace Wasm { + +class TestFactoryContext : public NiceMock { +public: + TestFactoryContext(Api::Api& api, Stats::Scope& scope) : api_(api), scope_(scope) {} + Api::Api& api() override { return api_; } + Stats::Scope& scope() override { return scope_; } + const envoy::config::core::v3::Metadata& listenerMetadata() const override { + return listener_metadata_; + } + +private: + Api::Api& api_; + Stats::Scope& scope_; + envoy::config::core::v3::Metadata listener_metadata_; +}; + +class WasmAccessLogConfigTest : public testing::TestWithParam {}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8", +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm", +#endif + "null"); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmAccessLogConfigTest, testing_values); + +TEST_P(WasmAccessLogConfigTest, CreateWasmFromEmpty) { + auto factory = + Registry::FactoryRegistry::getFactory( + AccessLogNames::get().Wasm); + ASSERT_NE(factory, nullptr); + + ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); + ASSERT_NE(nullptr, message); + + AccessLog::FilterPtr filter; + NiceMock context; + + AccessLog::InstanceSharedPtr instance; + EXPECT_THROW_WITH_MESSAGE( + instance = factory->createAccessLogInstance(*message, std::move(filter), context), + Common::Wasm::WasmException, "Unable to create Wasm access log "); +} + +TEST_P(WasmAccessLogConfigTest, CreateWasmFromWASM) { + auto factory = + Registry::FactoryRegistry::getFactory( + AccessLogNames::get().Wasm); + ASSERT_NE(factory, nullptr); + + envoy::extensions::access_loggers::wasm::v3::WasmAccessLog config; + config.mutable_config()->mutable_vm_config()->set_runtime( + absl::StrCat("envoy.wasm.runtime.", GetParam())); + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/access_loggers/wasm/test_data/test_cpp.wasm")); + } else { + code = "AccessLoggerTestCpp"; + } + config.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_inline_bytes( + code); + // Test Any configuration. 
+ ProtobufWkt::Struct some_proto; + config.mutable_config()->mutable_vm_config()->mutable_configuration()->PackFrom(some_proto); + + AccessLog::FilterPtr filter; + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + TestFactoryContext context(*api, stats_store); + + AccessLog::InstanceSharedPtr instance = + factory->createAccessLogInstance(config, std::move(filter), context); + EXPECT_NE(nullptr, instance); + EXPECT_NE(nullptr, dynamic_cast(instance.get())); + Http::TestRequestHeaderMapImpl request_header; + Http::TestResponseHeaderMapImpl response_header; + Http::TestResponseTrailerMapImpl response_trailer; + StreamInfo::MockStreamInfo log_stream_info; + instance->log(&request_header, &response_header, &response_trailer, log_stream_info); + + filter = std::make_unique>(); + AccessLog::InstanceSharedPtr filter_instance = + factory->createAccessLogInstance(config, std::move(filter), context); + filter_instance->log(&request_header, &response_header, &response_trailer, log_stream_info); +} + +} // namespace Wasm +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/access_loggers/wasm/test_data/BUILD b/test/extensions/access_loggers/wasm/test_data/BUILD new file mode 100644 index 000000000000..f49006867f2f --- /dev/null +++ b/test/extensions/access_loggers/wasm/test_data/BUILD @@ -0,0 +1,35 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) +load("//bazel/wasm:wasm.bzl", "envoy_wasm_cc_binary") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "test_cpp_plugin", + srcs = [ + "test_cpp.cc", + "test_cpp_null_plugin.cc", + ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/common/wasm:well_known_names", + ], +) + +envoy_wasm_cc_binary( + name = "test_cpp.wasm", + srcs = ["test_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_lite", + ], +) diff --git a/test/extensions/access_loggers/wasm/test_data/test_cpp.cc b/test/extensions/access_loggers/wasm/test_data/test_cpp.cc new file mode 100644 index 000000000000..18e59d57ddfc --- /dev/null +++ b/test/extensions/access_loggers/wasm/test_data/test_cpp.cc @@ -0,0 +1,26 @@ +// NOLINT(namespace-envoy) +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#else +#include "include/proxy-wasm/null_plugin.h" +#endif + +START_WASM_PLUGIN(AccessLoggerTestCpp) + +class TestRootContext : public RootContext { +public: + using RootContext::RootContext; + + void onLog() override; +}; +static RegisterContextFactory register_ExampleContext(ROOT_FACTORY(TestRootContext)); + +void TestRootContext::onLog() { + auto path = getRequestHeader(":path"); + logWarn("onLog " + std::to_string(id()) + " " + std::string(path->view())); +} + +END_WASM_PLUGIN diff --git a/test/extensions/access_loggers/wasm/test_data/test_cpp_null_plugin.cc b/test/extensions/access_loggers/wasm/test_data/test_cpp_null_plugin.cc new file mode 100644 index 000000000000..2fcfdddcbab2 --- /dev/null +++ b/test/extensions/access_loggers/wasm/test_data/test_cpp_null_plugin.cc @@ -0,0 +1,15 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace AccessLoggerTestCpp { +NullPluginRegistry* 
context_registry_; +} // namespace AccessLoggerTestCpp + +RegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin("AccessLoggerTestCpp", []() { + return std::make_unique(AccessLoggerTestCpp::context_registry_); +}); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/bootstrap/wasm/BUILD b/test/extensions/bootstrap/wasm/BUILD new file mode 100644 index 000000000000..6a6488e2b63b --- /dev/null +++ b/test/extensions/bootstrap/wasm/BUILD @@ -0,0 +1,93 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//bazel:envoy_select.bzl", + "envoy_select_wasm", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", + "envoy_extension_cc_test_binary", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "wasm_test", + srcs = ["wasm_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/bootstrap/wasm/test_data:asm2wasm_cpp.wasm", + "//test/extensions/bootstrap/wasm/test_data:bad_signature_cpp.wasm", + "//test/extensions/bootstrap/wasm/test_data:emscripten_cpp.wasm", + "//test/extensions/bootstrap/wasm/test_data:logging_cpp.wasm", + "//test/extensions/bootstrap/wasm/test_data:logging_rust.wasm", + "//test/extensions/bootstrap/wasm/test_data:segv_cpp.wasm", + "//test/extensions/bootstrap/wasm/test_data:stats_cpp.wasm", + ]), + extension_name = "envoy.bootstrap.wasm", + external_deps = ["abseil_optional"], + deps = [ + "//source/common/event:dispatcher_lib", + "//source/common/stats:isolated_store_lib", + "//source/common/stats:stats_lib", + "//source/extensions/bootstrap/wasm:config", + "//source/extensions/common/wasm:wasm_lib", + "//test/extensions/bootstrap/wasm/test_data:stats_cpp_plugin", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "//test/test_common:simulated_time_system_lib", + ], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/bootstrap/wasm/test_data:missing_cpp.wasm", + "//test/extensions/bootstrap/wasm/test_data:start_cpp.wasm", + ]), + extension_name = "envoy.bootstrap.wasm", + deps = [ + "//include/envoy/registry", + "//source/common/stats:isolated_store_lib", + "//source/extensions/bootstrap/wasm:config", + "//source/extensions/common/wasm:wasm_lib", + "//test/extensions/bootstrap/wasm/test_data:start_cpp_plugin", + "//test/mocks/event:event_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test_binary( + name = "wasm_speed_test", + srcs = ["wasm_speed_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/bootstrap/wasm/test_data:speed_cpp.wasm", + ]), + extension_name = "envoy.bootstrap.wasm", + external_deps = [ + "abseil_optional", + "benchmark", + ], + deps = [ + "//source/common/event:dispatcher_lib", + "//source/common/stats:isolated_store_lib", + "//source/common/stats:stats_lib", + "//source/extensions/bootstrap/wasm:config", + "//source/extensions/common/wasm:wasm_lib", + "//test/extensions/bootstrap/wasm/test_data:speed_cpp_plugin", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "//test/test_common:simulated_time_system_lib", + ], +) diff --git 
a/test/extensions/bootstrap/wasm/config_test.cc b/test/extensions/bootstrap/wasm/config_test.cc new file mode 100644 index 000000000000..6fb99261a3f8 --- /dev/null +++ b/test/extensions/bootstrap/wasm/config_test.cc @@ -0,0 +1,158 @@ +#include "envoy/common/exception.h" +#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "common/stats/isolated_store_impl.h" + +#include "extensions/bootstrap/wasm/config.h" + +#include "test/mocks/event/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Wasm { + +using Extensions::Bootstrap::Wasm::WasmServicePtr; + +class WasmFactoryTest : public testing::TestWithParam { +protected: + WasmFactoryTest() { + config_.mutable_config()->mutable_vm_config()->set_runtime( + absl::StrCat("envoy.wasm.runtime.", GetParam())); + if (GetParam() != "null") { + config_.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( + TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/start_cpp.wasm")); + } else { + config_.mutable_config() + ->mutable_vm_config() + ->mutable_code() + ->mutable_local() + ->set_inline_bytes("WasmStartCpp"); + } + config_.mutable_config()->set_name("test"); + config_.set_singleton(true); + } + + void initializeWithConfig(const envoy::extensions::wasm::v3::WasmService& config) { + auto factory = + Registry::FactoryRegistry::getFactory( + "envoy.bootstrap.wasm"); + ASSERT_NE(factory, nullptr); + api_ = Api::createApiForTest(stats_store_); + EXPECT_CALL(context_, api()).WillRepeatedly(testing::ReturnRef(*api_)); + EXPECT_CALL(context_, initManager()).WillRepeatedly(testing::ReturnRef(init_manager_)); + EXPECT_CALL(context_, lifecycleNotifier()) + .WillRepeatedly(testing::ReturnRef(lifecycle_notifier_)); + extension_ = factory->createBootstrapExtension(config, context_); + static_cast(extension_.get())->wasmService(); + EXPECT_CALL(init_watcher_, ready()); + init_manager_.initialize(init_watcher_); + } + + envoy::extensions::wasm::v3::WasmService config_; + testing::NiceMock context_; + testing::NiceMock lifecycle_notifier_; + Init::ExpectableWatcherImpl init_watcher_; + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + Init::ManagerImpl init_manager_{"init_manager"}; + Server::BootstrapExtensionPtr extension_; +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. 
+auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8", +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm", +#endif + "null"); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmFactoryTest, testing_values); + +TEST_P(WasmFactoryTest, CreateWasmFromWasm) { + auto factory = std::make_unique(); + auto empty_config = factory->createEmptyConfigProto(); + + initializeWithConfig(config_); + + EXPECT_NE(extension_, nullptr); +} + +TEST_P(WasmFactoryTest, CreateWasmFromWasmPerThread) { + config_.set_singleton(false); + initializeWithConfig(config_); + + EXPECT_NE(extension_, nullptr); + extension_.reset(); + context_.threadLocal().shutdownThread(); +} + +TEST_P(WasmFactoryTest, MissingImport) { + if (GetParam() == "null") { + return; + } + config_.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( + TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/missing_cpp.wasm")); + EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException, + "Unable to create Wasm service test"); +} + +TEST_P(WasmFactoryTest, UnspecifiedRuntime) { + config_.mutable_config()->mutable_vm_config()->set_runtime(""); + + EXPECT_THROW_WITH_REGEX( + initializeWithConfig(config_), EnvoyException, + "Proto constraint validation failed \\(WasmServiceValidationError\\.Config"); +} + +TEST_P(WasmFactoryTest, UnknownRuntime) { + config_.mutable_config()->mutable_vm_config()->set_runtime("envoy.wasm.runtime.invalid"); + + EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException, + "Unable to create Wasm service test"); +} + +TEST_P(WasmFactoryTest, StartFailed) { + ProtobufWkt::StringValue plugin_configuration; + plugin_configuration.set_value("bad"); + config_.mutable_config()->mutable_vm_config()->mutable_configuration()->PackFrom( + plugin_configuration); + + EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException, + "Unable to create Wasm service test"); +} + +TEST_P(WasmFactoryTest, StartFailedOpen) { + ProtobufWkt::StringValue plugin_configuration; + plugin_configuration.set_value("bad"); + config_.mutable_config()->mutable_vm_config()->mutable_configuration()->PackFrom( + plugin_configuration); + config_.mutable_config()->set_fail_open(true); + + EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException, + "Unable to create Wasm service test"); +} + +TEST_P(WasmFactoryTest, ConfigureFailed) { + ProtobufWkt::StringValue plugin_configuration; + plugin_configuration.set_value("bad"); + config_.mutable_config()->mutable_configuration()->PackFrom(plugin_configuration); + + EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config_), Extensions::Common::Wasm::WasmException, + "Unable to create Wasm service test"); +} + +} // namespace Wasm +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/bootstrap/wasm/test_data/BUILD b/test/extensions/bootstrap/wasm/test_data/BUILD new file mode 100644 index 000000000000..cce684b45755 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/BUILD @@ -0,0 +1,146 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) +load("//bazel/wasm:wasm.bzl", "envoy_wasm_cc_binary", "wasm_rust_binary") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +wasm_rust_binary( + name = "logging_rust.wasm", + srcs = ["logging_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + 
"//bazel/external/cargo:proxy_wasm", + ], +) + +envoy_cc_library( + name = "speed_cpp_plugin", + srcs = [ + "speed_cpp.cc", + "speed_cpp_null_plugin.cc", + ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/common/wasm:well_known_names", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "start_cpp_plugin", + srcs = [ + "start_cpp.cc", + "start_cpp_null_plugin.cc", + ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/common/wasm:well_known_names", + ], +) + +envoy_cc_library( + name = "stats_cpp_plugin", + srcs = [ + "stats_cpp.cc", + "stats_cpp_null_plugin.cc", + ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/common/wasm:well_known_names", + ], +) + +envoy_wasm_cc_binary( + name = "asm2wasm_cpp.wasm", + srcs = ["asm2wasm_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) + +envoy_wasm_cc_binary( + name = "bad_signature_cpp.wasm", + srcs = ["bad_signature_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) + +envoy_wasm_cc_binary( + name = "emscripten_cpp.wasm", + srcs = ["emscripten_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) + +envoy_wasm_cc_binary( + name = "logging_cpp.wasm", + srcs = ["logging_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) + +envoy_wasm_cc_binary( + name = "missing_cpp.wasm", + srcs = ["missing_cpp.cc"], + linkopts = [ + "--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js", + "-s ERROR_ON_UNDEFINED_SYMBOLS=0", + ], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) + +envoy_wasm_cc_binary( + name = "segv_cpp.wasm", + srcs = ["segv_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) + +envoy_wasm_cc_binary( + name = "speed_cpp.wasm", + srcs = ["speed_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_full", + ], +) + +envoy_wasm_cc_binary( + name = "start_cpp.wasm", + srcs = ["start_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) + +envoy_wasm_cc_binary( + name = "stats_cpp.wasm", + srcs = ["stats_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) diff --git a/test/extensions/bootstrap/wasm/test_data/asm2wasm_cpp.cc b/test/extensions/bootstrap/wasm/test_data/asm2wasm_cpp.cc new file mode 100644 index 000000000000..abb7b89fa705 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/asm2wasm_cpp.cc @@ -0,0 +1,23 @@ +// NOLINT(namespace-envoy) +#include + +#include + +#include "proxy_wasm_intrinsics.h" + +// Required Proxy-Wasm ABI version. +extern "C" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {} + +// Use global variables so the compiler cannot optimize the operations away. 
+int32_t i32a = 0; +int32_t i32b = 1; +double f64a = 0.0; +double f64b = 1.0; + +// Emscripten in some modes and versions would use functions from the `asm2wasm` module to implement +// these operations: int32_t % /, double conversion to int32_t and remainder(). +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t) { + logInfo(std::string("out ") + std::to_string(i32a / i32b) + " " + std::to_string(i32a % i32b) + + " " + std::to_string((int32_t)remainder(f64a, f64b))); + return 1; +} diff --git a/test/extensions/bootstrap/wasm/test_data/bad_signature_cpp.cc b/test/extensions/bootstrap/wasm/test_data/bad_signature_cpp.cc new file mode 100644 index 000000000000..29150365c2e1 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/bad_signature_cpp.cc @@ -0,0 +1,16 @@ +// NOLINT(namespace-envoy) +#include + +#define PROXY_WASM_KEEPALIVE __attribute__((used)) __attribute__((visibility("default"))) + +// Required Proxy-Wasm ABI version. +extern "C" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {} + +extern "C" uint32_t proxy_log(uint32_t level, const char* logMessage, size_t messageSize); + +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, int bad, char* configuration, + int size) { + std::string message = "bad signature"; + proxy_log(4 /* error */, message.c_str(), message.size()); + return 1; +} diff --git a/test/extensions/bootstrap/wasm/test_data/emscripten_cpp.cc b/test/extensions/bootstrap/wasm/test_data/emscripten_cpp.cc new file mode 100644 index 000000000000..106c1df8cd57 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/emscripten_cpp.cc @@ -0,0 +1,20 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#include "proxy_wasm_intrinsics.h" + +// Required Proxy-Wasm ABI version. +extern "C" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {} + +float gNan = std::nan("1"); +float gInfinity = INFINITY; + +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t) { + logInfo(std::string("NaN ") + std::to_string(gNan)); + logWarn("inf " + std::to_string(gInfinity)); + logWarn("inf " + std::to_string(1.0 / 0.0)); + logWarn(std::string("inf ") + (std::isinf(gInfinity) ? "inf" : "nan")); + return 1; +} diff --git a/test/extensions/bootstrap/wasm/test_data/logging_cpp.cc b/test/extensions/bootstrap/wasm/test_data/logging_cpp.cc new file mode 100644 index 000000000000..70fde8f6ae19 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/logging_cpp.cc @@ -0,0 +1,47 @@ +// NOLINT(namespace-envoy) +#include + +#include + +#include "proxy_wasm_intrinsics.h" + +// Required Proxy-Wasm ABI version. 
+extern "C" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {} + +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t configuration_size) { + fprintf(stdout, "printf stdout test"); + fflush(stdout); + fprintf(stderr, "printf stderr test"); + logTrace("test trace logging"); + logDebug("test debug logging"); + logError("test error logging"); + const char* configuration = nullptr; + size_t size; + proxy_get_buffer_bytes(WasmBufferType::PluginConfiguration, 0, configuration_size, &configuration, + &size); + logWarn(std::string("warn " + std::string(configuration, size))); + ::free((void*)configuration); + return 1; +} + +extern "C" PROXY_WASM_KEEPALIVE void proxy_on_context_create(uint32_t, uint32_t) {} + +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_vm_start(uint32_t, uint32_t) { + proxy_set_tick_period_milliseconds(10); + return 1; +} + +extern "C" PROXY_WASM_KEEPALIVE void proxy_on_tick(uint32_t) { + const char* root_id = nullptr; + size_t size; + proxy_get_property("plugin_root_id", sizeof("plugin_root_id") - 1, &root_id, &size); + logInfo("test tick logging" + std::string(root_id, size)); + proxy_done(); +} + +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_done(uint32_t) { + logInfo("onDone logging"); + return 0; +} + +extern "C" PROXY_WASM_KEEPALIVE void proxy_on_delete(uint32_t) { logInfo("onDelete logging"); } diff --git a/test/extensions/bootstrap/wasm/test_data/logging_rust/Cargo.toml b/test/extensions/bootstrap/wasm/test_data/logging_rust/Cargo.toml new file mode 100644 index 000000000000..a82aed3df58d --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/logging_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm logging test" +name = "logging_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git a/test/extensions/bootstrap/wasm/test_data/logging_rust/src/lib.rs b/test/extensions/bootstrap/wasm/test_data/logging_rust/src/lib.rs new file mode 100644 index 000000000000..49947fd975c3 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/logging_rust/src/lib.rs @@ -0,0 +1,49 @@ +use log::{debug, error, info, trace, warn}; +use proxy_wasm::traits::{Context, RootContext}; +use proxy_wasm::types::LogLevel; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { Box::new(TestRoot) }); +} + +struct TestRoot; + +impl RootContext for TestRoot { + fn on_vm_start(&mut self, _: usize) -> bool { + true + } + + fn on_configure(&mut self, _: usize) -> bool { + trace!("test trace logging"); + debug!("test debug logging"); + error!("test error logging"); + if let Some(value) = self.get_configuration() { + warn!("warn {}", String::from_utf8(value).unwrap()); + } + true + } + + fn on_tick(&mut self) { + if let Some(value) = self.get_property(vec!["plugin_root_id"]) { + info!("test tick logging{}", String::from_utf8(value).unwrap()); + } else { + info!("test tick logging"); + } + self.done(); + } +} + +impl Context for TestRoot { + fn on_done(&mut self) -> bool { + info!("onDone logging"); + false + } +} + +impl Drop for TestRoot { + fn drop(&mut self) { + info!("onDelete logging"); + } +} diff --git 
a/test/extensions/bootstrap/wasm/test_data/missing_cpp.cc b/test/extensions/bootstrap/wasm/test_data/missing_cpp.cc new file mode 100644 index 000000000000..365f3a240bef --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/missing_cpp.cc @@ -0,0 +1,12 @@ +// NOLINT(namespace-envoy) +#include "proxy_wasm_intrinsics.h" + +// Required Proxy-Wasm ABI version. +extern "C" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {} + +extern "C" void missing(); + +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_vm_start(uint32_t, uint32_t) { + missing(); + return 1; +} diff --git a/test/extensions/bootstrap/wasm/test_data/segv_cpp.cc b/test/extensions/bootstrap/wasm/test_data/segv_cpp.cc new file mode 100644 index 000000000000..2f6f84cfabb3 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/segv_cpp.cc @@ -0,0 +1,25 @@ +// NOLINT(namespace-envoy) +#include + +#include "proxy_wasm_intrinsics.h" + +// Required Proxy-Wasm ABI version. +extern "C" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {} + +static int* badptr = nullptr; + +extern "C" PROXY_WASM_KEEPALIVE uint32_t proxy_on_configure(uint32_t, uint32_t) { + logError("before badptr"); + *badptr = 1; + logError("after badptr"); + return 1; +} + +extern "C" PROXY_WASM_KEEPALIVE void proxy_on_log(uint32_t context_id) { + logError("before div by zero"); +#pragma clang optimize off + int zero = context_id / 1000; + logError("divide by zero: " + std::to_string(100 / zero)); +#pragma clang optimize on + logError("after div by zero"); +} diff --git a/test/extensions/bootstrap/wasm/test_data/speed_cpp.cc b/test/extensions/bootstrap/wasm/test_data/speed_cpp.cc new file mode 100644 index 000000000000..f5b3782acbde --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/speed_cpp.cc @@ -0,0 +1,345 @@ +// NOLINT(namespace-envoy) +#include + +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_full.h" +// Required Proxy-Wasm ABI version. 
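+// This file is built two ways: without NULL_PLUGIN it is compiled with the Proxy-Wasm SDK into a
+// .wasm module (hence the explicit ABI export below); with NULL_PLUGIN it is compiled natively
+// into Envoy and registered via speed_cpp_null_plugin.cc further down in this change.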
+extern "C" PROXY_WASM_KEEPALIVE void proxy_abi_version_0_1_0() {} +#else +#include "envoy/config/core/v3/grpc_service.pb.h" +using envoy::config::core::v3::GrpcService; +#include "include/proxy-wasm/null_plugin.h" +#endif + +START_WASM_PLUGIN(WasmSpeedCpp) + +int xDoNotRemove = 0; + +google::protobuf::Arena arena; + +google::protobuf::Struct args; +google::protobuf::Struct* args_arena = + google::protobuf::Arena::CreateMessage(&arena); +std::string configuration = R"EOF( + { + "NAME":"test_pod", + "NAMESPACE":"test_namespace", + "LABELS": { + "app": "productpage", + "version": "v1", + "pod-template-hash": "84975bc778" + }, + "OWNER":"test_owner", + "WORKLOAD_NAME":"test_workload", + "PLATFORM_METADATA":{ + "gcp_project":"test_project", + "gcp_cluster_location":"test_location", + "gcp_cluster_name":"test_cluster" + }, + "ISTIO_VERSION":"istio-1.4", + "MESH_ID":"test-mesh" + } + )EOF"; + +// google::protobuf::Struct a; +// google::protobuf::util::JsonStringToMessage(configuration+'hfdjfhkjhdskhjk', a); + +const static char encodeLookup[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +const static char padCharacter = '='; + +std::string base64Encode(const uint8_t* start, const uint8_t* end) { + std::string encodedString; + size_t size = end - start; + encodedString.reserve(((size / 3) + (size % 3 > 0)) * 4); + uint32_t temp; + auto cursor = start; + for (size_t idx = 0; idx < size / 3; idx++) { + temp = (*cursor++) << 16; // Convert to big endian + temp += (*cursor++) << 8; + temp += (*cursor++); + encodedString.append(1, encodeLookup[(temp & 0x00FC0000) >> 18]); + encodedString.append(1, encodeLookup[(temp & 0x0003F000) >> 12]); + encodedString.append(1, encodeLookup[(temp & 0x00000FC0) >> 6]); + encodedString.append(1, encodeLookup[(temp & 0x0000003F)]); + } + switch (size % 3) { + case 1: + temp = (*cursor++) << 16; // Convert to big endian + encodedString.append(1, encodeLookup[(temp & 0x00FC0000) >> 18]); + encodedString.append(1, encodeLookup[(temp & 0x0003F000) >> 12]); + encodedString.append(2, padCharacter); + break; + case 2: + temp = (*cursor++) << 16; // Convert to big endian + temp += (*cursor++) << 8; + encodedString.append(1, encodeLookup[(temp & 0x00FC0000) >> 18]); + encodedString.append(1, encodeLookup[(temp & 0x0003F000) >> 12]); + encodedString.append(1, encodeLookup[(temp & 0x00000FC0) >> 6]); + encodedString.append(1, padCharacter); + break; + } + return encodedString; +} + +bool base64Decode(const std::basic_string& input, std::vector* output) { + if (input.length() % 4) + return false; + size_t padding = 0; + if (input.length()) { + if (input[input.length() - 1] == padCharacter) + padding++; + if (input[input.length() - 2] == padCharacter) + padding++; + } + // Setup a vector to hold the result + std::vector decodedBytes; + decodedBytes.reserve(((input.length() / 4) * 3) - padding); + uint32_t temp = 0; // Holds decoded quanta + std::basic_string::const_iterator cursor = input.begin(); + while (cursor < input.end()) { + for (size_t quantumPosition = 0; quantumPosition < 4; quantumPosition++) { + temp <<= 6; + if (*cursor >= 0x41 && *cursor <= 0x5A) // This area will need tweaking if + temp |= *cursor - 0x41; // you are using an alternate alphabet + else if (*cursor >= 0x61 && *cursor <= 0x7A) + temp |= *cursor - 0x47; + else if (*cursor >= 0x30 && *cursor <= 0x39) + temp |= *cursor + 0x04; + else if (*cursor == 0x2B) + temp |= 0x3E; // change to 0x2D for URL alphabet + else if (*cursor == 0x2F) + temp |= 0x3F; // change to 0x5F for URL 
alphabet + else if (*cursor == padCharacter) { // pad + switch (input.end() - cursor) { + case 1: // One pad character + decodedBytes.push_back((temp >> 16) & 0x000000FF); + decodedBytes.push_back((temp >> 8) & 0x000000FF); + goto Ldone; + case 2: // Two pad characters + decodedBytes.push_back((temp >> 10) & 0x000000FF); + goto Ldone; + default: + return false; + } + } else + return false; + cursor++; + } + decodedBytes.push_back((temp >> 16) & 0x000000FF); + decodedBytes.push_back((temp >> 8) & 0x000000FF); + decodedBytes.push_back((temp)&0x000000FF); + } +Ldone: + *output = std::move(decodedBytes); + return true; +} +std::string check_compiler; + +void (*test_fn)() = nullptr; + +void empty_test() {} + +void get_current_time_test() { + uint64_t t; + if (WasmResult::Ok != proxy_get_current_time_nanoseconds(&t)) { + logError("bad result from getCurrentTimeNanoseconds"); + } +} + +void small_string_check_compiler_test() { + check_compiler = "foo"; + check_compiler += "bar"; + check_compiler = ""; +} + +void small_string_test() { + std::string s = "foo"; + s += "bar"; + xDoNotRemove = s.size(); +} + +void small_string_check_compiler1000_test() { + for (int x = 0; x < 1000; x++) { + check_compiler = "foo"; + check_compiler += "bar"; + } + check_compiler = ""; +} + +void small_string1000_test() { + for (int x = 0; x < 1000; x++) { + std::string s = "foo"; + s += "bar"; + xDoNotRemove += s.size(); + } +} + +void large_string_test() { + std::string s(1024, 'f'); + std::string d(1024, 'o'); + s += d; + xDoNotRemove += s.size(); +} + +void large_string1000_test() { + for (int x = 0; x < 1000; x++) { + std::string s(1024, 'f'); + std::string d(1024, 'o'); + s += d; + xDoNotRemove += s.size(); + } +} + +void get_property_test() { + std::string property = "plugin_root_id"; + const char* value_ptr = nullptr; + size_t value_size = 0; + auto result = proxy_get_property(property.data(), property.size(), &value_ptr, &value_size); + if (WasmResult::Ok != result) { + logError("bad result for getProperty"); + } + ::free(reinterpret_cast(const_cast(value_ptr))); +} + +void grpc_service_test() { + std::string value = "foo"; + GrpcService grpc_service; + grpc_service.mutable_envoy_grpc()->set_cluster_name(value); + std::string grpc_service_string; + grpc_service.SerializeToString(&grpc_service_string); +} + +void grpc_service1000_test() { + std::string value = "foo"; + for (int x = 0; x < 1000; x++) { + GrpcService grpc_service; + grpc_service.mutable_envoy_grpc()->set_cluster_name(value); + std::string grpc_service_string; + grpc_service.SerializeToString(&grpc_service_string); + } +} + +void modify_metadata_test() { + auto path = getRequestHeader(":path"); + addRequestHeader("newheader", "newheadervalue"); + auto server = getRequestHeader("server"); + replaceRequestHeader("server", "envoy-wasm"); + replaceRequestHeader("envoy-wasm", "server"); + removeRequestHeader("newheader"); +} + +void modify_metadata1000_test() { + for (int x = 0; x < 1000; x++) { + auto path = getRequestHeader(":path"); + addRequestHeader("newheader", "newheadervalue"); + auto server = getRequestHeader("server"); + replaceRequestHeader("server", "envoy-wasm"); + replaceRequestHeader("envoy-wasm", "server"); + removeRequestHeader("newheader"); + } +} + +void json_serialize_test() { google::protobuf::util::JsonStringToMessage(configuration, &args); } + +void json_serialize_arena_test() { + google::protobuf::util::JsonStringToMessage(configuration, args_arena); +} + +void json_deserialize_test() { + std::string json; + 
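+  // Convert the Struct populated by json_serialize_test back into JSON text; accumulating the
+  // length into the global xDoNotRemove keeps the work from being optimized away.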
google::protobuf::util::MessageToJsonString(args, &json); + xDoNotRemove += json.size(); +} + +void json_deserialize_arena_test() { + std::string json; + google::protobuf::util::MessageToJsonString(*args_arena, &json); +} + +void json_deserialize_empty_test() { + std::string json; + google::protobuf::Struct empty; + google::protobuf::util::MessageToJsonString(empty, &json); + xDoNotRemove = json.size(); +} + +void json_serialize_deserialize_test() { + std::string json; + google::protobuf::Struct proto; + google::protobuf::util::JsonStringToMessage(configuration, &proto); + google::protobuf::util::MessageToJsonString(proto, &json); + xDoNotRemove = json.size(); +} + +void convert_to_filter_state_test() { + auto start = reinterpret_cast(&*configuration.begin()); + auto end = start + configuration.size(); + std::string encoded_config = base64Encode(start, end); + std::vector decoded; + base64Decode(encoded_config, &decoded); + std::string decoded_config(decoded.begin(), decoded.end()); + google::protobuf::util::JsonStringToMessage(decoded_config, &args); + auto bytes = args.SerializeAsString(); + setFilterStateStringValue("wasm_request_set_key", bytes); +} + +WASM_EXPORT(uint32_t, proxy_on_vm_start, (uint32_t, uint32_t configuration_size)) { + const char* configuration_ptr = nullptr; + size_t size; + proxy_get_buffer_bytes(WasmBufferType::VmConfiguration, 0, configuration_size, &configuration_ptr, + &size); + std::string configuration(configuration_ptr, size); + if (configuration == "empty") { + test_fn = &empty_test; + } else if (configuration == "get_current_time") { + test_fn = &get_current_time_test; + } else if (configuration == "small_string") { + test_fn = &small_string_test; + } else if (configuration == "small_string1000") { + test_fn = &small_string1000_test; + } else if (configuration == "small_string_check_compiler") { + test_fn = &small_string_check_compiler_test; + } else if (configuration == "small_string_check_compiler1000") { + test_fn = &small_string_check_compiler1000_test; + } else if (configuration == "large_string") { + test_fn = &large_string_test; + } else if (configuration == "large_string1000") { + test_fn = &large_string1000_test; + } else if (configuration == "get_property") { + test_fn = &get_property_test; + } else if (configuration == "grpc_service") { + test_fn = &grpc_service_test; + } else if (configuration == "grpc_service1000") { + test_fn = &grpc_service1000_test; + } else if (configuration == "modify_metadata") { + test_fn = &modify_metadata_test; + } else if (configuration == "modify_metadata1000") { + test_fn = &modify_metadata1000_test; + } else if (configuration == "json_serialize") { + test_fn = &json_serialize_test; + } else if (configuration == "json_serialize_arena") { + test_fn = &json_serialize_arena_test; + } else if (configuration == "json_deserialize") { + test_fn = &json_deserialize_test; + } else if (configuration == "json_deserialize_empty") { + test_fn = &json_deserialize_empty_test; + } else if (configuration == "json_deserialize_arena") { + test_fn = &json_deserialize_arena_test; + } else if (configuration == "json_serialize_deserialize") { + test_fn = &json_serialize_deserialize_test; + } else if (configuration == "convert_to_filter_state") { + test_fn = &convert_to_filter_state_test; + } else { + std::string message = "on_start " + configuration; + proxy_log(LogLevel::info, message.c_str(), message.size()); + } + ::free(const_cast(reinterpret_cast(configuration_ptr))); + return 1; +} + +WASM_EXPORT(void, proxy_on_tick, (uint32_t)) { 
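+  // Each tick runs the benchmark body that proxy_on_vm_start selected from the VM configuration.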
(*test_fn)(); } + +END_WASM_PLUGIN diff --git a/test/extensions/bootstrap/wasm/test_data/speed_cpp_null_plugin.cc b/test/extensions/bootstrap/wasm/test_data/speed_cpp_null_plugin.cc new file mode 100644 index 000000000000..c3ca3f12dea7 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/speed_cpp_null_plugin.cc @@ -0,0 +1,15 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace WasmSpeedCpp { +NullPluginRegistry* context_registry_; +} // namespace WasmSpeedCpp + +RegisterNullVmPluginFactory register_wasm_speed_test_plugin("WasmSpeedCpp", []() { + return std::make_unique(WasmSpeedCpp::context_registry_); +}); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/bootstrap/wasm/test_data/start_cpp.cc b/test/extensions/bootstrap/wasm/test_data/start_cpp.cc new file mode 100644 index 000000000000..126cc1649aaa --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/start_cpp.cc @@ -0,0 +1,25 @@ +// NOLINT(namespace-envoy) +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#else +#include "include/proxy-wasm/null_plugin.h" +#endif + +START_WASM_PLUGIN(WasmStartCpp) + +// Required Proxy-Wasm ABI version. +WASM_EXPORT(void, proxy_abi_version_0_1_0, ()) {} + +WASM_EXPORT(uint32_t, proxy_on_vm_start, (uint32_t, uint32_t configuration_size)) { + logDebug("onStart"); + return configuration_size ? 0 /* failure */ : 1 /* success */; +} + +WASM_EXPORT(uint32_t, proxy_on_configure, (uint32_t, uint32_t configuration_size)) { + // Fail if we are provided a non-empty configuration. + return configuration_size ? 0 /* failure */ : 1 /* success */; +} + +END_WASM_PLUGIN diff --git a/test/extensions/bootstrap/wasm/test_data/start_cpp_null_plugin.cc b/test/extensions/bootstrap/wasm/test_data/start_cpp_null_plugin.cc new file mode 100644 index 000000000000..1d3c6ff4640a --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/start_cpp_null_plugin.cc @@ -0,0 +1,15 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace WasmStartCpp { +NullPluginRegistry* context_registry_; +} // namespace WasmStartCpp + +RegisterNullVmPluginFactory register_wasm_speed_test_plugin("WasmStartCpp", []() { + return std::make_unique(WasmStartCpp::context_registry_); +}); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/bootstrap/wasm/test_data/stats_cpp.cc b/test/extensions/bootstrap/wasm/test_data/stats_cpp.cc new file mode 100644 index 000000000000..f36f85c685af --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/stats_cpp.cc @@ -0,0 +1,113 @@ +// NOLINT(namespace-envoy) +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#else +#include "include/proxy-wasm/null_plugin.h" +#endif + +template std::unique_ptr wrap_unique(T* ptr) { return std::unique_ptr(ptr); } + +START_WASM_PLUGIN(WasmStatsCpp) + +// Required Proxy-Wasm ABI version. +WASM_EXPORT(void, proxy_abi_version_0_1_0, ()) {} + +// Test the low level interface. 
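+// proxy_on_configure exercises the low-level metric API: defineMetric() hands back an opaque
+// metric id which is then passed to incrementMetric()/recordMetric()/getMetric(). The counter
+// values logged here (1, 2, 5) are what WasmNullTest.Stats in wasm_test.cc expects.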
+WASM_EXPORT(uint32_t, proxy_on_configure, (uint32_t, uint32_t)) { + uint32_t c, g, h; + CHECK_RESULT(defineMetric(MetricType::Counter, "test_counter", &c)); + CHECK_RESULT(defineMetric(MetricType::Gauge, "test_gauge", &g)); + CHECK_RESULT(defineMetric(MetricType::Histogram, "test_histogram", &h)); + + CHECK_RESULT(incrementMetric(c, 1)); + CHECK_RESULT(recordMetric(g, 2)); + CHECK_RESULT(recordMetric(h, 3)); + + uint64_t value; + CHECK_RESULT(getMetric(c, &value)); + logTrace(std::string("get counter = ") + std::to_string(value)); + CHECK_RESULT(incrementMetric(c, 1)); + CHECK_RESULT(getMetric(c, &value)); + logDebug(std::string("get counter = ") + std::to_string(value)); + CHECK_RESULT(recordMetric(c, 3)); + CHECK_RESULT(getMetric(c, &value)); + logInfo(std::string("get counter = ") + std::to_string(value)); + CHECK_RESULT(getMetric(g, &value)); + logWarn(std::string("get gauge = ") + std::to_string(value)); + // Get on histograms is not supported. + if (getMetric(h, &value) != WasmResult::Ok) { + logError(std::string("get histogram = Unsupported")); + } + return 1; +} + +// Test the higher level interface. +WASM_EXPORT(void, proxy_on_tick, (uint32_t)) { + Metric c(MetricType::Counter, "test_counter", + {MetricTag{"counter_tag", MetricTag::TagType::String}}); + Metric g(MetricType::Gauge, "test_gauge", {MetricTag{"gauge_int_tag", MetricTag::TagType::Int}}); + Metric h(MetricType::Histogram, "test_histogram", + {MetricTag{"histogram_int_tag", MetricTag::TagType::Int}, + MetricTag{"histogram_string_tag", MetricTag::TagType::String}, + MetricTag{"histogram_bool_tag", MetricTag::TagType::Bool}}); + + c.increment(1, "test_tag"); + g.record(2, 9); + h.record(3, 7, "test_tag", true); + + logTrace(std::string("get counter = ") + std::to_string(c.get("test_tag"))); + c.increment(1, "test_tag"); + logDebug(std::string("get counter = ") + std::to_string(c.get("test_tag"))); + c.record(3, "test_tag"); + logInfo(std::string("get counter = ") + std::to_string(c.get("test_tag"))); + logWarn(std::string("get gauge = ") + std::to_string(g.get(9))); + + auto hh = h.partiallyResolve(7); + auto h_id = hh.resolve("test_tag", true); + logError(std::string("resolved histogram name = ") + hh.nameFromIdSlow(h_id)); +} + +// Test the high level interface. 
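+// proxy_on_log uses the SDK's templated Counter/Gauge/Histogram helpers, which fold the tag values
+// supplied at call time into fully resolved stat names such as
+// "string_tag.test_tag.int_tag.7.bool_tag.true.test_counter" (checked by WasmNullTest.StatsHighLevel).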
+WASM_EXPORT(void, proxy_on_log, (uint32_t /* context_zero */)) { + auto c = wrap_unique( + Counter::New("test_counter", "string_tag", "int_tag", "bool_tag")); + auto g = + wrap_unique(Gauge::New("test_gauge", "string_tag1", "string_tag2")); + auto h = wrap_unique(Histogram::New("test_histogram", "int_tag", + "string_tag", "bool_tag")); + + c->increment(1, "test_tag", 7, true); + logTrace(std::string("get counter = ") + std::to_string(c->get("test_tag", 7, true))); + auto simple_c = c->resolve("test_tag", 7, true); + simple_c++; + logDebug(std::string("get counter = ") + std::to_string(c->get("test_tag", 7, true))); + c->record(3, "test_tag", 7, true); + logInfo(std::string("get counter = ") + std::to_string(c->get("test_tag", 7, true))); + + g->record(2, "test_tag1", "test_tag2"); + logWarn(std::string("get gauge = ") + std::to_string(g->get("test_tag1", "test_tag2"))); + + h->record(3, 7, "test_tag", true); + auto base_h = wrap_unique(Counter::New("test_histogram", "int_tag")); + auto complete_h = + wrap_unique(base_h->extendAndResolve(7, "string_tag", "bool_tag")); + auto simple_h = complete_h->resolve("test_tag", true); + logError(std::string("h_id = ") + complete_h->nameFromIdSlow(simple_h.metric_id)); + + Counter stack_c("test_counter", "string_tag", "int_tag", "bool_tag"); + stack_c.increment(1, "test_tag_stack", 7, true); + logError(std::string("stack_c = ") + std::to_string(stack_c.get("test_tag_stack", 7, true))); + + Gauge stack_g("test_gauge", "string_tag1", "string_tag2"); + stack_g.record(2, "stack_test_tag1", "test_tag2"); + logError(std::string("stack_g = ") + std::to_string(stack_g.get("stack_test_tag1", "test_tag2"))); + + std::string_view int_tag = "int_tag"; + Histogram stack_h("test_histogram", int_tag, "string_tag", "bool_tag"); + std::string_view stack_test_tag = "stack_test_tag"; + stack_h.record(3, 7, stack_test_tag, true); +} + +END_WASM_PLUGIN diff --git a/test/extensions/bootstrap/wasm/test_data/stats_cpp_null_plugin.cc b/test/extensions/bootstrap/wasm/test_data/stats_cpp_null_plugin.cc new file mode 100644 index 000000000000..35abc74861d5 --- /dev/null +++ b/test/extensions/bootstrap/wasm/test_data/stats_cpp_null_plugin.cc @@ -0,0 +1,15 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace WasmStatsCpp { +NullPluginRegistry* context_registry_; +} // namespace WasmStatsCpp + +RegisterNullVmPluginFactory register_wasm_speed_test_plugin("WasmStatsCpp", []() { + return std::make_unique(WasmStatsCpp::context_registry_); +}); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/bootstrap/wasm/wasm_speed_test.cc b/test/extensions/bootstrap/wasm/wasm_speed_test.cc new file mode 100644 index 000000000000..6d39d399fb89 --- /dev/null +++ b/test/extensions/bootstrap/wasm/wasm_speed_test.cc @@ -0,0 +1,144 @@ +/** + * Simple WASM speed test. 
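+ *
+ * Each case is registered once for the v8 runtime and the native "null" plugin, plus wavm when it
+ * is compiled in; a single case can be selected by appending Google Benchmark's
+ * `--benchmark_filter=<regex>` flag after `--` to the bazel invocation shown next.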
+ * + * Run with: + * `bazel run --config=libc++ -c opt //test/extensions/bootstrap/wasm:wasm_speed_test` + */ +#include "common/event/dispatcher_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/common/wasm/wasm.h" + +#include "test/mocks/server/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/utility.h" + +#include "absl/types/optional.h" +#include "benchmark/benchmark.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "tools/cpp/runfiles/runfiles.h" + +using bazel::tools::cpp::runfiles::Runfiles; + +namespace Envoy { +namespace Extensions { +namespace Wasm { + +class TestRoot : public Envoy::Extensions::Common::Wasm::Context { +public: + TestRoot(Extensions::Common::Wasm::Wasm* wasm, + const std::shared_ptr& plugin) + : Envoy::Extensions::Common::Wasm::Context(wasm, plugin) {} + + using Envoy::Extensions::Common::Wasm::Context::log; + proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override { + log_(static_cast(level), message); + return proxy_wasm::WasmResult::Ok; + } + MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)); +}; + +static void bmWasmSimpleCallSpeedTest(benchmark::State& state, std::string test, + std::string runtime) { + Envoy::Logger::Registry::getLog(Logger::Id::wasm).set_level(spdlog::level::off); + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = "some_long_root_id"; + auto vm_id = ""; + auto vm_configuration = test; + auto vm_key = ""; + auto plugin_configuration = ""; + auto plugin = std::make_shared( + name, root_id, vm_id, runtime, plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", runtime), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + std::string code; + if (runtime == "null") { + code = "WasmSpeedCpp"; + } else { + code = TestEnvironment::readFileToStringForTest( + TestEnvironment::runfilesPath("test/extensions/bootstrap/wasm/test_data/speed_cpp.wasm")); + } + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm->initialize(code, false)); + wasm->setCreateContextForTesting( + nullptr, + [](Extensions::Common::Wasm::Wasm* wasm, + const std::shared_ptr& plugin) + -> proxy_wasm::ContextBase* { return new TestRoot(wasm, plugin); }); + + auto root_context = wasm->start(plugin); + for (__attribute__((unused)) auto _ : state) { + root_context->onTick(0); + } +} + +#if defined(ENVOY_WASM_WAVM) +#define B(_t) \ + BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, V8SpeedTest_##_t, std::string(#_t), \ + std::string("v8")); \ + BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, NullSpeedTest_##_t, std::string(#_t), \ + std::string("null")); \ + BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, WavmSpeedTest_##_t, std::string(#_t), \ + std::string("wavm")); +#else +#define B(_t) \ + BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, V8SpeedTest_##_t, std::string(#_t), \ + std::string("v8")); \ + BENCHMARK_CAPTURE(bmWasmSimpleCallSpeedTest, NullSpeedTest_##_t, std::string(#_t), \ + std::string("null")); +#endif + +B(empty) +B(get_current_time) +B(small_string) +B(small_string1000) 
+B(small_string_check_compiler) +B(small_string_check_compiler1000) +B(large_string) +B(large_string1000) +B(get_property) +B(grpc_service) +B(grpc_service1000) +B(modify_metadata) +B(modify_metadata1000) +B(json_serialize) +B(json_serialize_arena) +B(json_deserialize) +B(json_deserialize_arena) +B(json_deserialize_empty) +B(json_serialize_deserialize) +B(convert_to_filter_state) + +} // namespace Wasm +} // namespace Extensions +} // namespace Envoy + +int main(int argc, char** argv) { + ::benchmark::Initialize(&argc, argv); + Envoy::TestEnvironment::initializeOptions(argc, argv); + // Create a Runfiles object for runfiles lookup. + // https://github.com/bazelbuild/bazel/blob/master/tools/cpp/runfiles/runfiles_src.h#L32 + std::string error; + std::unique_ptr runfiles(Runfiles::Create(argv[0], &error)); + RELEASE_ASSERT(Envoy::TestEnvironment::getOptionalEnvVar("NORUNFILES").has_value() || + runfiles != nullptr, + error); + Envoy::TestEnvironment::setRunfiles(runfiles.get()); + Envoy::TestEnvironment::setEnvVar("ENVOY_IP_TEST_VERSIONS", "all", 0); + Envoy::Event::Libevent::Global::initialize(); + if (::benchmark::ReportUnrecognizedArguments(argc, argv)) { + return 1; + } + ::benchmark::RunSpecifiedBenchmarks(); + return 0; +} diff --git a/test/extensions/bootstrap/wasm/wasm_test.cc b/test/extensions/bootstrap/wasm/wasm_test.cc new file mode 100644 index 000000000000..9511a91c96a9 --- /dev/null +++ b/test/extensions/bootstrap/wasm/wasm_test.cc @@ -0,0 +1,343 @@ +#include "common/event/dispatcher_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/common/wasm/wasm.h" + +#include "test/mocks/server/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/utility.h" + +#include "absl/types/optional.h" +#include "gmock/gmock.h" +#include "gtest/gtest-param-test.h" +#include "gtest/gtest.h" + +using testing::Eq; + +namespace Envoy { +namespace Extensions { +namespace Wasm { + +class TestContext : public Extensions::Common::Wasm::Context { +public: + TestContext(Extensions::Common::Wasm::Wasm* wasm, + const std::shared_ptr& plugin) + : Extensions::Common::Wasm::Context(wasm, plugin) {} + ~TestContext() override = default; + using Extensions::Common::Wasm::Context::log; + proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override { + std::cerr << std::string(message) << "\n"; + log_(static_cast(level), message); + return proxy_wasm::WasmResult::Ok; + } + MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)); +}; + +class WasmTestBase { +public: + WasmTestBase() + : api_(Api::createApiForTest(stats_store_)), + dispatcher_(api_->allocateDispatcher("wasm_test")), + base_scope_(stats_store_.createScope("")), scope_(base_scope_->createScope("")) {} + + void createWasm(absl::string_view runtime) { + plugin_ = std::make_shared( + name_, root_id_, vm_id_, runtime, plugin_configuration_, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info_, nullptr); + wasm_ = std::make_shared( + absl::StrCat("envoy.wasm.runtime.", runtime), vm_id_, vm_configuration_, vm_key_, scope_, + cluster_manager, *dispatcher_); + EXPECT_NE(wasm_, nullptr); + wasm_->setCreateContextForTesting( + nullptr, + [](Extensions::Common::Wasm::Wasm* wasm, + const std::shared_ptr& plugin) + -> proxy_wasm::ContextBase* { return new TestContext(wasm, plugin); }); + } + + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + Upstream::MockClusterManager cluster_manager; 
+ Event::DispatcherPtr dispatcher_; + Stats::ScopeSharedPtr base_scope_; + Stats::ScopeSharedPtr scope_; + NiceMock local_info_; + std::string name_; + std::string root_id_; + std::string vm_id_; + std::string vm_configuration_; + std::string vm_key_; + std::string plugin_configuration_; + std::shared_ptr plugin_; + std::shared_ptr wasm_; +}; + +#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM) +class WasmTest : public WasmTestBase, public testing::TestWithParam { +public: + void createWasm() { WasmTestBase::createWasm(GetParam()); } +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8" +#endif +#if defined(ENVOY_WASM_V8) && defined(ENVOY_WASM_WAVM) + , +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm" +#endif +); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmTest, testing_values); +#endif + +class WasmNullTest : public WasmTestBase, public testing::TestWithParam { +public: + void createWasm() { + WasmTestBase::createWasm(GetParam()); + const auto code = + GetParam() != "null" + ? TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/stats_cpp.wasm")) + : "WasmStatsCpp"; + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm_->initialize(code, false)); + } +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_null_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8", +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm", +#endif + "null"); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmNullTest, testing_null_values); + +#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM) +class WasmTestMatrix : public WasmTestBase, + public testing::TestWithParam> { +public: + void createWasm() { WasmTestBase::createWasm(std::get<0>(GetParam())); } + + void setWasmCode(std::string vm_configuration) { + const auto basic_path = + absl::StrCat("test/extensions/bootstrap/wasm/test_data/", vm_configuration); + code_ = TestEnvironment::readFileToStringForTest( + TestEnvironment::runfilesPath(basic_path + "_" + std::get<1>(GetParam()) + ".wasm")); + + EXPECT_FALSE(code_.empty()); + } + +protected: + std::string code_; +}; + +INSTANTIATE_TEST_SUITE_P(RuntimesAndLanguages, WasmTestMatrix, + testing::Combine(testing::Values( +#if defined(ENVOY_WASM_V8) + "v8" +#endif +#if defined(ENVOY_WASM_V8) && defined(ENVOY_WASM_WAVM) + , +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm" +#endif + ), + testing::Values("cpp", "rust"))); + +TEST_P(WasmTestMatrix, Logging) { + plugin_configuration_ = "configure-test"; + createWasm(); + setWasmCode("logging"); + + auto wasm_weak = std::weak_ptr(wasm_); + auto wasm_handler = std::make_unique(std::move(wasm_)); + + EXPECT_TRUE(wasm_weak.lock()->initialize(code_, false)); + auto context = static_cast(wasm_weak.lock()->start(plugin_)); + + if (std::get<1>(GetParam()) == "cpp") { + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("printf stdout test"))); + EXPECT_CALL(*context, log_(spdlog::level::err, Eq("printf stderr test"))); + } + EXPECT_CALL(*context, log_(spdlog::level::warn, Eq("warn configure-test"))); + EXPECT_CALL(*context, log_(spdlog::level::trace, Eq("test trace logging"))); + EXPECT_CALL(*context, log_(spdlog::level::debug, Eq("test debug logging"))); + EXPECT_CALL(*context, log_(spdlog::level::err, Eq("test error logging"))); + EXPECT_CALL(*context, 
log_(spdlog::level::info, Eq("test tick logging"))) + .Times(testing::AtLeast(1)); + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("onDone logging"))); + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("onDelete logging"))); + + EXPECT_TRUE(wasm_weak.lock()->configure(context, plugin_)); + wasm_handler.reset(); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + // This will `SEGV` on nullptr if wasm has been deleted. + context->onTick(0); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->clearDeferredDeleteList(); +} +#endif + +#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM) +TEST_P(WasmTest, BadSignature) { + createWasm(); + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/bad_signature_cpp.wasm")); + EXPECT_FALSE(code.empty()); + EXPECT_FALSE(wasm_->initialize(code, false)); + EXPECT_TRUE(wasm_->isFailed()); +} + +TEST_P(WasmTest, Segv) { + createWasm(); + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/segv_cpp.wasm")); + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm_->initialize(code, false)); + auto context = static_cast(wasm_->start(plugin_)); + EXPECT_CALL(*context, log_(spdlog::level::err, Eq("before badptr"))); + EXPECT_FALSE(wasm_->configure(context, plugin_)); + wasm_->isFailed(); +} + +TEST_P(WasmTest, DivByZero) { + createWasm(); + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/segv_cpp.wasm")); + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm_->initialize(code, false)); + auto context = static_cast(wasm_->start(plugin_)); + EXPECT_CALL(*context, log_(spdlog::level::err, Eq("before div by zero"))); + context->onLog(); + wasm_->isFailed(); +} + +TEST_P(WasmTest, EmscriptenVersion) { + createWasm(); + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/segv_cpp.wasm")); + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm_->initialize(code, false)); + uint32_t major = 9, minor = 9, abi_major = 9, abi_minor = 9; + EXPECT_TRUE(wasm_->getEmscriptenVersion(&major, &minor, &abi_major, &abi_minor)); + EXPECT_EQ(major, 0); + EXPECT_LE(minor, 3); + // Up to (at least) emsdk 1.39.6. + EXPECT_EQ(abi_major, 0); + EXPECT_LE(abi_minor, 20); +} + +TEST_P(WasmTest, IntrinsicGlobals) { + createWasm(); + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/emscripten_cpp.wasm")); + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm_->initialize(code, false)); + auto context = static_cast(wasm_->start(plugin_)); + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("NaN nan"))); + EXPECT_CALL(*context, log_(spdlog::level::warn, Eq("inf inf"))).Times(3); + EXPECT_TRUE(wasm_->configure(context, plugin_)); +} + +// The `asm2wasm.wasm` file uses operations which would require the `asm2wasm` Emscripten module +// *if* em++ is invoked with the trap mode "clamp". See +// https://emscripten.org/docs/compiling/WebAssembly.html This test demonstrates that the `asm2wasm` +// module is not required with the trap mode is set to "allow". 
Note: future Wasm standards will +// change this behavior by providing non-trapping instructions, but in the mean time we support the +// default Emscripten behavior. +TEST_P(WasmTest, Asm2Wasm) { + createWasm(); + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/bootstrap/wasm/test_data/asm2wasm_cpp.wasm")); + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm_->initialize(code, false)); + auto context = static_cast(wasm_->start(plugin_)); + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("out 0 0 0"))); + EXPECT_TRUE(wasm_->configure(context, plugin_)); +} +#endif + +TEST_P(WasmNullTest, Stats) { + createWasm(); + auto context = static_cast(wasm_->start(plugin_)); + + EXPECT_CALL(*context, log_(spdlog::level::trace, Eq("get counter = 1"))); + EXPECT_CALL(*context, log_(spdlog::level::debug, Eq("get counter = 2"))); + // recordMetric on a Counter is the same as increment. + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("get counter = 5"))); + EXPECT_CALL(*context, log_(spdlog::level::warn, Eq("get gauge = 2"))); + // Get is not supported on histograms. + EXPECT_CALL(*context, log_(spdlog::level::err, Eq("get histogram = Unsupported"))); + + EXPECT_TRUE(wasm_->configure(context, plugin_)); + EXPECT_EQ(scope_->counterFromString("test_counter").value(), 5); + EXPECT_EQ(scope_->gaugeFromString("test_gauge", Stats::Gauge::ImportMode::Accumulate).value(), 2); +} + +TEST_P(WasmNullTest, StatsHigherLevel) { + createWasm(); + auto context = static_cast(wasm_->start(plugin_)); + + EXPECT_CALL(*context, log_(spdlog::level::trace, Eq("get counter = 1"))); + EXPECT_CALL(*context, log_(spdlog::level::debug, Eq("get counter = 2"))); + // recordMetric on a Counter is the same as increment. + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("get counter = 5"))); + EXPECT_CALL(*context, log_(spdlog::level::warn, Eq("get gauge = 2"))); + // Get is not supported on histograms. + EXPECT_CALL(*context, log_(spdlog::level::err, + Eq(std::string("resolved histogram name = " + "histogram_int_tag.7.histogram_string_tag.test_tag." + "histogram_bool_tag.true.test_histogram")))); + + wasm_->setTimerPeriod(1, std::chrono::milliseconds(10)); + wasm_->tickHandler(1); + EXPECT_EQ(scope_->counterFromString("counter_tag.test_tag.test_counter").value(), 5); + EXPECT_EQ( + scope_->gaugeFromString("gauge_int_tag.9.test_gauge", Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); +} + +TEST_P(WasmNullTest, StatsHighLevel) { + createWasm(); + auto context = static_cast(wasm_->start(plugin_)); + + EXPECT_CALL(*context, log_(spdlog::level::trace, Eq("get counter = 1"))); + EXPECT_CALL(*context, log_(spdlog::level::debug, Eq("get counter = 2"))); + // recordMetric on a Counter is the same as increment. + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("get counter = 5"))); + EXPECT_CALL(*context, log_(spdlog::level::warn, Eq("get gauge = 2"))); + // Get is not supported on histograms. + // EXPECT_CALL(*context, log_(spdlog::level::err, Eq(std::string("resolved histogram name + // = int_tag.7_string_tag.test_tag.bool_tag.true.test_histogram")))); + EXPECT_CALL(*context, + log_(spdlog::level::err, + Eq("h_id = int_tag.7.string_tag.test_tag.bool_tag.true.test_histogram"))); + EXPECT_CALL(*context, log_(spdlog::level::err, Eq("stack_c = 1"))); + EXPECT_CALL(*context, log_(spdlog::level::err, Eq("stack_g = 2"))); + // Get is not supported on histograms. 
+ // EXPECT_CALL(*context, log_(spdlog::level::err, Eq("stack_h = 3"))); + context->onLog(); + EXPECT_EQ( + scope_->counterFromString("string_tag.test_tag.int_tag.7.bool_tag.true.test_counter").value(), + 5); + EXPECT_EQ(scope_ + ->gaugeFromString("string_tag1.test_tag1.string_tag2.test_tag2.test_gauge", + Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); +} + +} // namespace Wasm +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index c84bb78a57e8..566392702462 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -130,10 +130,8 @@ class AggregateIntegrationTest : public testing::TestWithParamlocalAddress()->ip()->port(), Network::Test::getLoopbackAddressString(GetParam())); diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 343cbb9f4339..e98d79524a9f 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -98,11 +98,12 @@ class AggregateClusterTest : public testing::Test { ProtobufMessage::getStrictValidationVisitor(), config); Stats::ScopePtr scope = stats_store_.createScope("cluster.name."); Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_store_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); - cluster_ = std::make_shared(cluster_config, config, cm_, runtime_, random_, - factory_context, std::move(scope), tls_, false); + cluster_ = + std::make_shared(cluster_config, config, cm_, runtime_, api_->randomGenerator(), + factory_context, std::move(scope), tls_, false); thread_aware_lb_ = std::make_unique(*cluster_); lb_factory_ = thread_aware_lb_->factory(); @@ -133,7 +134,7 @@ class AggregateClusterTest : public testing::Test { NiceMock admin_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock validation_visitor_; - Api::ApiPtr api_{Api::createApiForTest(stats_store_)}; + Api::ApiPtr api_{Api::createApiForTest(stats_store_, random_)}; std::shared_ptr cluster_; Upstream::ThreadAwareLoadBalancerPtr thread_aware_lb_; Upstream::LoadBalancerFactorySharedPtr lb_factory_; @@ -176,6 +177,7 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { for (int i = 0; i <= 65; ++i) { EXPECT_CALL(random_, random()).WillOnce(Return(i)); + EXPECT_TRUE(lb_->peekAnotherHost(nullptr) == nullptr); Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr); EXPECT_EQ(host.get(), target.get()); } diff --git a/test/extensions/clusters/aggregate/cluster_update_test.cc b/test/extensions/clusters/aggregate/cluster_update_test.cc index 2cacf2469696..8755ba8a29ef 100644 --- a/test/extensions/clusters/aggregate/cluster_update_test.cc +++ b/test/extensions/clusters/aggregate/cluster_update_test.cc @@ -38,9 +38,9 @@ class AggregateClusterUpdateTest : public testing::Test { void initialize(const std::string& yaml_config) { auto bootstrap = parseBootstrapFromV2Yaml(yaml_config); cluster_manager_ = std::make_unique( - bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, + bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.local_info_, log_manager_, 
factory_.dispatcher_, admin_, validation_context_, - *api_, http_context_, grpc_context_); + *factory_.api_, http_context_, grpc_context_); cluster_manager_->initializeSecondaryClusters(bootstrap); EXPECT_EQ(cluster_manager_->activeClusters().size(), 1); cluster_ = cluster_manager_->get("aggregate_cluster"); @@ -48,11 +48,10 @@ class AggregateClusterUpdateTest : public testing::Test { Stats::IsolatedStoreImpl stats_store_; NiceMock admin_; - Api::ApiPtr api_{Api::createApiForTest(stats_store_)}; + NiceMock factory_; Upstream::ThreadLocalCluster* cluster_; Event::SimulatedTimeSystem time_system_; - NiceMock factory_; NiceMock validation_context_; std::unique_ptr cluster_manager_; AccessLog::MockAccessLogManager log_manager_; @@ -262,8 +261,8 @@ TEST_F(AggregateClusterUpdateTest, InitializeAggregateClusterAfterOtherClusters) auto bootstrap = parseBootstrapFromV2Yaml(config); cluster_manager_ = std::make_unique( - bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, - factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, *api_, + bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.local_info_, + log_manager_, factory_.dispatcher_, admin_, validation_context_, *factory_.api_, http_context_, grpc_context_); cluster_manager_->initializeSecondaryClusters(bootstrap); EXPECT_EQ(cluster_manager_->activeClusters().size(), 2); diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index 8388b8ac280c..2ed6655eea17 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -40,7 +40,7 @@ class ClusterTest : public testing::Test, ProtobufMessage::getStrictValidationVisitor(), config); Stats::ScopePtr scope = stats_store_.createScope("cluster.name."); Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_store_, + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); if (uses_tls) { EXPECT_CALL(ssl_context_manager_, createSslClientContext(_, _)); @@ -110,7 +110,6 @@ class ClusterTest : public testing::Test, Stats::IsolatedStoreImpl stats_store_; Ssl::MockContextManager ssl_context_manager_; NiceMock cm_; - NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; @@ -206,9 +205,8 @@ class ClusterFactoryTest : public testing::Test { envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml_config, avoid_boosting); Upstream::ClusterFactoryContextImpl cluster_factory_context( - cm_, stats_store_, tls_, nullptr, ssl_context_manager_, runtime_, random_, dispatcher_, - log_manager_, local_info_, admin_, singleton_manager_, nullptr, true, validation_visitor_, - *api_); + cm_, stats_store_, tls_, nullptr, ssl_context_manager_, runtime_, dispatcher_, log_manager_, + local_info_, admin_, singleton_manager_, nullptr, true, validation_visitor_, *api_); std::unique_ptr cluster_factory = std::make_unique(); std::tie(cluster_, thread_aware_lb_) = @@ -219,7 +217,6 @@ class ClusterFactoryTest : public testing::Test { Stats::IsolatedStoreImpl stats_store_; NiceMock ssl_context_manager_; NiceMock cm_; - NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; diff --git 
a/test/extensions/clusters/redis/BUILD b/test/extensions/clusters/redis/BUILD index 59bcf5b879bb..ecbb05097215 100644 --- a/test/extensions/clusters/redis/BUILD +++ b/test/extensions/clusters/redis/BUILD @@ -92,7 +92,6 @@ envoy_extension_cc_test( size = "small", srcs = ["redis_cluster_integration_test.cc"], extension_name = "envoy.clusters.redis", - tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/redis:redis_cluster", "//source/extensions/clusters/redis:redis_cluster_lb", diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index cceaed87a9db..53f4d6454ec9 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -171,7 +171,8 @@ class RedisClusterIntegrationTest : public testing::TestWithParam(&(test_server.server().random())); + mock_rng_ = dynamic_cast( + &(test_server.server().api().randomGenerator())); // Abort now if we cannot downcast the server's random number generator pointer. ASSERT_TRUE(mock_rng_ != nullptr); // Ensure that fake_upstreams_[0] is the load balancer's host of choice by default. diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 102a93f50314..6b5845359069 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -84,7 +84,7 @@ class RedisClusterTest : public testing::Test, MOCK_METHOD(Extensions::NetworkFilters::Common::Redis::Client::Client*, create_, (std::string)); protected: - RedisClusterTest() : api_(Api::createApiForTest(stats_store_)) {} + RedisClusterTest() : api_(Api::createApiForTest(stats_store_, random_)) {} std::list hostListToAddresses(const Upstream::HostVector& hosts) { std::list addresses; @@ -104,7 +104,7 @@ class RedisClusterTest : public testing::Test, "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, random_, stats_store_, + admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); envoy::config::cluster::redis::RedisClusterConfig config; @@ -134,7 +134,7 @@ class RedisClusterTest : public testing::Test, "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, random_, stats_store_, + admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); envoy::config::cluster::redis::RedisClusterConfig config; @@ -146,7 +146,7 @@ class RedisClusterTest : public testing::Test, NiceMock outlier_event_logger; NiceMock api; Upstream::ClusterFactoryContextImpl cluster_factory_context( - cm, stats_store_, tls_, std::move(dns_resolver_), ssl_context_manager_, runtime_, random_, + cm, stats_store_, tls_, std::move(dns_resolver_), ssl_context_manager_, runtime_, dispatcher_, log_manager, local_info_, admin_, singleton_manager_, std::move(outlier_event_logger), false, validation_visitor_, api); diff --git a/test/extensions/common/aws/signer_impl_test.cc b/test/extensions/common/aws/signer_impl_test.cc index 857399749fb1..cc4fb856b5de 100644 --- a/test/extensions/common/aws/signer_impl_test.cc +++ b/test/extensions/common/aws/signer_impl_test.cc @@ -37,9 +37,7 @@ class SignerImplTest : public testing::Test { message_->headers().addCopy(Http::LowerCaseString(key), value); } - void setBody(const std::string& body) { - message_->body() = std::make_unique(body); - } + void setBody(const std::string& body) { message_->body().add(body); } void expectSignHeaders(absl::string_view service_name, absl::string_view signature, absl::string_view payload) { @@ -58,8 +56,9 @@ class SignerImplTest : public testing::Test { "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " "Signature={}", service_name, signature), - headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView()); - EXPECT_EQ(payload, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); + headers.get(Http::CustomHeaders::get().Authorization)[0]->value().getStringView()); + EXPECT_EQ(payload, + headers.get(SignatureHeaders::get().ContentSha256)[0]->value().getStringView()); } NiceMock* credentials_provider_; @@ -75,7 +74,7 @@ class SignerImplTest : public testing::Test { TEST_F(SignerImplTest, AnonymousCredentials) { EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(Credentials())); signer_.sign(*message_); - EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); + EXPECT_TRUE(message_->headers().get(Http::CustomHeaders::get().Authorization).empty()); } // HTTP :method header is required @@ -83,7 +82,7 @@ TEST_F(SignerImplTest, MissingMethodException) { EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_)); EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException, "Message is missing :method header"); - EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); + EXPECT_TRUE(message_->headers().get(Http::CustomHeaders::get().Authorization).empty()); } // HTTP :path header is required @@ -92,7 +91,7 @@ TEST_F(SignerImplTest, MissingPathException) { addMethod("GET"); EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException, "Message is missing :path header"); - EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); + EXPECT_TRUE(message_->headers().get(Http::CustomHeaders::get().Authorization).empty()); } // Verify we sign the date header @@ -101,14 +100,16 @@ TEST_F(SignerImplTest, SignDateHeader) { addMethod("GET"); addPath("/"); 
signer_.sign(*message_); - EXPECT_NE(nullptr, message_->headers().get(SignatureHeaders::get().ContentSha256)); + EXPECT_FALSE(message_->headers().get(SignatureHeaders::get().ContentSha256).empty()); EXPECT_EQ("20180102T030400Z", - message_->headers().get(SignatureHeaders::get().Date)->value().getStringView()); - EXPECT_EQ( - "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + message_->headers().get(SignatureHeaders::get().Date)[0]->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", + message_->headers() + .get(Http::CustomHeaders::get().Authorization)[0] + ->value() + .getStringView()); } // Verify we sign the security token header if the token is present in the credentials @@ -119,12 +120,14 @@ TEST_F(SignerImplTest, SignSecurityTokenHeader) { signer_.sign(*message_); EXPECT_EQ( "token", - message_->headers().get(SignatureHeaders::get().SecurityToken)->value().getStringView()); - EXPECT_EQ( - "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, " - "Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e", - message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + message_->headers().get(SignatureHeaders::get().SecurityToken)[0]->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, " + "Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e", + message_->headers() + .get(Http::CustomHeaders::get().Authorization)[0] + ->value() + .getStringView()); } // Verify we sign the content header as the hashed empty string if the body is empty @@ -135,12 +138,14 @@ TEST_F(SignerImplTest, SignEmptyContentHeader) { signer_.sign(*message_, true); EXPECT_EQ( SignatureConstants::get().HashedEmptyString, - message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); - EXPECT_EQ( - "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + message_->headers().get(SignatureHeaders::get().ContentSha256)[0]->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", + message_->headers() + .get(Http::CustomHeaders::get().Authorization)[0] + ->value() + .getStringView()); } // Verify we sign the content header correctly when we have a body @@ -152,12 +157,14 @@ TEST_F(SignerImplTest, SignContentHeader) { signer_.sign(*message_, true); EXPECT_EQ( "937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244", - message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); - EXPECT_EQ( - 
"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", - message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + message_->headers().get(SignatureHeaders::get().ContentSha256)[0]->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", + message_->headers() + .get(Http::CustomHeaders::get().Authorization)[0] + ->value() + .getStringView()); } // Verify we sign some extra headers @@ -169,11 +176,13 @@ TEST_F(SignerImplTest, SignExtraHeaders) { addHeader("b", "b_value"); addHeader("c", "c_value"); signer_.sign(*message_); - EXPECT_EQ( - "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, " - "Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81", - message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, " + "Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81", + message_->headers() + .get(Http::CustomHeaders::get().Authorization)[0] + ->value() + .getStringView()); } // Verify signing a host header @@ -183,11 +192,13 @@ TEST_F(SignerImplTest, SignHostHeader) { addPath("/"); addHeader("host", "www.example.com"); signer_.sign(*message_); - EXPECT_EQ( - "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " + "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", + message_->headers() + .get(Http::CustomHeaders::get().Authorization)[0] + ->value() + .getStringView()); } // Verify signing headers for services. 
diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index 03bc26f3ad60..65f948f22011 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -45,8 +45,8 @@ class ProxyProtocolRegressionTest : public testing::TestWithParamallocateDispatcher("test_thread")), socket_(std::make_shared( Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)), - connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_)), name_("proxy"), - filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), + connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), + name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), init_manager_(nullptr) { EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress())); @@ -72,6 +72,9 @@ class ProxyProtocolRegressionTest : public testing::TestWithParam + +#define EMSCRIPTEN_KEEPALIVE __attribute__((used)) __attribute__((visibility("default"))) + +// Required Proxy-Wasm ABI version. +extern "C" EMSCRIPTEN_KEEPALIVE void proxy_abi_version_0_1_0() {} + +extern "C" uint32_t proxy_log(uint32_t level, const char* logMessage, size_t messageSize); + +extern "C" EMSCRIPTEN_KEEPALIVE uint32_t proxy_on_configure(uint32_t, int bad, char* configuration, + int size) { + std::string message = "bad signature"; + proxy_log(4 /* error */, message.c_str(), message.size()); + return 1; +} diff --git a/test/extensions/common/wasm/test_data/test_context_cpp.cc b/test/extensions/common/wasm/test_data/test_context_cpp.cc new file mode 100644 index 000000000000..c89164e43f11 --- /dev/null +++ b/test/extensions/common/wasm/test_data/test_context_cpp.cc @@ -0,0 +1,82 @@ +// NOLINT(namespace-envoy) +#include +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#include "source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(CommonWasmTestContextCpp) + +class TestContext : public EnvoyContext { +public: + explicit TestContext(uint32_t id, RootContext* root) : EnvoyContext(id, root) {} +}; + +class TestRootContext : public EnvoyRootContext { +public: + explicit TestRootContext(uint32_t id, std::string_view root_id) : EnvoyRootContext(id, root_id) {} + + bool onStart(size_t vm_configuration_size) override; + bool onDone() override; + void onTick() override; + void onQueueReady(uint32_t) override; + void onResolveDns(uint32_t token, uint32_t results_size) override; + +private: + uint32_t dns_token_; +}; + +static RegisterContextFactory register_TestContext(CONTEXT_FACTORY(TestContext), + ROOT_FACTORY(TestRootContext)); +static RegisterContextFactory register_EmptyTestContext(CONTEXT_FACTORY(EnvoyContext), + ROOT_FACTORY(EnvoyRootContext), "empty"); + +bool TestRootContext::onStart(size_t) { + envoy_resolve_dns("example.com", sizeof("example.com") - 1, &dns_token_); + return true; +} + +void TestRootContext::onResolveDns(uint32_t token, uint32_t result_size) { + logWarn("TestRootContext::onResolveDns " + std::to_string(token)); + auto dns_buffer = getBufferBytes(WasmBufferType::CallData, 0, result_size); + auto dns = 
parseDnsResults(dns_buffer->view()); + for (auto& e : dns) { + logInfo("TestRootContext::onResolveDns dns " + std::to_string(e.ttl_seconds) + " " + e.address); + } +} + +bool TestRootContext::onDone() { + logWarn("TestRootContext::onDone " + std::to_string(id())); + return true; +} + +// Null VM fails on nullptr. +void TestRootContext::onTick() { + if (envoy_resolve_dns(0, 1, &dns_token_) != WasmResult::InvalidMemoryAccess) { + logInfo("resolve_dns should report invalid memory access"); + } + if (envoy_resolve_dns("example.com", sizeof("example.com") - 1, nullptr) != + WasmResult::InvalidMemoryAccess) { + logInfo("resolve_dns should report invalid memory access"); + } +} + +// V8 fails on pointer too large. +void TestRootContext::onQueueReady(uint32_t) { + if (envoy_resolve_dns(reinterpret_cast(INT_MAX), 0, &dns_token_) != + WasmResult::InvalidMemoryAccess) { + logInfo("resolve_dns should report invalid memory access"); + } + if (envoy_resolve_dns("example.com", sizeof("example.com") - 1, + reinterpret_cast(INT_MAX)) != WasmResult::InvalidMemoryAccess) { + logInfo("resolve_dns should report invalid memory access"); + } +} + +END_WASM_PLUGIN diff --git a/test/extensions/common/wasm/test_data/test_context_cpp_null_plugin.cc b/test/extensions/common/wasm/test_data/test_context_cpp_null_plugin.cc new file mode 100644 index 000000000000..88e3a18943f0 --- /dev/null +++ b/test/extensions/common/wasm/test_data/test_context_cpp_null_plugin.cc @@ -0,0 +1,16 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace CommonWasmTestContextCpp { +NullPluginRegistry* context_registry_; +} // namespace CommonWasmTestContextCpp + +RegisterNullVmPluginFactory + register_common_wasm_test_context_cpp_plugin("CommonWasmTestContextCpp", []() { + return std::make_unique(CommonWasmTestContextCpp::context_registry_); + }); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/common/wasm/test_data/test_cpp.cc b/test/extensions/common/wasm/test_data/test_cpp.cc new file mode 100644 index 000000000000..1d990901846a --- /dev/null +++ b/test/extensions/common/wasm/test_data/test_cpp.cc @@ -0,0 +1,275 @@ +// NOLINT(namespace-envoy) +#ifndef WIN32 +#include "unistd.h" + +#endif +#include +#include +#include +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#else +#include "include/proxy-wasm/null_plugin.h" +#endif + +START_WASM_PLUGIN(CommonWasmTestCpp) + +static int* badptr = nullptr; +static float gNan = std::nan("1"); +static float gInfinity = INFINITY; +volatile double zero_unbeknownst_to_the_compiler = 0.0; + +#ifndef CHECK_RESULT +#define CHECK_RESULT(_c) \ + do { \ + if ((_c) != WasmResult::Ok) { \ + proxy_log(LogLevel::critical, #_c, sizeof(#_c) - 1); \ + abort(); \ + } \ + } while (0) +#endif + +#define CHECK_RESULT_NOT_OK(_c) \ + do { \ + if ((_c) == WasmResult::Ok) { \ + proxy_log(LogLevel::critical, #_c, sizeof(#_c) - 1); \ + abort(); \ + } \ + } while (0) + +#define FAIL_NOW(_msg) \ + do { \ + const std::string __message = _msg; \ + proxy_log(LogLevel::critical, __message.c_str(), __message.size()); \ + abort(); \ + } while (0) + +WASM_EXPORT(void, proxy_abi_version_0_2_1, (void)) {} + +WASM_EXPORT(void, proxy_on_context_create, (uint32_t, uint32_t)) {} + +WASM_EXPORT(uint32_t, proxy_on_vm_start, (uint32_t context_id, uint32_t configuration_size)) { + const char* configuration_ptr = nullptr; + size_t size; + 
proxy_get_buffer_bytes(WasmBufferType::VmConfiguration, 0, configuration_size, &configuration_ptr, + &size); + std::string configuration(configuration_ptr, size); + if (configuration == "logging") { + std::string trace_message = "test trace logging"; + proxy_log(LogLevel::trace, trace_message.c_str(), trace_message.size()); + std::string debug_message = "test debug logging"; + proxy_log(LogLevel::debug, debug_message.c_str(), debug_message.size()); + std::string warn_message = "test warn logging"; + proxy_log(LogLevel::warn, warn_message.c_str(), warn_message.size()); + std::string error_message = "test error logging"; + proxy_log(LogLevel::error, error_message.c_str(), error_message.size()); + LogLevel log_level; + CHECK_RESULT(proxy_get_log_level(&log_level)); + std::string level_message = "log level is " + std::to_string(static_cast(log_level)); + proxy_log(LogLevel::info, level_message.c_str(), level_message.size()); + } else if (configuration == "segv") { + std::string message = "before badptr"; + proxy_log(LogLevel::error, message.c_str(), message.size()); + ::free(const_cast(reinterpret_cast(configuration_ptr))); + *badptr = 1; + message = "after badptr"; + proxy_log(LogLevel::error, message.c_str(), message.size()); + } else if (configuration == "divbyzero") { + std::string message = "before div by zero"; + proxy_log(LogLevel::error, message.c_str(), message.size()); + ::free(const_cast(reinterpret_cast(configuration_ptr))); + int zero = context_id & 0x100000; + message = "divide by zero: " + std::to_string(100 / zero); + proxy_log(LogLevel::error, message.c_str(), message.size()); + } else if (configuration == "globals") { + std::string message = "NaN " + std::to_string(gNan); + proxy_log(LogLevel::warn, message.c_str(), message.size()); + message = "inf " + std::to_string(gInfinity); + proxy_log(LogLevel::warn, message.c_str(), message.size()); + message = "inf " + std::to_string(1.0 / zero_unbeknownst_to_the_compiler); + proxy_log(LogLevel::warn, message.c_str(), message.size()); + message = std::string("inf ") + (std::isinf(gInfinity) ? "inf" : "nan"); + proxy_log(LogLevel::warn, message.c_str(), message.size()); + } else if (configuration == "stats") { + uint32_t c, g, h; + + std::string name = "test_counter"; + CHECK_RESULT(proxy_define_metric(MetricType::Counter, name.data(), name.size(), &c)); + name = "test_gauge"; + CHECK_RESULT(proxy_define_metric(MetricType::Gauge, name.data(), name.size(), &g)); + name = "test_historam"; + CHECK_RESULT(proxy_define_metric(MetricType::Histogram, name.data(), name.size(), &h)); + // Bad type. 
+ CHECK_RESULT_NOT_OK( + proxy_define_metric(static_cast(9999), name.data(), name.size(), &c)); + + CHECK_RESULT(proxy_increment_metric(c, 1)); + CHECK_RESULT(proxy_increment_metric(g, 1)); + CHECK_RESULT_NOT_OK(proxy_increment_metric(h, 1)); + CHECK_RESULT(proxy_record_metric(g, 2)); + CHECK_RESULT(proxy_record_metric(h, 3)); + + uint64_t value; + // Not found + CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 0, &value)); + CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 1, &value)); + CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 2, &value)); + CHECK_RESULT_NOT_OK(proxy_get_metric((1 << 10) + 3, &value)); + CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 0, 1)); + CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 1, 1)); + CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 2, 1)); + CHECK_RESULT_NOT_OK(proxy_record_metric((1 << 10) + 3, 1)); + CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 0, 1)); + CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 1, 1)); + CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 2, 1)); + CHECK_RESULT_NOT_OK(proxy_increment_metric((1 << 10) + 3, 1)); + // Found. + std::string message; + CHECK_RESULT(proxy_get_metric(c, &value)); + message = std::string("get counter = ") + std::to_string(value); + proxy_log(LogLevel::trace, message.c_str(), message.size()); + CHECK_RESULT(proxy_increment_metric(c, 1)); + CHECK_RESULT(proxy_get_metric(c, &value)); + message = std::string("get counter = ") + std::to_string(value); + proxy_log(LogLevel::debug, message.c_str(), message.size()); + CHECK_RESULT(proxy_record_metric(c, 3)); + CHECK_RESULT(proxy_get_metric(c, &value)); + message = std::string("get counter = ") + std::to_string(value); + proxy_log(LogLevel::info, message.c_str(), message.size()); + CHECK_RESULT(proxy_get_metric(g, &value)); + message = std::string("get gauge = ") + std::to_string(value); + proxy_log(LogLevel::warn, message.c_str(), message.size()); + // Get on histograms is not supported. + if (proxy_get_metric(h, &value) != WasmResult::Ok) { + message = std::string("get histogram = Unsupported"); + proxy_log(LogLevel::error, message.c_str(), message.size()); + } + // Negative. + CHECK_RESULT_NOT_OK(proxy_increment_metric(c, -1)); + CHECK_RESULT(proxy_increment_metric(g, -1)); + } else if (configuration == "foreign") { + std::string function = "compress"; + char* compressed = nullptr; + size_t compressed_size = 0; + std::string argument = std::string(2000, 'a'); // super compressible. + std::string message; + CHECK_RESULT(proxy_call_foreign_function(function.data(), function.size(), argument.data(), + argument.size(), &compressed, &compressed_size)); + message = std::string("compress ") + std::to_string(argument.size()) + " -> " + + std::to_string(compressed_size); + proxy_log(LogLevel::trace, message.c_str(), message.size()); + function = "uncompress"; + char* result = nullptr; + size_t result_size = 0; + CHECK_RESULT(proxy_call_foreign_function(function.data(), function.size(), compressed, + compressed_size, &result, &result_size)); + message = std::string("uncompress ") + std::to_string(compressed_size) + " -> " + + std::to_string(result_size); + proxy_log(LogLevel::debug, message.c_str(), message.size()); + if (argument != std::string(result, result_size)) { + message = "compress mismatch "; + proxy_log(LogLevel::error, message.c_str(), message.size()); + } + ::free(result); + result = nullptr; + memset(compressed, 0, 4); // damage the compressed version. 
+ if (proxy_call_foreign_function(function.data(), function.size(), compressed, compressed_size, + &result, &result_size) != WasmResult::SerializationFailure) { + message = "bad uncompress should be an error"; + proxy_log(LogLevel::error, message.c_str(), message.size()); + } + if (compressed) { + ::free(compressed); + } + if (result) { + ::free(result); + } + } else if (configuration == "configuration") { + std::string message = "configuration"; + proxy_log(LogLevel::error, message.c_str(), message.size()); + } else if (configuration == "WASI") { + // These checks depend on Emscripten's support for `WASI` and will only + // work if invoked on a "real" Wasm VM. + int err = fprintf(stdout, "WASI write to stdout\n"); + if (err < 0) { + FAIL_NOW("stdout write should succeed"); + } + err = fprintf(stderr, "WASI write to stderr\n"); + if (err < 0) { + FAIL_NOW("stderr write should succeed"); + } + // We explicitly don't support reading from stdin + char tmp[16]; + size_t rc = fread(static_cast(tmp), 1, 16, stdin); + if (rc != 0 || errno != ENOSYS) { + FAIL_NOW("stdin read should fail. errno = " + std::to_string(errno)); + } + // No environment variables should be available + char* pathenv = getenv("PATH"); + if (pathenv != nullptr) { + FAIL_NOW("PATH environment variable should not be available"); + } +#ifndef WIN32 + // Exercise the `WASI` `fd_fdstat_get` a little bit + int tty = isatty(1); + if (errno != ENOTTY || tty != 0) { + FAIL_NOW("stdout is not a tty"); + } + tty = isatty(2); + if (errno != ENOTTY || tty != 0) { + FAIL_NOW("stderr is not a tty"); + } + tty = isatty(99); + if (errno != EBADF || tty != 0) { + FAIL_NOW("isatty errors on bad fds. errno = " + std::to_string(errno)); + } +#endif + } else if (configuration == "on_foreign") { + std::string message = "on_foreign start"; + proxy_log(LogLevel::debug, message.c_str(), message.size()); + } else { + std::string message = "on_vm_start " + configuration; + proxy_log(LogLevel::info, message.c_str(), message.size()); + } + ::free(const_cast(reinterpret_cast(configuration_ptr))); + return 1; +} + +WASM_EXPORT(uint32_t, proxy_on_configure, (uint32_t, uint32_t configuration_size)) { + const char* configuration_ptr = nullptr; + size_t size; + proxy_get_buffer_bytes(WasmBufferType::PluginConfiguration, 0, configuration_size, + &configuration_ptr, &size); + std::string configuration(configuration_ptr, size); + if (configuration == "done") { + proxy_done(); + } else { + std::string message = "on_configuration " + configuration; + proxy_log(LogLevel::info, message.c_str(), message.size()); + } + ::free(const_cast(reinterpret_cast(configuration_ptr))); + return 1; +} + +WASM_EXPORT(void, proxy_on_foreign_function, (uint32_t, uint32_t token, uint32_t data_size)) { + std::string message = + "on_foreign_function " + std::to_string(token) + " " + std::to_string(data_size); + proxy_log(LogLevel::info, message.c_str(), message.size()); +} + +WASM_EXPORT(uint32_t, proxy_on_done, (uint32_t)) { + std::string message = "on_done logging"; + proxy_log(LogLevel::info, message.c_str(), message.size()); + return 0; +} + +WASM_EXPORT(void, proxy_on_delete, (uint32_t)) { + std::string message = "on_delete logging"; + proxy_log(LogLevel::info, message.c_str(), message.size()); +} + +END_WASM_PLUGIN diff --git a/test/extensions/common/wasm/test_data/test_cpp_null_plugin.cc b/test/extensions/common/wasm/test_data/test_cpp_null_plugin.cc new file mode 100644 index 000000000000..d8665f7b28c0 --- /dev/null +++ 
b/test/extensions/common/wasm/test_data/test_cpp_null_plugin.cc @@ -0,0 +1,15 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace CommonWasmTestCpp { +NullPluginRegistry* context_registry_; +} // namespace CommonWasmTestCpp + +RegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin("CommonWasmTestCpp", []() { + return std::make_unique(CommonWasmTestCpp::context_registry_); +}); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/common/wasm/test_data/test_rust.wasm b/test/extensions/common/wasm/test_data/test_rust.wasm deleted file mode 100755 index 2de74d763a6c..000000000000 Binary files a/test/extensions/common/wasm/test_data/test_rust.wasm and /dev/null differ diff --git a/test/extensions/common/wasm/wasm_speed_test.cc b/test/extensions/common/wasm/wasm_speed_test.cc new file mode 100644 index 000000000000..af1c31a2408f --- /dev/null +++ b/test/extensions/common/wasm/wasm_speed_test.cc @@ -0,0 +1,82 @@ +#include "common/common/thread.h" +#include "common/common/thread_synchronizer.h" + +#include "extensions/common/wasm/wasm.h" + +#include "test/mocks/server/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/thread_factory_for_test.h" +#include "test/test_common/utility.h" + +#include "absl/strings/str_cat.h" +#include "absl/synchronization/notification.h" +#include "benchmark/benchmark.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "tools/cpp/runfiles/runfiles.h" + +using bazel::tools::cpp::runfiles::Runfiles; + +namespace Envoy { + +void bmWasmSpeedTest(benchmark::State& state) { + Envoy::Thread::MutexBasicLockable lock; + Envoy::Logger::Context logging_state(spdlog::level::warn, + Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); + Envoy::Logger::Registry::getLog(Envoy::Logger::Id::wasm).set_level(spdlog::level::off); + Envoy::Stats::IsolatedStoreImpl stats_store; + Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(stats_store); + Envoy::Upstream::MockClusterManager cluster_manager; + Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Envoy::Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + auto wasm = std::make_unique( + "envoy.wasm.runtime.null", "", "", "", scope, cluster_manager, *dispatcher); + + auto context = std::make_shared(wasm.get()); + Envoy::Thread::ThreadFactory& thread_factory{Envoy::Thread::threadFactoryForTest()}; + std::pair data; + int n_threads = 10; + + for (__attribute__((unused)) auto _ : state) { + auto thread_fn = [&]() { + for (int i = 0; i < 1000000; i++) { + context->getSharedData("foo", &data); + context->setSharedData("foo", "bar", 1); + } + return new uint32_t(42); + }; + std::vector threads; + for (int i = 0; i < n_threads; ++i) { + std::string name = absl::StrCat("thread", i); + threads.emplace_back(thread_factory.createThread(thread_fn, Envoy::Thread::Options{name})); + } + for (auto& thread : threads) { + thread->join(); + } + } +} + +BENCHMARK(bmWasmSpeedTest); + +} // namespace Envoy + +int main(int argc, char** argv) { + ::benchmark::Initialize(&argc, argv); + Envoy::TestEnvironment::initializeOptions(argc, argv); + // Create a Runfiles object for runfiles lookup. 
+ // https://github.com/bazelbuild/bazel/blob/master/tools/cpp/runfiles/runfiles_src.h#L32 + std::string error; + std::unique_ptr runfiles(Runfiles::Create(argv[0], &error)); + RELEASE_ASSERT(Envoy::TestEnvironment::getOptionalEnvVar("NORUNFILES").has_value() || + runfiles != nullptr, + error); + Envoy::TestEnvironment::setRunfiles(runfiles.get()); + Envoy::TestEnvironment::setEnvVar("ENVOY_IP_TEST_VERSIONS", "all", 0); + Envoy::Event::Libevent::Global::initialize(); + if (::benchmark::ReportUnrecognizedArguments(argc, argv)) { + return 1; + } + ::benchmark::RunSpecifiedBenchmarks(); + return 0; +} diff --git a/test/extensions/common/wasm/wasm_test.cc b/test/extensions/common/wasm/wasm_test.cc new file mode 100644 index 000000000000..f17d0ca859d6 --- /dev/null +++ b/test/extensions/common/wasm/wasm_test.cc @@ -0,0 +1,1062 @@ +#include "envoy/server/lifecycle_notifier.h" + +#include "common/common/hex.h" +#include "common/event/dispatcher_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/common/wasm/wasm.h" + +#include "test/mocks/server/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/utility.h" +#include "test/test_common/wasm_base.h" + +#include "absl/types/optional.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "openssl/bytestring.h" +#include "openssl/hmac.h" +#include "openssl/sha.h" + +using Envoy::Server::ServerLifecycleNotifier; +using StageCallbackWithCompletion = + Envoy::Server::ServerLifecycleNotifier::StageCallbackWithCompletion; +using testing::Eq; +using testing::Return; + +namespace Envoy { + +namespace Server { +class MockServerLifecycleNotifier2 : public ServerLifecycleNotifier { +public: + MockServerLifecycleNotifier2() = default; + ~MockServerLifecycleNotifier2() override = default; + + using ServerLifecycleNotifier::registerCallback; + + ServerLifecycleNotifier::HandlePtr + registerCallback(Stage stage, StageCallbackWithCompletion callback) override { + return registerCallback2(stage, callback); + } + + MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, (Stage, StageCallback)); + MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback2, + (Stage stage, StageCallbackWithCompletion callback)); +}; +} // namespace Server + +namespace Extensions { +namespace Common { +namespace Wasm { + +REGISTER_WASM_EXTENSION(EnvoyWasm); + +std::string sha256(absl::string_view data) { + std::vector digest(SHA256_DIGEST_LENGTH); + EVP_MD_CTX* ctx(EVP_MD_CTX_new()); + auto rc = EVP_DigestInit(ctx, EVP_sha256()); + RELEASE_ASSERT(rc == 1, "Failed to init digest context"); + rc = EVP_DigestUpdate(ctx, data.data(), data.size()); + RELEASE_ASSERT(rc == 1, "Failed to update digest"); + rc = EVP_DigestFinal(ctx, digest.data(), nullptr); + RELEASE_ASSERT(rc == 1, "Failed to finalize digest"); + EVP_MD_CTX_free(ctx); + return std::string(reinterpret_cast(&digest[0]), digest.size()); +} + +class TestContext : public ::Envoy::Extensions::Common::Wasm::Context { +public: + using ::Envoy::Extensions::Common::Wasm::Context::Context; + ~TestContext() override = default; + using ::Envoy::Extensions::Common::Wasm::Context::log; + proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override { + std::cerr << std::string(message) << "\n"; + log_(static_cast(level), message); + Extensions::Common::Wasm::Context::log(static_cast(level), message); + return proxy_wasm::WasmResult::Ok; + } + MOCK_METHOD2(log_, 
void(spdlog::level::level_enum level, absl::string_view message)); +}; + +class WasmCommonTest : public testing::TestWithParam { +public: + void SetUp() override { // NOLINT(readability-identifier-naming) + Logger::Registry::getLog(Logger::Id::wasm).set_level(spdlog::level::debug); + clearCodeCacheForTesting(); + } +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto test_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8", +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm", +#endif + "null"); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmCommonTest, test_values); + +TEST_P(WasmCommonTest, EnvoyWasm) { + auto envoy_wasm = std::make_unique(); + envoy_wasm->initialize(); + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto plugin = std::make_shared( + "", "", "", GetParam(), "", false, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, + local_info, nullptr); + auto wasm = std::make_shared( + std::make_unique(absl::StrCat("envoy.wasm.runtime.", GetParam()), "", + "vm_configuration", "", scope, cluster_manager, *dispatcher)); + auto wasm_base = std::dynamic_pointer_cast(wasm); + wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::UnableToCreateVM); + EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::UnableToCreateVM); + wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::UnableToCloneVM); + EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::UnableToCloneVM); + wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::MissingFunction); + EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::MissingFunction); + wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::UnableToInitializeCode); + EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::UnableToInitializeCode); + wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::StartFailed); + EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::StartFailed); + wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::ConfigureFailed); + EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::ConfigureFailed); + wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::RuntimeError); + EXPECT_EQ(toWasmEvent(wasm_base), EnvoyWasm::WasmEvent::RuntimeError); + + auto root_context = static_cast(wasm->wasm()->createRootContext(plugin)); + uint32_t grpc_call_token1 = root_context->nextGrpcCallToken(); + uint32_t grpc_call_token2 = root_context->nextGrpcCallToken(); + EXPECT_NE(grpc_call_token1, grpc_call_token2); + root_context->setNextGrpcTokenForTesting(0); // Rollover. + EXPECT_EQ(root_context->nextGrpcCallToken(), 1); + + uint32_t grpc_stream_token1 = root_context->nextGrpcStreamToken(); + uint32_t grpc_stream_token2 = root_context->nextGrpcStreamToken(); + EXPECT_NE(grpc_stream_token1, grpc_stream_token2); + root_context->setNextGrpcTokenForTesting(0xFFFFFFFF); // Rollover. + EXPECT_EQ(root_context->nextGrpcStreamToken(), 2); + + uint32_t http_call_token1 = root_context->nextHttpCallToken(); + uint32_t http_call_token2 = root_context->nextHttpCallToken(); + EXPECT_NE(http_call_token1, http_call_token2); + root_context->setNextHttpCallTokenForTesting(0); // Rollover. 
+ EXPECT_EQ(root_context->nextHttpCallToken(), 1); + + EXPECT_EQ(root_context->getBuffer(WasmBufferType::HttpCallResponseBody), nullptr); + EXPECT_EQ(root_context->getBuffer(WasmBufferType::PluginConfiguration), nullptr); + + delete root_context; + + WasmStatePrototype wasm_state_prototype(true, WasmType::Bytes, "", + StreamInfo::FilterState::LifeSpan::FilterChain); + auto wasm_state = std::make_unique(wasm_state_prototype); + Protobuf::Arena arena; + EXPECT_EQ(wasm_state->exprValue(&arena, true).MessageOrDie(), nullptr); + wasm_state->setValue("foo"); + auto any = wasm_state->serializeAsProto(); + EXPECT_TRUE(static_cast(any.get())->Is()); +} + +TEST_P(WasmCommonTest, Logging) { + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "logging"; + auto plugin_configuration = "configure-test"; + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. + code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_shared( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + EXPECT_NE(wasm->buildVersion(), ""); + EXPECT_NE(std::unique_ptr(wasm->createContext(plugin)), nullptr); + wasm->setCreateContextForTesting( + [](Wasm*, const std::shared_ptr&) -> ContextBase* { return nullptr; }, + [](Wasm*, const std::shared_ptr&) -> ContextBase* { return nullptr; }); + EXPECT_EQ(std::unique_ptr(wasm->createContext(plugin)), nullptr); + auto wasm_weak = std::weak_ptr(wasm); + auto wasm_handle = std::make_shared(std::move(wasm)); + EXPECT_TRUE(wasm_weak.lock()->initialize(code, false)); + auto thread_local_wasm = std::make_shared(wasm_handle, *dispatcher); + thread_local_wasm.reset(); + + auto wasm_lock = wasm_weak.lock(); + wasm_lock->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, + log_(spdlog::level::info, Eq("on_configuration configure-test"))); + EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq("test trace logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq("test debug logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq("test warn logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq("test error logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("log level is 1"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_done logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_delete logging"))); + return root_context; + }); + + auto root_context = static_cast(wasm_weak.lock()->start(plugin)); + EXPECT_EQ(root_context->getConfiguration(), 
"logging"); + if (GetParam() != "null") { + EXPECT_TRUE(root_context->validateConfiguration("", plugin)); + } + wasm_weak.lock()->configure(root_context, plugin); + EXPECT_EQ(root_context->getStatus().first, 0); + + wasm_handle.reset(); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + // This will fault on nullptr if wasm has been deleted. + plugin->plugin_configuration_ = "done"; + wasm_weak.lock()->configure(root_context, plugin); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + dispatcher->clearDeferredDeleteList(); +} + +TEST_P(WasmCommonTest, BadSignature) { + if (GetParam() != "v8") { + return; + } + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = ""; + auto plugin_configuration = ""; + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/common/wasm/test_data/bad_signature_cpp.wasm")); + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_FALSE(wasm->initialize(code, false)); + EXPECT_TRUE(wasm->isFailed()); +} + +TEST_P(WasmCommonTest, Segv) { + if (GetParam() != "v8") { + return; + } + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "segv"; + auto plugin_configuration = ""; + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm")); + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_TRUE(wasm->initialize(code, false)); + TestContext* root_context = nullptr; + wasm->setCreateContextForTesting( + nullptr, [&root_context](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq("before badptr"))); + return root_context; + }); + wasm->start(plugin); + EXPECT_TRUE(wasm->isFailed()); + + // Subsequent calls should be NOOP(s). 
+ + root_context->onResolveDns(0, Envoy::Network::DnsResolver::ResolutionStatus::Success, {}); + Envoy::Stats::MockMetricSnapshot stats_snapshot; + root_context->onStatsUpdate(stats_snapshot); +} + +TEST_P(WasmCommonTest, DivByZero) { + if (GetParam() != "v8") { + return; + } + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "divbyzero"; + auto plugin_configuration = ""; + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm")); + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + auto context = std::make_unique(wasm.get()); + EXPECT_TRUE(wasm->initialize(code, false)); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq("before div by zero"))); + return root_context; + }); + wasm->start(plugin); +} + +TEST_P(WasmCommonTest, EmscriptenVersion) { + if (GetParam() != "v8") { + return; + } + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = ""; + auto plugin_configuration = ""; + const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm")); + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + auto context = std::make_unique(wasm.get()); + EXPECT_TRUE(wasm->initialize(code, false)); + + uint32_t major = 9, minor = 9, abi_major = 9, abi_minor = 9; + EXPECT_TRUE(wasm->getEmscriptenVersion(&major, &minor, &abi_major, &abi_minor)); + EXPECT_EQ(major, 0); + EXPECT_LE(minor, 3); + // Up to (at least) emsdk 1.39.6. 
+ EXPECT_EQ(abi_major, 0); + EXPECT_LE(abi_minor, 20); +} + +TEST_P(WasmCommonTest, IntrinsicGlobals) { + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "globals"; + auto plugin_configuration = ""; + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. + code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + EXPECT_TRUE(wasm->initialize(code, false)); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq("NaN nan"))); + EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq("inf inf"))).Times(3); + return root_context; + }); + wasm->start(plugin); +} + +TEST_P(WasmCommonTest, Utilities) { + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "utilities"; + auto plugin_configuration = ""; + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. 
+ code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + EXPECT_TRUE(wasm->initialize(code, false)); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_vm_start utilities"))); + return root_context; + }); + wasm->start(plugin); + + // Context + auto context = std::make_unique(); + context->error("error"); + + // Buffer + Extensions::Common::Wasm::Buffer buffer; + Extensions::Common::Wasm::Buffer const_buffer; + Extensions::Common::Wasm::Buffer string_buffer; + auto buffer_impl = std::make_unique("contents"); + buffer.set(buffer_impl.get()); + const_buffer.set(static_cast(buffer_impl.get())); + string_buffer.set("contents"); + std::string data("contents"); + if (GetParam() != "null") { + EXPECT_EQ(WasmResult::InvalidMemoryAccess, + buffer.copyTo(wasm.get(), 0, 1 << 30 /* length too long */, 0, 0)); + EXPECT_EQ(WasmResult::InvalidMemoryAccess, + buffer.copyTo(wasm.get(), 0, 1, 1 << 30 /* bad pointer location */, 0)); + EXPECT_EQ(WasmResult::InvalidMemoryAccess, + buffer.copyTo(wasm.get(), 0, 1, 0, 1 << 30 /* bad size location */)); + EXPECT_EQ(WasmResult::BadArgument, buffer.copyFrom(0, 1, data)); + EXPECT_EQ(WasmResult::BadArgument, buffer.copyFrom(1, 1, data)); + EXPECT_EQ(WasmResult::BadArgument, const_buffer.copyFrom(1, 1, data)); + EXPECT_EQ(WasmResult::BadArgument, string_buffer.copyFrom(1, 1, data)); + } +} + +TEST_P(WasmCommonTest, Stats) { + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "stats"; + auto plugin_configuration = ""; + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. 
+ code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto vm_key = proxy_wasm::makeVmKey(vm_id, vm_configuration, code); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + EXPECT_TRUE(wasm->initialize(code, false)); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq("get counter = 1"))); + EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq("get counter = 2"))); + // recordMetric on a Counter is the same as increment. + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("get counter = 5"))); + EXPECT_CALL(*root_context, log_(spdlog::level::warn, Eq("get gauge = 2"))); + // Get is not supported on histograms. + EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq("get histogram = Unsupported"))); + return root_context; + }); + wasm->start(plugin); +} + +TEST_P(WasmCommonTest, Foreign) { + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "foreign"; + auto vm_key = ""; + auto plugin_configuration = ""; + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. 
+ code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm->initialize(code, false)); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq("compress 2000 -> 23"))); + EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq("uncompress 23 -> 2000"))); + return root_context; + }); + wasm->start(plugin); +} + +TEST_P(WasmCommonTest, OnForeign) { + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "on_foreign"; + auto vm_key = ""; + auto plugin_configuration = ""; + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. + code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm->initialize(code, false)); + TestContext* test_context = nullptr; + wasm->setCreateContextForTesting( + nullptr, [&test_context](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto context = new TestContext(wasm, plugin); + EXPECT_CALL(*context, log_(spdlog::level::debug, Eq("on_foreign start"))); + EXPECT_CALL(*context, log_(spdlog::level::info, Eq("on_foreign_function 7 13"))); + test_context = context; + return context; + }); + wasm->start(plugin); + test_context->onForeignFunction(7, 13); +} + +TEST_P(WasmCommonTest, WASI) { + if (GetParam() == "null") { + // This test has no meaning unless it is invoked by actual Wasm code + return; + } + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + Upstream::MockClusterManager cluster_manager; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "WASI"; + auto vm_key = ""; + auto plugin_configuration = ""; + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + auto wasm = std::make_unique( + absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, vm_key, scope, + cluster_manager, *dispatcher); + EXPECT_NE(wasm, nullptr); + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. 
+ code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + EXPECT_TRUE(wasm->initialize(code, false)); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("WASI write to stdout"))).Times(1); + EXPECT_CALL(*root_context, log_(spdlog::level::err, Eq("WASI write to stderr"))).Times(1); + return root_context; + }); + wasm->start(plugin); +} + +TEST_P(WasmCommonTest, VmCache) { + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + NiceMock cluster_manager; + NiceMock init_manager; + NiceMock lifecycle_notifier; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider; + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "vm_cache"; + auto plugin_configuration = "init"; + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + + ServerLifecycleNotifier::StageCallbackWithCompletion lifecycle_callback; + EXPECT_CALL(lifecycle_notifier, registerCallback2(_, _)) + .WillRepeatedly( + Invoke([&](ServerLifecycleNotifier::Stage, + StageCallbackWithCompletion callback) -> ServerLifecycleNotifier::HandlePtr { + lifecycle_callback = callback; + return nullptr; + })); + + VmConfig vm_config; + vm_config.set_runtime(absl::StrCat("envoy.wasm.runtime.", GetParam())); + ProtobufWkt::StringValue vm_configuration_string; + vm_configuration_string.set_value(vm_configuration); + vm_config.mutable_configuration()->PackFrom(vm_configuration_string); + std::string code; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + } else { + // The name of the Null VM plugin. 
+ code = "CommonWasmTestCpp"; + } + EXPECT_FALSE(code.empty()); + vm_config.mutable_code()->mutable_local()->set_inline_bytes(code); + WasmHandleSharedPtr wasm_handle; + createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api, + lifecycle_notifier, remote_data_provider, + [&wasm_handle](const WasmHandleSharedPtr& w) { wasm_handle = w; }); + EXPECT_NE(wasm_handle, nullptr); + Event::PostCb post_cb = [] {}; + lifecycle_callback(post_cb); + + WasmHandleSharedPtr wasm_handle2; + createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api, + lifecycle_notifier, remote_data_provider, + [&wasm_handle2](const WasmHandleSharedPtr& w) { wasm_handle2 = w; }); + EXPECT_NE(wasm_handle2, nullptr); + EXPECT_EQ(wasm_handle, wasm_handle2); + + auto wasm_handle_local = getOrCreateThreadLocalWasm( + wasm_handle, plugin, + [&dispatcher](const WasmHandleBaseSharedPtr& base_wasm) -> WasmHandleBaseSharedPtr { + auto wasm = + std::make_shared(std::static_pointer_cast(base_wasm), *dispatcher); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_vm_start vm_cache"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_configuration init"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_done logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_delete logging"))); + return root_context; + }); + return std::make_shared(wasm); + }); + wasm_handle.reset(); + wasm_handle2.reset(); + + auto wasm = wasm_handle_local->wasm().get(); + wasm_handle_local.reset(); + + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + + plugin->plugin_configuration_ = "done"; + wasm->configure(wasm->getContext(1), plugin); + plugin.reset(); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + dispatcher->clearDeferredDeleteList(); + + proxy_wasm::clearWasmCachesForTesting(); +} + +TEST_P(WasmCommonTest, RemoteCode) { + if (GetParam() == "null") { + return; + } + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + NiceMock cluster_manager; + NiceMock init_manager; + NiceMock lifecycle_notifier; + Init::ExpectableWatcherImpl init_watcher; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider; + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "vm_cache"; + auto plugin_configuration = "done"; + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + + std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + + VmConfig vm_config; + vm_config.set_runtime(absl::StrCat("envoy.wasm.runtime.", GetParam())); + ProtobufWkt::BytesValue vm_configuration_bytes; + vm_configuration_bytes.set_value(vm_configuration); + vm_config.mutable_configuration()->PackFrom(vm_configuration_bytes); + std::string sha256 = Extensions::Common::Wasm::sha256(code); + std::string sha256Hex = + Hex::encode(reinterpret_cast(&*sha256.begin()), sha256.size()); + 
vm_config.mutable_code()->mutable_remote()->set_sha256(sha256Hex); + vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_uri( + "http://example.com/test.wasm"); + vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_cluster("example_com"); + vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->mutable_timeout()->set_seconds(5); + WasmHandleSharedPtr wasm_handle; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager, httpAsyncClientForCluster("example_com")) + .WillOnce(ReturnRef(cluster_manager.async_client_)); + EXPECT_CALL(cluster_manager.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return nullptr; + })); + + Init::TargetHandlePtr init_target_handle; + EXPECT_CALL(init_manager, add(_)).WillOnce(Invoke([&](const Init::Target& target) { + init_target_handle = target.createHandle("test"); + })); + createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api, + lifecycle_notifier, remote_data_provider, + [&wasm_handle](const WasmHandleSharedPtr& w) { wasm_handle = w; }); + + EXPECT_CALL(init_watcher, ready()); + init_target_handle->initialize(init_watcher); + + EXPECT_NE(wasm_handle, nullptr); + + auto wasm_handle_local = getOrCreateThreadLocalWasm( + wasm_handle, plugin, + [&dispatcher](const WasmHandleBaseSharedPtr& base_wasm) -> WasmHandleBaseSharedPtr { + auto wasm = + std::make_shared(std::static_pointer_cast(base_wasm), *dispatcher); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_vm_start vm_cache"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_done logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_delete logging"))); + return root_context; + }); + return std::make_shared(wasm); + }); + wasm_handle.reset(); + + auto wasm = wasm_handle_local->wasm().get(); + wasm_handle_local.reset(); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + wasm->configure(wasm->getContext(1), plugin); + plugin.reset(); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + dispatcher->clearDeferredDeleteList(); +} + +TEST_P(WasmCommonTest, RemoteCodeMultipleRetry) { + if (GetParam() == "null") { + return; + } + Stats::IsolatedStoreImpl stats_store; + Api::ApiPtr api = Api::createApiForTest(stats_store); + NiceMock cluster_manager; + NiceMock init_manager; + NiceMock lifecycle_notifier; + Init::ExpectableWatcherImpl init_watcher; + Event::DispatcherPtr dispatcher(api->allocateDispatcher("wasm_test")); + Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider; + auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); + NiceMock local_info; + auto name = ""; + auto root_id = ""; + auto vm_id = ""; + auto vm_configuration = "vm_cache"; + auto plugin_configuration = "done"; + auto plugin = std::make_shared( + name, root_id, vm_id, GetParam(), plugin_configuration, false, + envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); + + std::string code = 
TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + absl::StrCat("{{ test_rundir }}/test/extensions/common/wasm/test_data/test_cpp.wasm"))); + + VmConfig vm_config; + vm_config.set_runtime(absl::StrCat("envoy.wasm.runtime.", GetParam())); + ProtobufWkt::StringValue vm_configuration_string; + vm_configuration_string.set_value(vm_configuration); + vm_config.mutable_configuration()->PackFrom(vm_configuration_string); + std::string sha256 = Extensions::Common::Wasm::sha256(code); + std::string sha256Hex = + Hex::encode(reinterpret_cast(&*sha256.begin()), sha256.size()); + int num_retries = 3; + vm_config.mutable_code()->mutable_remote()->set_sha256(sha256Hex); + vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_uri( + "http://example.com/test.wasm"); + vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->set_cluster("example_com"); + vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->mutable_timeout()->set_seconds(5); + vm_config.mutable_code() + ->mutable_remote() + ->mutable_retry_policy() + ->mutable_num_retries() + ->set_value(num_retries); + WasmHandleSharedPtr wasm_handle; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager, httpAsyncClientForCluster("example_com")) + .WillRepeatedly(ReturnRef(cluster_manager.async_client_)); + EXPECT_CALL(cluster_manager.async_client_, send_(_, _, _)) + .WillRepeatedly(Invoke([&, retry = num_retries]( + Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) mutable + -> Http::AsyncClient::Request* { + if (retry-- == 0) { + Http::ResponseMessagePtr response(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}})); + callbacks.onSuccess(request, std::move(response)); + return nullptr; + } else { + Http::ResponseMessagePtr response(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return nullptr; + } + })); + + Init::TargetHandlePtr init_target_handle; + EXPECT_CALL(init_manager, add(_)).WillOnce(Invoke([&](const Init::Target& target) { + init_target_handle = target.createHandle("test"); + })); + createWasm(vm_config, plugin, scope, cluster_manager, init_manager, *dispatcher, *api, + lifecycle_notifier, remote_data_provider, + [&wasm_handle](const WasmHandleSharedPtr& w) { wasm_handle = w; }); + + EXPECT_CALL(init_watcher, ready()); + init_target_handle->initialize(init_watcher); + + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_NE(wasm_handle, nullptr); + + auto wasm_handle_local = getOrCreateThreadLocalWasm( + wasm_handle, plugin, + [&dispatcher](const WasmHandleBaseSharedPtr& base_wasm) -> WasmHandleBaseSharedPtr { + auto wasm = + std::make_shared(std::static_pointer_cast(base_wasm), *dispatcher); + wasm->setCreateContextForTesting( + nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + auto root_context = new TestContext(wasm, plugin); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_vm_start vm_cache"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_done logging"))); + EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_delete logging"))); + return root_context; + }); + return std::make_shared(wasm); + }); + wasm_handle.reset(); + + auto wasm = wasm_handle_local->wasm().get(); + wasm_handle_local.reset(); + + 
dispatcher->run(Event::Dispatcher::RunType::NonBlock);
+  wasm->configure(wasm->getContext(1), plugin);
+  plugin.reset();
+  dispatcher->run(Event::Dispatcher::RunType::NonBlock);
+  dispatcher->clearDeferredDeleteList();
+}
+
+class WasmCommonContextTest
+    : public Common::Wasm::WasmTestBase<testing::TestWithParam<std::string>> {
+public:
+  WasmCommonContextTest() = default;
+
+  void setup(const std::string& code, std::string vm_configuration, std::string root_id = "") {
+    setupBase(
+        GetParam(), code,
+        [](Wasm* wasm, const std::shared_ptr<Plugin>& plugin) -> ContextBase* {
+          return new TestContext(wasm, plugin);
+        },
+        root_id, vm_configuration);
+  }
+  void setupContext() {
+    context_ = std::make_unique<TestContext>(wasm_->wasm().get(), root_context_->id(), plugin_);
+    context_->onCreate();
+  }
+
+  TestContext& rootContext() { return *static_cast<TestContext*>(root_context_); }
+  TestContext& context() { return *context_; }
+
+  std::unique_ptr<TestContext> context_;
+};
+
+INSTANTIATE_TEST_SUITE_P(Runtimes, WasmCommonContextTest, test_values);
+
+TEST_P(WasmCommonContextTest, OnDnsResolve) {
+  std::string code;
+  if (GetParam() != "null") {
+    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(absl::StrCat(
+        "{{ test_rundir }}/test/extensions/common/wasm/test_data/test_context_cpp.wasm")));
+  } else {
+    // The name of the Null VM plugin.
+    code = "CommonWasmTestContextCpp";
+  }
+  EXPECT_FALSE(code.empty());
+
+  std::shared_ptr<Network::MockDnsResolver> dns_resolver(new Network::MockDnsResolver());
+  EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillRepeatedly(Return(dns_resolver));
+  Network::DnsResolver::ResolveCb dns_callback;
+  Network::MockActiveDnsQuery active_dns_query;
+  EXPECT_CALL(*dns_resolver, resolve(_, _, _))
+      .WillRepeatedly(
+          testing::DoAll(testing::SaveArg<2>(&dns_callback), Return(&active_dns_query)));
+
+  setup(code, "context");
+  setupContext();
+  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq("TestRootContext::onResolveDns 1")));
+  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq("TestRootContext::onResolveDns 2")));
+  EXPECT_CALL(rootContext(), log_(spdlog::level::info,
+                                  Eq("TestRootContext::onResolveDns dns 1001 192.168.1.101:0")));
+  EXPECT_CALL(rootContext(), log_(spdlog::level::info,
+                                  Eq("TestRootContext::onResolveDns dns 1001 192.168.1.102:0")));
+  EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq("TestRootContext::onDone 1")));
+
+  dns_callback(
+      Network::DnsResolver::ResolutionStatus::Success,
+      TestUtility::makeDnsResponse({"192.168.1.101", "192.168.1.102"}, std::chrono::seconds(1001)));
+
+  rootContext().onResolveDns(1 /* token */, Envoy::Network::DnsResolver::ResolutionStatus::Failure,
+                             {});
+  if (GetParam() == "null") {
+    rootContext().onTick(0);
+  }
+  if (GetParam() == "v8") {
+    rootContext().onQueueReady(0);
+  }
+  // Wait until the Wasm is destroyed; the late callback should then do nothing.
+  deferred_runner_.setFunction([dns_callback] {
+    dns_callback(Network::DnsResolver::ResolutionStatus::Success,
+                 TestUtility::makeDnsResponse({"192.168.1.101", "192.168.1.102"},
+                                              std::chrono::seconds(1001)));
+  });
+}
+
+TEST_P(WasmCommonContextTest, EmptyContext) {
+  std::string code;
+  if (GetParam() != "null") {
+    code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(absl::StrCat(
+        "{{ test_rundir }}/test/extensions/common/wasm/test_data/test_context_cpp.wasm")));
+  } else {
+    // The name of the Null VM plugin.
+ code = "CommonWasmTestContextCpp"; + } + EXPECT_FALSE(code.empty()); + + setup(code, "context", "empty"); + setupContext(); + + root_context_->onResolveDns(0, Envoy::Network::DnsResolver::ResolutionStatus::Success, {}); + NiceMock stats_snapshot; + root_context_->onStatsUpdate(stats_snapshot); + root_context_->validateConfiguration("", plugin_); +} + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/common/wasm/wasm_vm_test.cc b/test/extensions/common/wasm/wasm_vm_test.cc index b07b684a0ba4..af7816f20d0a 100644 --- a/test/extensions/common/wasm/wasm_vm_test.cc +++ b/test/extensions/common/wasm/wasm_vm_test.cc @@ -2,18 +2,21 @@ #include "common/stats/isolated_store_impl.h" -#include "extensions/common/wasm/null/null_vm_plugin.h" #include "extensions/common/wasm/wasm_vm.h" #include "test/test_common/environment.h" -#include "test/test_common/registry.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "include/proxy-wasm/null_vm_plugin.h" -using testing::HasSubstr; -using testing::Return; +using proxy_wasm::Cloneable; // NOLINT +using proxy_wasm::WasmCallVoid; // NOLINT +using proxy_wasm::WasmCallWord; // NOLINT +using proxy_wasm::Word; // NOLINT +using testing::HasSubstr; // NOLINT +using testing::Return; // NOLINT namespace Envoy { namespace Extensions { @@ -21,7 +24,7 @@ namespace Common { namespace Wasm { namespace { -class TestNullVmPlugin : public Null::NullVmPlugin { +class TestNullVmPlugin : public proxy_wasm::NullVmPlugin { public: TestNullVmPlugin() = default; ~TestNullVmPlugin() override = default; @@ -29,53 +32,41 @@ class TestNullVmPlugin : public Null::NullVmPlugin { MOCK_METHOD(void, start, ()); }; -class PluginFactory : public Null::NullVmPluginFactory { -public: - PluginFactory() = default; - - std::string name() const override { return "test_null_vm_plugin"; } - std::unique_ptr create() const override; -}; - TestNullVmPlugin* test_null_vm_plugin_ = nullptr; -std::unique_ptr PluginFactory::create() const { - auto result = std::make_unique(); - test_null_vm_plugin_ = result.get(); - return result; -} +proxy_wasm::RegisterNullVmPluginFactory register_test_null_vm_plugin("test_null_vm_plugin", []() { + auto plugin = std::make_unique(); + test_null_vm_plugin_ = plugin.get(); + return plugin; +}); class BaseVmTest : public testing::Test { public: - BaseVmTest() - : registration_(factory_), scope_(Stats::ScopeSharedPtr(stats_store.createScope("wasm."))) {} + BaseVmTest() : scope_(Stats::ScopeSharedPtr(stats_store.createScope("wasm."))) {} protected: - PluginFactory factory_; - Envoy::Registry::InjectFactory registration_; Stats::IsolatedStoreImpl stats_store; Stats::ScopeSharedPtr scope_; }; -TEST_F(BaseVmTest, NoRuntime) { - EXPECT_THROW_WITH_MESSAGE(createWasmVm("", scope_), WasmVmException, - "Failed to create WASM VM with unspecified runtime."); -} +TEST_F(BaseVmTest, NoRuntime) { EXPECT_EQ(createWasmVm("", scope_), nullptr); } TEST_F(BaseVmTest, BadRuntime) { - EXPECT_THROW_WITH_MESSAGE(createWasmVm("envoy.wasm.runtime.invalid", scope_), WasmVmException, - "Failed to create WASM VM using envoy.wasm.runtime.invalid runtime. 
" - "Envoy was compiled without support for it."); + EXPECT_EQ(createWasmVm("envoy.wasm.runtime.invalid", scope_), nullptr); } TEST_F(BaseVmTest, NullVmStartup) { auto wasm_vm = createWasmVm("envoy.wasm.runtime.null", scope_); EXPECT_TRUE(wasm_vm != nullptr); - EXPECT_TRUE(wasm_vm->runtime() == "envoy.wasm.runtime.null"); + EXPECT_TRUE(wasm_vm->runtime() == "null"); EXPECT_TRUE(wasm_vm->cloneable() == Cloneable::InstantiatedModule); auto wasm_vm_clone = wasm_vm->clone(); EXPECT_TRUE(wasm_vm_clone != nullptr); EXPECT_TRUE(wasm_vm->getCustomSection("user").empty()); + EXPECT_EQ(getEnvoyWasmIntegration(*wasm_vm).runtime(), "envoy.wasm.runtime.null"); + std::function f; + EXPECT_FALSE( + getEnvoyWasmIntegration(*wasm_vm).getNullVmFunction("bad_function", false, 0, nullptr, &f)); } TEST_F(BaseVmTest, NullVmMemory) { @@ -113,6 +104,7 @@ class MockHostFunctions { MOCK_METHOD(uint32_t, random, (), (const)); }; +#if defined(ENVOY_WASM_V8) MockHostFunctions* g_host_functions; void pong(void*, Word value) { g_host_functions->pong(convertWordToUint32(value)); } @@ -132,7 +124,9 @@ class WasmVmTest : public testing::TestWithParam { public: WasmVmTest() : scope_(Stats::ScopeSharedPtr(stats_store.createScope("wasm."))) {} - void SetUp() override { g_host_functions = new MockHostFunctions(); } + void SetUp() override { // NOLINT(readability-identifier-naming) + g_host_functions = new MockHostFunctions(); + } void TearDown() override { delete g_host_functions; } protected: @@ -150,16 +144,9 @@ TEST_P(WasmVmTest, V8BadCode) { } TEST_P(WasmVmTest, V8Code) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. - if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); - EXPECT_TRUE(wasm_vm->runtime() == "envoy.wasm.runtime.v8"); + EXPECT_TRUE(wasm_vm->runtime() == "v8"); auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/common/wasm/test_data/test_rust.wasm")); @@ -177,13 +164,6 @@ TEST_P(WasmVmTest, V8Code) { } TEST_P(WasmVmTest, V8BadHostFunctions) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. 
- if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -192,33 +172,19 @@ TEST_P(WasmVmTest, V8BadHostFunctions) { EXPECT_TRUE(wasm_vm->load(code, GetParam())); wasm_vm->registerCallback("env", "random", &random, CONVERT_FUNCTION_WORD_TO_UINT32(random)); - EXPECT_THROW_WITH_MESSAGE(wasm_vm->link("test"), WasmVmException, - "Failed to load WASM module due to a missing import: env.pong"); + EXPECT_FALSE(wasm_vm->link("test")); wasm_vm->registerCallback("env", "pong", &bad_pong1, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong1)); - EXPECT_THROW_WITH_MESSAGE(wasm_vm->link("test"), WasmVmException, - "Failed to load WASM module due to an import type mismatch: env.pong, " - "want: i32 -> void, but host exports: void -> void"); + EXPECT_FALSE(wasm_vm->link("test")); wasm_vm->registerCallback("env", "pong", &bad_pong2, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong2)); - EXPECT_THROW_WITH_MESSAGE(wasm_vm->link("test"), WasmVmException, - "Failed to load WASM module due to an import type mismatch: env.pong, " - "want: i32 -> void, but host exports: i32 -> i32"); + EXPECT_FALSE(wasm_vm->link("test")); wasm_vm->registerCallback("env", "pong", &bad_pong3, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong3)); - EXPECT_THROW_WITH_MESSAGE(wasm_vm->link("test"), WasmVmException, - "Failed to load WASM module due to an import type mismatch: env.pong, " - "want: i32 -> void, but host exports: f64 -> f64"); + EXPECT_FALSE(wasm_vm->link("test")); } TEST_P(WasmVmTest, V8BadModuleFunctions) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. - if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -239,21 +205,14 @@ TEST_P(WasmVmTest, V8BadModuleFunctions) { wasm_vm->getFunction("nonexistent", &sum); EXPECT_TRUE(sum == nullptr); - EXPECT_THROW_WITH_MESSAGE(wasm_vm->getFunction("ping", &sum), WasmVmException, - "Bad function signature for: ping"); + wasm_vm->getFunction("ping", &sum); + EXPECT_TRUE(wasm_vm->isFailed()); - EXPECT_THROW_WITH_MESSAGE(wasm_vm->getFunction("sum", &ping), WasmVmException, - "Bad function signature for: sum"); + wasm_vm->getFunction("sum", &ping); + EXPECT_TRUE(wasm_vm->isFailed()); } TEST_P(WasmVmTest, V8FunctionCalls) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. 
- if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -282,23 +241,16 @@ TEST_P(WasmVmTest, V8FunctionCalls) { WasmCallWord<2> div; wasm_vm->getFunction("div", &div); - EXPECT_THROW_WITH_MESSAGE(div(nullptr /* no context */, 42, 0), WasmException, - "Function: div failed: Uncaught RuntimeError: unreachable"); + div(nullptr /* no context */, 42, 0); + EXPECT_TRUE(wasm_vm->isFailed()); WasmCallVoid<0> abort; wasm_vm->getFunction("abort", &abort); - EXPECT_THROW_WITH_MESSAGE(abort(nullptr /* no context */), WasmException, - "Function: abort failed: Uncaught RuntimeError: unreachable"); + abort(nullptr /* no context */); + EXPECT_TRUE(wasm_vm->isFailed()); } TEST_P(WasmVmTest, V8Memory) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. - if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -331,6 +283,7 @@ TEST_P(WasmVmTest, V8Memory) { EXPECT_FALSE(wasm_vm->setWord(1024 * 1024 /* out of bound */, 1)); EXPECT_FALSE(wasm_vm->getWord(1024 * 1024 /* out of bound */, &word)); } +#endif } // namespace } // namespace Wasm diff --git a/test/extensions/filters/common/expr/BUILD b/test/extensions/filters/common/expr/BUILD index d41dcb194806..ac7dfeffce15 100644 --- a/test/extensions/filters/common/expr/BUILD +++ b/test/extensions/filters/common/expr/BUILD @@ -18,6 +18,8 @@ envoy_extension_cc_test( srcs = ["context_test.cc"], extension_name = "envoy.filters.http.rbac", deps = [ + "//source/common/router:string_accessor_lib", + "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/common/expr:context_lib", "//test/mocks/ssl:ssl_mocks", "//test/mocks/stream_info:stream_info_mocks", diff --git a/test/extensions/filters/common/expr/context_test.cc b/test/extensions/filters/common/expr/context_test.cc index be7fa3b772f1..88f4c980d62d 100644 --- a/test/extensions/filters/common/expr/context_test.cc +++ b/test/extensions/filters/common/expr/context_test.cc @@ -1,4 +1,6 @@ #include "common/network/utility.h" +#include "common/router/string_accessor_impl.h" +#include "common/stream_info/filter_state_impl.h" #include "extensions/filters/common/expr/context.h" @@ -23,7 +25,8 @@ namespace { constexpr absl::string_view Undefined = "undefined"; TEST(Context, EmptyHeadersAttributes) { - HeadersWrapper headers(nullptr); + Protobuf::Arena arena; + HeadersWrapper headers(arena, nullptr); auto header = headers[CelValue::CreateStringView(Referer)]; EXPECT_FALSE(header.has_value()); EXPECT_EQ(0, headers.size()); @@ -32,7 +35,8 @@ TEST(Context, EmptyHeadersAttributes) { TEST(Context, InvalidRequest) { Http::TestRequestHeaderMapImpl header_map{{"referer", "dogs.com"}}; - HeadersWrapper headers(&header_map); + Protobuf::Arena arena; + HeadersWrapper headers(arena, &header_map); auto header = headers[CelValue::CreateStringView("dogs.com\n")]; EXPECT_FALSE(header.has_value()); } @@ -43,10 +47,11 @@ TEST(Context, RequestAttributes) { Http::TestRequestHeaderMapImpl header_map{ {":method", "POST"}, {":scheme", "http"}, {":path", "/meow?yes=1"}, {":authority", "kittens.com"}, {"referer", "dogs.com"}, {"user-agent", "envoy-mobile"}, - {"content-length", "10"}, {"x-request-id", "blah"}, - }; - RequestWrapper request(&header_map, info); - RequestWrapper empty_request(nullptr, 
empty_info); + {"content-length", "10"}, {"x-request-id", "blah"}, {"double-header", "foo"}, + {"double-header", "bar"}}; + Protobuf::Arena arena; + RequestWrapper request(arena, &header_map, info); + RequestWrapper empty_request(arena, nullptr, empty_info); EXPECT_CALL(info, bytesReceived()).WillRepeatedly(Return(10)); // "2018-04-03T23:06:09.123Z". @@ -143,7 +148,7 @@ TEST(Context, RequestAttributes) { EXPECT_TRUE(value.has_value()); ASSERT_TRUE(value.value().IsInt64()); // this includes the headers size - EXPECT_EQ(138, value.value().Int64OrDie()); + EXPECT_EQ(170, value.value().Int64OrDie()); } { @@ -167,12 +172,17 @@ TEST(Context, RequestAttributes) { ASSERT_TRUE(value.value().IsMap()); auto& map = *value.value().MapOrDie(); EXPECT_FALSE(map.empty()); - EXPECT_EQ(8, map.size()); + EXPECT_EQ(10, map.size()); auto header = map[CelValue::CreateStringView(Referer)]; EXPECT_TRUE(header.has_value()); ASSERT_TRUE(header.value().IsString()); EXPECT_EQ("dogs.com", header.value().StringOrDie().value()); + + auto header2 = map[CelValue::CreateStringView("double-header")]; + EXPECT_TRUE(header2.has_value()); + ASSERT_TRUE(header2.value().IsString()); + EXPECT_EQ("foo,bar", header2.value().StringOrDie().value()); } { @@ -205,9 +215,10 @@ TEST(Context, RequestFallbackAttributes) { Http::TestRequestHeaderMapImpl header_map{ {":method", "POST"}, {":scheme", "http"}, - {":path", "/meow?yes=1"}, + {":path", "/meow"}, }; - RequestWrapper request(&header_map, info); + Protobuf::Arena arena; + RequestWrapper request(arena, &header_map, info); EXPECT_CALL(info, bytesReceived()).WillRepeatedly(Return(10)); @@ -234,13 +245,17 @@ TEST(Context, ResponseAttributes) { const std::string grpc_status = "grpc-status"; Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}}; Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}, {grpc_status, "8"}}; - ResponseWrapper response(&header_map, &trailer_map, info); - ResponseWrapper empty_response(nullptr, nullptr, empty_info); + Protobuf::Arena arena; + ResponseWrapper response(arena, &header_map, &trailer_map, info); + ResponseWrapper empty_response(arena, nullptr, nullptr, empty_info); EXPECT_CALL(info, responseCode()).WillRepeatedly(Return(404)); EXPECT_CALL(info, bytesSent()).WillRepeatedly(Return(123)); EXPECT_CALL(info, responseFlags()).WillRepeatedly(Return(0x1)); + const absl::optional code_details = "unauthorized"; + EXPECT_CALL(info, responseCodeDetails()).WillRepeatedly(ReturnRef(code_details)); + { auto value = response[CelValue::CreateStringView(Undefined)]; EXPECT_FALSE(value.has_value()); @@ -279,6 +294,13 @@ TEST(Context, ResponseAttributes) { EXPECT_EQ(404, value.value().Int64OrDie()); } + { + auto value = response[CelValue::CreateStringView(CodeDetails)]; + EXPECT_TRUE(value.has_value()); + ASSERT_TRUE(value.value().IsString()); + EXPECT_EQ(code_details.value(), value.value().StringOrDie().value()); + } + { auto value = response[CelValue::CreateStringView(Headers)]; EXPECT_TRUE(value.has_value()); @@ -329,10 +351,21 @@ TEST(Context, ResponseAttributes) { EXPECT_FALSE(value.has_value()); } + { + auto value = empty_response[CelValue::CreateStringView(Code)]; + EXPECT_FALSE(value.has_value()); + } + + { + auto value = empty_response[CelValue::CreateStringView(CodeDetails)]; + EXPECT_FALSE(value.has_value()); + } + { Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}, {grpc_status, "7"}}; Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}}; - ResponseWrapper response_header_status(&header_map, &trailer_map, info); + 
Protobuf::Arena arena; + ResponseWrapper response_header_status(arena, &header_map, &trailer_map, info); auto value = response_header_status[CelValue::CreateStringView(GrpcStatus)]; EXPECT_TRUE(value.has_value()); ASSERT_TRUE(value.value().IsInt64()); @@ -341,7 +374,8 @@ TEST(Context, ResponseAttributes) { { Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}}; Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}}; - ResponseWrapper response_no_status(&header_map, &trailer_map, info); + Protobuf::Arena arena; + ResponseWrapper response_no_status(arena, &header_map, &trailer_map, info); auto value = response_no_status[CelValue::CreateStringView(GrpcStatus)]; EXPECT_TRUE(value.has_value()); ASSERT_TRUE(value.value().IsInt64()); @@ -351,12 +385,38 @@ TEST(Context, ResponseAttributes) { NiceMock info_without_code; Http::TestResponseHeaderMapImpl header_map{{header_name, "a"}}; Http::TestResponseTrailerMapImpl trailer_map{{trailer_name, "b"}}; - ResponseWrapper response_no_status(&header_map, &trailer_map, info_without_code); + Protobuf::Arena arena; + ResponseWrapper response_no_status(arena, &header_map, &trailer_map, info_without_code); auto value = response_no_status[CelValue::CreateStringView(GrpcStatus)]; EXPECT_FALSE(value.has_value()); } } +TEST(Context, ConnectionFallbackAttributes) { + NiceMock info; + ConnectionWrapper connection(info); + UpstreamWrapper upstream(info); + { + auto value = connection[CelValue::CreateStringView(Undefined)]; + EXPECT_FALSE(value.has_value()); + } + + { + auto value = connection[CelValue::CreateStringView(ID)]; + EXPECT_FALSE(value.has_value()); + } + + { + auto value = upstream[CelValue::CreateStringView(Undefined)]; + EXPECT_FALSE(value.has_value()); + } + + { + auto value = upstream[CelValue::CreateInt64(1)]; + EXPECT_FALSE(value.has_value()); + } +} + TEST(Context, ConnectionAttributes) { NiceMock info; std::shared_ptr> upstream_host( @@ -387,6 +447,8 @@ TEST(Context, ConnectionAttributes) { const std::string upstream_transport_failure_reason = "ConnectionTermination"; EXPECT_CALL(info, upstreamTransportFailureReason()) .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); + EXPECT_CALL(info, connectionID()).WillRepeatedly(Return(123)); + EXPECT_CALL(*downstream_ssl_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*upstream_host, address()).WillRepeatedly(Return(upstream_address)); @@ -542,6 +604,13 @@ TEST(Context, ConnectionAttributes) { EXPECT_EQ(subject_peer, value.value().StringOrDie().value()); } + { + auto value = connection[CelValue::CreateStringView(ID)]; + EXPECT_TRUE(value.has_value()); + ASSERT_TRUE(value.value().IsUint64()); + EXPECT_EQ(123, value.value().Uint64OrDie()); + } + { auto value = upstream[CelValue::CreateStringView(TLSVersion)]; EXPECT_TRUE(value.has_value()); @@ -606,6 +675,32 @@ TEST(Context, ConnectionAttributes) { } } +TEST(Context, FilterStateAttributes) { + StreamInfo::FilterStateImpl filter_state(StreamInfo::FilterState::LifeSpan::FilterChain); + FilterStateWrapper wrapper(filter_state); + ProtobufWkt::Arena arena; + wrapper.Produce(&arena); + + const std::string key = "filter_state_key"; + const std::string serialized = "filter_state_value"; + const std::string missing = "missing_key"; + + auto accessor = std::make_shared(serialized); + filter_state.setData(key, accessor, StreamInfo::FilterState::StateType::ReadOnly); + + { + auto value = wrapper[CelValue::CreateStringView(missing)]; + EXPECT_FALSE(value.has_value()); + } + + { + auto value = 
wrapper[CelValue::CreateStringView(key)]; + EXPECT_TRUE(value.has_value()); + EXPECT_TRUE(value.value().IsBytes()); + EXPECT_EQ(serialized, value.value().BytesOrDie().value()); + } +} + } // namespace } // namespace Expr } // namespace Common diff --git a/test/extensions/filters/common/expr/evaluator_fuzz_test.cc b/test/extensions/filters/common/expr/evaluator_fuzz_test.cc index 2f29c9f0023c..6c3867747ede 100644 --- a/test/extensions/filters/common/expr/evaluator_fuzz_test.cc +++ b/test/extensions/filters/common/expr/evaluator_fuzz_test.cc @@ -44,7 +44,7 @@ DEFINE_PROTO_FUZZER(const test::extensions::filters::common::expr::EvaluatorTest // Evaluate the CEL expression. Protobuf::Arena arena; - Expr::evaluate(*expr, &arena, *stream_info, &request_headers, &response_headers, + Expr::evaluate(*expr, arena, *stream_info, &request_headers, &response_headers, &response_trailers); } catch (const CelException& e) { ENVOY_LOG_MISC(debug, "CelException: {}", e.what()); diff --git a/test/extensions/filters/common/ext_authz/BUILD b/test/extensions/filters/common/ext_authz/BUILD index 472c9a54f8c5..ebf3ad3eac1f 100644 --- a/test/extensions/filters/common/ext_authz/BUILD +++ b/test/extensions/filters/common/ext_authz/BUILD @@ -16,6 +16,7 @@ envoy_cc_test( "//source/common/network:address_lib", "//source/common/protobuf", "//source/extensions/filters/common/ext_authz:check_request_utils_lib", + "//source/extensions/filters/common/ext_authz:ext_authz_interface", "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ssl:ssl_mocks", @@ -31,8 +32,9 @@ envoy_cc_test( deps = [ "//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib", "//test/extensions/filters/common/ext_authz:ext_authz_test_common", + "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", - "@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], @@ -46,6 +48,7 @@ envoy_cc_test( "//test/extensions/filters/common/ext_authz:ext_authz_test_common", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/upstream:cluster_manager_mocks", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc index ef221d97407b..310b7c3b98a2 100644 --- a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc +++ b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc @@ -4,6 +4,7 @@ #include "common/protobuf/protobuf.h" #include "extensions/filters/common/ext_authz/check_request_utils.h" +#include "extensions/filters/common/ext_authz/ext_authz.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" @@ -56,9 +57,9 @@ class CheckRequestUtilsTest : public testing::Test { auto metadata_val = MessageUtil::keyValueStruct("foo", "bar"); (*metadata_context.mutable_filter_metadata())["meta.key"] = metadata_val; - CheckRequestUtils::createHttpCheck(&callbacks_, request_headers, std::move(context_extensions), - std::move(metadata_context), request, false, - include_peer_certificate); + CheckRequestUtils::createHttpCheck( + &callbacks_, request_headers, std::move(context_extensions), std::move(metadata_context), + request, /*max_request_bytes=*/0, 
/*pack_as_bytes=*/false, include_peer_certificate);
 
   EXPECT_EQ("source", request.attributes().source().principal());
   EXPECT_EQ("destination", request.attributes().destination().principal());
@@ -145,8 +146,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttp) {
   envoy::service::auth::v3::CheckRequest request_;
 
   // A client supplied EnvoyAuthPartialBody header should be ignored.
-  Http::TestRequestHeaderMapImpl request_headers{
-      {Http::Headers::get().EnvoyAuthPartialBody.get(), "1"}};
+  Http::TestRequestHeaderMapImpl request_headers{{Headers::get().EnvoyAuthPartialBody.get(), "1"}};
 
   EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{"source"}));
   EXPECT_CALL(*ssl_, uriSanLocalCertificate())
@@ -154,12 +154,13 @@
   expectBasicHttp();
   CheckRequestUtils::createHttpCheck(&callbacks_, request_headers,
                                      Protobuf::Map<std::string, std::string>(),
-                                     envoy::config::core::v3::Metadata(), request_, size, false);
+                                     envoy::config::core::v3::Metadata(), request_, size,
+                                     /*pack_as_bytes=*/false, /*include_peer_certificate=*/false);
   ASSERT_EQ(size, request_.attributes().request().http().body().size());
   EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body());
   EXPECT_EQ(request_.attributes().request().http().headers().end(),
             request_.attributes().request().http().headers().find(
-                Http::Headers::get().EnvoyAuthPartialBody.get()));
+                Headers::get().EnvoyAuthPartialBody.get()));
   EXPECT_TRUE(request_.attributes().request().has_time());
 }
 
@@ -175,11 +176,12 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) {
   expectBasicHttp();
   CheckRequestUtils::createHttpCheck(&callbacks_, headers_,
                                      Protobuf::Map<std::string, std::string>(),
-                                     envoy::config::core::v3::Metadata(), request_, size, false);
+                                     envoy::config::core::v3::Metadata(), request_, size,
+                                     /*pack_as_bytes=*/false, /*include_peer_certificate=*/false);
   ASSERT_EQ(size, request_.attributes().request().http().body().size());
   EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body());
   EXPECT_EQ("true", request_.attributes().request().http().headers().at(
-                        Http::Headers::get().EnvoyAuthPartialBody.get()));
+                        Headers::get().EnvoyAuthPartialBody.get()));
 }
 
 // Verify that check request object has all the request data.
@@ -193,12 +195,55 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) {
   expectBasicHttp();
   CheckRequestUtils::createHttpCheck(
       &callbacks_, headers_, Protobuf::Map<std::string, std::string>(),
-      envoy::config::core::v3::Metadata(), request_, buffer_->length(), false);
+      envoy::config::core::v3::Metadata(), request_, buffer_->length(), /*pack_as_bytes=*/false,
+      /*include_peer_certificate=*/false);
   ASSERT_EQ(buffer_->length(), request_.attributes().request().http().body().size());
   EXPECT_EQ(buffer_->toString().substr(0, buffer_->length()),
             request_.attributes().request().http().body());
   EXPECT_EQ("false", request_.attributes().request().http().headers().at(
-                         Http::Headers::get().EnvoyAuthPartialBody.get()));
+                         Headers::get().EnvoyAuthPartialBody.get()));
+}
+
+// Verify that the check request object has all the request data, packed as bytes instead of a
+// UTF-8 string.
+TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBodyPackAsBytes) {
+  Http::TestRequestHeaderMapImpl headers_;
+  envoy::service::auth::v3::CheckRequest request_;
+
+  EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector<std::string>{"source"}));
+  EXPECT_CALL(*ssl_, uriSanLocalCertificate())
+      .WillOnce(Return(std::vector<std::string>{"destination"}));
+
+  // Fill the buffer with non-UTF-8 data.
+  uint8_t raw[2] = {0xc0, 0xc0};
+  Buffer::OwnedImpl raw_buffer(raw, 2);
+  buffer_->drain(buffer_->length());
+  buffer_->add(raw_buffer);
+
+  expectBasicHttp();
+
+  // Setting pack_as_bytes to false while a string field holds invalid UTF-8 data would make the
+  // request_.SerializeToString() call below print an error message to stderr. Interestingly,
+  // request_.SerializeToString() still returns true even when it fails to serialize the data.
+  CheckRequestUtils::createHttpCheck(
+      &callbacks_, headers_, Protobuf::Map<std::string, std::string>(),
+      envoy::config::core::v3::Metadata(), request_, buffer_->length(), /*pack_as_bytes=*/true,
+      /*include_peer_certificate=*/false);
+
+  // TODO(dio): Find a way to test this without using a function from the testing::internal
+  // namespace.
+  testing::internal::CaptureStderr();
+  std::string out;
+  ASSERT_TRUE(request_.SerializeToString(&out));
+  ASSERT_EQ("", testing::internal::GetCapturedStderr());
+
+  // Non-UTF-8 data sets the raw_body field instead of the body field.
+  ASSERT_EQ(buffer_->length(), request_.attributes().request().http().raw_body().size());
+  ASSERT_EQ(0, request_.attributes().request().http().body().size());
+
+  EXPECT_EQ(buffer_->toString().substr(0, buffer_->length()),
+            request_.attributes().request().http().raw_body());
+  EXPECT_EQ("false", request_.attributes().request().http().headers().at(
+                         Headers::get().EnvoyAuthPartialBody.get()));
 }
 
 // Verify that createHttpCheck extract the proper attributes from the http request into CheckRequest
diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc
index 46dbfdd4cfe2..81142b827fea 100644
--- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc
+++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc
@@ -1,4 +1,3 @@
-#include "envoy/service/auth/v2alpha/external_auth.pb.h" // for proto link
 #include "envoy/service/auth/v3/external_auth.pb.h"
 #include "envoy/type/v3/http_status.pb.h"
 
@@ -12,7 +11,9 @@
 #include "test/extensions/filters/common/ext_authz/test_common.h"
 #include "test/mocks/grpc/mocks.h"
 #include "test/mocks/stream_info/mocks.h"
+#include "test/mocks/thread_local/mocks.h"
 #include "test/mocks/tracing/mocks.h"
+#include "test/test_common/test_runtime.h"
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -31,7 +32,7 @@ namespace Filters {
 namespace Common {
 namespace ExtAuthz {
 
-using Params = std::tuple<envoy::config::core::v3::ApiVersion, bool>;
+using Params = std::tuple<envoy::config::core::v3::ApiVersion>;
 
 class ExtAuthzGrpcClientTest : public testing::TestWithParam<Params> {
 public:
@@ -39,9 +40,8 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam<Params> {
   void initialize(const Params& param) {
     api_version_ = std::get<0>(param);
-    use_alpha_ = std::get<1>(param);
     client_ = std::make_unique<GrpcClientImpl>(Grpc::RawAsyncClientPtr{async_client_}, timeout_,
-                                               api_version_, use_alpha_);
+                                               api_version_);
   }
 
   void expectCallSend(envoy::service::auth::v3::CheckRequest& request) {
@@ -52,10 +52,15 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam<Params> {
                        Buffer::InstancePtr&&, Grpc::RawAsyncRequestCallbacks&, Tracing::Span&,
                        const Http::AsyncClient::RequestOptions& options) -> Grpc::AsyncRequest* {
               EXPECT_EQ(TestUtility::getVersionedServiceFullName(
-                            "envoy.service.auth.{}.Authorization", api_version_, use_alpha_),
+                            "envoy.service.auth.{}.Authorization", api_version_),
                         service_full_name);
               EXPECT_EQ("Check", method_name);
-              EXPECT_EQ(timeout_->count(), options.timeout->count());
+              if (Runtime::runtimeFeatureEnabled(
+
"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created")) { + EXPECT_FALSE(options.timeout.has_value()); + } else { + EXPECT_EQ(timeout_->count(), options.timeout->count()); + } return &async_request_; })); } @@ -65,17 +70,16 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { Grpc::MockAsyncRequest async_request_; GrpcClientImplPtr client_; MockRequestCallbacks request_callbacks_; + NiceMock dispatcher_; Tracing::MockSpan span_; - bool use_alpha_{}; NiceMock stream_info_; envoy::config::core::v3::ApiVersion api_version_; }; INSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest, - Values(Params(envoy::config::core::v3::ApiVersion::AUTO, false), - Params(envoy::config::core::v3::ApiVersion::V2, false), - Params(envoy::config::core::v3::ApiVersion::V2, true), - Params(envoy::config::core::v3::ApiVersion::V3, false))); + Values(Params(envoy::config::core::v3::ApiVersion::AUTO), + Params(envoy::config::core::v3::ApiVersion::V2), + Params(envoy::config::core::v3::ApiVersion::V3))); // Test the client when an ok response is received. TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { @@ -102,9 +106,15 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { authz_response.dynamic_metadata = expected_dynamic_metadata; + NiceMock* timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timer, enableTimer(timeout_.value(), _)); + bool timer_destroyed = false; + timer->timer_destroyed_ = &timer_destroyed; + envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -113,6 +123,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo( AuthzResponseNoAttributes(authz_response)))); client_->onSuccess(std::move(check_response), span_); + // make sure the internal timeout timer is destroyed + EXPECT_EQ(timer_destroyed, true); } // Test the client when an ok response is received. 
@@ -128,7 +140,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -151,7 +164,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -175,7 +189,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -202,7 +217,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -218,13 +234,22 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { TEST_P(ExtAuthzGrpcClientTest, UnknownError) { initialize(GetParam()); + NiceMock* timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timer, enableTimer(timeout_.value(), _)); + bool timer_destroyed = false; + timer->timer_destroyed_ = &timer_destroyed; + envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); client_->onFailure(Grpc::Status::Unknown, "", span_); + + // make sure the internal timeout timer is destroyed + EXPECT_EQ(timer_destroyed, true); } // Test the client when the request is canceled. @@ -233,7 +258,8 @@ TEST_P(ExtAuthzGrpcClientTest, CancelledAuthorizationRequest) { envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(*async_client_, sendRaw(_, _, _, _, _, _)).WillOnce(Return(&async_request_)); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); EXPECT_CALL(async_request_, cancel()); client_->cancel(); @@ -241,17 +267,69 @@ TEST_P(ExtAuthzGrpcClientTest, CancelledAuthorizationRequest) { // Test the client when the request times out. 
TEST_P(ExtAuthzGrpcClientTest, AuthorizationRequestTimeout) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "false"}}); initialize(GetParam()); envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); client_->onFailure(Grpc::Status::DeadlineExceeded, "", span_); } +// Test the client when the request times out on an internal timeout. +TEST_P(ExtAuthzGrpcClientTest, AuthorizationInternalRequestTimeout) { + initialize(GetParam()); + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "true"}}); + + NiceMock* timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timer, enableTimer(timeout_.value(), _)); + + envoy::service::auth::v3::CheckRequest request; + expectCallSend(request); + + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); + + EXPECT_CALL(async_request_, cancel()); + EXPECT_CALL(request_callbacks_, + onComplete_(WhenDynamicCastTo(AuthzTimedoutResponse()))); + timer->invokeCallback(); +} + +// Test when the client is cancelled with internal timeout. +TEST_P(ExtAuthzGrpcClientTest, AuthorizationInternalRequestTimeoutCancelled) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "true"}}); + + initialize(GetParam()); + + NiceMock* timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timer, enableTimer(timeout_.value(), _)); + + envoy::service::auth::v3::CheckRequest request; + expectCallSend(request); + + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); + + EXPECT_CALL(async_request_, cancel()); + EXPECT_CALL(request_callbacks_, onComplete_(_)).Times(0); + // make sure cancel resets the timer: + bool timer_destroyed = false; + timer->timer_destroyed_ = &timer_destroyed; + client_->cancel(); + EXPECT_EQ(timer_destroyed, true); +} + // Test the client when an OK response is received with dynamic metadata in that OK response. 
TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) { initialize(GetParam()); @@ -280,7 +358,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, dispatcher_, request, Tracing::NullSpan::instance(), + stream_info_); Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -291,6 +370,65 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) { client_->onSuccess(std::move(check_response), span_); } +class AsyncClientCacheTest : public testing::Test { +public: + AsyncClientCacheTest() { + client_cache_ = std::make_unique(async_client_manager_, scope_, tls_); + } + + void expectClientCreation() { + factory_ = new Grpc::MockAsyncClientFactory; + async_client_ = new Grpc::MockAsyncClient; + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true)) + .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + EXPECT_CALL(*factory_, create()).WillOnce(Invoke([this] { + return Grpc::RawAsyncClientPtr{async_client_}; + })); + return Grpc::AsyncClientFactoryPtr{factory_}; + })); + } + + NiceMock tls_; + Grpc::MockAsyncClientManager async_client_manager_; + Grpc::MockAsyncClient* async_client_ = nullptr; + Grpc::MockAsyncClientFactory* factory_ = nullptr; + std::unique_ptr client_cache_; + NiceMock scope_; +}; + +TEST_F(AsyncClientCacheTest, Deduplication) { + Stats::IsolatedStoreImpl scope; + testing::InSequence s; + + envoy::extensions::filters::http::ext_authz::v3::ExtAuthz config; + config.mutable_grpc_service()->mutable_google_grpc()->set_target_uri("dns://test01"); + config.mutable_grpc_service()->mutable_google_grpc()->set_credentials_factory_name( + "test_credential01"); + + expectClientCreation(); + Grpc::RawAsyncClientSharedPtr test_client_01 = client_cache_->getOrCreateAsyncClient(config); + // Fetches the existing client. + EXPECT_EQ(test_client_01, client_cache_->getOrCreateAsyncClient(config)); + + config.mutable_grpc_service()->mutable_google_grpc()->set_credentials_factory_name( + "test_credential02"); + expectClientCreation(); + // Different credentials use different clients. + EXPECT_NE(test_client_01, client_cache_->getOrCreateAsyncClient(config)); + Grpc::RawAsyncClientSharedPtr test_client_02 = client_cache_->getOrCreateAsyncClient(config); + + config.mutable_grpc_service()->mutable_google_grpc()->set_credentials_factory_name( + "test_credential02"); + // No creation, fetching the existing one. + EXPECT_EQ(test_client_02, client_cache_->getOrCreateAsyncClient(config)); + + // Different targets use different clients. 
+ config.mutable_grpc_service()->mutable_google_grpc()->set_target_uri("dns://test02"); + expectClientCreation(); + EXPECT_NE(test_client_01, client_cache_->getOrCreateAsyncClient(config)); + EXPECT_NE(test_client_02, client_cache_->getOrCreateAsyncClient(config)); +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 0695251a0735..14e0dd4eb955 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -13,6 +13,7 @@ #include "test/extensions/filters/common/ext_authz/test_common.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/upstream/cluster_manager.h" +#include "test/test_common/test_runtime.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -33,6 +34,8 @@ namespace Common { namespace ExtAuthz { namespace { +constexpr uint32_t REQUEST_TIMEOUT{250}; + class ExtAuthzHttpClientTest : public testing::Test { public: ExtAuthzHttpClientTest() : async_request_{&async_client_} { initialize(EMPTY_STRING); } @@ -44,7 +47,8 @@ class ExtAuthzHttpClientTest : public testing::Test { .WillByDefault(ReturnRef(async_client_)); } - ClientConfigSharedPtr createConfig(const std::string& yaml = EMPTY_STRING, uint32_t timeout = 250, + ClientConfigSharedPtr createConfig(const std::string& yaml = EMPTY_STRING, + uint32_t timeout = REQUEST_TIMEOUT, const std::string& path_prefix = "/bar") { envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config{}; if (yaml.empty()) { @@ -95,7 +99,6 @@ class ExtAuthzHttpClientTest : public testing::Test { } else { TestUtility::loadFromYaml(yaml, proto_config); } - return std::make_shared(proto_config, timeout, path_prefix); } @@ -120,7 +123,7 @@ class ExtAuthzHttpClientTest : public testing::Test { const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); - client_->check(request_callbacks_, request, parent_span_, stream_info_); + client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); client_->onSuccess(async_request_, std::move(check_response)); @@ -134,6 +137,7 @@ class ExtAuthzHttpClientTest : public testing::Test { ClientConfigSharedPtr config_; std::unique_ptr client_; MockRequestCallbacks request_callbacks_; + NiceMock dispatcher_; Tracing::MockSpan parent_span_; Tracing::MockSpan child_span_; NiceMock stream_info_; @@ -261,35 +265,42 @@ TEST_F(ExtAuthzHttpClientTest, AllowedRequestHeadersPrefix) { {regexFood.get(), "food"}, {regexFool.get(), "fool"}}); - EXPECT_EQ(message_ptr->headers().get(Http::Headers::get().ContentType), nullptr); - const auto* x_squash = message_ptr->headers().get(Http::Headers::get().XSquashDebug); - ASSERT_NE(x_squash, nullptr); - EXPECT_EQ(x_squash->value().getStringView(), "foo"); + EXPECT_TRUE(message_ptr->headers().get(Http::Headers::get().ContentType).empty()); + const auto x_squash = message_ptr->headers().get(Http::Headers::get().XSquashDebug); + ASSERT_FALSE(x_squash.empty()); + EXPECT_EQ(x_squash[0]->value().getStringView(), "foo"); - const auto* x_content_type = message_ptr->headers().get(Http::Headers::get().XContentTypeOptions); - ASSERT_NE(x_content_type, nullptr); - 
EXPECT_EQ(x_content_type->value().getStringView(), "foobar"); + const auto x_content_type = message_ptr->headers().get(Http::Headers::get().XContentTypeOptions); + ASSERT_FALSE(x_content_type.empty()); + EXPECT_EQ(x_content_type[0]->value().getStringView(), "foobar"); - const auto* food = message_ptr->headers().get(regexFood); - ASSERT_NE(food, nullptr); - EXPECT_EQ(food->value().getStringView(), "food"); + const auto food = message_ptr->headers().get(regexFood); + ASSERT_FALSE(food.empty()); + EXPECT_EQ(food[0]->value().getStringView(), "food"); - const auto* fool = message_ptr->headers().get(regexFool); - ASSERT_NE(fool, nullptr); - EXPECT_EQ(fool->value().getStringView(), "fool"); + const auto fool = message_ptr->headers().get(regexFool); + ASSERT_FALSE(fool.empty()); + EXPECT_EQ(fool[0]->value().getStringView(), "fool"); } // Verify client response when authorization server returns a 200 OK. TEST_F(ExtAuthzHttpClientTest, AuthorizationOk) { + NiceMock* timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timer, enableTimer(_, _)); + bool timer_destroyed = false; + timer->timer_destroyed_ = &timer_destroyed; + const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, parent_span_, stream_info_); + client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); client_->onSuccess(async_request_, std::move(check_response)); + // make sure the internal timeout timer is destroyed + EXPECT_EQ(timer_destroyed, true); } using HeaderValuePair = std::pair; @@ -309,7 +320,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) { const HeaderValuePair header2{"x-authz-header2", "value"}; EXPECT_CALL(async_client_, send_(AllOf(ContainsPairAsHeader(header1), ContainsPairAsHeader(header2)), _, _)); - client_->check(request_callbacks_, request, parent_span_, stream_info_); + client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_); // Check for child span tagging when the request is allowed. 
  EXPECT_CALL(child_span_, setTag(Eq("ext_authz_http_status"), Eq("OK")));
@@ -354,7 +365,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInf
   EXPECT_CALL(stream_info, getRequestHeaders()).WillOnce(Return(&request_headers));
 
   envoy::service::auth::v3::CheckRequest request;
-  client_->check(request_callbacks_, request, parent_span_, stream_info);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info);
 
   EXPECT_CALL(request_callbacks_,
               onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));
@@ -372,7 +383,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) {
   envoy::service::auth::v3::CheckRequest request;
   EXPECT_CALL(request_callbacks_,
               onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));
-  client_->check(request_callbacks_, request, parent_span_, stream_info_);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
 
   const auto check_response_headers = TestCommon::makeHeaderValueOption({{":status", "200", false},
@@ -387,6 +398,32 @@
   client_->onSuccess(async_request_, std::move(message_response));
 }
 
+// Verify that headers present in x-envoy-auth-headers-to-remove make it into the
+// Response correctly.
+TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithHeadersToRemove) {
+  envoy::service::auth::v3::CheckRequest request;
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
+
+  // When onSuccess() is called at the bottom of the test, we expect all of the headers-to-remove
+  // in that HTTP response to have been correctly extracted and inserted into the authz Response
+  // built just below.
+  Response authz_response;
+  authz_response.status = CheckStatus::OK;
+  authz_response.headers_to_remove.emplace_back(Http::LowerCaseString{"remove-me"});
+  authz_response.headers_to_remove.emplace_back(Http::LowerCaseString{"remove-me-too"});
+  authz_response.headers_to_remove.emplace_back(Http::LowerCaseString{"remove-me-also"});
+  EXPECT_CALL(request_callbacks_,
+              onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzOkResponse(authz_response))));
+
+  const HeaderValueOptionVector http_response_headers = TestCommon::makeHeaderValueOption({
+      {":status", "200", false},
+      {"x-envoy-auth-headers-to-remove", " ,remove-me,, , remove-me-too , ", false},
+      {"x-envoy-auth-headers-to-remove", " remove-me-also ", false},
+  });
+  Http::ResponseMessagePtr http_response = TestCommon::makeMessageResponse(http_response_headers);
+  client_->onSuccess(async_request_, std::move(http_response));
+}
+
 // Test the client when a denied response is received.
 TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) {
   const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "403", false}});
@@ -395,7 +432,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) {
   auto check_response = TestCommon::makeMessageResponse(expected_headers);
 
   envoy::service::auth::v3::CheckRequest request;
-  client_->check(request_callbacks_, request, parent_span_, stream_info_);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
 
   // Check for child span tagging when the request is denied.
  EXPECT_CALL(child_span_, setTag(Eq("ext_authz_http_status"), Eq("Forbidden")));
@@ -416,7 +453,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) {
       CheckStatus::Denied, Http::Code::Unauthorized, expected_body, expected_headers);
 
   envoy::service::auth::v3::CheckRequest request;
-  client_->check(request_callbacks_, request, parent_span_, stream_info_);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
 
   EXPECT_CALL(request_callbacks_,
               onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzDeniedResponse(authz_response))));
@@ -434,7 +471,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) {
       {{"x-foo", "bar", false}, {":status", "401", false}, {"foo", "bar", false}}));
 
   envoy::service::auth::v3::CheckRequest request;
-  client_->check(request_callbacks_, request, parent_span_, stream_info_);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
   EXPECT_CALL(request_callbacks_,
               onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzDeniedResponse(authz_response))));
   const auto check_response_headers = TestCommon::makeHeaderValueOption({{":method", "post", false},
@@ -447,13 +484,20 @@
 // Test the client when an unknown error occurs.
 TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) {
+  NiceMock<Event::MockTimer>* timer = new NiceMock<Event::MockTimer>(&dispatcher_);
+  EXPECT_CALL(*timer, enableTimer(_, _));
+  bool timer_destroyed = false;
+  timer->timer_destroyed_ = &timer_destroyed;
+
   envoy::service::auth::v3::CheckRequest request;
-  client_->check(request_callbacks_, request, parent_span_, stream_info_);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
 
   EXPECT_CALL(request_callbacks_,
               onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzErrorResponse(CheckStatus::Error))));
   client_->onFailure(async_request_, Http::AsyncClient::FailureReason::Reset);
+  // Make sure the internal timeout timer is destroyed.
+  EXPECT_EQ(timer_destroyed, true);
 }
 
 // Test the client when a call to authorization server returns a 5xx error status.
@@ -462,7 +506,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) {
       Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}}));
 
   envoy::service::auth::v3::CheckRequest request;
-  client_->check(request_callbacks_, request, parent_span_, stream_info_);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
 
   EXPECT_CALL(request_callbacks_,
               onComplete_(WhenDynamicCastTo<ResponsePtr&>(AuthzErrorResponse(CheckStatus::Error))));
@@ -474,10 +518,54 @@ TEST_F(ExtAuthzHttpClientTest, CancelledAuthorizationRequest) {
   envoy::service::auth::v3::CheckRequest request;
   EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_));
-  client_->check(request_callbacks_, request, parent_span_, stream_info_);
+  client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_);
+
+  EXPECT_CALL(async_request_, cancel());
+  client_->cancel();
+}
+
+// Test the client when the request times out on an internal timeout.
+TEST_F(ExtAuthzHttpClientTest, AuthorizationInternalRequestTimeout) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "true"}}); + + initialize(""); + envoy::service::auth::v3::CheckRequest request; + + NiceMock* timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(REQUEST_TIMEOUT), _)); + + EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_)); + client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_); + + EXPECT_CALL(async_request_, cancel()); + EXPECT_CALL(request_callbacks_, + onComplete_(WhenDynamicCastTo(AuthzTimedoutResponse()))); + timer->invokeCallback(); +} + +// Test when the client is cancelled with internal timeout. +TEST_F(ExtAuthzHttpClientTest, AuthorizationInternalRequestTimeoutCancelled) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "true"}}); + + initialize(""); + envoy::service::auth::v3::CheckRequest request; + + NiceMock* timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(REQUEST_TIMEOUT), _)); + + EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_)); + client_->check(request_callbacks_, dispatcher_, request, parent_span_, stream_info_); + // make sure cancel resets the timer: EXPECT_CALL(async_request_, cancel()); + bool timer_destroyed = false; + timer->timer_destroyed_ = &timer_destroyed; client_->cancel(); + EXPECT_EQ(timer_destroyed, true); } // Test the client when the configured cluster is missing/removed. @@ -488,8 +576,8 @@ TEST_F(ExtAuthzHttpClientTest, NoCluster) { EXPECT_CALL(cm_, httpAsyncClientForCluster("ext_authz")).Times(0); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - client_->check(request_callbacks_, envoy::service::auth::v3::CheckRequest{}, parent_span_, - stream_info_); + client_->check(request_callbacks_, dispatcher_, envoy::service::auth::v3::CheckRequest{}, + parent_span_, stream_info_); } } // namespace diff --git a/test/extensions/filters/common/ext_authz/mocks.h b/test/extensions/filters/common/ext_authz/mocks.h index 900d64d7d0fd..682e2ce83fe4 100644 --- a/test/extensions/filters/common/ext_authz/mocks.h +++ b/test/extensions/filters/common/ext_authz/mocks.h @@ -23,8 +23,9 @@ class MockClient : public Client { // ExtAuthz::Client MOCK_METHOD(void, cancel, ()); MOCK_METHOD(void, check, - (RequestCallbacks & callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info)); + (RequestCallbacks & callbacks, Event::Dispatcher& dispatcher, + const envoy::service::auth::v3::CheckRequest& request, Tracing::Span& parent_span, + const StreamInfo::StreamInfo& stream_info)); }; class MockRequestCallbacks : public RequestCallbacks { diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index d8da3bf56228..0f31d402750f 100644 --- a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -94,7 +94,7 @@ Http::ResponseMessagePtr TestCommon::makeMessageResponse(const HeaderValueOption response->headers().addCopy(Http::LowerCaseString(header.header().key()), header.header().value()); } - 
response->body() = std::make_unique(body); + response->body().add(body); return response; }; @@ -103,6 +103,12 @@ bool TestCommon::compareHeaderVector(const Http::HeaderVector& lhs, const Http:: std::set>(rhs.begin(), rhs.end()); } +bool TestCommon::compareVectorOfHeaderName(const std::vector& lhs, + const std::vector& rhs) { + return std::set(lhs.begin(), lhs.end()) == + std::set(rhs.begin(), rhs.end()); +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/test/extensions/filters/common/ext_authz/test_common.h b/test/extensions/filters/common/ext_authz/test_common.h index 106bf0f6c979..efbbc8e411ec 100644 --- a/test/extensions/filters/common/ext_authz/test_common.h +++ b/test/extensions/filters/common/ext_authz/test_common.h @@ -46,6 +46,8 @@ class TestCommon { static HeaderValueOptionVector makeHeaderValueOption(KeyValueOptionVector&& headers); static bool compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs); + static bool compareVectorOfHeaderName(const std::vector& lhs, + const std::vector& rhs); }; MATCHER_P(AuthzErrorResponse, status, "") { @@ -60,6 +62,18 @@ MATCHER_P(AuthzErrorResponse, status, "") { return arg->status == status; } +MATCHER(AuthzTimedoutResponse, "") { + // These fields should be always empty when the status is a timeout error. + if (!arg->headers_to_add.empty() || !arg->headers_to_append.empty() || !arg->body.empty()) { + return false; + } + // HTTP status code should be always set to Forbidden. + if (arg->status_code != Http::Code::Forbidden) { + return false; + } + return arg->status == CheckStatus::Error && arg->error_kind == ErrorKind::Timedout; +} + MATCHER_P(AuthzResponseNoAttributes, response, "") { const bool equal_status = arg->status == response.status; const bool equal_metadata = @@ -99,12 +113,15 @@ MATCHER_P(AuthzOkResponse, response, "") { } // Compare headers_to_add. 
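compareVectorOfHeaderName, added just above, compares the two header-name lists by converting both sides to std::set, so ordering does not matter and duplicate entries collapse. The same idea with plain strings, as a self-contained illustration:

#include <set>
#include <string>
#include <vector>

// Order-insensitive comparison: {"a", "b"} equals {"b", "a"}. Because the
// vectors are reduced to sets, duplicates also collapse ({"a", "a"} equals {"a"}).
bool sameNames(const std::vector<std::string>& lhs, const std::vector<std::string>& rhs) {
  return std::set<std::string>(lhs.begin(), lhs.end()) ==
         std::set<std::string>(rhs.begin(), rhs.end());
}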
- return TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add); - ; + if (!TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add)) { + return false; + } + + return TestCommon::compareVectorOfHeaderName(response.headers_to_remove, arg->headers_to_remove); } MATCHER_P(ContainsPairAsHeader, pair, "") { - return arg->headers().get(pair.first)->value().getStringView() == pair.second; + return arg->headers().get(pair.first)[0]->value().getStringView() == pair.second; } } // namespace ExtAuthz diff --git a/test/extensions/filters/common/local_ratelimit/BUILD b/test/extensions/filters/common/local_ratelimit/BUILD new file mode 100644 index 000000000000..96bd5d38a495 --- /dev/null +++ b/test/extensions/filters/common/local_ratelimit/BUILD @@ -0,0 +1,18 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "local_ratelimit_test", + srcs = ["local_ratelimit_test.cc"], + deps = [ + "//source/extensions/filters/common/local_ratelimit:local_ratelimit_lib", + "//test/mocks/event:event_mocks", + ], +) diff --git a/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc b/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc new file mode 100644 index 000000000000..a6142dfb16aa --- /dev/null +++ b/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc @@ -0,0 +1,170 @@ +#include "extensions/filters/common/local_ratelimit/local_ratelimit_impl.h" + +#include "test/mocks/event/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace LocalRateLimit { + +class LocalRateLimiterImplTest : public testing::Test { +public: + void initialize(const std::chrono::milliseconds fill_interval, const uint32_t max_tokens, + const uint32_t tokens_per_fill) { + + fill_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*fill_timer_, enableTimer(_, nullptr)); + EXPECT_CALL(*fill_timer_, disableTimer()); + + rate_limiter_ = std::make_shared(fill_interval, max_tokens, + tokens_per_fill, dispatcher_); + } + + Thread::ThreadSynchronizer& synchronizer() { return rate_limiter_->synchronizer_; } + + NiceMock dispatcher_; + Event::MockTimer* fill_timer_{}; + std::shared_ptr rate_limiter_; +}; + +// Make sure we fail with a fill rate this is too fast. +TEST_F(LocalRateLimiterImplTest, TooFastFillRate) { + EXPECT_THROW_WITH_MESSAGE( + LocalRateLimiterImpl(std::chrono::milliseconds(49), 100, 1, dispatcher_), EnvoyException, + "local rate limit token bucket fill timer must be >= 50ms"); +} + +// Verify various token bucket CAS edge cases. +TEST_F(LocalRateLimiterImplTest, CasEdgeCases) { + // This tests the case in which an allowed check races with the fill timer. + { + initialize(std::chrono::milliseconds(50), 1, 1); + + synchronizer().enable(); + + // Start a thread and start the fill callback. This will wait pre-CAS. + synchronizer().waitOn("on_fill_timer_pre_cas"); + std::thread t1([&] { + EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); + fill_timer_->invokeCallback(); + }); + // Wait until the thread is actually waiting. + synchronizer().barrierOn("on_fill_timer_pre_cas"); + + // This should succeed. + EXPECT_TRUE(rate_limiter_->requestAllowed()); + + // Now signal the thread to continue which should cause a CAS failure and the loop to repeat. 
+ synchronizer().signal("on_fill_timer_pre_cas"); + t1.join(); + + // 1 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); + } + + // This tests the case in which two allowed checks race. + { + initialize(std::chrono::milliseconds(200), 1, 1); + + synchronizer().enable(); + + // Start a thread and see if we are under limit. This will wait pre-CAS. + synchronizer().waitOn("allowed_pre_cas"); + std::thread t1([&] { EXPECT_FALSE(rate_limiter_->requestAllowed()); }); + // Wait until the thread is actually waiting. + synchronizer().barrierOn("allowed_pre_cas"); + + // Consume a token on this thread, which should cause the CAS to fail on the other thread. + EXPECT_TRUE(rate_limiter_->requestAllowed()); + synchronizer().signal("allowed_pre_cas"); + t1.join(); + } +} + +// Verify token bucket functionality with a single token. +TEST_F(LocalRateLimiterImplTest, TokenBucket) { + initialize(std::chrono::milliseconds(200), 1, 1); + + // 1 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); + + // 0 -> 1 tokens + EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); + fill_timer_->invokeCallback(); + + // 1 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); + + // 0 -> 1 tokens + EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); + fill_timer_->invokeCallback(); + + // 1 -> 1 tokens + EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); + fill_timer_->invokeCallback(); + + // 1 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); +} + +// Verify token bucket functionality with max tokens and tokens per fill > 1. +TEST_F(LocalRateLimiterImplTest, TokenBucketMultipleTokensPerFill) { + initialize(std::chrono::milliseconds(200), 2, 2); + + // 2 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); + + // 0 -> 2 tokens + EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); + fill_timer_->invokeCallback(); + + // 2 -> 1 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + + // 1 -> 2 tokens + EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); + fill_timer_->invokeCallback(); + + // 2 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); +} + +// Verify token bucket functionality with max tokens > tokens per fill. 
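The CasEdgeCases test above uses the ThreadSynchronizer to park a second thread just before the rate limiter's compare-and-swap, then lets the main thread win the race so the CAS fails and retries. Below is a standalone sketch of the lock-free token-bucket idea those loops implement; it is an approximation of the technique under test, not the LocalRateLimiterImpl source.

#include <algorithm>
#include <atomic>
#include <cstdint>

struct TokenBucketSketch {
  std::atomic<uint32_t> tokens{0};
  uint32_t max_tokens{0};
  uint32_t tokens_per_fill{0};

  // Per request: atomically take one token if any are left.
  bool requestAllowed() {
    uint32_t expected = tokens.load(std::memory_order_relaxed);
    do {
      if (expected == 0) {
        return false;
      }
      // The CAS fails if the fill timer or another request raced with us;
      // compare_exchange_weak reloads 'expected' and the loop retries.
    } while (!tokens.compare_exchange_weak(expected, expected - 1, std::memory_order_relaxed));
    return true;
  }

  // From the fill timer: add tokens_per_fill, capped at max_tokens.
  void onFillTimer() {
    uint32_t expected = tokens.load(std::memory_order_relaxed);
    uint32_t desired;
    do {
      desired = std::min(max_tokens, expected + tokens_per_fill);
    } while (!tokens.compare_exchange_weak(expected, desired, std::memory_order_relaxed));
  }
};

The "1 -> 0 tokens" style comments in the TokenBucket tests trace exactly these transitions, and the 50ms minimum asserted by TooFastFillRate bounds how often the fill path can run.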
+TEST_F(LocalRateLimiterImplTest, TokenBucketMaxTokensGreaterThanTokensPerFill) { + initialize(std::chrono::milliseconds(200), 2, 1); + + // 2 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); + + // 0 -> 1 tokens + EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); + fill_timer_->invokeCallback(); + + // 1 -> 0 tokens + EXPECT_TRUE(rate_limiter_->requestAllowed()); + EXPECT_FALSE(rate_limiter_->requestAllowed()); +} + +} // Namespace LocalRateLimit +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/common/lua/lua_wrappers.h b/test/extensions/filters/common/lua/lua_wrappers.h index b6664b7e7c18..8dfcc0d07fe9 100644 --- a/test/extensions/filters/common/lua/lua_wrappers.h +++ b/test/extensions/filters/common/lua/lua_wrappers.h @@ -19,7 +19,7 @@ namespace Lua { // A helper to be called inside the registered closure. class Printer { public: - MOCK_CONST_METHOD1(testPrint, void(const std::string&)); + MOCK_METHOD(void, testPrint, (const std::string&), (const)); }; const Printer& getPrinter() { CONSTRUCT_ON_FIRST_USE(Printer); } diff --git a/test/extensions/filters/common/lua/wrappers_test.cc b/test/extensions/filters/common/lua/wrappers_test.cc index 8e946b73fcc7..bbb334ef4d3d 100644 --- a/test/extensions/filters/common/lua/wrappers_test.cc +++ b/test/extensions/filters/common/lua/wrappers_test.cc @@ -76,6 +76,8 @@ TEST_F(LuaBufferWrapperTest, Methods) { testPrint(object:length()) testPrint(object:getBytes(0, 2)) testPrint(object:getBytes(6, 5)) + testPrint(object:setBytes("neverland")) + testPrint(object:getBytes(0, 5)) end )EOF"}; @@ -85,6 +87,8 @@ TEST_F(LuaBufferWrapperTest, Methods) { EXPECT_CALL(printer_, testPrint("11")); EXPECT_CALL(printer_, testPrint("he")); EXPECT_CALL(printer_, testPrint("world")); + EXPECT_CALL(printer_, testPrint("9")); + EXPECT_CALL(printer_, testPrint("never")); start("callMe"); } diff --git a/test/extensions/filters/common/ratelimit/mocks.h b/test/extensions/filters/common/ratelimit/mocks.h index f04b1582da68..325f61cc79d7 100644 --- a/test/extensions/filters/common/ratelimit/mocks.h +++ b/test/extensions/filters/common/ratelimit/mocks.h @@ -4,6 +4,7 @@ #include #include "envoy/ratelimit/ratelimit.h" +#include "envoy/stream_info/stream_info.h" #include "extensions/filters/common/ratelimit/ratelimit.h" @@ -25,7 +26,7 @@ class MockClient : public Client { MOCK_METHOD(void, limit, (RequestCallbacks & callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span)); + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info)); }; } // namespace RateLimit diff --git a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc index 65242eb83c1f..319596f436a9 100644 --- a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc +++ b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc @@ -13,6 +13,7 @@ #include "extensions/filters/common/ratelimit/ratelimit_impl.h" #include "test/mocks/grpc/mocks.h" +#include "test/mocks/stream_info/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" @@ -60,6 +61,7 @@ class RateLimitGrpcClientTest : public testing::Test { GrpcClientImpl client_; MockRequestCallbacks request_callbacks_; Tracing::MockSpan span_; + 
StreamInfo::MockStreamInfo stream_info_; }; TEST_F(RateLimitGrpcClientTest, Basic) { @@ -80,7 +82,8 @@ TEST_F(RateLimitGrpcClientTest, Basic) { return &async_request_; })); - client_.limit(request_callbacks_, "foo", {{{{"foo", "bar"}}}}, Tracing::NullSpan::instance()); + client_.limit(request_callbacks_, "foo", {{{{"foo", "bar"}}}}, Tracing::NullSpan::instance(), + stream_info_); client_.onCreateInitialMetadata(headers); EXPECT_EQ(nullptr, headers.RequestId()); @@ -100,7 +103,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { .WillOnce(Return(&async_request_)); client_.limit(request_callbacks_, "foo", {{{{"foo", "bar"}, {"bar", "baz"}}}}, - Tracing::NullSpan::instance()); + Tracing::NullSpan::instance(), stream_info_); client_.onCreateInitialMetadata(headers); @@ -121,7 +124,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { client_.limit(request_callbacks_, "foo", {{{{"foo", "bar"}, {"bar", "baz"}}}, {{{"foo2", "bar2"}, {"bar2", "baz2"}}}}, - Tracing::NullSpan::instance()); + Tracing::NullSpan::instance(), stream_info_); response = std::make_unique(); EXPECT_CALL(request_callbacks_, complete_(LimitStatus::Error, _, _, _)); @@ -140,7 +143,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { client_.limit( request_callbacks_, "foo", {{{{"foo", "bar"}, {"bar", "baz"}}, {{42, envoy::type::v3::RateLimitUnit::MINUTE}}}}, - Tracing::NullSpan::instance()); + Tracing::NullSpan::instance(), stream_info_); client_.onCreateInitialMetadata(headers); @@ -157,7 +160,8 @@ TEST_F(RateLimitGrpcClientTest, Cancel) { EXPECT_CALL(*async_client_, sendRaw(_, _, _, _, _, _)).WillOnce(Return(&async_request_)); - client_.limit(request_callbacks_, "foo", {{{{"foo", "bar"}}}}, Tracing::NullSpan::instance()); + client_.limit(request_callbacks_, "foo", {{{{"foo", "bar"}}}}, Tracing::NullSpan::instance(), + stream_info_); EXPECT_CALL(async_request_, cancel()); client_.cancel(); diff --git a/test/extensions/filters/common/rbac/BUILD b/test/extensions/filters/common/rbac/BUILD index 64e405da4d91..7da14bb32fdd 100644 --- a/test/extensions/filters/common/rbac/BUILD +++ b/test/extensions/filters/common/rbac/BUILD @@ -44,6 +44,15 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "utility_test", + srcs = ["utility_test.cc"], + extension_name = "envoy.filters.http.rbac", + deps = [ + "//source/extensions/filters/common/rbac:utility_lib", + ], +) + envoy_extension_cc_mock( name = "engine_mocks", hdrs = ["mocks.h"], diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index b9d8608a9208..e3727da57d7c 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -267,6 +267,26 @@ TEST(RoleBasedAccessControlEngineImpl, MistypedCondition) { checkEngine(engine, false, LogResult::Undecided); } +TEST(RoleBasedAccessControlEngineImpl, EvaluationFailure) { + envoy::config::rbac::v3::Policy policy; + policy.add_permissions()->set_any(true); + policy.add_principals()->set_any(true); + policy.mutable_condition()->MergeFrom( + TestUtility::parseYaml(R"EOF( + select_expr: + operand: + const_expr: + string_value: request + field: undefined + )EOF")); + + envoy::config::rbac::v3::RBAC rbac; + rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); + (*rbac.mutable_policies())["foo"] = policy; + RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + checkEngine(engine, false, LogResult::Undecided); +} + TEST(RoleBasedAccessControlEngineImpl, ErrorCondition) { envoy::config::rbac::v3::Policy policy; 
policy.add_permissions()->set_any(true); diff --git a/test/extensions/filters/common/rbac/utility_test.cc b/test/extensions/filters/common/rbac/utility_test.cc new file mode 100644 index 000000000000..35e3aad87140 --- /dev/null +++ b/test/extensions/filters/common/rbac/utility_test.cc @@ -0,0 +1,23 @@ +#include "extensions/filters/common/rbac/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace RBAC { +namespace { + +TEST(ResponseDetail, ResponseDetail) { + EXPECT_EQ(RBAC::responseDetail("abdfxy"), "rbac_access_denied_matched_policy[abdfxy]"); + EXPECT_EQ(RBAC::responseDetail("ab df xy"), "rbac_access_denied_matched_policy[ab_df__xy]"); + EXPECT_EQ(RBAC::responseDetail("a \t\f\v\n\ry"), "rbac_access_denied_matched_policy[a______y]"); +} + +} // namespace +} // namespace RBAC +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/adaptive_concurrency/BUILD b/test/extensions/filters/http/adaptive_concurrency/BUILD index a89c75b36d6d..c9f5333b75e9 100644 --- a/test/extensions/filters/http/adaptive_concurrency/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/BUILD @@ -34,6 +34,8 @@ envoy_extension_cc_test( "adaptive_concurrency_filter_integration_test.h", ], extension_name = "envoy.filters.http.adaptive_concurrency", + # TODO(envoyproxy/windows-dev): diagnose clang-cl build test failure + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/adaptive_concurrency:config", "//source/extensions/filters/http/fault:config", diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.cc b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.cc index de7ab8ce24bf..05449084a256 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.cc @@ -20,6 +20,10 @@ void AdaptiveConcurrencyIntegrationTest::sendRequests(uint32_t request_count, // doesn't respond between the client sending headers and data, invalidating the client's encoder // stream. We should change this integration test to allow for the ability to test this scenario. + if (use_grpc_) { + default_request_headers_.setContentType(Http::Headers::get().ContentTypeValues.Grpc); + } + // We expect these requests to reach the upstream. for (uint32_t idx = 0; idx < num_forwarded; ++idx) { auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); @@ -103,9 +107,7 @@ void AdaptiveConcurrencyIntegrationTest::respondToRequest(bool expect_forwarded) INSTANTIATE_TEST_SUITE_P(IpVersions, AdaptiveConcurrencyIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); -/** - * Test a single request returns successfully. - */ +// Test a single request returns successfully. TEST_P(AdaptiveConcurrencyIntegrationTest, TestConcurrency1) { customInit(); @@ -115,9 +117,7 @@ TEST_P(AdaptiveConcurrencyIntegrationTest, TestConcurrency1) { test_server_->waitForCounterEq(REQUEST_BLOCK_COUNTER_NAME, 1); } -/** - * Test many requests, where only a single request returns 200 during the minRTT window. - */ +// Test many requests, where only a single request returns 200 during the minRTT window. 
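The new rbac/utility_test.cc above pins down RBAC::responseDetail: the matched policy name is wrapped in rbac_access_denied_matched_policy[...] with every whitespace character replaced by a single underscore. A standalone sketch consistent with those expectations (illustrative, not the utility's actual source):

#include <algorithm>
#include <cctype>
#include <string>

std::string responseDetail(const std::string& policy_name) {
  std::string sanitized = policy_name;
  // Replace each whitespace character (space, \t, \f, \v, \n, \r) with '_'
  // so the detail stays a single token.
  std::replace_if(
      sanitized.begin(), sanitized.end(),
      [](unsigned char c) { return std::isspace(c) != 0; }, '_');
  return "rbac_access_denied_matched_policy[" + sanitized + "]";
}

For example, responseDetail("a \t\f\v\n\ry") produces "rbac_access_denied_matched_policy[a______y]", matching the test.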
TEST_P(AdaptiveConcurrencyIntegrationTest, TestManyConcurrency1) { customInit(); @@ -127,6 +127,17 @@ TEST_P(AdaptiveConcurrencyIntegrationTest, TestManyConcurrency1) { test_server_->waitForCounterEq(REQUEST_BLOCK_COUNTER_NAME, 9); } +// Test many grpc requests, where only a single request returns 200 during the minRTT window. +TEST_P(AdaptiveConcurrencyIntegrationTest, TestManyConcurrencyGrpc) { + use_grpc_ = true; + customInit(); + + EXPECT_EQ(0, test_server_->counter(REQUEST_BLOCK_COUNTER_NAME)->value()); + sendRequests(10, 1); + respondToAllRequests(1, std::chrono::milliseconds(5)); + test_server_->waitForCounterEq(REQUEST_BLOCK_COUNTER_NAME, 9); +} + /** * TODO: Test the ability to increase/decrease the concurrency limit with request latencies based on * the minRTT value. diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h index a4f6d35b3dd2..06d8a457e264 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h @@ -71,12 +71,19 @@ class AdaptiveConcurrencyIntegrationTest } void verifyResponseBlocked(IntegrationStreamDecoderPtr response) { - EXPECT_EQ("503", response->headers().getStatusValue()); + if (use_grpc_) { + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("reached concurrency limit", response->headers().getGrpcMessageValue()); + } else { + EXPECT_EQ("503", response->headers().getStatusValue()); + EXPECT_EQ("reached concurrency limit", response->body()); + } } std::deque responses_; std::deque upstream_requests_; std::deque upstream_connections_; + bool use_grpc_{}; }; } // namespace Envoy diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc index 5742385d5cc5..e05cbea0ce56 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc @@ -115,6 +115,76 @@ TEST_F(AdaptiveConcurrencyFilterTest, TestEnableOverriddenFromRuntime) { filter_->encodeComplete(); } +TEST_F(AdaptiveConcurrencyFilterTest, TestNanosValidationFail) { + std::string yaml_config = + R"EOF( +gradient_controller_config: + sample_aggregate_percentile: + value: 50 + concurrency_limit_params: + concurrency_update_interval: + nanos: 100000000 # 100ms + min_rtt_calc_params: + interval: + nanos: 8 + request_count: 50 +enabled: + default_value: true + runtime_key: "adaptive_concurrency.enabled" +)EOF"; + + EXPECT_THROW(auto config = makeConfig(yaml_config), ProtoValidationException); +} + +TEST_F(AdaptiveConcurrencyFilterTest, TestNanosValidationPass) { + std::string yaml_config = + R"EOF( +gradient_controller_config: + sample_aggregate_percentile: + value: 50 + concurrency_limit_params: + concurrency_update_interval: + nanos: 100000000 # 100ms + min_rtt_calc_params: + interval: + nanos: 1000000 + request_count: 50 +enabled: + default_value: true + runtime_key: "adaptive_concurrency.enabled" +)EOF"; + + auto config = makeConfig(yaml_config); + + auto config_ptr = std::make_shared( + config, runtime_, "testprefix.", stats_, time_system_); + filter_ = std::make_unique(config_ptr, controller_); + 
filter_->setDecoderFilterCallbacks(decoder_callbacks_); + filter_->setEncoderFilterCallbacks(encoder_callbacks_); + + // The filter should behave as normal here. + + Http::TestRequestHeaderMapImpl request_headers; + + // The filter will be disabled when the flag is overridden. Note there is no expected call to + // forwardingDecision() or recordLatencySample(). + + EXPECT_CALL(runtime_.snapshot_, getBoolean("adaptive_concurrency.enabled", true)) + .WillOnce(Return(false)); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); + + Buffer::OwnedImpl request_body; + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false)); + + Http::TestRequestTrailerMapImpl request_trailers; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers)); + + Http::TestResponseHeaderMapImpl response_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); + filter_->encodeComplete(); +} + TEST_F(AdaptiveConcurrencyFilterTest, TestEnableConfiguredInProto) { std::string yaml_config = R"EOF( diff --git a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc index 3134e30c906c..8e0eff116af5 100644 --- a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc @@ -57,7 +57,8 @@ class GradientControllerTest : public testing::Test { stats_, random_, time_system_); // Advance time so that the latency sample calculations don't underflow if monotonic time is 0. - time_system_.advanceTimeAsync(std::chrono::hours(42)); + time_system_.advanceTimeAndRun(std::chrono::hours(42), *dispatcher_, + Event::Dispatcher::RunType::Block); return config; } @@ -258,7 +259,7 @@ TEST_F(GradientControllerTest, MinRTTEpoch) { const int min_concurrency = 2; auto controller = makeController(yaml); const auto min_rtt = std::chrono::milliseconds(1350); - time_system_.advanceTimeAsync(min_rtt); + time_system_.advanceTimeAndRun(min_rtt, *dispatcher_, Event::Dispatcher::RunType::Block); verifyMinRTTActive(); EXPECT_EQ(controller->concurrencyLimit(), min_concurrency); @@ -270,7 +271,8 @@ TEST_F(GradientControllerTest, MinRTTEpoch) { uint32_t last_limit = controller->concurrencyLimit(); for (int i = 0; i < 29; ++i) { tryForward(controller, true); - time_system_.advanceTimeAsync(std::chrono::seconds(1)); + time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_, + Event::Dispatcher::RunType::Block); sampleLatency(controller, min_rtt); dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_GT(controller->concurrencyLimit(), last_limit); @@ -286,8 +288,8 @@ TEST_F(GradientControllerTest, MinRTTEpoch) { } // Move into the next minRTT window while the requests are outstanding. 
- time_system_.advanceTimeAsync(std::chrono::seconds(5)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::seconds(5), *dispatcher_, + Event::Dispatcher::RunType::Block); verifyMinRTTActive(); EXPECT_EQ(controller->concurrencyLimit(), min_concurrency); @@ -330,7 +332,7 @@ TEST_F(GradientControllerTest, MinRTTLogicTest) { } tryForward(controller, false); tryForward(controller, false); - time_system_.advanceTimeAsync(min_rtt); + time_system_.advanceTimeAndRun(min_rtt, *dispatcher_, Event::Dispatcher::RunType::Block); for (int i = 0; i < 7; ++i) { sampleLatency(controller, min_rtt); } @@ -427,8 +429,8 @@ TEST_F(GradientControllerTest, MinRTTBufferTest) { // prevent the concurrency limit from decreasing. sampleLatency(controller, std::chrono::milliseconds(6)); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_GT(controller->concurrencyLimit(), last_concurrency); } } @@ -459,8 +461,8 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { // Ensure that the concurrency window increases on its own due to the headroom calculation with // the max gradient. - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_GE(controller->concurrencyLimit(), 7); EXPECT_LE(controller->concurrencyLimit() / 7.0, 2.0); @@ -472,8 +474,8 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { tryForward(controller, true); sampleLatency(controller, std::chrono::milliseconds(4)); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); // Verify the minimum gradient. EXPECT_LE(last_concurrency, controller->concurrencyLimit()); EXPECT_GE(static_cast(last_concurrency) / controller->concurrencyLimit(), 0.5); @@ -486,8 +488,8 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { tryForward(controller, true); sampleLatency(controller, std::chrono::milliseconds(6)); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_LT(controller->concurrencyLimit(), last_concurrency); EXPECT_GE(controller->concurrencyLimit(), 7); } @@ -513,7 +515,8 @@ TEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) { // Get initial minRTT measurement out of the way and advance time so request samples are not // thought to come from the previous minRTT epoch. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - time_system_.advanceTimeAsync(std::chrono::seconds(1)); + time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_, + Event::Dispatcher::RunType::Block); // Force the limit calculation to run a few times from some measurements. 
for (int sample_iters = 0; sample_iters < 5; ++sample_iters) { @@ -522,8 +525,8 @@ TEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) { tryForward(controller, true); sampleLatency(controller, std::chrono::milliseconds(4)); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); // Verify the value is growing. EXPECT_GT(controller->concurrencyLimit(), last_concurrency); } @@ -531,12 +534,13 @@ TEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) { const auto limit_val = controller->concurrencyLimit(); // Wait until the minRTT recalculation is triggered again and verify the limit drops. - time_system_.advanceTimeAsync(std::chrono::seconds(31)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::seconds(31), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_EQ(controller->concurrencyLimit(), 3); // Advance time again for request samples to appear from the current epoch. - time_system_.advanceTimeAsync(std::chrono::seconds(1)); + time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_, + Event::Dispatcher::RunType::Block); // 49 more requests should cause the minRTT to be done calculating. for (int i = 0; i < 5; ++i) { @@ -569,7 +573,8 @@ TEST_F(GradientControllerTest, MinRTTRescheduleTest) { // Get initial minRTT measurement out of the way and advance time so request samples are not // thought to come from the previous minRTT epoch. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - time_system_.advanceTimeAsync(std::chrono::seconds(1)); + time_system_.advanceTimeAndRun(std::chrono::seconds(1), *dispatcher_, + Event::Dispatcher::RunType::Block); // Force the limit calculation to run a few times from some measurements. for (int sample_iters = 0; sample_iters < 5; ++sample_iters) { @@ -578,20 +583,20 @@ TEST_F(GradientControllerTest, MinRTTRescheduleTest) { tryForward(controller, true); sampleLatency(controller, std::chrono::milliseconds(4)); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); // Verify the value is growing. EXPECT_GT(controller->concurrencyLimit(), last_concurrency); } // Wait until the minRTT recalculation is triggered again and verify the limit drops. - time_system_.advanceTimeAsync(std::chrono::seconds(31)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::seconds(31), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_EQ(controller->concurrencyLimit(), 3); // Verify sample recalculation doesn't occur during the minRTT window. 
- time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_EQ(controller->concurrencyLimit(), 3); } @@ -622,8 +627,8 @@ TEST_F(GradientControllerTest, NoSamplesTest) { tryForward(controller, true); sampleLatency(controller, std::chrono::milliseconds(4)); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); // Verify the value is growing. EXPECT_GT(controller->concurrencyLimit(), last_concurrency); } @@ -631,8 +636,8 @@ TEST_F(GradientControllerTest, NoSamplesTest) { // Now we make sure that the limit value doesn't change in the absence of samples. for (int sample_iters = 0; sample_iters < 5; ++sample_iters) { const auto old_limit = controller->concurrencyLimit(); - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_EQ(old_limit, controller->concurrencyLimit()); } } @@ -676,7 +681,8 @@ TEST_F(GradientControllerTest, TimerAccuracyTest) { EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); for (int i = 0; i < 6; ++i) { tryForward(controller, true); - time_system_.advanceTimeAsync(std::chrono::milliseconds(5)); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(5), *dispatcher_, + Event::Dispatcher::RunType::Block); sampleLatency(controller, std::chrono::milliseconds(5)); } } @@ -716,7 +722,8 @@ TEST_F(GradientControllerTest, TimerAccuracyTestNoJitter) { EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); for (int i = 0; i < 6; ++i) { tryForward(controller, true); - time_system_.advanceTimeAsync(std::chrono::milliseconds(5)); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(5), *dispatcher_, + Event::Dispatcher::RunType::Block); sampleLatency(controller, std::chrono::milliseconds(5)); } } @@ -749,8 +756,8 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { // Ensure that the concurrency window increases on its own due to the headroom calculation with // the max gradient. - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_GE(controller->concurrencyLimit(), 7); EXPECT_LE(controller->concurrencyLimit() / 7.0, 2.0); @@ -762,8 +769,8 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { tryForward(controller, true); sampleLatency(controller, elevated_latency); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); } // Verify that the concurrency limit starts growing with newly measured minRTT. 
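The repeated change through gradient_controller_test.cc replaces the two-step advanceTimeAsync plus explicit dispatcher run with advanceTimeAndRun, which (as used here) advances simulated time and runs the dispatcher so timers that became due fire in a single call. Side by side, using the fixture members from the test above:

// Before: advance simulated time, then run the dispatcher so due timers fire.
time_system_.advanceTimeAsync(std::chrono::milliseconds(101));
dispatcher_->run(Event::Dispatcher::RunType::Block);

// After: one call that advances time and runs the dispatcher until it is idle.
time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_,
                               Event::Dispatcher::RunType::Block);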
@@ -773,8 +780,8 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { tryForward(controller, true); sampleLatency(controller, elevated_latency); } - time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); - dispatcher_->run(Event::Dispatcher::RunType::Block); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(101), *dispatcher_, + Event::Dispatcher::RunType::Block); EXPECT_GE(controller->concurrencyLimit(), last_concurrency); } } diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD index 4551c349a40f..809a7c66a4e9 100644 --- a/test/extensions/filters/http/admission_control/BUILD +++ b/test/extensions/filters/http/admission_control/BUILD @@ -37,6 +37,7 @@ envoy_extension_cc_test( "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//source/extensions/filters/http/admission_control:config", "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc index d8ba63e72382..2bfc77f46212 100644 --- a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -98,6 +98,29 @@ class AdmissionControlTest : public testing::Test { filter_->encodeHeaders(headers, true); } + void verifyProbabilities(int success_rate, double expected_rejection_probability) { + // Success rate will be the same as the number of successful requests if the total request count + // is 100. + constexpr int total_request_count = 100; + EXPECT_CALL(controller_, requestCounts()) + .WillRepeatedly(Return(RequestData(total_request_count, success_rate))); + EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true)); + + Http::TestRequestHeaderMapImpl request_headers; + uint32_t rejection_count = 0; + // Assuming 4 significant figures in rejection probability calculation. + const auto accuracy = 1e4; + for (int i = 0; i < accuracy; ++i) { + EXPECT_CALL(random_, random()).WillRepeatedly(Return(i)); + if (filter_->decodeHeaders(request_headers, true) != Http::FilterHeadersStatus::Continue) { + ++rejection_count; + } + } + + EXPECT_NEAR(static_cast(rejection_count) / accuracy, expected_rejection_probability, + 0.01); + } + protected: std::string stats_prefix_; NiceMock runtime_; @@ -114,7 +137,7 @@ class AdmissionControlTest : public testing::Test { default_value: true runtime_key: "foo.enabled" sampling_window: 10s -aggression_coefficient: +aggression: default_value: 1.0 runtime_key: "foo.aggression" success_criteria: @@ -130,7 +153,7 @@ TEST_F(AdmissionControlTest, FilterRuntimeOverride) { default_value: true runtime_key: "foo.enabled" sampling_window: 10s -aggression_coefficient: +aggression: default_value: 1.0 runtime_key: "foo.aggression" success_criteria: @@ -282,6 +305,51 @@ TEST_F(AdmissionControlTest, GrpcSuccessBehavior) { TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); } +// Validate rejection probabilities. 
+TEST_F(AdmissionControlTest, RejectionProbability) { + std::string yaml = R"EOF( +enabled: + default_value: true + runtime_key: "foo.enabled" +sampling_window: 10s +sr_threshold: + default_value: + value: 100.0 + runtime_key: "foo.threshold" +aggression: + default_value: 1.0 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + auto config = makeConfig(yaml); + setupFilter(config); + + verifyProbabilities(100 /* success rate */, 0.0 /* expected rejection probability */); + verifyProbabilities(95, 0.05); + verifyProbabilities(75, 0.25); + + // Increase aggression and expect higher rejection probabilities for the same values. + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.aggression", 1.0)).WillRepeatedly(Return(2.0)); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.threshold", 100.0)).WillRepeatedly(Return(100.0)); + verifyProbabilities(100, 0.0); + verifyProbabilities(95, 0.22); + verifyProbabilities(75, 0.5); + + // Lower the success rate threshold and expect the rejections to begin at a lower SR and increase + // from there. + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.aggression", 1.0)).WillRepeatedly(Return(1.0)); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.threshold", 100.0)).WillRepeatedly(Return(95.0)); + verifyProbabilities(100, 0.0); + verifyProbabilities(98, 0.0); + verifyProbabilities(95, 0.0); + verifyProbabilities(90, 0.05); + verifyProbabilities(75, 0.20); + verifyProbabilities(50, 0.46); +} + } // namespace } // namespace AdmissionControl } // namespace HttpFilters diff --git a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc index 578f39db10c3..ce22454a68f4 100644 --- a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -17,9 +17,13 @@ name: envoy.filters.http.admission_control http_criteria: grpc_criteria: sampling_window: 120s - aggression_coefficient: - default_value: 1.0 + aggression: + default_value: 2.0 runtime_key: "foo.aggression" + sr_threshold: + default_value: + value: 100.0 + runtime_key: "foo.sr_threshold" enabled: default_value: true runtime_key: "foo.enabled" @@ -115,8 +119,8 @@ TEST_P(AdmissionControlIntegrationTest, HttpTest) { } // Given the current throttling rate formula with an aggression of 1, it should result in a ~98% - // throttling rate. Allowing an error of 3%. - EXPECT_NEAR(throttle_count / request_count, 0.98, 0.03); + // throttling rate. Allowing an error of 5%. + EXPECT_NEAR(throttle_count / request_count, 0.98, 0.05); // We now wait for the history to become stale. timeSystem().advanceTimeWait(std::chrono::seconds(120)); @@ -155,8 +159,8 @@ TEST_P(AdmissionControlIntegrationTest, GrpcTest) { } // Given the current throttling rate formula with an aggression of 1, it should result in a ~98% - // throttling rate. Allowing an error of 3%. - EXPECT_NEAR(throttle_count / request_count, 0.98, 0.03); + // throttling rate. Allowing an error of 5%. + EXPECT_NEAR(throttle_count / request_count, 0.98, 0.05); // We now wait for the history to become stale. 
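The probabilities asserted in RejectionProbability above (for example 0.05 at a 95% success rate with aggression 1.0 and a 100% threshold, 0.22 with aggression 2.0, and a delayed onset once the threshold drops to 95%) are consistent with a rejection formula of the following shape. This is a sketch read back from the test expectations; the filter source remains the authority on the exact expression.

#include <algorithm>
#include <cmath>

// p = ( max(0, total - successes / threshold) / (total + 1) ) ^ (1 / aggression)
// 'threshold' is the success-rate threshold as a fraction (0.95 for 95%);
// larger 'aggression' makes throttling ramp up more quickly.
double rejectionProbability(double total, double successes, double threshold, double aggression) {
  const double scaled_successes = successes / threshold;
  const double base = std::max(0.0, total - scaled_successes) / (total + 1.0);
  return std::pow(base, 1.0 / aggression);
}

// rejectionProbability(100, 95, 1.0, 1.0)  ~= 0.05
// rejectionProbability(100, 75, 1.0, 2.0)  ~= 0.50
// rejectionProbability(100, 90, 0.95, 1.0) ~= 0.05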
timeSystem().advanceTimeWait(std::chrono::seconds(120)); diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc index cd7b6b212f1e..2fba5f26016f 100644 --- a/test/extensions/filters/http/admission_control/config_test.cc +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -6,6 +6,7 @@ #include "common/stats/isolated_store_impl.h" #include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/config.h" #include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" #include "test/mocks/runtime/mocks.h" @@ -46,6 +47,36 @@ class AdmissionControlConfigTest : public testing::Test { NiceMock random_; }; +// Ensure the filter ingest throws an exception if it is passed a config with a default value of 0 +// for sr_threshold If exception was not thrown, a default value of 0 for sr_threshold induces a +// divide by zero error +TEST_F(AdmissionControlConfigTest, ZeroSuccessRateThreshold) { + AdmissionControlFilterFactory admission_control_filter_factory; + const std::string yaml = R"EOF( +enabled: + default_value: false + runtime_key: "foo.enabled" +sampling_window: 1337s +sr_threshold: + default_value: + value: 0 + runtime_key: "foo.sr_threshold" +aggression: + default_value: 4.2 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + AdmissionControlProto proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + NiceMock factory_context; + EXPECT_THROW_WITH_MESSAGE(admission_control_filter_factory.createFilterFactoryFromProtoTyped( + proto, "whatever", factory_context), + EnvoyException, "Success Rate Threshold cannot be zero percent"); +} + // Verify the configuration when all fields are set. TEST_F(AdmissionControlConfigTest, BasicTestAllConfigured) { const std::string yaml = R"EOF( @@ -53,7 +84,11 @@ TEST_F(AdmissionControlConfigTest, BasicTestAllConfigured) { default_value: false runtime_key: "foo.enabled" sampling_window: 1337s -aggression_coefficient: +sr_threshold: + default_value: + value: 92 + runtime_key: "foo.sr_threshold" +aggression: default_value: 4.2 runtime_key: "foo.aggression" success_criteria: @@ -65,6 +100,7 @@ sampling_window: 1337s EXPECT_FALSE(config->filterEnabled()); EXPECT_EQ(4.2, config->aggression()); + EXPECT_EQ(0.92, config->successRateThreshold()); } // Verify the config defaults when not specified. @@ -80,7 +116,8 @@ TEST_F(AdmissionControlConfigTest, BasicTestMinimumConfigured) { auto config = makeConfig(yaml); EXPECT_TRUE(config->filterEnabled()); - EXPECT_EQ(2.0, config->aggression()); + EXPECT_EQ(1.0, config->aggression()); + EXPECT_EQ(0.95, config->successRateThreshold()); } // Ensure runtime fields are honored. 
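The config_test changes just above, together with the VerifyRuntime expectations below, define how sr_threshold behaves: it is configured as a percentage, surfaces as a fraction (92 becomes 0.92, with 0.95 as the default when unset), and runtime overrides outside [0, 100] fall back to the configured default rather than being clamped. A small illustrative helper with those semantics (hypothetical name, not the config class itself):

// Success-rate threshold as a fraction in [0, 1]. Runtime overrides outside the
// valid percentage range [0, 100] are ignored in favour of the configured default.
double successRateThreshold(double runtime_percent, double default_percent) {
  const double percent = (runtime_percent < 0.0 || runtime_percent > 100.0)
                             ? default_percent
                             : runtime_percent;
  return percent / 100.0;
}

// successRateThreshold(24.0, 92.0)  == 0.24
// successRateThreshold(250.0, 92.0) == 0.92
// successRateThreshold(-1.0, 92.0)  == 0.92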
@@ -90,7 +127,11 @@ TEST_F(AdmissionControlConfigTest, VerifyRuntime) { default_value: false runtime_key: "foo.enabled" sampling_window: 1337s -aggression_coefficient: +sr_threshold: + default_value: + value: 92 + runtime_key: "foo.sr_threshold" +aggression: default_value: 4.2 runtime_key: "foo.aggression" success_criteria: @@ -104,6 +145,14 @@ sampling_window: 1337s EXPECT_TRUE(config->filterEnabled()); EXPECT_CALL(runtime_.snapshot_, getDouble("foo.aggression", 4.2)).WillOnce(Return(1.3)); EXPECT_EQ(1.3, config->aggression()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.sr_threshold", 92)).WillOnce(Return(24.0)); + EXPECT_EQ(0.24, config->successRateThreshold()); + + // Verify bogus runtime thresholds revert to the default value. + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.sr_threshold", 92)).WillOnce(Return(250.0)); + EXPECT_EQ(0.92, config->successRateThreshold()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.sr_threshold", 92)).WillOnce(Return(-1.0)); + EXPECT_EQ(0.92, config->successRateThreshold()); } } // namespace diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc index c752f4ca651b..a65c4425ac03 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc @@ -122,13 +122,13 @@ class AwsLambdaFilterIntegrationTest : public testing::TestWithParamcomplete()); // verify headers - expected_response_headers.iterate( - [actual_headers = &response->headers()](const Http::HeaderEntry& expected_entry) { - const auto* actual_entry = actual_headers->get( - Http::LowerCaseString(std::string(expected_entry.key().getStringView()))); - EXPECT_EQ(actual_entry->value().getStringView(), expected_entry.value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }); + expected_response_headers.iterate([actual_headers = &response->headers()]( + const Http::HeaderEntry& expected_entry) { + const auto actual_entry = actual_headers->get( + Http::LowerCaseString(std::string(expected_entry.key().getStringView()))); + EXPECT_EQ(actual_entry[0]->value().getStringView(), expected_entry.value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); // verify cookies if we have any if (!expected_response_cookies.empty()) { diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc index ab0cf4c2c900..bcc46b3dc5e3 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc @@ -517,11 +517,8 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeTransformToHttp) { ASSERT_NE(nullptr, headers.Status()); EXPECT_EQ("201", headers.getStatusValue()); - EXPECT_EQ(nullptr, headers.get(Http::LowerCaseString(":other"))); - - const auto* custom_header = headers.get(Http::LowerCaseString("x-awesome-header")); - EXPECT_NE(custom_header, nullptr); - EXPECT_EQ("awesome value", custom_header->value().getStringView()); + EXPECT_FALSE(headers.has(":other")); + EXPECT_EQ("awesome value", headers.get_("x-awesome-header")); std::vector cookies; headers.iterate([&cookies](const Http::HeaderEntry& entry) { diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index 7630e0c44fdb..f8fb6a830960 100644 
--- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -72,7 +72,7 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { response_decoder->waitForEndStream(); EXPECT_TRUE(response_decoder->complete()); EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); - EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_TRUE(response_decoder->headers().get(Http::Headers::get().Age).empty()); EXPECT_EQ(response_decoder->body(), response_body); EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr("- via_upstream")); } @@ -129,7 +129,7 @@ TEST_P(CacheIntegrationTest, ExpiredValidated) { response_decoder->waitForEndStream(); EXPECT_TRUE(response_decoder->complete()); EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); - EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_TRUE(response_decoder->headers().get(Http::Headers::get().Age).empty()); EXPECT_EQ(response_decoder->body(), response_body); EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr("- via_upstream")); } @@ -168,7 +168,7 @@ TEST_P(CacheIntegrationTest, ExpiredValidated) { // A response that has been validated should not contain an Age header as it is equivalent to a // freshly served response from the origin, unless the 304 response has an Age header, which // means it was served by an upstream cache. - EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_TRUE(response_decoder->headers().get(Http::Headers::get().Age).empty()); // Advance time to force a log flush. simTime().advanceTimeWait(Seconds(1)); @@ -210,7 +210,7 @@ TEST_P(CacheIntegrationTest, ExpiredFetchedNewResponse) { response_decoder->waitForEndStream(); EXPECT_TRUE(response_decoder->complete()); EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); - EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_TRUE(response_decoder->headers().get(Http::Headers::get().Age).empty()); EXPECT_EQ(response_decoder->body(), response_body); EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr("- via_upstream")); } @@ -249,7 +249,7 @@ TEST_P(CacheIntegrationTest, ExpiredFetchedNewResponse) { EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); EXPECT_EQ(response_decoder->body(), response_body); // Check that age header does not exist as this is not a cached response - EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_TRUE(response_decoder->headers().get(Http::Headers::get().Age).empty()); // Advance time to force a log flush. 
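These cache integration hunks, like the ContainsPairAsHeader and aws_lambda changes earlier in the diff, track the HeaderMap::get() API change: lookups now return a collection of matching headers instead of a single pointer, so absence is spelled .empty() and individual values are reached by index. The idiom as used throughout this diff (response_headers stands in for whatever header map is under test):

// New-style lookup: get() returns every header with the given name.
const auto age_headers = response_headers.get(Http::Headers::get().Age);
if (!age_headers.empty()) {
  // First (typically only) Age value.
  absl::string_view age = age_headers[0]->value().getStringView();
  // ...
}
// age_headers.empty() replaces the old `get(...) == nullptr` check.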
simTime().advanceTimeWait(Seconds(1)); @@ -291,7 +291,7 @@ TEST_P(CacheIntegrationTest, GetRequestWithBodyAndTrailers) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), IsSupersetOfHeaders(response_headers)); - EXPECT_EQ(response->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_TRUE(response->headers().get(Http::Headers::get().Age).empty()); EXPECT_EQ(response->body(), std::string(42, 'a')); } } diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc index 8e963fad01eb..0f29d63d6fe7 100644 --- a/test/extensions/filters/http/cache/cache_headers_utils_test.cc +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -113,7 +113,7 @@ class RequestCacheControlTest : public testing::TestWithParam, // Empty header { - "", + "", // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} {false, false, false, false, false, absl::nullopt} }, @@ -211,7 +211,7 @@ class ResponseCacheControlTest : public testing::TestWithParam ruleset; + absl::flat_hash_set result; + + CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); + + ASSERT_TRUE(result.empty()); } -TEST(HasVary, Empty) { - Http::TestResponseHeaderMapImpl headers{{"vary", ""}}; - ASSERT_FALSE(VaryHeader::hasVary(headers)); +TEST(GetAllMatchingHeaderNames, EmptyHeaderMap) { + Http::TestRequestHeaderMapImpl headers; + std::vector ruleset; + absl::flat_hash_set result; + + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("accept"); + ruleset.emplace_back(std::make_unique(matcher)); + + CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); + + ASSERT_TRUE(result.empty()); } -TEST(HasVary, NotEmpty) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; - ASSERT_TRUE(VaryHeader::hasVary(headers)); +TEST(GetAllMatchingHeaderNames, SingleMatchSingleValue) { + Http::TestRequestHeaderMapImpl headers{{"accept", "image/*"}, {"accept-language", "en-US"}}; + std::vector ruleset; + absl::flat_hash_set result; + + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("accept"); + ruleset.emplace_back(std::make_unique(matcher)); + + CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); + + ASSERT_EQ(result.size(), 1); + EXPECT_TRUE(result.contains("accept")); } -TEST(ParseHeaderValue, Null) { +TEST(GetAllMatchingHeaderNames, SingleMatchMultiValue) { + Http::TestRequestHeaderMapImpl headers{{"accept", "image/*"}, {"accept", "text/html"}}; + std::vector ruleset; + absl::flat_hash_set result; + + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("accept"); + ruleset.emplace_back(std::make_unique(matcher)); + + CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); + + ASSERT_EQ(result.size(), 1); + EXPECT_TRUE(result.contains("accept")); +} + +TEST(GetAllMatchingHeaderNames, MultipleMatches) { + Http::TestRequestHeaderMapImpl headers{{"accept", "image/*"}, {"accept-language", "en-US"}}; + std::vector ruleset; + absl::flat_hash_set result; + + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("accept"); + ruleset.emplace_back(std::make_unique(matcher)); + matcher.set_exact("accept-language"); + ruleset.emplace_back(std::make_unique(matcher)); + + CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); + + ASSERT_EQ(result.size(), 2); + EXPECT_TRUE(result.contains("accept")); + 
EXPECT_TRUE(result.contains("accept-language")); +} + +TEST(ParseCommaDelimitedList, Null) { Http::TestResponseHeaderMapImpl headers; std::vector result = - VaryHeader::parseHeaderValue(headers.get(Http::Headers::get().Vary)); + CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary)); EXPECT_EQ(result.size(), 0); } -TEST(ParseHeaderValue, Empty) { +TEST(ParseCommaDelimitedList, Empty) { Http::TestResponseHeaderMapImpl headers{{"vary", ""}}; std::vector result = - VaryHeader::parseHeaderValue(headers.get(Http::Headers::get().Vary)); + CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary)); EXPECT_EQ(result.size(), 1); EXPECT_EQ(result[0], ""); } -TEST(ParseHeaderValue, SingleValue) { +TEST(ParseCommaDelimitedList, SingleValue) { Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; std::vector result = - VaryHeader::parseHeaderValue(headers.get(Http::Headers::get().Vary)); + CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary)); EXPECT_EQ(result.size(), 1); EXPECT_EQ(result[0], "accept"); } -class ParseHeaderValueMultipleTest : public testing::Test, - public testing::WithParamInterface { +class ParseCommaDelimitedListMultipleTest : public testing::Test, + public testing::WithParamInterface { protected: Http::TestResponseHeaderMapImpl headers{{"vary", GetParam()}}; }; -INSTANTIATE_TEST_SUITE_P(MultipleValuesMixedSpaces, ParseHeaderValueMultipleTest, +INSTANTIATE_TEST_SUITE_P(MultipleValuesMixedSpaces, ParseCommaDelimitedListMultipleTest, testing::Values("accept,accept-language", " accept,accept-language", "accept ,accept-language", "accept, accept-language", "accept,accept-language ", " accept, accept-language ", " accept , accept-language ")); -TEST_P(ParseHeaderValueMultipleTest, MultipleValuesMixedSpaces) { +TEST_P(ParseCommaDelimitedListMultipleTest, MultipleValuesMixedSpaces) { std::vector result = - VaryHeader::parseHeaderValue(headers.get(Http::Headers::get().Vary)); + CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::Headers::get().Vary)); EXPECT_EQ(result.size(), 2); EXPECT_EQ(result[0], "accept"); EXPECT_EQ(result[1], "accept-language"); } -// Set of allowed headers to be varied on the tests. 
-const absl::flat_hash_set allowed_vary_headers = {"accept", "accept-language", - "width"}; - -TEST(VaryIsAllowed, Null) { +TEST(HasVary, Null) { Http::TestResponseHeaderMapImpl headers; - ASSERT_TRUE(VaryHeader::isAllowed(allowed_vary_headers, headers)); + ASSERT_FALSE(VaryHeader::hasVary(headers)); } -TEST(VaryIsAllowed, Empty) { +TEST(HasVary, Empty) { Http::TestResponseHeaderMapImpl headers{{"vary", ""}}; - ASSERT_TRUE(VaryHeader::isAllowed(allowed_vary_headers, headers)); + ASSERT_FALSE(VaryHeader::hasVary(headers)); } -TEST(VaryIsAllowed, SingleAllowed) { +TEST(HasVary, NotEmpty) { Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; - ASSERT_TRUE(VaryHeader::isAllowed(allowed_vary_headers, headers)); -} - -TEST(VaryIsAllowed, MultipleAllowed) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept, accept-language, width"}}; - ASSERT_TRUE(VaryHeader::isAllowed(allowed_vary_headers, headers)); -} - -TEST(VaryIsAllowed, StarNotAllowed) { - Http::TestResponseHeaderMapImpl headers{{"vary", "*"}}; - ASSERT_FALSE(VaryHeader::isAllowed(allowed_vary_headers, headers)); -} - -TEST(VaryIsAllowed, SingleNotAllowed) { - Http::TestResponseHeaderMapImpl headers{{"vary", "wrong-header"}}; - ASSERT_FALSE(VaryHeader::isAllowed(allowed_vary_headers, headers)); -} - -TEST(VaryIsAllowed, MultipleNotAllowed) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept, wrong-header"}}; - ASSERT_FALSE(VaryHeader::isAllowed(allowed_vary_headers, headers)); + ASSERT_TRUE(VaryHeader::hasVary(headers)); } TEST(CreateVaryKey, EmptyVaryEntry) { - Http::TestResponseHeaderMapImpl headers{{"vary", ""}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", ""}}; Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}}; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\n\r\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\n\r\n"); } TEST(CreateVaryKey, SingleHeaderExists) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept"}}; Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}}; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\naccept\r" - "image/*\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\naccept\r" + "image/*\n"); } TEST(CreateVaryKey, SingleHeaderMissing) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept"}}; Http::TestRequestHeaderMapImpl request_headers; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\naccept\r\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\naccept\r\n"); } TEST(CreateVaryKey, MultipleHeadersAllExist) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept, accept-language, width"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language, width"}}; Http::TestRequestHeaderMapImpl request_headers{ {"accept", "image/*"}, {"accept-language", "en-us"}, {"width", 
"640"}}; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\naccept\r" - "image/*\naccept-language\r" - "en-us\nwidth\r640\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\naccept\r" + "image/*\naccept-language\r" + "en-us\nwidth\r640\n"); } TEST(CreateVaryKey, MultipleHeadersSomeExist) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept, accept-language, width"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language, width"}}; Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}, {"width", "640"}}; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\naccept\r" - "image/*\naccept-language\r\nwidth\r640\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\naccept\r" + "image/*\naccept-language\r\nwidth\r640\n"); } TEST(CreateVaryKey, ExtraRequestHeaders) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept, width"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, width"}}; Http::TestRequestHeaderMapImpl request_headers{ {"accept", "image/*"}, {"heigth", "1280"}, {"width", "640"}}; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\naccept\r" - "image/*\nwidth\r640\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\naccept\r" + "image/*\nwidth\r640\n"); } TEST(CreateVaryKey, MultipleHeadersNoneExist) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept, accept-language, width"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language, width"}}; Http::TestRequestHeaderMapImpl request_headers; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\naccept\r\naccept-language\r\nwidth\r\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\naccept\r\naccept-language\r\nwidth\r\n"); } TEST(CreateVaryKey, DifferentHeadersSameValue) { // Two requests with the same value for different headers must have different vary-keys. 
- Http::TestResponseHeaderMapImpl headers{{"vary", "accept, accept-language"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language"}}; Http::TestRequestHeaderMapImpl request_headers1{{"accept", "foo"}}; - std::string vary_key1 = VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers1)); + std::string vary_key1 = + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers1); Http::TestRequestHeaderMapImpl request_headers2{{"accept-language", "foo"}}; - std::string vary_key2 = VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers2)); + std::string vary_key2 = + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers2); ASSERT_NE(vary_key1, vary_key2); } TEST(CreateVaryKey, MultiValueSameHeader) { - Http::TestResponseHeaderMapImpl headers{{"vary", "width"}}; + Http::TestResponseHeaderMapImpl response_headers{{"vary", "width"}}; Http::TestRequestHeaderMapImpl request_headers{{"width", "foo"}, {"width", "bar"}}; - ASSERT_EQ(VaryHeader::createVaryKey( - headers.get(Http::Headers::get().Vary), - *VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers)), - "vary-key\nwidth\r" - "foo\r" - "bar\n"); + ASSERT_EQ( + VaryHeader::createVaryKey(response_headers.get(Http::Headers::get().Vary), request_headers), + "vary-key\nwidth\r" + "foo\r" + "bar\n"); } -TEST(PossibleVariedHeaders, Empty) { - Http::TestRequestHeaderMapImpl request_headers; - Http::HeaderMapPtr result = - VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers); +envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { + // Allows {accept, accept-language, width} to be varied in the tests. 
+ envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + + const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); + add_accept->set_exact("accept"); + + const auto& add_accept_language = config.mutable_allowed_vary_headers()->Add(); + add_accept_language->set_exact("accept-language"); + + const auto& add_width = config.mutable_allowed_vary_headers()->Add(); + add_width->set_exact("width"); + + return config; +} + +class VaryHeaderTest : public testing::Test { +protected: + VaryHeaderTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {} - EXPECT_FALSE(result->get(Http::LowerCaseString("accept"))); - EXPECT_FALSE(result->get(Http::LowerCaseString("accept-language"))); - EXPECT_FALSE(result->get(Http::LowerCaseString("width"))); + VaryHeader vary_allow_list_; + Http::TestRequestHeaderMapImpl request_headers_; + Http::TestResponseHeaderMapImpl response_headers_; +}; + +TEST_F(VaryHeaderTest, IsAllowedNull) { + ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); } -TEST(PossibleVariedHeaders, NoOverlap) { - Http::TestRequestHeaderMapImpl request_headers{{"abc", "123"}}; - Http::HeaderMapPtr result = - VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers); +TEST_F(VaryHeaderTest, IsAllowedEmpty) { + response_headers_.addCopy("vary", ""); + ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); +} - EXPECT_FALSE(result->get(Http::LowerCaseString("accept"))); - EXPECT_FALSE(result->get(Http::LowerCaseString("accept-language"))); - EXPECT_FALSE(result->get(Http::LowerCaseString("width"))); +TEST_F(VaryHeaderTest, IsAllowedSingle) { + response_headers_.addCopy("vary", "accept"); + ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); } -TEST(PossibleVariedHeaders, Overlap) { - Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}, {"abc", "123"}}; - Http::HeaderMapPtr result = - VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers); +TEST_F(VaryHeaderTest, IsAllowedMultiple) { + response_headers_.addCopy("vary", "accept"); + ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); +} - std::vector values; - Http::HeaderUtility::getAllOfHeader(*result, "accept", values); - ASSERT_EQ(values.size(), 1); - EXPECT_EQ(values[0], "image/*"); +TEST_F(VaryHeaderTest, NotIsAllowedStar) { + // Should never be allowed, regardless of the allow_list. 
+ response_headers_.addCopy("vary", "*"); + ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_)); +} + +TEST_F(VaryHeaderTest, NotIsAllowedSingle) { + response_headers_.addCopy("vary", "wrong-header"); + ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_)); +} - EXPECT_FALSE(result->get(Http::LowerCaseString("accept-language"))); - EXPECT_FALSE(result->get(Http::LowerCaseString("width"))); +TEST_F(VaryHeaderTest, NotIsAllowedMixed) { + response_headers_.addCopy("vary", "accept, wrong-header"); + ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_)); } -TEST(PossibleVariedHeaders, MultiValueSameHeader) { - Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}, {"accept", "text/html"}}; - Http::HeaderMapPtr result = - VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers); +TEST_F(VaryHeaderTest, PossibleVariedHeadersEmpty) { + Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - std::vector values; - Http::HeaderUtility::getAllOfHeader(*result, "accept", values); - ASSERT_EQ(values.size(), 2); - EXPECT_EQ(values[0], "image/*"); - EXPECT_EQ(values[1], "text/html"); + EXPECT_TRUE(result->get(Http::LowerCaseString("accept")).empty()); + EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); + EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); +} + +TEST_F(VaryHeaderTest, PossibleVariedHeadersNoOverlap) { + request_headers_.addCopy("abc", "123"); + Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - EXPECT_FALSE(result->get(Http::LowerCaseString("accept-language"))); - EXPECT_FALSE(result->get(Http::LowerCaseString("width"))); + EXPECT_TRUE(result->get(Http::LowerCaseString("accept")).empty()); + EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); + EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); } -TEST(PossibleVariedHeaders, MultiValueDifferentHeaders) { - Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}, - {"accept-language", "en-US"}}; - Http::HeaderMapPtr result = - VaryHeader::possibleVariedHeaders(allowed_vary_headers, request_headers); +TEST_F(VaryHeaderTest, PossibleVariedHeadersOverlap) { + request_headers_.addCopy("abc", "123"); + request_headers_.addCopy("accept", "image/*"); + Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - std::vector values; - Http::HeaderUtility::getAllOfHeader(*result, "accept", values); + const auto values = result->get(Http::LowerCaseString("accept")); ASSERT_EQ(values.size(), 1); - EXPECT_EQ(values[0], "image/*"); + EXPECT_EQ(values[0]->value().getStringView(), "image/*"); + + EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); + EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); +} + +TEST_F(VaryHeaderTest, PossibleVariedHeadersMultiValues) { + request_headers_.addCopy("accept", "image/*"); + request_headers_.addCopy("accept", "text/html"); + Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - Http::HeaderUtility::getAllOfHeader(*result, "accept-language", values); + const auto values = result->get(Http::LowerCaseString("accept")); ASSERT_EQ(values.size(), 2); - EXPECT_EQ(values[1], "en-US"); + EXPECT_EQ(values[0]->value().getStringView(), "image/*"); + EXPECT_EQ(values[1]->value().getStringView(), "text/html"); - EXPECT_FALSE(result->get(Http::LowerCaseString("width"))); + 
EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); + EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); } -TEST(VaryParseAllowlist, TempValue) { - // TODO(cbdm): This test should be expanded when the allowlist parsing is done. - absl::flat_hash_set allowed = VaryHeader::parseAllowlist(); - EXPECT_EQ(allowed.size(), 1); - EXPECT_TRUE(allowed.contains("x-temporary-standin-header-name")); +TEST_F(VaryHeaderTest, PossibleVariedHeadersMultiHeaders) { + request_headers_.addCopy("accept", "image/*"); + request_headers_.addCopy("accept-language", "en-US"); + Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); + + const auto values = result->get(Http::LowerCaseString("accept")); + ASSERT_EQ(values.size(), 1); + EXPECT_EQ(values[0]->value().getStringView(), "image/*"); + + const auto values2 = result->get(Http::LowerCaseString("accept-language")); + ASSERT_EQ(values2.size(), 1); + EXPECT_EQ(values2[0]->value(), "en-US"); + + EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); } } // namespace diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc index 44deb3a9cb93..d1fcca046ee4 100644 --- a/test/extensions/filters/http/cache/cacheability_utils_test.cc +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -29,13 +29,24 @@ class RequestConditionalHeadersTest : public testing::TestWithParam std::string conditionalHeader() const { return GetParam(); } }; +envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { + // Allows 'accept' to be varied in the tests. + envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); + add_accept->set_exact("accept"); + return config; +} + class IsCacheableResponseTest : public testing::Test { +public: + IsCacheableResponseTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {} + protected: std::string cache_control_ = "max-age=3600"; Http::TestResponseHeaderMapImpl response_headers_ = {{":status", "200"}, {"date", "Sun, 06 Nov 1994 08:49:37 GMT"}, {"cache-control", cache_control_}}; - absl::flat_hash_set allowed_vary_headers_ = {"accept"}; + VaryHeader vary_allow_list_; }; TEST_F(IsCacheableRequestTest, CacheableRequest) { @@ -97,69 +108,69 @@ TEST_P(RequestConditionalHeadersTest, ConditionalHeaders) { } TEST_F(IsCacheableResponseTest, CacheableResponse) { - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); } TEST_F(IsCacheableResponseTest, UncacheableStatusCode) { - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); response_headers_.setStatus("700"); - EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); response_headers_.removeStatus(); - EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); } TEST_F(IsCacheableResponseTest, ValidationData) { - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, 
allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); // No cache control headers or expires header response_headers_.remove(Http::CustomHeaders::get().CacheControl); - EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); // No max-age data or expires header response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "public, no-transform"); - EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); // Max-age data available response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "s-maxage=1000"); - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); // No max-age data, but the response requires revalidation anyway response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-cache"); - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); // No cache control headers, but there is an expires header response_headers_.remove(Http::CustomHeaders::get().CacheControl); response_headers_.setReferenceKey(Http::Headers::get().Expires, "Sun, 06 Nov 1994 09:49:37 GMT"); - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); } TEST_F(IsCacheableResponseTest, ResponseNoStore) { - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); std::string cache_control_no_store = absl::StrCat(cache_control_, ", no-store"); response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, cache_control_no_store); - EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); } TEST_F(IsCacheableResponseTest, ResponsePrivate) { - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); std::string cache_control_private = absl::StrCat(cache_control_, ", private"); response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, cache_control_private); - EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); } TEST_F(IsCacheableResponseTest, EmptyVary) { - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); response_headers_.setCopy(Http::Headers::get().Vary, ""); - EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_)); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_)); } 
 TEST_F(IsCacheableResponseTest, AllowedVary) {
-  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_));
+  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));
   response_headers_.setCopy(Http::Headers::get().Vary, "accept");
-  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_));
+  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));
 }
 
 TEST_F(IsCacheableResponseTest, NotAllowedVary) {
-  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_));
+  EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));
   response_headers_.setCopy(Http::Headers::get().Vary, "*");
-  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, allowed_vary_headers_));
+  EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers_, vary_allow_list_));
 }
 
 } // namespace
diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc
index 88794cce6b9e..0fd677973da1
--- a/test/extensions/filters/http/cache/http_cache_test.cc
+++ b/test/extensions/filters/http/cache/http_cache_test.cc
@@ -29,16 +29,27 @@ struct LookupRequestTestCase {
   std::string expected_age;
 };
 
+using Seconds = std::chrono::seconds;
+
+envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() {
+  // Allows 'accept' to be varied in the tests.
+  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config;
+  const auto& add_accept = config.mutable_allowed_vary_headers()->Add();
+  add_accept->set_exact("accept");
+  return config;
+}
+
 class LookupRequestTest : public testing::TestWithParam<LookupRequestTestCase> {
 public:
+  LookupRequestTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {}
+
   DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"};
   Http::TestRequestHeaderMapImpl request_headers_{{":path", "/"},
                                                   {":method", "GET"},
                                                   {"x-forwarded-proto", "https"},
                                                   {":authority", "example.com"}};
-  // Using 'accept' as an allowed header to be varied for testing-purpose.
- absl::flat_hash_set allowed_vary_headers_{"accept"}; + VaryHeader vary_allow_list_; static const SystemTime& currentTime() { CONSTRUCT_ON_FIRST_USE(SystemTime, Event::SimulatedTimeSystem().systemTime()); @@ -181,7 +192,7 @@ TEST_P(LookupRequestTest, ResultWithoutBodyMatchesExpectation) { request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, GetParam().request_cache_control); const SystemTime request_time = GetParam().request_time, response_date = GetParam().response_date; - const LookupRequest lookup_request(request_headers_, request_time, allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, request_time, vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"cache-control", GetParam().response_cache_control}, {"date", formatter_.fromTime(response_date)}}); @@ -201,7 +212,7 @@ TEST_P(LookupRequestTest, ResultWithBodyMatchesExpectation) { request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, GetParam().request_cache_control); const SystemTime request_time = GetParam().request_time, response_date = GetParam().response_date; - const LookupRequest lookup_request(request_headers_, request_time, allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, request_time, vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"cache-control", GetParam().response_cache_control}, {"date", formatter_.fromTime(response_date)}}); @@ -220,7 +231,7 @@ TEST_P(LookupRequestTest, ResultWithBodyMatchesExpectation) { } TEST_F(LookupRequestTest, ExpiredViaFallbackheader) { - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"expires", formatter_.fromTime(currentTime() - Seconds(5))}, {"date", formatter_.fromTime(currentTime())}}); @@ -230,7 +241,7 @@ TEST_F(LookupRequestTest, ExpiredViaFallbackheader) { } TEST_F(LookupRequestTest, NotExpiredViaFallbackheader) { - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"expires", formatter_.fromTime(currentTime() + Seconds(5))}, {"date", formatter_.fromTime(currentTime())}}); @@ -243,7 +254,7 @@ TEST_F(LookupRequestTest, NotExpiredViaFallbackheader) { // https://httpwg.org/specs/rfc7234.html#header.pragma TEST_F(LookupRequestTest, PragmaNoCacheFallback) { request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma, "no-cache"); - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(currentTime())}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); @@ -254,7 +265,7 @@ TEST_F(LookupRequestTest, PragmaNoCacheFallback) { TEST_F(LookupRequestTest, PragmaNoCacheFallbackExtraDirectivesIgnored) { request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma, "no-cache, custom-directive=custom-value"); - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, currentTime(), 
vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(currentTime())}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); @@ -265,7 +276,7 @@ TEST_F(LookupRequestTest, PragmaNoCacheFallbackExtraDirectivesIgnored) { TEST_F(LookupRequestTest, PragmaFallbackOtherValuesIgnored) { request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma, "max-age=0"); const LookupRequest lookup_request(request_headers_, currentTime() + Seconds(5), - allowed_vary_headers_); + vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(currentTime())}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); @@ -277,7 +288,7 @@ TEST_F(LookupRequestTest, PragmaNoFallback) { request_headers_.setReferenceKey(Http::CustomHeaders::get().Pragma, "no-cache"); request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "max-age=10"); const LookupRequest lookup_request(request_headers_, currentTime() + Seconds(5), - allowed_vary_headers_); + vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(currentTime())}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); @@ -288,7 +299,7 @@ TEST_F(LookupRequestTest, PragmaNoFallback) { TEST_F(LookupRequestTest, SingleSatisfiableRange) { // add range info to headers request_headers_.addReference(Http::Headers::get().Range, "bytes=1-99"); - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(currentTime())}, @@ -321,7 +332,7 @@ TEST_F(LookupRequestTest, MultipleSatisfiableRanges) { // add range info to headers request_headers_.addCopy(Http::Headers::get().Range.get(), "bytes=1-99,3-,-3"); - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(currentTime())}, @@ -346,7 +357,7 @@ TEST_F(LookupRequestTest, NotSatisfiableRange) { // add range info to headers request_headers_.addReference(Http::Headers::get().Range, "bytes=100-"); - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(currentTime())}, @@ -620,11 +631,13 @@ TEST_P(ParseInvalidRangeHeaderTest, InvalidRangeReturnsEmpty) { TEST_F(LookupRequestTest, VariedHeaders) { request_headers_.addCopy("accept", "image/*"); - const LookupRequest lookup_request(request_headers_, currentTime(), allowed_vary_headers_); + request_headers_.addCopy("other-header", "abc123"); + const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); const Http::RequestHeaderMap& result = lookup_request.getVaryHeaders(); - ASSERT_TRUE(result.get(Http::LowerCaseString("accept"))); - ASSERT_EQ(result.get(Http::LowerCaseString("accept"))->value().getStringView(), "image/*"); + 
ASSERT_FALSE(result.get(Http::LowerCaseString("accept")).empty()); + ASSERT_EQ(result.get(Http::LowerCaseString("accept"))[0]->value().getStringView(), "image/*"); + ASSERT_TRUE(result.get(Http::LowerCaseString("other-header")).empty()); } } // namespace diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index 3d2ecacc2ebf..1717e2499754 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -20,9 +20,17 @@ namespace { const std::string EpochDate = "Thu, 01 Jan 1970 00:00:00 GMT"; +envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { + // Allows 'accept' to be varied in the tests. + envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); + add_accept->set_exact("accept"); + return config; +} + class SimpleHttpCacheTest : public testing::Test { protected: - SimpleHttpCacheTest() { + SimpleHttpCacheTest() : vary_allow_list_(getConfig().allowed_vary_headers()) { request_headers_.setMethod("GET"); request_headers_.setHost("example.com"); request_headers_.setForwardedProto("https"); @@ -66,9 +74,7 @@ class SimpleHttpCacheTest : public testing::Test { LookupRequest makeLookupRequest(absl::string_view request_path) { request_headers_.setPath(request_path); - // Using 'accept' as an allowed header to be varied for testing-purpose. - absl::flat_hash_set allowed_vary_headers{"accept"}; - return LookupRequest(request_headers_, current_time_, allowed_vary_headers); + return LookupRequest(request_headers_, current_time_, vary_allow_list_); } AssertionResult expectLookupSuccessWithBody(LookupContext* lookup_context, @@ -97,6 +103,7 @@ class SimpleHttpCacheTest : public testing::Test { Event::SimulatedTimeSystem time_source_; SystemTime current_time_ = time_source_.systemTime(); DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; + VaryHeader vary_allow_list_; }; // Simple flow of putting in an item, getting it, deleting it. 
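Note (not part of the diff): the cache test changes above all apply the same two refactors, so a compact sketch may help when reading them. First, the free-standing allow-list of vary header names (an absl::flat_hash_set of strings) is replaced by a VaryHeader object built from the filter's CacheConfig proto, whose allowed_vary_headers field is a repeated StringMatcher. Second, header assertions move to the newer Http::HeaderMap::get() result, a possibly empty list of entries, which is why the checks switch to .empty() and [0]->value(). The sketch below only restates what the diff itself exercises; the include paths, namespace nesting, and test name are assumptions and are not part of this change.

// Sketch only; assumed include paths, mirroring the fixtures added in the tests above.
#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h"

#include "extensions/filters/http/cache/cache_headers_utils.h"

#include "test/test_common/utility.h"

#include "gtest/gtest.h"

namespace Envoy {
namespace Extensions {
namespace HttpFilters {
namespace Cache {
namespace {

TEST(VaryAllowListSketch, BuildFromConfigAndQueryHeaders) {
  // Same shape as the getConfig() helpers introduced in these test files:
  // allow responses to vary on "accept" only.
  envoy::extensions::filters::http::cache::v3alpha::CacheConfig config;
  config.mutable_allowed_vary_headers()->Add()->set_exact("accept");

  // The allow-list is constructed directly from the repeated StringMatcher field.
  VaryHeader vary_allow_list(config.allowed_vary_headers());

  // "vary: accept" is allow-listed and therefore cacheable; "vary: *" never is.
  Http::TestResponseHeaderMapImpl allowed_vary{{"vary", "accept"}};
  Http::TestResponseHeaderMapImpl star_vary{{"vary", "*"}};
  EXPECT_TRUE(vary_allow_list.isAllowed(allowed_vary));
  EXPECT_FALSE(vary_allow_list.isAllowed(star_vary));

  // HeaderMap::get() now returns a (possibly empty) list of entries rather than
  // a single pointer, hence the .empty() and [0]->value() assertions in the diff.
  Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}};
  const auto accept_entries = request_headers.get(Http::LowerCaseString("accept"));
  ASSERT_FALSE(accept_entries.empty());
  EXPECT_EQ(accept_entries[0]->value().getStringView(), "image/*");
}

} // namespace
} // namespace Cache
} // namespace HttpFilters
} // namespace Extensions
} // namespace Envoy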
diff --git a/test/extensions/filters/http/cdn_loop/BUILD b/test/extensions/filters/http/cdn_loop/BUILD new file mode 100644 index 000000000000..d39c244e72a9 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/BUILD @@ -0,0 +1,80 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", + "envoy_cc_test", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.filters.http.cdn_loop", + deps = [ + "//source/extensions/filters/http/cdn_loop:config", + "//test/mocks/http:http_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "filter_integration_test", + srcs = ["filter_integration_test.cc"], + extension_name = "envoy.filters.http.cdn_loop", + deps = [ + "//source/extensions/filters/http/cdn_loop:config", + "//test/integration:http_protocol_integration_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "filter_test", + srcs = ["filter_test.cc"], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:filter_interface", + "//source/extensions/filters/http/cdn_loop:filter_lib", + "//test/mocks/http:http_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "parser_test", + srcs = ["parser_test.cc"], + deps = [ + "//source/extensions/filters/http/cdn_loop:parser_lib", + "//test/test_common:status_utility_lib", + ], +) + +envoy_cc_fuzz_test( + name = "parser_fuzz_test", + srcs = ["parser_fuzz_test.cc"], + corpus = "parser_corpus", + deps = [ + "//source/common/common:statusor_lib", + "//source/extensions/filters/http/cdn_loop:parser_lib", + "//test/fuzz:utility_lib", + ], +) + +envoy_cc_test( + name = "utils_test", + srcs = ["utils_test.cc"], + deps = [ + "//source/extensions/filters/http/cdn_loop:utils_lib", + "//test/test_common:status_utility_lib", + ], +) diff --git a/test/extensions/filters/http/cdn_loop/config_test.cc b/test/extensions/filters/http/cdn_loop/config_test.cc new file mode 100644 index 000000000000..88a4a44462af --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/config_test.cc @@ -0,0 +1,71 @@ +#include + +#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" + +#include "extensions/filters/http/cdn_loop/config.h" +#include "extensions/filters/http/cdn_loop/filter.h" + +#include "test/mocks/server/factory_context.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { + +using testing::HasSubstr; + +TEST(CdnLoopFilterFactoryTest, ValidValuesWork) { + NiceMock context; + Http::StreamDecoderFilterSharedPtr filter; + Http::MockFilterChainFactoryCallbacks filter_callbacks; + EXPECT_CALL(filter_callbacks, addStreamDecoderFilter).WillOnce(::testing::SaveArg<0>(&filter)); + + envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + config.set_cdn_id("cdn"); + CdnLoopFilterFactory factory; + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, "stats", context); + cb(filter_callbacks); + EXPECT_NE(filter.get(), nullptr); + EXPECT_NE(dynamic_cast(filter.get()), nullptr); +} + 
+TEST(CdnLoopFilterFactoryTest, BlankCdnIdThrows) { + NiceMock context; + + envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + CdnLoopFilterFactory factory; + + EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, "stats", context), + ProtoValidationException, HasSubstr("value length must be at least")); +} + +TEST(CdnLoopFilterFactoryTest, InvalidCdnId) { + NiceMock context; + + envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + config.set_cdn_id("[not-token-or-ip"); + CdnLoopFilterFactory factory; + + EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, "stats", context), + EnvoyException, HasSubstr("is not a valid CDN identifier")); +} + +TEST(CdnLoopFilterFactoryTest, InvalidCdnIdNonHeaderWhitespace) { + NiceMock context; + + envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + config.set_cdn_id("\r\n"); + CdnLoopFilterFactory factory; + + EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, "stats", context), + EnvoyException, HasSubstr("is not a valid CDN identifier")); +} + +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cdn_loop/filter_integration_test.cc b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc new file mode 100644 index 000000000000..ed6032370dcf --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc @@ -0,0 +1,191 @@ +#include + +#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" + +#include "test/integration/http_protocol_integration.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { +namespace { + +const std::string MaxDefaultConfig = R"EOF( +name: envoy.filters.http.cdn_loop +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig + cdn_id: cdn +)EOF"; + +const std::string MaxOf2Config = R"EOF( +name: envoy.filters.http.cdn_loop +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig + cdn_id: cdn + max_allowed_occurrences: 2 +)EOF"; + +class CdnLoopFilterIntegrationTest : public HttpProtocolIntegrationTest {}; + +TEST_P(CdnLoopFilterIntegrationTest, NoCdnLoopHeader) { + config_helper_.addFilter(MaxDefaultConfig); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + + const auto payload_entry = upstream_request_->headers().get(Http::LowerCaseString("CDN-Loop")); + ASSERT_FALSE(payload_entry.empty()); + EXPECT_EQ(payload_entry[0]->value().getStringView(), "cdn"); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +TEST_P(CdnLoopFilterIntegrationTest, CdnLoopHeaderWithOtherCdns) { + config_helper_.addFilter(MaxDefaultConfig); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"CDN-Loop", "cdn1,cdn2"}}; + + auto response = 
sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + + const auto payload_entry = upstream_request_->headers().get(Http::LowerCaseString("CDN-Loop")); + ASSERT_FALSE(payload_entry.empty()); + EXPECT_EQ(payload_entry[0]->value().getStringView(), "cdn1,cdn2,cdn"); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +TEST_P(CdnLoopFilterIntegrationTest, MultipleCdnLoopHeaders) { + config_helper_.addFilter(MaxDefaultConfig); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}, + {":scheme", "http"}, {":authority", "host"}, + {"CDN-Loop", "cdn1"}, {"CDN-Loop", "cdn2"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + + const auto payload_entry = upstream_request_->headers().get(Http::LowerCaseString("CDN-Loop")); + ASSERT_FALSE(payload_entry.empty()); + EXPECT_EQ(payload_entry[0]->value().getStringView(), "cdn1,cdn2,cdn"); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +TEST_P(CdnLoopFilterIntegrationTest, CdnLoop0Allowed1Seen) { + config_helper_.addFilter(MaxDefaultConfig); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"CDN-Loop", "cdn"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("502", response->headers().getStatusValue()); +} + +TEST_P(CdnLoopFilterIntegrationTest, UnparseableHeader) { + config_helper_.addFilter(MaxDefaultConfig); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"CDN-Loop", "[bad-header"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().getStatusValue()); +} + +TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed1Seen) { + config_helper_.addFilter(MaxOf2Config); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"CDN-Loop", "cdn"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + + const auto payload_entry = upstream_request_->headers().get(Http::LowerCaseString("CDN-Loop")); + ASSERT_FALSE(payload_entry.empty()); + EXPECT_EQ(payload_entry[0]->value().getStringView(), "cdn,cdn"); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed2Seen) { + config_helper_.addFilter(MaxOf2Config); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"CDN-Loop", "cdn, cdn"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, 
default_response_headers_, 0); + + const auto payload_entry = upstream_request_->headers().get(Http::LowerCaseString("CDN-Loop")); + ASSERT_FALSE(payload_entry.empty()); + EXPECT_EQ(payload_entry[0]->value().getStringView(), "cdn, cdn,cdn"); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed3Seen) { + config_helper_.addFilter(MaxOf2Config); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"CDN-Loop", "cdn, cdn, cdn"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("502", response->headers().getStatusValue()); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, CdnLoopFilterIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2}, + // Upstream doesn't matter, so by testing only 1, + // the test is twice as fast. + {FakeHttpConnection::Type::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +} // namespace +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cdn_loop/filter_test.cc b/test/extensions/filters/http/cdn_loop/filter_test.cc new file mode 100644 index 000000000000..9c96e26dca2d --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/filter_test.cc @@ -0,0 +1,117 @@ +#include "envoy/http/codes.h" +#include "envoy/http/filter.h" + +#include "extensions/filters/http/cdn_loop/filter.h" + +#include "test/mocks/http/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { +namespace { + +TEST(CdnLoopFilterTest, TestNoHeader) { + NiceMock decoder_callbacks; + CdnLoopFilter filter("cdn", 0); + filter.setDecoderFilterCallbacks(decoder_callbacks); + + Http::TestRequestHeaderMapImpl request_headers{}; + + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue); + EXPECT_EQ(request_headers.get(Http::LowerCaseString("CDN-Loop"))[0]->value().getStringView(), + "cdn"); +} + +TEST(CdnLoopFilterTest, OtherCdnsInHeader) { + NiceMock decoder_callbacks; + CdnLoopFilter filter("cdn", 0); + filter.setDecoderFilterCallbacks(decoder_callbacks); + + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", "cdn1,cdn2"}}; + + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue); + EXPECT_EQ(request_headers.get(Http::LowerCaseString("CDN-Loop"))[0]->value().getStringView(), + "cdn1,cdn2,cdn"); +} + +TEST(CdnLoopFilterTest, LoopDetected) { + NiceMock decoder_callbacks; + EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::BadGateway, _, _, _, _)).Times(1); + CdnLoopFilter filter("cdn", 0); + filter.setDecoderFilterCallbacks(decoder_callbacks); + + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", "cdn"}}; + + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::StopIteration); +} + +TEST(CdnLoopFilterTest, MultipleTransitsAllowed) { + NiceMock decoder_callbacks; + EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::BadGateway, _, _, _, _)).Times(1); + 
CdnLoopFilter filter("cdn", 3); + filter.setDecoderFilterCallbacks(decoder_callbacks); + + { + Http::TestRequestHeaderMapImpl request_headers{}; + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue); + EXPECT_EQ(request_headers.get(Http::LowerCaseString("CDN-Loop"))[0]->value().getStringView(), + "cdn"); + } + { + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", "cdn"}}; + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue); + EXPECT_EQ(request_headers.get(Http::LowerCaseString("CDN-Loop"))[0]->value().getStringView(), + "cdn,cdn"); + } + { + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", "cdn,cdn"}}; + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue); + EXPECT_EQ(request_headers.get(Http::LowerCaseString("CDN-Loop"))[0]->value().getStringView(), + "cdn,cdn,cdn"); + } + { + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", "cdn,cdn,cdn"}}; + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue); + EXPECT_EQ(request_headers.get(Http::LowerCaseString("CDN-Loop"))[0]->value().getStringView(), + "cdn,cdn,cdn,cdn"); + } + { + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", "cdn,cdn,cdn,cdn"}}; + EXPECT_EQ(filter.decodeHeaders(request_headers, false), + Http::FilterHeadersStatus::StopIteration); + } +} + +TEST(CdnLoopFilterTest, MultipleHeadersAllowed) { + NiceMock decoder_callbacks; + CdnLoopFilter filter("cdn", 0); + filter.setDecoderFilterCallbacks(decoder_callbacks); + + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", "cdn1"}, {"CDN-Loop", "cdn2"}}; + + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::Continue); + EXPECT_EQ(request_headers.get(Http::LowerCaseString("CDN-Loop"))[0]->value().getStringView(), + "cdn1,cdn2,cdn"); +} + +TEST(CdnLoopFilterTest, UnparseableHeader) { + NiceMock decoder_callbacks; + EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::BadRequest, _, _, _, _)).Times(1); + CdnLoopFilter filter("cdn", 0); + filter.setDecoderFilterCallbacks(decoder_callbacks); + + Http::TestRequestHeaderMapImpl request_headers{{"CDN-Loop", ";"}}; + + EXPECT_EQ(filter.decodeHeaders(request_headers, false), Http::FilterHeadersStatus::StopIteration); +} + +} // namespace +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-InvalidParameter.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-InvalidParameter.txt new file mode 100644 index 000000000000..0c14eb10ec7a --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-InvalidParameter.txt @@ -0,0 +1 @@ +name ; a= diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MissingParameter.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MissingParameter.txt new file mode 100644 index 000000000000..a7b3c5405310 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MissingParameter.txt @@ -0,0 +1 @@ +name ; diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MultipleParametersWithWhitespace.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MultipleParametersWithWhitespace.txt new file mode 100644 index 000000000000..270e9a9137f7 --- /dev/null +++ 
b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-MultipleParametersWithWhitespace.txt @@ -0,0 +1 @@ +name ; a=b ; c="d" ; e=";" diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameter.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameter.txt new file mode 100644 index 000000000000..9fa7a93b6429 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameter.txt @@ -0,0 +1 @@ +name;a=b diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameterExtraWhitespace.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameterExtraWhitespace.txt new file mode 100644 index 000000000000..22e8588d2c1b --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfo-SingleParameterExtraWhitespace.txt @@ -0,0 +1 @@ +name ; a=b diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-ExtraWhiteSpace.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-ExtraWhiteSpace.txt new file mode 100644 index 000000000000..2118b6548479 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-ExtraWhiteSpace.txt @@ -0,0 +1 @@ + cdn1 , cdn2 , cdn3 diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidCdnId.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidCdnId.txt new file mode 100644 index 000000000000..97fa9647278f --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidCdnId.txt @@ -0,0 +1 @@ +[bad diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidParseNoComma.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidParseNoComma.txt new file mode 100644 index 000000000000..d66f2c68f3a2 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-InvalidParseNoComma.txt @@ -0,0 +1 @@ +cdn1 cdn2 diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-1.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-1.txt new file mode 100644 index 000000000000..47929eeb7b36 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-1.txt @@ -0,0 +1 @@ +foo,bar diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-2.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-2.txt new file mode 100644 index 000000000000..737fe28e643e --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-2.txt @@ -0,0 +1 @@ +foo ,bar, diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-3.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-3.txt new file mode 100644 index 000000000000..a71b886e9735 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-3.txt @@ -0,0 +1 @@ +foo , ,bar,charlie diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-4-empty.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-4-empty.txt new file mode 100644 index 
000000000000..e69de29bb2d1 diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-5.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-5.txt new file mode 100644 index 000000000000..7edb2fa5bce5 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-5.txt @@ -0,0 +1 @@ +, diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-6.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-6.txt new file mode 100644 index 000000000000..7ac405e813f5 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Rfc7230Section7Tests-6.txt @@ -0,0 +1 @@ +, , diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Simple.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Simple.txt new file mode 100644 index 000000000000..1f6eb4f7f45c --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/ParseCdnInfoTest-Simple.txt @@ -0,0 +1 @@ +cdn1, cdn2, cdn3 diff --git a/test/extensions/filters/http/cdn_loop/parser_corpus/rfc8586-example.txt b/test/extensions/filters/http/cdn_loop/parser_corpus/rfc8586-example.txt new file mode 100644 index 000000000000..6191cd018084 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_corpus/rfc8586-example.txt @@ -0,0 +1 @@ +foo123.foocdn.example, barcdn.example; trace="abcdef",AnotherCDN; abc=123; def="456" diff --git a/test/extensions/filters/http/cdn_loop/parser_fuzz_test.cc b/test/extensions/filters/http/cdn_loop/parser_fuzz_test.cc new file mode 100644 index 000000000000..8fa6c16085b6 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_fuzz_test.cc @@ -0,0 +1,34 @@ +#include "common/common/statusor.h" + +#include "extensions/filters/http/cdn_loop/parser.h" + +#include "test/fuzz/fuzz_runner.h" +#include "test/fuzz/utility.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Fuzz { + +using Envoy::Extensions::HttpFilters::CdnLoop::Parser::parseCdnInfoList; +using Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParseContext; +using Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParsedCdnInfoList; + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + absl::string_view input(reinterpret_cast(buf), len); + StatusOr list = parseCdnInfoList(ParseContext(input)); + if (list) { + // If we successfully parse input, we should make sure that cdn_ids we find appear in the input + // string in order. 
+ size_t start = 0; + for (const absl::string_view& cdn_id : list->cdnIds()) { + size_t pos = input.find(cdn_id, start); + FUZZ_ASSERT(pos != absl::string_view::npos); + FUZZ_ASSERT(pos >= start); + start = pos + cdn_id.length(); + } + } +} + +} // namespace Fuzz +} // namespace Envoy diff --git a/test/extensions/filters/http/cdn_loop/parser_test.cc b/test/extensions/filters/http/cdn_loop/parser_test.cc new file mode 100644 index 000000000000..cdd43ebfa224 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/parser_test.cc @@ -0,0 +1,515 @@ +#include + +#include "extensions/filters/http/cdn_loop/parser.h" + +#include "test/test_common/status_utility.h" + +#include "absl/status/status.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { +namespace Parser { +namespace { + +using ::Envoy::StatusHelpers::IsOkAndHolds; +using ::Envoy::StatusHelpers::StatusIs; + +TEST(ParseContextOstreamTest, Works) { + std::ostringstream out; + ParseContext context("foo", 3); + out << context; + EXPECT_EQ(out.str(), "ParseContext{next=3}"); +} + +TEST(ParsedCdnIdOstreamTest, Works) { + std::ostringstream out; + ParsedCdnId cdnId(ParseContext("foo", 3), "foo"); + out << cdnId; + EXPECT_EQ(out.str(), "ParsedCdnId{context=ParseContext{next=3}, cdn_id=foo}"); +} + +TEST(ParsedCdnInfoOstreamTest, Works) { + std::ostringstream out; + ParsedCdnInfo cdnId(ParseContext("foo", 3), "foo"); + out << cdnId; + EXPECT_EQ(out.str(), "ParsedCdnInfo{context=ParseContext{next=3}, cdn_id=foo}"); +} + +TEST(ParsedCdnInfoListOstreamTest, Works) { + std::ostringstream out; + ParsedCdnInfoList cdnId(ParseContext("foo", 3), {"foo"}); + out << cdnId; + EXPECT_EQ(out.str(), "ParsedCdnInfoList{context=ParseContext{next=3}, cdn_ids=[foo]}"); +} + +TEST(SkipOptionalWhitespaceTest, TestEmpty) { + const std::string value = ""; + ParseContext input(value); + EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 0))); +} + +TEST(SkipOptionalWhitespaceTest, TestSpace) { + const std::string value = " "; + ParseContext input(value); + EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 1))); +} + +TEST(SkipOptionalWhitespaceTest, TestTab) { + const std::string value = "\t"; + ParseContext input(value); + EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 1))); +} + +TEST(SkipOptionalWhitespaceTest, TestLots) { + const std::string value = " \t \t "; + ParseContext input(value); + EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 7))); +} + +TEST(SkipOptionalWhitespaceTest, NoWhitespace) { + const std::string value = "c"; + ParseContext input(value); + EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 0))); +} + +TEST(SkipOptionalWhitespaceTest, StopsAtNonWhitespace) { + const std::string value = " c"; + ParseContext input(value); + EXPECT_EQ(skipOptionalWhitespace(input), (ParseContext(value, 2))); +} + +TEST(ParseQuotedPairTest, Simple) { + const std::string value = R"(\a)"; + ParseContext input(value); + EXPECT_THAT(parseQuotedPair(input), IsOkAndHolds(ParseContext(value, 2))); +} + +TEST(ParseQuotedPairTest, EndOfInput) { + const std::string value = ""; + ParseContext input(value); + EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedPairTest, MissingQuotable) { + const std::string value = R"(\)"; + ParseContext input(value); + EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedPairTest, 
BadQuotable) { + const std::string value = "\\\x1f"; + ParseContext input(value); + EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedPairTest, MissingBackslash) { + const std::string value = R"(a)"; + ParseContext input(value); + EXPECT_THAT(parseQuotedPair(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedStringTest, Simple) { + const std::string value = "\"abcd\""; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), IsOkAndHolds(ParseContext(value, 6))); +} + +TEST(ParseQuotedStringTest, QdStringEdgeCases) { + const std::string value = "\"\t \x21\x23\x5b\x5d\x7e\x80\xff\""; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), IsOkAndHolds(ParseContext(value, 11))); +} + +TEST(ParseQuotedStringTest, QuotedPair) { + const std::string value = "\"\\\"\""; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), IsOkAndHolds(ParseContext(value, 4))); +} + +TEST(ParseQuotedStringTest, NoStartQuote) { + const std::string value = "foo"; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedStringTest, NoEndQuote) { + const std::string value = "\"missing-final-dquote"; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedStringTest, EmptyInput) { + const std::string value = ""; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedStringTest, NonVisualChar) { + const std::string value = "\"\x1f\""; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseQuotedStringTest, QuotedPairEdgeCases) { + const std::string value = "\"\\"; + ParseContext input(value); + EXPECT_THAT(parseQuotedString(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseTokenTest, AllValues) { + const std::string value = "!#$%&'*+-.^_`|~09azAZ"; + ParseContext input(value); + EXPECT_THAT(parseToken(input), IsOkAndHolds(ParseContext(value, 21))); +} + +TEST(ParseTokenTest, TwoTokens) { + const std::string value = "token1 token2"; + { + ParseContext input(value); + EXPECT_THAT(parseToken(input), IsOkAndHolds(ParseContext(value, 6))); + } + { + ParseContext input(value, 6); + EXPECT_THAT(parseToken(input), StatusIs(absl::StatusCode::kInvalidArgument)); + } + { + ParseContext input(value, 7); + EXPECT_THAT(parseToken(input), IsOkAndHolds(ParseContext(value, 13))); + } +} + +TEST(ParseTokenTest, ParseEmpty) { + const std::string value = ""; + ParseContext input(value); + EXPECT_THAT(parseToken(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParsePlausibleIpV6, Example) { + const std::string value = "[2001:DB8::1]"; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 13))); +} + +TEST(ParsePlausibleIpV6, ExampleLowerCase) { + const std::string value = "[2001:db8::1]"; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 13))); +} + +TEST(ParsePlausibleIpV6, ExampleIpV4) { + const std::string value = "[2001:db8::192.0.2.0]"; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 21))); +} + +TEST(ParsePlausibleIpV6, AllHexValues) { + const std::string value = "[1234:5678:90aA:bBcC:dDeE:fF00]"; + 
ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), IsOkAndHolds(ParseContext(value, 31))); +} + +TEST(ParsePlausibleIpV6, EmptyInput) { + const std::string value = ""; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParsePlausibleIpV6, BadStartDelimiter) { + const std::string value = "{2001:DB8::1}"; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParsePlausibleIpV6, BadCharacter) { + const std::string value = "[hello]"; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParsePlausibleIpV6, BadEndDelimiter) { + const std::string value = "[2001:DB8::1}"; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParsePlausibleIpV6, EndBeforeDelimiter) { + const std::string value = "[2001:DB8::1"; + ParseContext input(value); + EXPECT_THAT(parsePlausibleIpV6(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnIdTest, Simple) { + const std::string value = "name"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 4), "name"))); +} + +TEST(ParseCdnIdTest, SecondInSeries) { + // Make sure that absl::string_view::substr is called with (start, end) not + // (start, len) + const std::string value = "cdn1, cdn2, cdn3"; + ParseContext input(value, 6); + EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 10), "cdn2"))); +} + +TEST(ParseCdnIdTest, Empty) { + const std::string value = ""; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnIdTest, NotValidTokenOrUri) { + const std::string value = ","; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnIdTest, InvalidIpV6) { + const std::string value = "[2001::"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnIdTest, InvalidPortNumberStopsParse) { + const std::string value = "host:13z"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 7), "host:13"))); +} + +TEST(ParseCdnIdTest, UriHostName) { + const std::string value = "www.example.com"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 15), "www.example.com"))); +} + +TEST(ParseCdnIdTest, UriHostPercentEncoded) { + const std::string value = "%ba%ba.example.com"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 18), "%ba%ba.example.com"))); +} + +TEST(ParseCdnIdTest, UriHostNamePort) { + const std::string value = "www.example.com:443"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 19), "www.example.com:443"))); +} + +TEST(ParseCdnIdTest, UriHostNameBlankPort) { + const std::string value = "www.example.com:"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 16), "www.example.com:"))); +} + +TEST(ParseCdnIdTest, UriHostIpV4) { + const std::string value = "192.0.2.0"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), 
IsOkAndHolds(ParsedCdnId(ParseContext(value, 9), "192.0.2.0"))); +} + +TEST(ParseCdnIdTest, UriHostIpV4Port) { + const std::string value = "192.0.2.0:443"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 13), "192.0.2.0:443"))); +} + +TEST(ParseCdnIdTest, UriHostIpV4BlankPort) { + const std::string value = "192.0.2.0:"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), IsOkAndHolds(ParsedCdnId(ParseContext(value, 10), "192.0.2.0:"))); +} + +TEST(ParseCdnIdTest, UriHostIpV6) { + const std::string value = "[2001:DB8::1]"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 13), "[2001:DB8::1]"))); +} + +TEST(ParseCdnIdTest, UriHostIpV6Port) { + const std::string value = "[2001:DB8::1]:443"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 17), "[2001:DB8::1]:443"))); +} + +TEST(ParseCdnIdTest, UriHostIpV6BlankPort) { + const std::string value = "[2001:DB8::1]:"; + ParseContext input(value); + EXPECT_THAT(parseCdnId(input), + IsOkAndHolds(ParsedCdnId(ParseContext(value, 14), "[2001:DB8::1]:"))); +} + +TEST(ParseParameterTest, SimpleTokenValue) { + const std::string value = "a=b"; + ParseContext input(value); + EXPECT_THAT(parseParameter(input), IsOkAndHolds(ParseContext(value, 3))); +} + +TEST(ParseParameterTest, SimpleQuotedValue) { + const std::string value = "a=\"b\""; + ParseContext input(value); + EXPECT_THAT(parseParameter(input), IsOkAndHolds(ParseContext(value, 5))); +} + +TEST(ParseParameterTest, EndOfInputBeforeEquals) { + const std::string value = "a"; + ParseContext input(value); + EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseParameterTest, EndOfInputAfterEquals) { + const std::string value = "a="; + ParseContext input(value); + EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseParameterTest, MissingEquals) { + const std::string value = "a,"; + ParseContext input(value); + EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseParameterTest, ValueNotToken) { + const std::string value = "a=,"; + ParseContext input(value); + EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseParameterTest, ValueNotQuotedString) { + const std::string value = "a=\""; + ParseContext input(value); + EXPECT_THAT(parseParameter(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnInfoTest, Simple) { + const std::string value = "name"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 4), "name"))); +} + +TEST(ParseCdnInfoTest, Empty) { + const std::string value = ""; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnInfoTest, NotValidTokenOrUri) { + const std::string value = ","; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnInfoTest, SingleParameter) { + const std::string value = "name;a=b"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 8), "name"))); +} + +TEST(ParseCdnInfoTest, SingleParameterExtraWhitespace) { + const std::string value = "name ; a=b "; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), 
IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 12), "name"))); +} + +TEST(ParseCdnInfoTest, MultipleParametersWithWhitespace) { + const std::string value = "name ; a=b ; c=\"d\" ; e=\";\" "; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), IsOkAndHolds(ParsedCdnInfo(ParseContext(value, 27), "name"))); +} + +TEST(ParseCdnInfoTest, MissingParameter) { + const std::string value = "name ; "; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnInfoTest, InvalidParameter) { + const std::string value = "name ; a= "; + ParseContext input(value); + EXPECT_THAT(parseCdnInfo(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnInfoListTest, Simple) { + const std::string value = "cdn1, cdn2, cdn3"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), + IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 16), {"cdn1", "cdn2", "cdn3"}))); +} + +TEST(ParseCdnInfoListTest, ExtraWhitespace) { + const std::string value = " \t cdn1 \t , cdn2 \t , \t cdn3 "; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), + IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 33), {"cdn1", "cdn2", "cdn3"}))); +} + +TEST(ParseCdnInfoListTest, InvalidParseNoComma) { + const std::string value = "cdn1 cdn2"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnInfoListTest, InvalidCdnId) { + const std::string value = "[bad"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(ParseCdnInfoListTest, Rfc7230Section7Tests) { + // These are the examples from https://tools.ietf.org/html/rfc7230#section-7 + { + const std::string value = "foo,bar"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), + IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 7), {"foo", "bar"}))); + } + { + const std::string value = "foo ,bar,"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), + IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 9), {"foo", "bar"}))); + } + { + const std::string value = "foo , ,bar,charlie "; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), IsOkAndHolds(ParsedCdnInfoList( + ParseContext(value, 21), {"foo", "bar", "charlie"}))); + } + // The following tests are allowed in the #cdn-info rule because it doesn't + // require a single element. 
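Aside (not part of the patch): the empty-element cases below come straight from the list extension in RFC 7230 Section 7, which obliges recipients to accept empty list elements; its recipient-side grammar is roughly #element => [ ( "," / element ) *( OWS "," [ OWS element ] ) ]. That is why inputs consisting only of commas and whitespace parse as a CDN-Loop list with no CDN ids instead of being rejected.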
+ { + const std::string value = ""; + ParseContext input(value); + + EXPECT_THAT(parseCdnInfoList(input), + IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 0), {}))); + } + { + const std::string value = ","; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), + IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 1), {}))); + } + { + const std::string value = ", ,"; + ParseContext input(value); + EXPECT_THAT(parseCdnInfoList(input), + IsOkAndHolds(ParsedCdnInfoList(ParseContext(value, 5), {}))); + } +} + +} // namespace +} // namespace Parser +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cdn_loop/utils_test.cc b/test/extensions/filters/http/cdn_loop/utils_test.cc new file mode 100644 index 000000000000..95a4ab1415a3 --- /dev/null +++ b/test/extensions/filters/http/cdn_loop/utils_test.cc @@ -0,0 +1,141 @@ +#include "extensions/filters/http/cdn_loop/utils.h" + +#include "test/test_common/status_utility.h" + +#include "absl/status/status.h" +#include "absl/strings/str_cat.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace CdnLoop { +namespace { + +using ::Envoy::StatusHelpers::IsOkAndHolds; +using ::Envoy::StatusHelpers::StatusIs; + +TEST(CountCdnLoopOccurrencesTest, EmptyHeader) { + EXPECT_THAT(countCdnLoopOccurrences("", "cdn"), IsOkAndHolds(0)); +} + +TEST(CountCdnLoopOccurrencesTest, NoParameterTests) { + // A pseudonym + EXPECT_THAT(countCdnLoopOccurrences("cdn", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, CDN", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, CDN", "CDN"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, CDN", "foo"), IsOkAndHolds(0)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, cdn, cdn", "cdn"), IsOkAndHolds(3)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, cdn, cdn", "foo"), IsOkAndHolds(0)); + + // A DNS name + EXPECT_THAT(countCdnLoopOccurrences("cdn.example.com", "cdn.example.com"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn.example.com, CDN", "cdn.example.com"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn.example.com, CDN", "CDN"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn.example.com, CDN", "foo"), IsOkAndHolds(0)); + EXPECT_THAT(countCdnLoopOccurrences("cdn.example.com, cdn.example.com, cdn.example.com", + "cdn.example.com"), + IsOkAndHolds(3)); + EXPECT_THAT(countCdnLoopOccurrences("cdn.example.com, cdn.example.com, cdn.example.com", "foo"), + IsOkAndHolds(0)); + + // IPv4 Addresses + EXPECT_THAT(countCdnLoopOccurrences("192.0.2.1", "192.0.2.1"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("192.0.2.1, CDN", "192.0.2.1"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("192.0.2.1, CDN", "CDN"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("192.0.2.1, CDN", "foo"), IsOkAndHolds(0)); + EXPECT_THAT(countCdnLoopOccurrences("192.0.2.1, 192.0.2.1, 192.0.2.1", "192.0.2.1"), + IsOkAndHolds(3)); + EXPECT_THAT(countCdnLoopOccurrences("192.0.2.1, 192.0.2.1, 192.0.2.1", "foo"), IsOkAndHolds(0)); + + // IpV6 Addresses + EXPECT_THAT(countCdnLoopOccurrences("[2001:DB8::3]", "[2001:DB8::3]"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("[2001:DB8::3], CDN", "[2001:DB8::3]"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("[2001:DB8::3], CDN", "CDN"), IsOkAndHolds(1)); + 
EXPECT_THAT(countCdnLoopOccurrences("[2001:DB8::3], CDN", "foo"), IsOkAndHolds(0)); + EXPECT_THAT( + countCdnLoopOccurrences("[2001:DB8::3], [2001:DB8::3], [2001:DB8::3]", "[2001:DB8::3]"), + IsOkAndHolds(3)); + EXPECT_THAT(countCdnLoopOccurrences("[2001:DB8::3], [2001:DB8::3], [2001:DB8::3]", "foo"), + IsOkAndHolds(0)); +} + +TEST(CountCdnLoopOccurrencesTest, SimpleParameterTests) { + EXPECT_THAT(countCdnLoopOccurrences("cdn; foo=bar", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn; foo=bar, CDN", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn; foo=bar; baz=quux, CDN", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, cdn; foo=bar, cdn; baz=quux", "cdn"), IsOkAndHolds(3)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, cdn; foo=bar; baz=quux, cdn", "foo"), IsOkAndHolds(0)); +} + +TEST(CountCdnLoopOccurrencesTest, ExcessWhitespace) { + EXPECT_THAT(countCdnLoopOccurrences(" cdn", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn ", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences(" cdn ", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("\tcdn\t", "cdn"), IsOkAndHolds(1)); +} + +TEST(CountCdnLoopOccurrencesTest, NoWhitespace) { + EXPECT_THAT(countCdnLoopOccurrences("cdn", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn,cdn", "cdn"), IsOkAndHolds(2)); + EXPECT_THAT(countCdnLoopOccurrences("cdn;foo=bar;baz=quuz,cdn", "cdn"), IsOkAndHolds(2)); +} + +TEST(CountCdnLoopOccurrencesTest, CdnIdInParameterTests) { + // In these tests, the parameter contains a string matching the cdn_id in + // either the key or the value of the parameters. + EXPECT_THAT(countCdnLoopOccurrences("cdn; cdn=bar", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn; foo=cdn", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn; cdn=cdn", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn; cdn=\"cdn\"", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn; cdn=\"cdn,cdn\"", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn; cdn=\"cdn, cdn\"", "cdn"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences("cdn, cdn; cdn=\"cdn\", cdn ; cdn=\"cdn,cdn\"", "cdn"), + IsOkAndHolds(3)); +} + +TEST(CountCdnLoopOccurrencesTest, Rfc8586Tests) { + // Examples from RFC 8586, Section 2. 
+ const std::string example1 = "foo123.foocdn.example, barcdn.example; trace=\"abcdef\""; + EXPECT_THAT(countCdnLoopOccurrences(example1, "foo123.foocdn.example"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences(example1, "barcdn.example"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences(example1, "trace=\"abcdef\""), IsOkAndHolds(0)); + const std::string example2 = "AnotherCDN; abc=123; def=\"456\""; + EXPECT_THAT(countCdnLoopOccurrences(example2, "AnotherCDN"), IsOkAndHolds(1)); + + // The concatenation of the two done correctly as per RFC 7230 rules + { + const std::string combined = absl::StrCat(example1, ",", example2); + EXPECT_THAT(countCdnLoopOccurrences(combined, "foo123.foocdn.example"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences(combined, "barcdn.example"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences(combined, "AnotherCDN"), IsOkAndHolds(1)); + } + + // The concatenation of two done poorly (with extra commas) + { + const std::string combined = absl::StrCat(example1, ",,,", example2); + EXPECT_THAT(countCdnLoopOccurrences(combined, "foo123.foocdn.example"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences(combined, "barcdn.example"), IsOkAndHolds(1)); + EXPECT_THAT(countCdnLoopOccurrences(combined, "AnotherCDN"), IsOkAndHolds(1)); + } +} + +TEST(CountCdnLoopOccurrencesTest, ValidHeaderInsideParameter) { + EXPECT_THAT(countCdnLoopOccurrences("cdn; header=\"cdn; cdn=cdn; cdn\"", "cdn"), IsOkAndHolds(1)); +} + +TEST(CountCdnLoopOccurrencesTest, BadCdnId) { + EXPECT_THAT(countCdnLoopOccurrences("cdn", ""), StatusIs(absl::StatusCode::kInvalidArgument)); +} + +TEST(CountCdnLoopOccurrencesTest, BadHeader) { + EXPECT_THAT(countCdnLoopOccurrences("[bad-id", "cdn"), + StatusIs(absl::StatusCode::kInvalidArgument)); +} + +} // namespace +} // namespace CdnLoop +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/common/compressor/BUILD b/test/extensions/filters/http/common/compressor/BUILD index a6b214dd6b50..a6ed07bd1528 100644 --- a/test/extensions/filters/http/common/compressor/BUILD +++ b/test/extensions/filters/http/common/compressor/BUILD @@ -49,4 +49,6 @@ envoy_cc_benchmark_binary( envoy_benchmark_test( name = "compressor_filter_speed_test_benchmark_test", benchmark_binary = "compressor_filter_speed_test", + # TODO(envoyproxy/windows-dev): diagnose clang-cl build test failure + tags = ["fails_on_windows"], ) diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index 80066cf9eb40..ffc67eb4a232 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -23,6 +23,18 @@ envoy_proto_library( ], ) +envoy_cc_test_library( + name = "http_filter_fuzzer_lib", + hdrs = ["http_filter_fuzzer.h"], + deps = [ + "//include/envoy/http:filter_interface", + "//source/common/http:utility_lib", + "//test/fuzz:common_proto_cc_proto", + "//test/fuzz:utility_lib", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test_library( name = "uber_filter_lib", srcs = [ @@ -32,13 +44,13 @@ envoy_cc_test_library( hdrs = ["uber_filter.h"], deps = [ ":filter_fuzz_proto_cc_proto", + ":http_filter_fuzzer_lib", "//source/common/config:utility_lib", "//source/common/http:utility_lib", "//source/common/protobuf:utility_lib", "//source/common/tracing:http_tracer_lib", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/common:utility_lib", - 
"//test/fuzz:utility_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 new file mode 100644 index 000000000000..60ffb84c5ac3 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.oauth" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2" + value: "\n\306\t\022\006\022\001(\032\001r\032<\n\035envoy.filters.\360\222\213\217Qgrpc_stats\022\r\022\013\022\002\010\006\"\005\010\200\200\200\001\032\014\022\n\n\001t\"\005\010\200\200\200\001\"\006\022\001(\032\001r*\005\n\003:\001=2\351\010\n\346\010*\343\010\n\010\n\006\010\200\200\200\200\004\022\326\010^^^^^j!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: \"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matche!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: \"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\r/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\"\n }\n}\nB\003\n\001A" + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5728684315770880 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5728684315770880 new file mode 100644 index 000000000000..9521f0e839ea --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5728684315770880 @@ -0,0 +1,13 @@ +config { + name: "envoy.filters.http.adaptive_concurrency" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency" + value: "\n\024\022\010\022\002\010\010\032\002\020\010\032\010\n\002\020\010\022\002\010\001" + } +} +data { +} +upstream_data { + trailers { + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 new file mode 100644 index 000000000000..5bb334c90502 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 @@ -0,0 +1,12 @@ +config { + name: "envoy.filters.http.admission_control" 
+ typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + value: "\022\000\032\000*\003\022\001$" + } +} +data { + http_body { + data: "\022\000" + } +} diff --git a/test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h b/test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h new file mode 100644 index 000000000000..dba20989d343 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h @@ -0,0 +1,206 @@ +#pragma once + +#include "envoy/http/filter.h" + +#include "common/http/utility.h" + +#include "test/fuzz/common.pb.h" +#include "test/fuzz/utility.h" +#include "test/test_common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { + +// Generic library to fuzz HTTP filters. +// Usage: +// 1. Create the filter and set its callbacks. +// ExampleFilter filter; +// filter.setDecoderFilterCallbacks(decoder_callbacks); +// +// 2. Create an HttpFilterFuzzer and run the decode/encode methods. Optionally add access logging. +// The fuzzer can be static; calling reset() clears all fuzzer state between iterations. +// Envoy::Extensions::HttpFilters::HttpFilterFuzzer fuzzer; +// fuzzer.runData(static_cast<Http::StreamDecoderFilter*>(&filter), +// input.downstream_request()); +// fuzzer.accessLog(static_cast<AccessLog::Instance*>(&filter), +// stream_info); +// fuzzer.reset(); + +class HttpFilterFuzzer { +public: + // Instantiate HttpFilterFuzzer. + HttpFilterFuzzer() = default; + + // This executes the filter decode or encode methods with the fuzzed data. + template <class FilterType> void runData(FilterType* filter, const test::fuzz::HttpData& data); + + // This executes the access logger with the fuzzed headers/trailers. + void accessLog(AccessLog::Instance* access_logger, const StreamInfo::StreamInfo& stream_info) { + ENVOY_LOG_MISC(debug, "Access logging"); + access_logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); + } + + // Fuzzed headers and trailers are needed for access logging; reset the data between iterations. + void reset() { + enabled_ = true; + request_headers_.clear(); + response_headers_.clear(); + request_trailers_.clear(); + response_trailers_.clear(); + encoded_trailers_.clear(); + } + +protected: + // Templated functions to validate and send headers/data/trailers for decoders/encoders. + // General functions are deleted, but templated specializations for encoders/decoders are defined + // in the cc file. + template <class FilterType> + Http::FilterHeadersStatus sendHeaders(FilterType* filter, const test::fuzz::HttpData& data, + bool end_stream) = delete; + + template <class FilterType> + Http::FilterDataStatus sendData(FilterType* filter, Buffer::Instance& buffer, + bool end_stream) = delete; + + template <class FilterType> + void sendTrailers(FilterType* filter, const test::fuzz::HttpData& data) = delete; + + // This keeps track of when a filter will stop decoding due to direct responses. + // If your filter needs to stop decoding because of a direct response, make sure you override + // sendLocalReply to set enabled_ to false. + bool enabled_ = true; + + // Headers/trailers need to be saved for the lifetime of the filter, + // so save them as member variables.
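A hypothetical consumer, to make the usage comment above concrete (the header's member declarations continue right after this aside): a filter-specific fuzz target built on this shared library might look roughly like the following, where ExampleFilter is a stand-in for a real filter and the proto input type is assumed to be test::fuzz::HttpData.

    DEFINE_PROTO_FUZZER(const test::fuzz::HttpData& input) {
      static NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks;
      static Envoy::Extensions::HttpFilters::HttpFilterFuzzer fuzzer;

      ExampleFilter filter; // hypothetical filter under test, not part of this patch
      filter.setDecoderFilterCallbacks(decoder_callbacks);

      // Drive decodeHeaders/decodeData/decodeTrailers with the fuzzed HTTP data,
      // then clear the saved headers and trailers for the next iteration.
      fuzzer.runData(static_cast<Http::StreamDecoderFilter*>(&filter), input);
      fuzzer.reset();
    }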
+ Http::TestRequestHeaderMapImpl request_headers_; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestRequestTrailerMapImpl request_trailers_; + Http::TestResponseTrailerMapImpl response_trailers_; + Http::TestResponseTrailerMapImpl encoded_trailers_; +}; + +template <class FilterType> +void HttpFilterFuzzer::runData(FilterType* filter, const test::fuzz::HttpData& data) { + bool end_stream = false; + enabled_ = true; + if (data.body_case() == test::fuzz::HttpData::BODY_NOT_SET && !data.has_trailers()) { + end_stream = true; + } + const auto& headersStatus = sendHeaders(filter, data, end_stream); + ENVOY_LOG_MISC(debug, "Finished with FilterHeadersStatus: {}", headersStatus); + if ((headersStatus != Http::FilterHeadersStatus::Continue && + headersStatus != Http::FilterHeadersStatus::StopIteration) || + !enabled_) { + return; + } + + const std::vector<std::string> data_chunks = Fuzz::parseHttpData(data); + for (size_t i = 0; i < data_chunks.size(); i++) { + if (!data.has_trailers() && i == data_chunks.size() - 1) { + end_stream = true; + } + Buffer::OwnedImpl buffer(data_chunks[i]); + const auto& dataStatus = sendData(filter, buffer, end_stream); + ENVOY_LOG_MISC(debug, "Finished with FilterDataStatus: {}", dataStatus); + if (dataStatus != Http::FilterDataStatus::Continue || !enabled_) { + return; + } + } + + if (data.has_trailers() && enabled_) { + sendTrailers(filter, data); + } +} + +template <> +inline Http::FilterHeadersStatus HttpFilterFuzzer::sendHeaders(Http::StreamDecoderFilter* filter, + const test::fuzz::HttpData& data, + bool end_stream) { + request_headers_ = Fuzz::fromHeaders<Http::TestRequestHeaderMapImpl>(data.headers()); + if (request_headers_.Path() == nullptr) { + request_headers_.setPath("/foo"); + } + if (request_headers_.Method() == nullptr) { + request_headers_.setMethod("GET"); + } + if (request_headers_.Host() == nullptr) { + request_headers_.setHost("foo.com"); + } + + ENVOY_LOG_MISC(debug, "Decoding headers (end_stream={}):\n{} ", end_stream, request_headers_); + Http::FilterHeadersStatus status = filter->decodeHeaders(request_headers_, end_stream); + if (end_stream) { + filter->decodeComplete(); + } + return status; +} + +template <> +inline Http::FilterHeadersStatus HttpFilterFuzzer::sendHeaders(Http::StreamEncoderFilter* filter, + const test::fuzz::HttpData& data, + bool end_stream) { + response_headers_ = Fuzz::fromHeaders<Http::TestResponseHeaderMapImpl>(data.headers()); + + // Status must be a valid unsigned long. If not set, the utility function below will throw + // an exception on the data path of some filters. This should never happen in production, so catch + // the exception and set to a default value.
+ try { + (void)Http::Utility::getResponseStatus(response_headers_); + } catch (const Http::CodecClientException& e) { + response_headers_.setStatus(200); + } + + ENVOY_LOG_MISC(debug, "Encoding headers (end_stream={}):\n{} ", end_stream, response_headers_); + Http::FilterHeadersStatus status = filter->encodeHeaders(response_headers_, end_stream); + if (end_stream) { + filter->encodeComplete(); + } + return status; +} + +template <> +inline Http::FilterDataStatus HttpFilterFuzzer::sendData(Http::StreamDecoderFilter* filter, + Buffer::Instance& buffer, + bool end_stream) { + ENVOY_LOG_MISC(debug, "Decoding data (end_stream={}): {} ", end_stream, buffer.toString()); + Http::FilterDataStatus status = filter->decodeData(buffer, end_stream); + if (end_stream) { + filter->decodeComplete(); + } + return status; +} + +template <> +inline Http::FilterDataStatus HttpFilterFuzzer::sendData(Http::StreamEncoderFilter* filter, + Buffer::Instance& buffer, + bool end_stream) { + ENVOY_LOG_MISC(debug, "Encoding data (end_stream={}): {} ", end_stream, buffer.toString()); + Http::FilterDataStatus status = filter->encodeData(buffer, end_stream); + if (end_stream) { + filter->encodeComplete(); + } + return status; +} + +template <> +inline void HttpFilterFuzzer::sendTrailers(Http::StreamDecoderFilter* filter, + const test::fuzz::HttpData& data) { + request_trailers_ = Fuzz::fromHeaders<Http::TestRequestTrailerMapImpl>(data.trailers()); + ENVOY_LOG_MISC(debug, "Decoding trailers:\n{} ", request_trailers_); + filter->decodeTrailers(request_trailers_); + filter->decodeComplete(); +} + +template <> +inline void HttpFilterFuzzer::sendTrailers(Http::StreamEncoderFilter* filter, + const test::fuzz::HttpData& data) { + response_trailers_ = Fuzz::fromHeaders<Http::TestResponseTrailerMapImpl>(data.trailers()); + ENVOY_LOG_MISC(debug, "Encoding trailers:\n{} ", response_trailers_); + filter->encodeTrailers(response_trailers_); + filter->encodeComplete(); +} + +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index 65924becd985..0fcab5125e71 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -54,138 +54,6 @@ UberFilterFuzzer::UberFilterFuzzer() : async_request_{&cluster_manager_.async_cl perFilterSetup(); } -std::vector<std::string> UberFilterFuzzer::parseHttpData(const test::fuzz::HttpData& data) { - std::vector<std::string> data_chunks; - - if (data.has_http_body()) { - data_chunks.reserve(data.http_body().data_size()); - for (const std::string& http_data : data.http_body().data()) { - data_chunks.push_back(http_data); - } - } else if (data.has_proto_body()) { - const std::string serialized = data.proto_body().message().value(); - data_chunks = absl::StrSplit(serialized, absl::ByLength(data.proto_body().chunk_size())); - } - - return data_chunks; -} - -template <class FilterType> -void UberFilterFuzzer::runData(FilterType* filter, const test::fuzz::HttpData& data) { - bool end_stream = false; - enabled_ = true; - if (data.body_case() == test::fuzz::HttpData::BODY_NOT_SET && !data.has_trailers()) { - end_stream = true; - } - const auto& headersStatus = sendHeaders(filter, data, end_stream); - ENVOY_LOG_MISC(debug, "Finished with FilterHeadersStatus: {}", headersStatus); - if ((headersStatus != Http::FilterHeadersStatus::Continue && - headersStatus != Http::FilterHeadersStatus::StopIteration) || - !enabled_) { - return; - } - - const std::vector<std::string> data_chunks = parseHttpData(data); - for (size_t i =
0; i < data_chunks.size(); i++) { - if (!data.has_trailers() && i == data_chunks.size() - 1) { - end_stream = true; - } - Buffer::OwnedImpl buffer(data_chunks[i]); - const auto& dataStatus = sendData(filter, buffer, end_stream); - ENVOY_LOG_MISC(debug, "Finished with FilterDataStatus: {}", dataStatus); - if (dataStatus != Http::FilterDataStatus::Continue || !enabled_) { - return; - } - } - - if (data.has_trailers() && enabled_) { - sendTrailers(filter, data); - } -} - -template <> -Http::FilterHeadersStatus UberFilterFuzzer::sendHeaders(Http::StreamDecoderFilter* filter, - const test::fuzz::HttpData& data, - bool end_stream) { - request_headers_ = Fuzz::fromHeaders(data.headers()); - if (request_headers_.Path() == nullptr) { - request_headers_.setPath("/foo"); - } - if (request_headers_.Method() == nullptr) { - request_headers_.setMethod("GET"); - } - if (request_headers_.Host() == nullptr) { - request_headers_.setHost("foo.com"); - } - - ENVOY_LOG_MISC(debug, "Decoding headers (end_stream={}):\n{} ", end_stream, request_headers_); - return filter->decodeHeaders(request_headers_, end_stream); -} - -template <> -Http::FilterHeadersStatus UberFilterFuzzer::sendHeaders(Http::StreamEncoderFilter* filter, - const test::fuzz::HttpData& data, - bool end_stream) { - response_headers_ = Fuzz::fromHeaders(data.headers()); - - // Status must be a valid unsigned long. If not set, the utility function below will throw - // an exception on the data path of some filters. This should never happen in production, so catch - // the exception and set to a default value. - try { - (void)Http::Utility::getResponseStatus(response_headers_); - } catch (const Http::CodecClientException& e) { - response_headers_.setStatus(200); - } - - ENVOY_LOG_MISC(debug, "Encoding headers (end_stream={}):\n{} ", end_stream, response_headers_); - Http::FilterHeadersStatus status = filter->encodeHeaders(response_headers_, end_stream); - if (end_stream) { - filter->encodeComplete(); - } - return status; -} - -template <> -Http::FilterDataStatus UberFilterFuzzer::sendData(Http::StreamDecoderFilter* filter, - Buffer::Instance& buffer, bool end_stream) { - ENVOY_LOG_MISC(debug, "Decoding data (end_stream={}): {} ", end_stream, buffer.toString()); - return filter->decodeData(buffer, end_stream); -} - -template <> -Http::FilterDataStatus UberFilterFuzzer::sendData(Http::StreamEncoderFilter* filter, - Buffer::Instance& buffer, bool end_stream) { - ENVOY_LOG_MISC(debug, "Encoding data (end_stream={}): {} ", end_stream, buffer.toString()); - Http::FilterDataStatus status = filter->encodeData(buffer, end_stream); - if (end_stream) { - filter->encodeComplete(); - } - return status; -} - -template <> -void UberFilterFuzzer::sendTrailers(Http::StreamDecoderFilter* filter, - const test::fuzz::HttpData& data) { - request_trailers_ = Fuzz::fromHeaders(data.trailers()); - ENVOY_LOG_MISC(debug, "Decoding trailers:\n{} ", request_trailers_); - filter->decodeTrailers(request_trailers_); -} - -template <> -void UberFilterFuzzer::sendTrailers(Http::StreamEncoderFilter* filter, - const test::fuzz::HttpData& data) { - response_trailers_ = Fuzz::fromHeaders(data.trailers()); - ENVOY_LOG_MISC(debug, "Encoding trailers:\n{} ", response_trailers_); - filter->encodeTrailers(response_trailers_); - filter->encodeComplete(); -} - -void UberFilterFuzzer::accessLog(AccessLog::Instance* access_logger, - const StreamInfo::StreamInfo& stream_info) { - ENVOY_LOG_MISC(debug, "Access logging"); - access_logger->log(&request_headers_, &response_headers_, 
&response_trailers_, stream_info); -} - void UberFilterFuzzer::fuzz( const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, @@ -208,13 +76,13 @@ void UberFilterFuzzer::fuzz( // Data path should not throw exceptions. if (decoder_filter_ != nullptr) { - runData(decoder_filter_.get(), downstream_data); + HttpFilterFuzzer::runData(decoder_filter_.get(), downstream_data); } if (encoder_filter_ != nullptr) { - runData(encoder_filter_.get(), upstream_data); + HttpFilterFuzzer::runData(encoder_filter_.get(), upstream_data); } if (access_logger_ != nullptr) { - accessLog(access_logger_.get(), stream_info_); + HttpFilterFuzzer::accessLog(access_logger_.get(), stream_info_); } reset(); @@ -232,11 +100,7 @@ void UberFilterFuzzer::reset() { encoder_filter_.reset(); access_logger_.reset(); - request_headers_.clear(); - response_headers_.clear(); - request_trailers_.clear(); - response_trailers_.clear(); - encoded_trailers_.clear(); + HttpFilterFuzzer::reset(); } } // namespace HttpFilters diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 4d1a894fef1e..8bcef67d6afa 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -1,3 +1,4 @@ +#include "test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h" #include "test/fuzz/utility.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" @@ -8,7 +9,8 @@ namespace Envoy { namespace Extensions { namespace HttpFilters { -class UberFilterFuzzer { +// Generic filter fuzzer that can fuzz any HttpFilter. +class UberFilterFuzzer : public HttpFilterFuzzer { public: UberFilterFuzzer(); @@ -17,16 +19,9 @@ class UberFilterFuzzer { proto_config, const test::fuzz::HttpData& downstream_data, const test::fuzz::HttpData& upstream_data); - // This executes the filter decoders/encoders with the fuzzed data. - template void runData(FilterType* filter, const test::fuzz::HttpData& data); - - // This executes the access logger with the fuzzed headers/trailers. - void accessLog(AccessLog::Instance* access_logger, const StreamInfo::StreamInfo& stream_info); - // For fuzzing proto data, guide the mutator to useful 'Any' types. static void guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice); - // Resets cached data (request headers, etc.). Should be called for each fuzz iteration. void reset(); protected: @@ -35,26 +30,7 @@ class UberFilterFuzzer { // Filter specific input cleanup. void cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message); - // Parses http or proto body into chunks. - static std::vector parseHttpData(const test::fuzz::HttpData& data); - - // Templated functions to validate and send headers/data/trailers for decoders/encoders. - // General functions are deleted, but templated specializations for encoders/decoders are defined - // in the cc file. - template - Http::FilterHeadersStatus sendHeaders(FilterType* filter, const test::fuzz::HttpData& data, - bool end_stream) = delete; - - template - Http::FilterDataStatus sendData(FilterType* filter, Buffer::Instance& buffer, - bool end_stream) = delete; - - template - void sendTrailers(FilterType* filter, const test::fuzz::HttpData& data) = delete; - private: - // This keeps track of when a filter will stop decoding due to direct responses. 
- bool enabled_ = true; NiceMock factory_context_; NiceMock filter_callback_; std::shared_ptr resolver_{std::make_shared()}; @@ -65,23 +41,14 @@ class UberFilterFuzzer { NiceMock async_request_; NiceMock stream_info_; - // Mocked callbacks. - NiceMock decoder_callbacks_; - NiceMock encoder_callbacks_; - // Filter constructed from the config. Http::StreamDecoderFilterSharedPtr decoder_filter_; Http::StreamEncoderFilterSharedPtr encoder_filter_; AccessLog::InstanceSharedPtr access_logger_; - // Headers/trailers need to be saved for the lifetime of the filter, - // so save them as member variables. - // TODO(nareddyt): Use for access logging in a followup PR. - Http::TestRequestHeaderMapImpl request_headers_; - Http::TestResponseHeaderMapImpl response_headers_; - Http::TestRequestTrailerMapImpl request_trailers_; - Http::TestResponseTrailerMapImpl response_trailers_; - Http::TestResponseTrailerMapImpl encoded_trailers_; + // Mocked callbacks. + NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; }; } // namespace HttpFilters diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index fde523d813ab..c873b7bdd2da 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -76,16 +76,6 @@ void UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uin mutable_any->set_type_url(type_url); } -void removeConnectMatcher(Protobuf::Message* message) { - envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication& config = - dynamic_cast(*message); - for (auto& rules : *config.mutable_rules()) { - if (rules.match().has_connect_matcher()) { - rules.mutable_match()->set_path("/"); - } - } -} - void cleanAttachmentTemplate(Protobuf::Message* message) { envoy::extensions::filters::http::squash::v3::Squash& config = dynamic_cast(*message); @@ -138,10 +128,6 @@ void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, // TapDS oneof field and OutputSinkType StreamingGrpc not implemented cleanTapConfig(message); } - if (filter_name == HttpFilterNames::get().JwtAuthn) { - // Remove when connect matcher is implemented for Jwt Authentication filter. 
- removeConnectMatcher(message); - } } void UberFilterFuzzer::perFilterSetup() { diff --git a/test/extensions/filters/http/common/mock.cc b/test/extensions/filters/http/common/mock.cc index 45129c0edc5c..89de081bebd6 100644 --- a/test/extensions/filters/http/common/mock.cc +++ b/test/extensions/filters/http/common/mock.cc @@ -17,9 +17,9 @@ MockUpstream::MockUpstream(Upstream::MockClusterManager& mock_cm, const std::str new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", status_}}})); if (response_body_.length()) { - response_message->body() = std::make_unique(response_body_); + response_message->body().add(response_body_); } else { - response_message->body().reset(nullptr); + response_message->body().drain(response_message->body().length()); } cb.onSuccess(request_, std::move(response_message)); return &request_; diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc index 0ded467631a2..623512513303 100644 --- a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -38,7 +38,7 @@ class CompressorIntegrationTest : public testing::TestWithParamheaders().getStatusValue()); EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, response->headers() - .get(Http::CustomHeaders::get().ContentEncoding) + .get(Http::CustomHeaders::get().ContentEncoding)[0] ->value() .getStringView()); EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, @@ -61,7 +61,7 @@ class CompressorIntegrationTest : public testing::TestWithParambodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding).empty()); ASSERT_EQ(content_length, response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); } @@ -188,9 +188,10 @@ TEST_P(CompressorIntegrationTest, UpstreamResponseAlreadyEncoded) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ( - "br", - response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("br", response->headers() + .get(Http::CustomHeaders::get().ContentEncoding)[0] + ->value() + .getStringView()); EXPECT_EQ(128U, response->body().size()); } @@ -214,7 +215,7 @@ TEST_P(CompressorIntegrationTest, NotEnoughContentLength) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding).empty()); EXPECT_EQ(10U, response->body().size()); } @@ -237,7 +238,7 @@ TEST_P(CompressorIntegrationTest, EmptyResponse) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("204", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding).empty()); EXPECT_EQ(0U, response->body().size()); } @@ -292,9 
+293,10 @@ TEST_P(CompressorIntegrationTest, AcceptanceFullConfigChunkedResponse) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ( - "gzip", - response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("gzip", response->headers() + .get(Http::CustomHeaders::get().ContentEncoding)[0] + ->value() + .getStringView()); ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); } @@ -318,10 +320,11 @@ TEST_P(CompressorIntegrationTest, AcceptanceFullConfigVaryHeader) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ( - "gzip", - response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("gzip", response->headers() + .get(Http::CustomHeaders::get().ContentEncoding)[0] + ->value() + .getStringView()); ASSERT_EQ("Cookie, Accept-Encoding", - response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView()); + response->headers().get(Http::CustomHeaders::get().Vary)[0]->value().getStringView()); } } // namespace Envoy diff --git a/test/extensions/filters/http/csrf/csrf_filter_test.cc b/test/extensions/filters/http/csrf/csrf_filter_test.cc index dbac2d629e2e..c2e864d40ad1 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_test.cc @@ -156,7 +156,7 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOrigin) { EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); EXPECT_EQ(1U, config_->stats().request_invalid_.value()); EXPECT_EQ(0U, config_->stats().request_valid_.value()); - EXPECT_EQ("csrf_origin_mismatch", decoder_callbacks_.details_); + EXPECT_EQ("csrf_origin_mismatch", decoder_callbacks_.details()); } TEST_F(CsrfFilterTest, RequestWithInvalidOriginDifferentNonStandardPorts) { @@ -180,7 +180,7 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOriginDifferentNonStandardPorts) { EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); EXPECT_EQ(1U, config_->stats().request_invalid_.value()); EXPECT_EQ(0U, config_->stats().request_valid_.value()); - EXPECT_EQ("csrf_origin_mismatch", decoder_callbacks_.details_); + EXPECT_EQ("csrf_origin_mismatch", decoder_callbacks_.details()); } TEST_F(CsrfFilterTest, RequestWithValidOrigin) { diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc index 1e1450f744ae..be709d1fcdb4 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc @@ -95,19 +95,19 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { // sent. 
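Most of the remaining churn in these integration and unit tests is mechanical: HeaderMap::get() now returns the collection of all headers with the requested name instead of a single HeaderEntry pointer, so presence checks become empty() and value access goes through the first element. A self-contained sketch of the pattern (the helper name is illustrative only):

    absl::optional<absl::string_view> firstHeaderValue(const Http::HeaderMap& headers,
                                                       const Http::LowerCaseString& key) {
      const auto result = headers.get(key); // every header named `key`; possibly empty
      if (result.empty()) {
        return absl::nullopt; // replaces the old `entry == nullptr` check
      }
      return result[0]->value().getStringView();
    }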
EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ("gzip", upstream_request_->headers() - .get(Http::LowerCaseString("accept-encoding")) + .get(Http::LowerCaseString("accept-encoding"))[0] ->value() .getStringView()); - EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString("content-encoding"))); + EXPECT_TRUE(upstream_request_->headers().get(Http::LowerCaseString("content-encoding")).empty()); EXPECT_EQ(uncompressed_request_length, upstream_request_->bodyLength()); EXPECT_EQ(std::to_string(compressed_request_length), upstream_request_->trailers() - ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) + ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); EXPECT_EQ(std::to_string(uncompressed_request_length), upstream_request_->trailers() - ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes")) + ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes"))[0] ->value() .getStringView()); @@ -153,12 +153,12 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { EXPECT_EQ(uncompressed_response_length, response->body().length()); EXPECT_EQ(std::to_string(compressed_response_length), response->trailers() - ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) + ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); EXPECT_EQ(std::to_string(uncompressed_response_length), response->trailers() - ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes")) + ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes"))[0] ->value() .getStringView()); @@ -224,13 +224,13 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompressionError) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ("gzip", upstream_request_->headers() - .get(Http::LowerCaseString("accept-encoding")) + .get(Http::LowerCaseString("accept-encoding"))[0] ->value() .getStringView()); - EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString("content-encoding"))); + EXPECT_TRUE(upstream_request_->headers().get(Http::LowerCaseString("content-encoding")).empty()); EXPECT_EQ(std::to_string(compressed_request_length), upstream_request_->trailers() - ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) + ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); @@ -270,7 +270,7 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompressionError) { EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(std::to_string(compressed_response_length), response->trailers() - ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) + ->get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc index 5ae7accd1085..bd8f8891d3ec 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -78,14 +78,14 @@ class DecompressorFilterTest : public testing::TestWithParam { if (end_stream && expect_decompression) { EXPECT_EQ( "30", - trailers.get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) - ->value() - 
.getStringView()); - EXPECT_EQ( - "60", - trailers.get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes")) + trailers.get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); + EXPECT_EQ("60", trailers + .get(Http::LowerCaseString( + "x-envoy-decompressor-testlib-uncompressed-bytes"))[0] + ->value() + .getStringView()); } } else { Http::TestResponseTrailerMapImpl trailers; @@ -98,14 +98,14 @@ class DecompressorFilterTest : public testing::TestWithParam { if (end_stream && expect_decompression) { EXPECT_EQ( "30", - trailers.get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) - ->value() - .getStringView()); - EXPECT_EQ( - "60", - trailers.get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes")) + trailers.get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); + EXPECT_EQ("60", trailers + .get(Http::LowerCaseString( + "x-envoy-decompressor-testlib-uncompressed-bytes"))[0] + ->value() + .getStringView()); } } } @@ -116,27 +116,27 @@ class DecompressorFilterTest : public testing::TestWithParam { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers)); EXPECT_EQ("30", request_trailers - .get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) - ->value() - .getStringView()); - EXPECT_EQ("60", - request_trailers - .get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes")) + .get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); + EXPECT_EQ("60", request_trailers + .get(Http::LowerCaseString( + "x-envoy-decompressor-testlib-uncompressed-bytes"))[0] + ->value() + .getStringView()); } else { Http::TestResponseTrailerMapImpl response_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); EXPECT_EQ("30", response_trailers - .get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes")) - ->value() - .getStringView()); - EXPECT_EQ("60", - response_trailers - .get(Http::LowerCaseString("x-envoy-decompressor-testlib-uncompressed-bytes")) + .get(Http::LowerCaseString("x-envoy-decompressor-testlib-compressed-bytes"))[0] ->value() .getStringView()); + EXPECT_EQ("60", response_trailers + .get(Http::LowerCaseString( + "x-envoy-decompressor-testlib-uncompressed-bytes"))[0] + ->value() + .getStringView()); } } @@ -186,21 +186,21 @@ class DecompressorFilterTest : public testing::TestWithParam { // The filter removes the decompressor's content encoding from the Content-Encoding header. if (expected_content_encoding.has_value()) { EXPECT_EQ(expected_content_encoding.value(), - headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding) + headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding)[0] ->value() .getStringView()); } else { - EXPECT_EQ(nullptr, headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding)); + EXPECT_TRUE(headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding).empty()); } // The filter adds the decompressor's content encoding to the Accept-Encoding header on the // request direction. 
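// For illustration, a minimal sketch of the header-lookup pattern these hunks migrate to: the
// header map's get() is treated as returning a collection of matching entries rather than a
// single pointer, so callers test empty() instead of comparing against nullptr and index the
// first entry. The variable name `headers` is assumed here; the header name and accessors are
// the ones already used by the assertions in these tests.
const auto entries = headers.get(Http::LowerCaseString("content-encoding"));
if (!entries.empty()) {
  const auto value = entries[0]->value().getStringView();
  // ... assert on `value` exactly as the surrounding expectations do ...
}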
- const auto* accept_encoding = + const auto accept_encoding = headers_after_filter->get(Http::LowerCaseString{"accept-encoding"}); if (isRequestDirection() && expected_accept_encoding.has_value()) { - EXPECT_EQ(expected_accept_encoding.value(), accept_encoding->value().getStringView()); + EXPECT_EQ(expected_accept_encoding.value(), accept_encoding[0]->value().getStringView()); } else { - EXPECT_EQ(nullptr, accept_encoding); + EXPECT_TRUE(accept_encoding.empty()); } expectDecompression(decompressor_ptr, end_with_data); @@ -399,7 +399,7 @@ TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { doHeaders(headers_before_filter, true /* end_stream */); if (isRequestDirection()) { - ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding"))[0] ->value() .getStringView(), "mock"); @@ -418,7 +418,7 @@ TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingAbsent) { doHeaders(headers_before_filter, false /* end_stream */); if (isRequestDirection()) { - ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding"))[0] ->value() .getStringView(), "mock"); @@ -451,7 +451,7 @@ TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) { doHeaders(headers_before_filter, false /* end_stream */); if (isRequestDirection()) { - ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding"))[0] ->value() .getStringView(), "mock"); @@ -474,7 +474,7 @@ TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { doHeaders(headers_before_filter, false /* end_stream */); if (isRequestDirection()) { - ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding"))[0] ->value() .getStringView(), "mock"); @@ -498,7 +498,7 @@ TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) doHeaders(headers_before_filter, false /* end_stream */); if (isRequestDirection()) { - ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding"))[0] ->value() .getStringView(), "mock"); diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index 43aa9e2f4d79..e6e1e53ab4d5 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -92,10 +92,9 @@ name: envoy.clusters.dynamic_forward_proxy void createUpstreams() override { if (upstream_tls_) { - fake_upstreams_.emplace_back( - new FakeUpstream(Ssl::createFakeUpstreamSslContext(upstream_cert_name_, context_manager_, - factory_context_), - 0, FakeHttpConnection::Type::HTTP1, version_, timeSystem())); + addFakeUpstream(Ssl::createFakeUpstreamSslContext(upstream_cert_name_, context_manager_, + factory_context_), + FakeHttpConnection::Type::HTTP1); } else { HttpIntegrationTest::createUpstreams(); } diff --git a/test/extensions/filters/http/ext_authz/BUILD b/test/extensions/filters/http/ext_authz/BUILD index 135b92bd3adb..65ea7d623b81 100644 --- a/test/extensions/filters/http/ext_authz/BUILD +++ 
b/test/extensions/filters/http/ext_authz/BUILD @@ -28,7 +28,6 @@ envoy_extension_cc_test( "//source/extensions/filters/http/ext_authz", "//test/extensions/filters/common/ext_authz:ext_authz_mocks", "//test/mocks/http:http_mocks", - "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/tracing:tracing_mocks", @@ -48,6 +47,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/ext_authz:config", "//test/mocks/server:factory_context_mocks", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index 7a3f011032fe..167779f8aeed 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -6,6 +6,7 @@ #include "extensions/filters/http/ext_authz/config.h" #include "test/mocks/server/factory_context.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -36,8 +37,9 @@ void expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion api_version) { fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config); testing::StrictMock context; + EXPECT_CALL(context, singletonManager()).Times(1); + EXPECT_CALL(context, threadLocal()).Times(1); EXPECT_CALL(context, messageValidationVisitor()).Times(1); - EXPECT_CALL(context, localInfo()).Times(1); EXPECT_CALL(context, clusterManager()).Times(1); EXPECT_CALL(context, runtime()).Times(1); EXPECT_CALL(context, scope()).Times(2); @@ -61,6 +63,7 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { std::string yaml = R"EOF( + stat_prefix: "wall" http_service: server_uri: uri: "ext_authz:9000" @@ -97,6 +100,7 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { failure_mode_allow: true with_request_body: max_request_bytes: 100 + pack_as_bytes: true )EOF"; ExtAuthzFilterConfig factory; @@ -104,7 +108,6 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { TestUtility::loadFromYaml(yaml, *proto_config); testing::StrictMock context; EXPECT_CALL(context, messageValidationVisitor()).Times(1); - EXPECT_CALL(context, localInfo()).Times(1); EXPECT_CALL(context, clusterManager()).Times(1); EXPECT_CALL(context, runtime()).Times(1); EXPECT_CALL(context, scope()).Times(1); @@ -114,6 +117,49 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { cb(filter_callback); } +// Test that setting the use_alpha proto field throws. +TEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(UseAlphaFieldIsNoLongerSupported)) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.deprecated_features:envoy.extensions.filters.http.ext_authz.v3.ExtAuthz.hidden_" + "envoy_deprecated_use_alpha", + "true"}}); + + envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config; + proto_config.set_hidden_envoy_deprecated_use_alpha(true); + + // Trigger the throw in the Envoy gRPC branch. 
+ { + testing::StrictMock context; + EXPECT_CALL(context, messageValidationVisitor()); + EXPECT_CALL(context, runtime()); + EXPECT_CALL(context, scope()); + + ExtAuthzFilterConfig factory; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException, + "The use_alpha field is deprecated and is no longer supported.") + } + + // Trigger the throw in the Google gRPC branch. + { + auto google_grpc = new envoy::config::core::v3::GrpcService_GoogleGrpc(); + google_grpc->set_stat_prefix("grpc"); + google_grpc->set_target_uri("http://example.com"); + proto_config.mutable_grpc_service()->set_allocated_google_grpc(google_grpc); + + testing::StrictMock context; + EXPECT_CALL(context, messageValidationVisitor()); + EXPECT_CALL(context, runtime()); + EXPECT_CALL(context, scope()); + + ExtAuthzFilterConfig factory; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException, + "The use_alpha field is deprecated and is no longer supported.") + } +} + // Test that the deprecated extension name still functions. TEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.ext_authz"; diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index 4127deffc973..0d48ac15cfb7 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -23,6 +23,16 @@ namespace Envoy { using Headers = std::vector>; +void setMeasureTimeoutOnCheckCreated(ConfigHelper& config_helper, bool timeout_on_check) { + if (timeout_on_check) { + config_helper.addRuntimeOverride( + "envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "true"); + } else { + config_helper.addRuntimeOverride( + "envoy.reloadable_features.ext_authz_measure_timeout_on_check_created", "false"); + } +} + class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: @@ -31,12 +41,12 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP void createUpstreams() override { HttpIntegrationTest::createUpstreams(); - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } - void initializeConfig() { - config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + void initializeConfig(bool with_timeout = false, bool disable_with_metadata = false) { + config_helper_.addConfigModifier([this, with_timeout, disable_with_metadata]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters(); ext_authz_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); ext_authz_cluster->set_name("ext_authz"); @@ -46,8 +56,20 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP setGrpcService(*proto_config_.mutable_grpc_service(), "ext_authz", fake_upstreams_.back()->localAddress()); + if (with_timeout) { + proto_config_.mutable_grpc_service()->mutable_timeout()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(1)); + } + proto_config_.mutable_filter_enabled()->set_runtime_key("envoy.ext_authz.enable"); 
proto_config_.mutable_filter_enabled()->mutable_default_value()->set_numerator(100); + if (disable_with_metadata) { + // Disable the ext_authz filter with metadata matcher that never matches. + auto* metadata = proto_config_.mutable_filter_enabled_metadata(); + metadata->set_filter("xyz.abc"); + metadata->add_path()->set_key("k1"); + metadata->mutable_value()->mutable_string_match()->set_exact("never_matched"); + } proto_config_.mutable_deny_at_disable()->set_runtime_key("envoy.ext_authz.deny_at_disable"); proto_config_.mutable_deny_at_disable()->mutable_default_value()->set_value(false); proto_config_.set_transport_api_version(apiVersion()); @@ -59,8 +81,10 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP }); } - void setDenyAtDisableRuntimeConfig(bool deny_at_disable) { - config_helper_.addRuntimeOverride("envoy.ext_authz.enable", "numerator: 0"); + void setDenyAtDisableRuntimeConfig(bool deny_at_disable, bool disable_with_metadata) { + if (!disable_with_metadata) { + config_helper_.addRuntimeOverride("envoy.ext_authz.enable", "numerator: 0"); + } if (deny_at_disable) { config_helper_.addRuntimeOverride("envoy.ext_authz.deny_at_disable", "true"); } else { @@ -70,7 +94,8 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP void initiateClientConnection(uint64_t request_body_length, const Headers& headers_to_add = Headers{}, - const Headers& headers_to_append = Headers{}) { + const Headers& headers_to_append = Headers{}, + const Headers& headers_to_remove = Headers{}) { auto conn = makeClientConnection(lookupPort("http")); codec_client_ = makeHttpConnection(std::move(conn)); Http::TestRequestHeaderMapImpl headers{ @@ -89,6 +114,12 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP headers.addCopy(headers_to_append.first, headers_to_append.second); } + // Initialize headers to be removed. If the authorization server returns any of + // these as a header to remove, it will be removed. + for (const auto& header_to_remove : headers_to_remove) { + headers.addCopy(header_to_remove.first, header_to_remove.second); + } + TestUtility::feedBufferWithRandomCharacters(request_body_, request_body_length); response_ = codec_client_->makeRequestWithBody(headers, request_body_.toString()); } @@ -136,7 +167,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP void waitForSuccessfulUpstreamResponse( const std::string& expected_response_code, const Headers& headers_to_add = Headers{}, - const Headers& headers_to_append = Headers{}, + const Headers& headers_to_append = Headers{}, const Headers& headers_to_remove = Headers{}, const Http::TestRequestHeaderMapImpl& new_headers_from_upstream = Http::TestRequestHeaderMapImpl{}, const Http::TestRequestHeaderMapImpl& headers_to_append_multiple = @@ -173,7 +204,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP // string value. Hence for "header2" key, the value is "header2,header2-appended". 
absl::StrCat(header_to_append.first, ",", header_to_append.second))); const auto value = upstream_request_->headers() - .get(Http::LowerCaseString(header_to_append.first)) + .get(Http::LowerCaseString(header_to_append.first))[0] ->value() .getStringView(); EXPECT_TRUE(absl::EndsWith(value, "-appended")); @@ -197,6 +228,12 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP Http::HeaderValueOf("multiple", "multiple-first,multiple-second")); } + for (const auto& header_to_remove : headers_to_remove) { + // The headers that were originally present in the request have now been removed. + EXPECT_TRUE( + upstream_request_->headers().get(Http::LowerCaseString{header_to_remove.first}).empty()); + } + response_->waitForEndStream(); EXPECT_TRUE(upstream_request_->complete()); @@ -208,6 +245,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP } void sendExtAuthzResponse(const Headers& headers_to_add, const Headers& headers_to_append, + const Headers& headers_to_remove, const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) { ext_authz_request_->startGrpcStream(); @@ -228,6 +266,11 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP entry->mutable_header()->set_value(header_to_append.second); } + for (const auto& header_to_remove : headers_to_remove) { + auto* entry = check_response.mutable_ok_response()->mutable_headers_to_remove(); + entry->Add(std::string(header_to_remove.first)); + } + // Entries in this headers are not present in the original request headers. new_headers_from_upstream.iterate( [&check_response](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { @@ -268,11 +311,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP void cleanup() { if (fake_ext_authz_connection_ != nullptr) { - if (clientType() != Grpc::ClientType::GoogleGrpc) { - AssertionResult result = fake_ext_authz_connection_->close(); - RELEASE_ASSERT(result, result.message()); - } - AssertionResult result = fake_ext_authz_connection_->waitForDisconnect(); + AssertionResult result = fake_ext_authz_connection_->close(); RELEASE_ASSERT(result, result.message()); } cleanupUpstreamAndDownstream(); @@ -300,19 +339,20 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP void expectCheckRequestWithBody(Http::CodecClient::Type downstream_protocol, uint64_t request_size) { expectCheckRequestWithBodyWithHeaders(downstream_protocol, request_size, Headers{}, Headers{}, - Http::TestRequestHeaderMapImpl{}, + Headers{}, Http::TestRequestHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); } void expectCheckRequestWithBodyWithHeaders( Http::CodecClient::Type downstream_protocol, uint64_t request_size, const Headers& headers_to_add, const Headers& headers_to_append, + const Headers& headers_to_remove, const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) { initializeConfig(); setDownstreamProtocol(downstream_protocol); HttpIntegrationTest::initialize(); - initiateClientConnection(request_size, headers_to_add, headers_to_append); + initiateClientConnection(request_size, headers_to_add, headers_to_append, headers_to_remove); waitForExtAuthzRequest(expectedCheckRequest(downstream_protocol)); Headers updated_headers_to_add; @@ -325,17 +365,41 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP 
updated_headers_to_append.push_back( std::make_pair(header_to_append.first, header_to_append.second + "-appended")); } - sendExtAuthzResponse(updated_headers_to_add, updated_headers_to_append, + sendExtAuthzResponse(updated_headers_to_add, updated_headers_to_append, headers_to_remove, new_headers_from_upstream, headers_to_append_multiple); waitForSuccessfulUpstreamResponse("200", updated_headers_to_add, updated_headers_to_append, - new_headers_from_upstream, headers_to_append_multiple); + headers_to_remove, new_headers_from_upstream, + headers_to_append_multiple); cleanup(); } - void expectFilterDisableCheck(bool deny_at_disable, const std::string& expected_status) { - initializeConfig(); - setDenyAtDisableRuntimeConfig(deny_at_disable); + void initiateAndWait() { + initiateClientConnection(4); + response_->waitForEndStream(); + } + + void expectCheckRequestTimedout(bool timeout_on_check) { + setMeasureTimeoutOnCheckCreated(this->config_helper_, timeout_on_check); + initializeConfig(true); + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + HttpIntegrationTest::initialize(); + initiateAndWait(); + if (timeout_on_check) { + uint32_t timeouts = test_server_->counter("http.config_test.ext_authz.timeout")->value(); + EXPECT_EQ(1U, timeouts); + } + + EXPECT_TRUE(response_->complete()); + EXPECT_EQ("403", response_->headers().getStatusValue()); + + cleanup(); + } + + void expectFilterDisableCheck(bool deny_at_disable, bool disable_with_metadata, + const std::string& expected_status) { + initializeConfig(false, disable_with_metadata); + setDenyAtDisableRuntimeConfig(deny_at_disable, disable_with_metadata); setDownstreamProtocol(Http::CodecClient::Type::HTTP2); HttpIntegrationTest::initialize(); initiateClientConnection(4); @@ -367,8 +431,7 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, void createUpstreams() override { HttpIntegrationTest::createUpstreams(); - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP1, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP1); } // By default, HTTP Service uses case sensitive string matcher. @@ -389,6 +452,7 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, {"x-case-sensitive-header", case_sensitive_header_value_}, {"baz", "foo"}, {"bat", "foo"}, + {"remove-me", "upstream-should-not-see-me"}, }); } @@ -403,12 +467,14 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, // Send back authorization response with "baz" and "bat" headers. // Also add multiple values "append-foo" and "append-bar" for key "x-append-bat". + // Also tell Envoy to remove "remove-me" header before sending to upstream. 
Http::TestResponseHeaderMapImpl response_headers{ {":status", "200"}, {"baz", "baz"}, {"bat", "bar"}, {"x-append-bat", "append-foo"}, {"x-append-bat", "append-bar"}, + {"x-envoy-auth-headers-to-remove", "remove-me"}, }; ext_authz_request_->encodeHeaders(response_headers, true); } @@ -422,24 +488,29 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, } cleanupUpstreamAndDownstream(); } - - void setupWithDisabledCaseSensitiveStringMatcher(bool disable_case_sensitive_matcher) { - config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + void initializeConfig(bool with_timeout = false) { + config_helper_.addConfigModifier([this, with_timeout]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters(); ext_authz_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); ext_authz_cluster->set_name("ext_authz"); TestUtility::loadFromYaml(default_config_, proto_config_); - + if (with_timeout) { + proto_config_.mutable_http_service()->mutable_server_uri()->mutable_timeout()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(1)); + proto_config_.clear_failure_mode_allow(); + } envoy::config::listener::v3::Filter ext_authz_filter; ext_authz_filter.set_name(Extensions::HttpFilters::HttpFilterNames::get().ExtAuthorization); ext_authz_filter.mutable_typed_config()->PackFrom(proto_config_); + config_helper_.addFilter(MessageUtil::getJsonStringFromMessage(ext_authz_filter)); }); + } - if (disable_case_sensitive_matcher) { - disableCaseSensitiveStringMatcher(); - } + void setup() { + initializeConfig(); HttpIntegrationTest::initialize(); @@ -474,6 +545,17 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, {"x-append-bat", "append-foo"}, {"x-append-bat", "append-bar"}}; EXPECT_THAT(request_nonexisted_headers, Http::IsSubsetOfHeaders(upstream_request_->headers())); + // The "remove-me" header that was present in the downstream request has + // been removed by envoy as a result of being present in + // "x-envoy-auth-headers-to-remove". + EXPECT_TRUE(upstream_request_->headers().get(Http::LowerCaseString{"remove-me"}).empty()); + // "x-envoy-auth-headers-to-remove" itself has also been removed because + // it's only used for communication between the authorization server and + // envoy itself. 
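// For reference, a condensed sketch of the authorization response that drives the removals
// asserted below, mirroring the response_headers encoded earlier in this test (only the
// single-value form exercised here is assumed): a 200 check response whose
// "x-envoy-auth-headers-to-remove" value names the request header Envoy should strip before
// forwarding upstream; the marker header itself is consumed by Envoy and never reaches the
// upstream either.
Http::TestResponseHeaderMapImpl auth_response{
    {":status", "200"},
    {"x-envoy-auth-headers-to-remove", "remove-me"},
};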
+ EXPECT_TRUE(upstream_request_->headers() + .get(Http::LowerCaseString{"x-envoy-auth-headers-to-remove"}) + .empty()); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response_->waitForEndStream(); EXPECT_TRUE(response_->complete()); @@ -482,6 +564,27 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, cleanup(); } + void initiateAndWait() { + initiateClientConnection(); + response_->waitForEndStream(); + } + + void expectCheckRequestTimedout(bool timeout_on_check) { + setMeasureTimeoutOnCheckCreated(this->config_helper_, timeout_on_check); + initializeConfig(true); + HttpIntegrationTest::initialize(); + + initiateAndWait(); + if (timeout_on_check) { + uint32_t timeouts = test_server_->counter("http.config_test.ext_authz.timeout")->value(); + EXPECT_EQ(1U, timeouts); + } + + EXPECT_TRUE(response_->complete()); + EXPECT_EQ("403", response_->headers().getStatusValue()); + cleanup(); + } + envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config_{}; FakeHttpConnectionPtr fake_ext_authz_connection_; FakeStreamPtr ext_authz_request_; @@ -549,15 +652,34 @@ TEST_P(ExtAuthzGrpcIntegrationTest, SendHeadersToAddAndToAppendToUpstream) { Http::CodecClient::Type::HTTP1, 4, /*headers_to_add=*/Headers{{"header1", "header1"}}, /*headers_to_append=*/Headers{{"header2", "header2"}}, + /*headers_to_remove=*/Headers{{"remove-me", "upstream-should-not-see-me"}}, /*new_headers_from_upstream=*/Http::TestRequestHeaderMapImpl{{"new1", "new1"}}, /*headers_to_append_multiple=*/ Http::TestRequestHeaderMapImpl{{"multiple", "multiple-first"}, {"multiple", "multiple-second"}}); } -TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisable) { expectFilterDisableCheck(false, "200"); } +TEST_P(ExtAuthzGrpcIntegrationTest, CheckTimesOutLegacy) { expectCheckRequestTimedout(false); } -TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisable) { expectFilterDisableCheck(true, "403"); } +TEST_P(ExtAuthzGrpcIntegrationTest, CheckTimesOutFromCheckCreation) { + expectCheckRequestTimedout(true); +} + +TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisable) { + expectFilterDisableCheck(/*deny_at_disable=*/false, /*disable_with_metadata=*/false, "200"); +} + +TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisableWithMetadata) { + expectFilterDisableCheck(/*deny_at_disable=*/false, /*disable_with_metadata=*/true, "200"); +} + +TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisable) { + expectFilterDisableCheck(/*deny_at_disable=*/true, /*disable_with_metadata=*/false, "403"); +} + +TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisableWithMetadata) { + expectFilterDisableCheck(/*deny_at_disable=*/true, /*disable_with_metadata=*/true, "403"); +} INSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzHttpIntegrationTest, ValuesIn(TestEnvironment::getIpVersionsForTest()), @@ -565,19 +687,15 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzHttpIntegrationTest, // Verifies that by default HTTP service uses the case-sensitive string matcher. 
TEST_P(ExtAuthzHttpIntegrationTest, DefaultCaseSensitiveStringMatcher) { - setupWithDisabledCaseSensitiveStringMatcher(false); - const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_); - ASSERT_EQ(header_entry, nullptr); + setup(); + const auto header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_); + ASSERT_TRUE(header_entry.empty()); } -// Verifies that by setting "false" to -// envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher, the string -// matcher used by HTTP service will be case-insensitive. -TEST_P(ExtAuthzHttpIntegrationTest, DisableCaseSensitiveStringMatcher) { - setupWithDisabledCaseSensitiveStringMatcher(true); - const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_); - ASSERT_NE(header_entry, nullptr); - EXPECT_EQ(case_sensitive_header_value_, header_entry->value().getStringView()); +TEST_P(ExtAuthzHttpIntegrationTest, CheckTimesOutLegacy) { expectCheckRequestTimedout(false); } + +TEST_P(ExtAuthzHttpIntegrationTest, CheckTimesOutFromCheckCreation) { + expectCheckRequestTimedout(true); } class ExtAuthzLocalReplyIntegrationTest : public HttpIntegrationTest, @@ -588,8 +706,7 @@ class ExtAuthzLocalReplyIntegrationTest : public HttpIntegrationTest, void createUpstreams() override { HttpIntegrationTest::createUpstreams(); - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP1, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP1); } void cleanup() { @@ -692,4 +809,67 @@ TEST_P(ExtAuthzLocalReplyIntegrationTest, DeniedHeaderTest) { cleanup(); } +TEST_P(ExtAuthzGrpcIntegrationTest, GoogleAsyncClientCreation) { + initializeConfig(); + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + HttpIntegrationTest::initialize(); + initiateClientConnection(4, Headers{}, Headers{}); + + waitForExtAuthzRequest(expectedCheckRequest(Http::CodecClient::Type::HTTP2)); + if (clientType() == Grpc::ClientType::GoogleGrpc) { + // Make sure one Google grpc client is created. + EXPECT_EQ(1, test_server_->counter("grpc.ext_authz.google_grpc_client_creation")->value()); + } + sendExtAuthzResponse(Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}); + + waitForSuccessfulUpstreamResponse("200"); + + Http::TestRequestHeaderMapImpl headers{ + {":method", "POST"}, {":path", "/test"}, {":scheme", "http"}, {":authority", "host"}}; + TestUtility::feedBufferWithRandomCharacters(request_body_, 4); + response_ = codec_client_->makeRequestWithBody(headers, request_body_.toString()); + + auto result = fake_ext_authz_connection_->waitForNewStream(*dispatcher_, ext_authz_request_); + RELEASE_ASSERT(result, result.message()); + + envoy::service::auth::v3::CheckRequest check_request; + result = ext_authz_request_->waitForGrpcMessage(*dispatcher_, check_request); + RELEASE_ASSERT(result, result.message()); + + EXPECT_EQ("POST", ext_authz_request_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.auth.{}.Authorization", "Check", + apiVersion()), + ext_authz_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", ext_authz_request_->headers().getContentTypeValue()); + result = ext_authz_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + if (clientType() == Grpc::ClientType::GoogleGrpc) { + // Make sure one Google grpc client is created. 
+ EXPECT_EQ(1, test_server_->counter("grpc.ext_authz.google_grpc_client_creation")->value()); + } + sendExtAuthzResponse(Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}); + + result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(response_size_, true); + + response_->waitForEndStream(); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(request_body_.length(), upstream_request_->bodyLength()); + + EXPECT_TRUE(response_->complete()); + EXPECT_EQ("200", response_->headers().getStatusValue()); + EXPECT_EQ(response_size_, response_->body().size()); + + cleanup(); +} + } // namespace Envoy diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 38939b19a714..3456b0e57259 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -22,7 +22,6 @@ #include "test/extensions/filters/common/ext_authz/mocks.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" @@ -59,8 +58,8 @@ template class HttpFilterTestBase : public T { if (!yaml.empty()) { TestUtility::loadFromYaml(yaml, proto_config); } - config_.reset(new FilterConfig(proto_config, local_info_, stats_store_, runtime_, http_context_, - "ext_authz_prefix")); + config_.reset( + new FilterConfig(proto_config, stats_store_, runtime_, http_context_, "ext_authz_prefix")); client_ = new Filters::Common::ExtAuthz::MockClient(); filter_ = std::make_unique(config_, Filters::Common::ExtAuthz::ClientPtr{client_}); filter_->setDecoderFilterCallbacks(filter_callbacks_); @@ -84,7 +83,6 @@ template class HttpFilterTestBase : public T { Buffer::OwnedImpl data_; NiceMock runtime_; NiceMock cm_; - NiceMock local_info_; Network::Address::InstanceConstSharedPtr addr_; NiceMock connection_; Http::ContextImpl http_context_; @@ -157,13 +155,63 @@ TEST_F(HttpFilterTest, MergeConfig) { base_config.merge(disabled_config); // Make sure all values were merged: - EXPECT_TRUE(base_config.disabled()); auto&& merged_extensions = base_config.contextExtensions(); EXPECT_EQ("base_value", merged_extensions.at("base_key")); EXPECT_EQ("value", merged_extensions.at("merged_key")); EXPECT_EQ("value", merged_extensions.at("key")); } +// Test that defining stat_prefix appends an additional prefix to the emitted statistics names. 
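// Concretely, with the configuration fragment used in the test below the failure counter is
// emitted as "ext_authz.with_stat_prefix.error" rather than the unprefixed "ext_authz.error".
// A minimal sketch of how the prefixed name is formed, using the same absl::StrCat call as the
// test:
const std::string prefixed_error_counter =
    absl::StrCat("ext_authz.", "with_stat_prefix", ".error"); // == "ext_authz.with_stat_prefix.error"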
+TEST_F(HttpFilterTest, StatsWithPrefix) { + const std::string stat_prefix = "with_stat_prefix"; + const std::string error_counter_name_with_prefix = + absl::StrCat("ext_authz.", stat_prefix, ".error"); + const std::string error_counter_name_without_prefix = "ext_authz.error"; + + InSequence s; + + initialize(fmt::format(R"EOF( + stat_prefix: "{}" + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + )EOF", + stat_prefix)); + + EXPECT_EQ(0U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromString(error_counter_name_with_prefix) + .value()); + EXPECT_EQ(0U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromString(error_counter_name_without_prefix) + .value()); + + prepareCheck(); + EXPECT_CALL(*client_, check(_, _, _, _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, _)).Times(1); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::Error; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromString(error_counter_name_with_prefix) + .value()); + // The one without an additional prefix is not incremented, since it is not "defined". + EXPECT_EQ(0U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromString(error_counter_name_without_prefix) + .value()); +} + // Test when failure_mode_allow is NOT set and the response from the authorization service is Error // that the request is not allowed to continue. TEST_F(HttpFilterTest, ErrorFailClose) { @@ -179,7 +227,46 @@ TEST_F(HttpFilterTest, ErrorFailClose) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true)) + .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ(headers.getStatusValue(), std::to_string(enumToInt(Http::Code::Forbidden))); + })); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::Error; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.error").value()); + EXPECT_EQ( + 0U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.timeout").value()); + EXPECT_EQ(1U, config_->stats().error_.value()); + EXPECT_EQ(0U, config_->stats().timeout_.value()); +} + +// Test when when a timeout error occurs, the correct stat is incremented. 
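// For illustration, the part of the client response that distinguishes a timeout from any other
// error in the test below, using only types that appear in this patch: CheckStatus::Error marks
// the check as failed, and ErrorKind::Timedout additionally marks it as a timeout, so both the
// "ext_authz.error" and "ext_authz.timeout" counters are expected to increment.
Filters::Common::ExtAuthz::Response timed_out_response{};
timed_out_response.status = Filters::Common::ExtAuthz::CheckStatus::Error;
timed_out_response.error_kind = Filters::Common::ExtAuthz::ErrorKind::Timedout;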
+TEST_F(HttpFilterTest, TimeoutError) { + InSequence s; + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + )EOF"); + + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -194,11 +281,16 @@ TEST_F(HttpFilterTest, ErrorFailClose) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Error; + response.error_kind = Filters::Common::ExtAuthz::ErrorKind::Timedout; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.timeout").value()); EXPECT_EQ(1U, config_->stats().error_.value()); + EXPECT_EQ(1U, config_->stats().timeout_.value()); } // Verifies that the filter responds with a configurable HTTP status when an network error occurs. @@ -217,7 +309,7 @@ TEST_F(HttpFilterTest, ErrorCustomStatusCode) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -238,7 +330,7 @@ TEST_F(HttpFilterTest, ErrorCustomStatusCode) { 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.error").value()); EXPECT_EQ(1U, config_->stats().error_.value()); - EXPECT_EQ("ext_authz_error", filter_callbacks_.details_); + EXPECT_EQ("ext_authz_error", filter_callbacks_.details()); } // Test when failure_mode_allow is set and the response from the authorization service is Error that @@ -256,7 +348,7 @@ TEST_F(HttpFilterTest, ErrorOpen) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -292,7 +384,7 @@ TEST_F(HttpFilterTest, ImmediateErrorOpen) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Error; - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::make_unique(response)); @@ -345,7 +437,7 @@ TEST_F(HttpFilterTest, RequestDataIsTooLarge) { EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1); EXPECT_CALL(connection_, remoteAddress()).Times(0); EXPECT_CALL(connection_, localAddress()).Times(0); - EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); 
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); @@ -377,7 +469,7 @@ TEST_F(HttpFilterTest, RequestDataWithPartialMessage) { EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)).Times(1); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); @@ -397,6 +489,62 @@ TEST_F(HttpFilterTest, RequestDataWithPartialMessage) { EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_)); } +// Checks that the filter initiates an authorization request when the buffer reaches maximum +// request bytes and allow_partial_message is set to true. In addition to that, after the filter +// sends the check request, data decoding continues. +TEST_F(HttpFilterTest, RequestDataWithPartialMessageThenContinueDecoding) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + with_request_body: + max_request_bytes: 10 + allow_partial_message: true + )EOF"); + + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + + // The check call should only be called once. + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_CALL(filter_callbacks_, continueDecoding()); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + data_.add("foo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + data_.add("bar"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + data_.add("barfoo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + + data_.add("more data after watermark is set is possible"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + request_callbacks_->onComplete(std::make_unique(response)); + + data_.add("more data after calling check request"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, true)); + + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); +} + // Checks that the filter initiates the authorization process only when the filter decode trailers // is called. 
TEST_F(HttpFilterTest, RequestDataWithSmallBuffer) { @@ -417,7 +565,7 @@ TEST_F(HttpFilterTest, RequestDataWithSmallBuffer) { EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)).Times(1); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); @@ -439,13 +587,18 @@ TEST_F(HttpFilterTest, AuthWithRequestData) { max_request_bytes: 10 )EOF"); + ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) - .WillOnce( - WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + envoy::service::auth::v3::CheckRequest check_request; + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) + .WillOnce(WithArgs<0, 2>( + Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, + const envoy::service::auth::v3::CheckRequest& check_param) -> void { request_callbacks_ = &callbacks; + check_request = check_param; }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); data_.add("foo"); @@ -453,6 +606,50 @@ TEST_F(HttpFilterTest, AuthWithRequestData) { data_.add("bar"); EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true)); EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_)); + + EXPECT_EQ(data_.length(), check_request.attributes().request().http().body().size()); + EXPECT_EQ(0, check_request.attributes().request().http().raw_body().size()); +} + +// Checks that the filter buffers the data and initiates the authorization request. +TEST_F(HttpFilterTest, AuthWithNonUtf8RequestData) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + with_request_body: + max_request_bytes: 10 + pack_as_bytes: true + )EOF"); + + ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); + prepareCheck(); + + envoy::service::auth::v3::CheckRequest check_request; + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) + .WillOnce(WithArgs<0, 2>( + Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, + const envoy::service::auth::v3::CheckRequest& check_param) -> void { + request_callbacks_ = &callbacks; + check_request = check_param; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + // Use non UTF-8 data to fill up the decoding buffer. + uint8_t raw[1] = {0xc0}; + Buffer::OwnedImpl raw_buffer(raw, 1); + + data_.add(raw_buffer); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + data_.add(raw_buffer); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_)); + + EXPECT_EQ(0, check_request.attributes().request().http().body().size()); + EXPECT_EQ(data_.length(), check_request.attributes().request().http().raw_body().size()); } // Checks that filter does not buffer data on header-only request. 
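// Taken together, the two body-forwarding tests above show which CheckRequest field receives the
// buffered request body: by default it is copied into attributes.request.http.body (a UTF-8
// string field), while with_request_body.pack_as_bytes copies it into
// attributes.request.http.raw_body instead, so non-UTF-8 payloads survive intact. A condensed
// sketch keyed on an assumed bool `packed_as_bytes` (the accessors are the ones the tests use):
const auto& http_attributes = check_request.attributes().request().http();
if (packed_as_bytes) {
  EXPECT_EQ(data_.length(), http_attributes.raw_body().size());
  EXPECT_EQ(0, http_attributes.body().size());
} else {
  EXPECT_EQ(data_.length(), http_attributes.body().size());
  EXPECT_EQ(0, http_attributes.raw_body().size());
}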
@@ -469,7 +666,7 @@ TEST_F(HttpFilterTest, HeaderOnlyRequest) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -498,7 +695,7 @@ TEST_F(HttpFilterTest, UpgradeWebsocketRequest) { request_headers_.addCopy(Http::Headers::get().Upgrade, Http::Headers::get().UpgradeValues.WebSocket); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -526,7 +723,7 @@ TEST_F(HttpFilterTest, H2UpgradeRequest) { request_headers_.addCopy(Http::Headers::get().Protocol, Http::Headers::get().ProtocolStrings.Http2String); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -551,7 +748,7 @@ TEST_F(HttpFilterTest, HeaderOnlyRequestWithStream) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -563,10 +760,69 @@ TEST_F(HttpFilterTest, HeaderOnlyRequestWithStream) { EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_)); } +// Checks that the filter removes the specified headers from the request, but +// that pseudo headers and Host are not removed. +TEST_F(HttpFilterTest, HeadersToRemoveRemovesHeadersExceptSpecialHeaders) { + InSequence s; + + // Set up all the typical headers plus an additional user defined header. + request_headers_.addCopy(Http::Headers::get().Host, "example.com"); + request_headers_.addCopy(Http::Headers::get().Method, "GET"); + request_headers_.addCopy(Http::Headers::get().Path, "/users"); + request_headers_.addCopy(Http::Headers::get().Protocol, "websocket"); + request_headers_.addCopy(Http::Headers::get().Scheme, "https"); + request_headers_.addCopy("remove-me", "upstream-should-not-see-me"); + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + clear_route_cache: true + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + // Let's try to remove all the headers in the request. 
+ response.headers_to_remove = std::vector{ + Http::Headers::get().Host, Http::Headers::get().HostLegacy, + Http::Headers::get().Method, Http::Headers::get().Path, + Http::Headers::get().Protocol, Http::Headers::get().Scheme, + Http::LowerCaseString{"remove-me"}, + }; + request_callbacks_->onComplete(std::make_unique(response)); + + // All :-prefixed headers (and Host) should still be there - only the user + // defined header should have been removed. + EXPECT_EQ("example.com", request_headers_.get_(Http::Headers::get().Host)); + EXPECT_EQ("example.com", request_headers_.get_(Http::Headers::get().HostLegacy)); + EXPECT_EQ("GET", request_headers_.get_(Http::Headers::get().Method)); + EXPECT_EQ("/users", request_headers_.get_(Http::Headers::get().Path)); + EXPECT_EQ("websocket", request_headers_.get_(Http::Headers::get().Protocol)); + EXPECT_EQ("https", request_headers_.get_(Http::Headers::get().Scheme)); + EXPECT_TRUE(request_headers_.get(Http::LowerCaseString{"remove-me"}).empty()); +} + // Verifies that the filter clears the route cache when an authorization response: // 1. is an OK response. // 2. has headers to append. // 3. has headers to add. +// 4. has headers to remove. TEST_F(HttpFilterTest, ClearCache) { InSequence s; @@ -579,7 +835,7 @@ TEST_F(HttpFilterTest, ClearCache) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -598,6 +854,8 @@ TEST_F(HttpFilterTest, ClearCache) { response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; + response.headers_to_remove = + std::vector{Http::LowerCaseString{"remove-me"}}; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -608,6 +866,7 @@ TEST_F(HttpFilterTest, ClearCache) { // 1. is an OK response. // 2. has headers to append. // 3. has NO headers to add. +// 4. has NO headers to remove. TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAppendOnly) { InSequence s; @@ -620,7 +879,7 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAppendOnly) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -646,8 +905,9 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAppendOnly) { // Verifies that the filter clears the route cache when an authorization response: // 1. is an OK response. -// 2. has headers to add. -// 3. has NO headers to append. +// 2. has NO headers to append. +// 3. has headers to add. +// 4. has NO headers to remove. 
TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { InSequence s; @@ -660,7 +920,7 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -684,9 +944,53 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { EXPECT_EQ(1U, config_->stats().ok_.value()); } +// Verifies that the filter clears the route cache when an authorization response: +// 1. is an OK response. +// 2. has NO headers to append. +// 3. has NO headers to add. +// 4. has headers to remove. +TEST_F(HttpFilterTest, ClearCacheRouteHeadersToRemoveOnly) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + clear_route_cache: true + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_remove = + std::vector{Http::LowerCaseString{"remove-me"}}; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); + EXPECT_EQ(1U, config_->stats().ok_.value()); +} + // Verifies that the filter DOES NOT clear the route cache when an authorization response: // 1. is an OK response. -// 2. has NO headers to add or to append. +// 2. has NO headers to append. +// 3. has NO headers to add. +// 4. has NO headers to remove. 
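// Summarizing the route-cache behavior these tests pin down: with clear_route_cache enabled, the
// filter calls clearRouteCache() after an OK check response only when that response actually
// mutates the request headers, i.e. when any of the three mutation fields below is non-empty; an
// OK response with none of them set, or a denied response, leaves the cached route untouched.
// A minimal sketch using only fields that appear in this patch:
Filters::Common::ExtAuthz::Response ok_response{};
ok_response.status = Filters::Common::ExtAuthz::CheckStatus::OK;
ok_response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}};
ok_response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}};
ok_response.headers_to_remove =
    std::vector<Http::LowerCaseString>{Http::LowerCaseString{"remove-me"}};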
TEST_F(HttpFilterTest, NoClearCacheRoute) { InSequence s; @@ -699,7 +1003,7 @@ TEST_F(HttpFilterTest, NoClearCacheRoute) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -734,7 +1038,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteConfig) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -778,7 +1082,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) { response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; auto response_ptr = std::make_unique(response); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::move(response_ptr)); @@ -793,7 +1097,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) { EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); - EXPECT_EQ("ext_authz_denied", filter_callbacks_.details_); + EXPECT_EQ("ext_authz_denied", filter_callbacks_.details()); } // Verifies that specified metadata is passed along in the check request @@ -828,8 +1132,8 @@ TEST_F(HttpFilterTest, MetadataContext) { prepareCheck(); envoy::service::auth::v3::CheckRequest check_request; - EXPECT_CALL(*client_, check(_, _, _, _)) - .WillOnce(WithArgs<1>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param) + EXPECT_CALL(*client_, check(_, _, _, _, _)) + .WillOnce(WithArgs<2>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param) -> void { check_request = check_param; }))); filter_->decodeHeaders(request_headers_, false); @@ -877,7 +1181,7 @@ TEST_F(HttpFilterTest, FilterDisabled) { .WillByDefault(Return(false)); // Make sure check is not called. - EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); // Engage the filter. EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); } @@ -903,7 +1207,204 @@ TEST_F(HttpFilterTest, FilterEnabled) { .WillByDefault(Return(true)); // Make sure check is called once. - EXPECT_CALL(*client_, check(_, _, _, _)).Times(1); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1); + // Engage the filter. + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); +} + +// Test that filter can be disabled via the filter_enabled_metadata field. +TEST_F(HttpFilterTest, MetadataDisabled) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled_metadata: + filter: "abc.xyz" + path: + - key: "k1" + value: + string_match: + exact: "check" + )EOF"); + + // Disable in filter_enabled. + const std::string yaml = R"EOF( + filter_metadata: + abc.xyz: + k1: skip + )EOF"; + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(yaml, metadata); + ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata)); + + // Make sure check is not called. 
+ EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); + // Engage the filter. + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + +// Test that filter can be enabled via the filter_enabled_metadata field. +TEST_F(HttpFilterTest, MetadataEnabled) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled_metadata: + filter: "abc.xyz" + path: + - key: "k1" + value: + string_match: + exact: "check" + )EOF"); + + // Enable in filter_enabled. + const std::string yaml = R"EOF( + filter_metadata: + abc.xyz: + k1: check + )EOF"; + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(yaml, metadata); + ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata)); + + prepareCheck(); + + // Make sure check is called once. + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1); + // Engage the filter. + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); +} + +// Test that the filter is disabled if one of the filter_enabled and filter_enabled_metadata field +// is disabled. +TEST_F(HttpFilterTest, FilterEnabledButMetadataDisabled) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled: + runtime_key: "http.ext_authz.enabled" + default_value: + numerator: 100 + denominator: HUNDRED + filter_enabled_metadata: + filter: "abc.xyz" + path: + - key: "k1" + value: + string_match: + exact: "check" + )EOF"); + + // Enable in filter_enabled. + ON_CALL(runtime_.snapshot_, + featureEnabled("http.ext_authz.enabled", + testing::Matcher(Percent(100)))) + .WillByDefault(Return(true)); + + // Disable in filter_enabled_metadata. + const std::string yaml = R"EOF( + filter_metadata: + abc.xyz: + k1: skip + )EOF"; + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(yaml, metadata); + ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata)); + + // Make sure check is not called. + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); + // Engage the filter. + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + +// Test that the filter is disabled if one of the filter_enabled and filter_enabled_metadata field +// is disabled. +TEST_F(HttpFilterTest, FilterDisabledButMetadataEnabled) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled: + runtime_key: "http.ext_authz.enabled" + default_value: + numerator: 0 + denominator: HUNDRED + filter_enabled_metadata: + filter: "abc.xyz" + path: + - key: "k1" + value: + string_match: + exact: "check" + )EOF"); + + // Disable in filter_enabled. + ON_CALL(runtime_.snapshot_, + featureEnabled("http.ext_authz.enabled", + testing::Matcher(Percent(0)))) + .WillByDefault(Return(false)); + + // Enable in filter_enabled_metadata. + const std::string yaml = R"EOF( + filter_metadata: + abc.xyz: + k1: check + )EOF"; + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(yaml, metadata); + ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata)); + + // Make sure check is not called. + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); + // Engage the filter. 
+  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));
+}
+
+// Test that the filter is enabled if both the filter_enabled and filter_enabled_metadata fields
+// are enabled.
+TEST_F(HttpFilterTest, FilterEnabledAndMetadataEnabled) {
+  initialize(R"EOF(
+  grpc_service:
+    envoy_grpc:
+      cluster_name: "ext_authz_server"
+  filter_enabled:
+    runtime_key: "http.ext_authz.enabled"
+    default_value:
+      numerator: 100
+      denominator: HUNDRED
+  filter_enabled_metadata:
+    filter: "abc.xyz"
+    path:
+    - key: "k1"
+    value:
+      string_match:
+        exact: "check"
+  )EOF");
+
+  // Enable in filter_enabled.
+  ON_CALL(runtime_.snapshot_,
+          featureEnabled("http.ext_authz.enabled",
+                         testing::Matcher<const envoy::type::v3::FractionalPercent&>(Percent(100))))
+      .WillByDefault(Return(true));
+
+  // Enable in filter_enabled_metadata.
+  const std::string yaml = R"EOF(
+  filter_metadata:
+    abc.xyz:
+      k1: check
+  )EOF";
+  envoy::config::core::v3::Metadata metadata;
+  TestUtility::loadFromYaml(yaml, metadata);
+  ON_CALL(filter_callbacks_.stream_info_, dynamicMetadata()).WillByDefault(ReturnRef(metadata));
+
+  prepareCheck();
+
+  // Make sure check is called once.
+  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1);
   // Engage the filter.
   EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,
             filter_->decodeHeaders(request_headers_, false));
@@ -935,7 +1436,7 @@ TEST_F(HttpFilterTest, FilterDenyAtDisable) {
       .WillByDefault(Return(true));
 
   // Make sure check is not called.
-  EXPECT_CALL(*client_, check(_, _, _, _)).Times(0);
+  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);
   // Engage the filter.
   EXPECT_EQ(Http::FilterHeadersStatus::StopIteration,
             filter_->decodeHeaders(request_headers_, false));
@@ -967,7 +1468,7 @@ TEST_F(HttpFilterTest, FilterAllowAtDisable) {
       .WillByDefault(Return(false));
 
   // Make sure check is not called.
-  EXPECT_CALL(*client_, check(_, _, _, _)).Times(0);
+  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);
   // Engage the filter.
   EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false));
 }
@@ -1004,8 +1505,8 @@ TEST_P(HttpFilterTestParam, ContextExtensions) {
 
   // Save the check request from the check call.
   envoy::service::auth::v3::CheckRequest check_request;
-  EXPECT_CALL(*client_, check(_, _, _, _))
-      .WillOnce(WithArgs<1>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param)
+  EXPECT_CALL(*client_, check(_, _, _, _, _))
+      .WillOnce(WithArgs<2>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param)
                                         -> void { check_request = check_param; })));
 
   // Engage the filter so that check is called.
@@ -1038,7 +1539,7 @@ TEST_P(HttpFilterTestParam, DisabledOnRoute) {
 
   // baseline: make sure that when not disabled, check is called
   test_disable(false);
-  EXPECT_CALL(*client_, check(_, _, testing::A<Tracing::Span&>(), _)).Times(1);
+  EXPECT_CALL(*client_, check(_, _, _, testing::A<Tracing::Span&>(), _)).Times(1);
   // Engage the filter.
   EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark,
             filter_->decodeHeaders(request_headers_, false));
@@ -1046,7 +1547,7 @@ TEST_P(HttpFilterTestParam, DisabledOnRoute) {
   // test that disabling works
   test_disable(true);
   // Make sure check is not called.
-  EXPECT_CALL(*client_, check(_, _, _, _)).Times(0);
+  EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0);
   // Engage the filter.
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); } @@ -1082,14 +1583,14 @@ TEST_P(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1); EXPECT_CALL(connection_, remoteAddress()).Times(0); EXPECT_CALL(connection_, localAddress()).Times(0); - EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); // To test that disabling the filter works. test_disable(true); - EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); // Make sure that setDecoderBufferLimit is skipped. EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); @@ -1111,7 +1612,7 @@ TEST_P(HttpFilterTestParam, OkResponse) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1147,7 +1648,7 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::make_unique(response)); @@ -1176,7 +1677,7 @@ TEST_P(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { auto response_ptr = std::make_unique(response); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::move(response_ptr)); @@ -1208,16 +1709,21 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { const Http::LowerCaseString key_to_override{"foobar"}; request_headers_.addCopy("foobar", "foo"); + // `remove-me` will be removed + const Http::LowerCaseString key_to_remove("remove-me"); + request_headers_.addCopy(key_to_remove, "upstream-should-not-see-me"); + prepareCheck(); Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{request_header_key, "bar"}}; response.headers_to_set = Http::HeaderVector{{key_to_add, "foo"}, {key_to_override, "bar"}}; + response.headers_to_remove = std::vector{key_to_remove}; auto response_ptr = std::make_unique(response); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::move(response_ptr)); @@ -1230,6 +1736,7 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { EXPECT_EQ(request_headers_.get_(request_header_key), "foo,bar"); EXPECT_EQ(request_headers_.get_(key_to_add), "foo"); EXPECT_EQ(request_headers_.get_(key_to_override), "bar"); + EXPECT_EQ(request_headers_.has(key_to_remove), false); } // Test that an synchronous denied response from the 
authorization service, on the call stack, @@ -1241,7 +1748,7 @@ TEST_P(HttpFilterTestParam, ImmediateDeniedResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::make_unique(response)); @@ -1262,7 +1769,7 @@ TEST_P(HttpFilterTestParam, DeniedResponseWith401) { InSequence s; prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1294,7 +1801,7 @@ TEST_P(HttpFilterTestParam, DeniedResponseWith403) { InSequence s; prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1338,7 +1845,7 @@ TEST_P(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { std::make_unique(response); prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1396,7 +1903,7 @@ TEST_P(HttpFilterTestParam, OverrideEncodingHeaders) { std::make_unique(response); prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1428,10 +1935,11 @@ TEST_P(HttpFilterTestParam, OverrideEncodingHeaders) { EXPECT_EQ(test_headers.get_("foobar"), "DO_NOT_OVERRIDE"); EXPECT_EQ(test_headers.get_("accept-encoding"), "gzip,deflate"); EXPECT_EQ(data.toString(), "foo"); - - std::vector setCookieHeaderValues; - Http::HeaderUtility::getAllOfHeader(test_headers, "set-cookie", setCookieHeaderValues); - EXPECT_THAT(setCookieHeaderValues, UnorderedElementsAre("cookie1=value", "cookie2=value")); + EXPECT_EQ(Http::HeaderUtility::getAllOfHeaderAsString(test_headers, + Http::LowerCaseString("set-cookie")) + .result() + .value(), + "cookie1=value,cookie2=value"); })); request_callbacks_->onComplete(std::move(response_ptr)); @@ -1460,7 +1968,7 @@ TEST_F(HttpFilterTest, EmitDynamicMetadata) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1502,7 +2010,7 @@ TEST_P(HttpFilterTestParam, ResetDuringCall) { InSequence s; prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1534,8 +2042,8 @@ TEST_P(HttpFilterTestParam, NoCluster) { // Save the check request from the check call. 
envoy::service::auth::v3::CheckRequest check_request; - EXPECT_CALL(*client_, check(_, _, _, _)) - .WillOnce(WithArgs<1>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param) + EXPECT_CALL(*client_, check(_, _, _, _, _)) + .WillOnce(WithArgs<2>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param) -> void { check_request = check_param; }))); // Make sure that filter chain is not continued and the call has been invoked. EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, @@ -1545,6 +2053,53 @@ TEST_P(HttpFilterTestParam, NoCluster) { filter_->decodeHeaders(request_headers_, false); } +// Verify that request body buffering can be skipped per route. +TEST_P(HttpFilterTestParam, DisableRequestBodyBufferingOnRoute) { + envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; + FilterConfigPerRoute auth_per_route(settings); + + ON_CALL(*filter_callbacks_.route_, perFilterConfig(HttpFilterNames::get().ExtAuthorization)) + .WillByDefault(Return(&auth_per_route)); + + auto test_disable_request_body_buffering = [&](bool bypass) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + with_request_body: + max_request_bytes: 1 + allow_partial_message: false + )EOF"); + + // Set bypass request body buffering for this route. + settings.mutable_check_settings()->set_disable_request_body_buffering(bypass); + // Initialize the route's per filter config. + auth_per_route = FilterConfigPerRoute(settings); + }; + + test_disable_request_body_buffering(false); + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + // When request body buffering is not skipped, setDecoderBufferLimit is called. + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1); + EXPECT_CALL(connection_, remoteAddress()).Times(0); + EXPECT_CALL(connection_, localAddress()).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + test_disable_request_body_buffering(true); + // When request body buffering is skipped, setDecoderBufferLimit is not called. 
+ EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(1); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); +} + } // namespace } // namespace ExtAuthz } // namespace HttpFilters diff --git a/test/extensions/filters/http/fault/fault_filter_test.cc b/test/extensions/filters/http/fault/fault_filter_test.cc index 0e25b9acb7de..e5ca23e2094d 100644 --- a/test/extensions/filters/http/fault/fault_filter_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_test.cc @@ -277,7 +277,7 @@ TEST_F(FaultFilterTest, AbortWithHttpStatus) { EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); EXPECT_EQ(0UL, config_->stats().active_faults_.value()); - EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details()); } TEST_F(FaultFilterTest, HeaderAbortWithHttpStatus) { @@ -324,7 +324,7 @@ TEST_F(FaultFilterTest, HeaderAbortWithHttpStatus) { EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); EXPECT_EQ(0UL, config_->stats().active_faults_.value()); - EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details()); } TEST_F(FaultFilterTest, AbortWithGrpcStatus) { @@ -377,7 +377,7 @@ TEST_F(FaultFilterTest, AbortWithGrpcStatus) { EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); EXPECT_EQ(0UL, config_->stats().active_faults_.value()); - EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details()); } TEST_F(FaultFilterTest, HeaderAbortWithGrpcStatus) { @@ -427,7 +427,7 @@ TEST_F(FaultFilterTest, HeaderAbortWithGrpcStatus) { EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); EXPECT_EQ(0UL, config_->stats().active_faults_.value()); - EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details()); } TEST_F(FaultFilterTest, HeaderAbortWithHttpAndGrpcStatus) { @@ -477,7 +477,7 @@ TEST_F(FaultFilterTest, HeaderAbortWithHttpAndGrpcStatus) { EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); EXPECT_EQ(0UL, config_->stats().active_faults_.value()); - EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details()); } TEST_F(FaultFilterTest, FixedDelayZeroDuration) { diff --git a/test/extensions/filters/http/grpc_http1_bridge/BUILD b/test/extensions/filters/http/grpc_http1_bridge/BUILD index fbf5798cf07c..a68ebdec1e39 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_bridge/BUILD @@ -18,7 +18,6 @@ envoy_extension_cc_test( deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", - "//source/common/stats:fake_symbol_table_lib", 
"//source/extensions/filters/http/grpc_http1_bridge:http1_bridge_filter_lib", "//test/mocks/http:http_mocks", "//test/test_common:global_lib", diff --git a/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc b/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc index b3d0e7898bc2..d37bd313b090 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc +++ b/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc @@ -1,7 +1,7 @@ #include "common/buffer/buffer_impl.h" #include "common/grpc/common.h" #include "common/http/header_map_impl.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h" diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc index febd3d40a3ed..c6c2b4d43e7e 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc @@ -173,5 +173,34 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } +TEST_P(ReverseBridgeIntegrationTest, EnabledRouteBadContentType) { + upstream_protocol_ = FakeHttpConnection::Type::HTTP1; + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestRequestHeaderMapImpl request_headers({{":scheme", "http"}, + {":method", "POST"}, + {":authority", "foo"}, + {":path", "/testing.ExampleService/Print"}, + {"content-type", "application/grpc"}}); + + Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/x-not-protobuf"); + + auto response = sendRequestAndWaitForResponse(request_headers, 5, response_headers, 5); + + EXPECT_TRUE(response->complete()); + + // The response should indicate an error. 
+ EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "2")); + + codec_client_->close(); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); +} } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc index 54ee07792c48..599e16a845f3 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc @@ -77,7 +77,7 @@ TEST_F(ReverseBridgeTest, InvalidGrpcRequest) { Http::Utility::PercentEncoding::encode("invalid request body"))); })); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(buffer, false)); - EXPECT_EQ(decoder_callbacks_.details_, "grpc_bridge_data_too_small"); + EXPECT_EQ(decoder_callbacks_.details(), "grpc_bridge_data_too_small"); } } @@ -532,9 +532,6 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponseNoContentType) { buffer.add("abcdefgh", 8); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); EXPECT_EQ("fgh", buffer.toString()); - EXPECT_CALL(decoder_callbacks_, streamInfo()); - EXPECT_CALL(decoder_callbacks_.stream_info_, - setResponseCodeDetails(absl::string_view("grpc_bridge_content_type_wrong"))); } { @@ -549,15 +546,14 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponseNoContentType) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); Http::TestResponseHeaderMapImpl headers({{":status", "400"}}); - EXPECT_EQ(Http::FilterHeadersStatus::ContinueAndEndStream, - filter_->encodeHeaders(headers, false)); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Status, "200")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().GrpcStatus, "2")); - EXPECT_THAT( - headers, - HeaderValueOf( - Http::Headers::get().GrpcMessage, - "envoy reverse bridge: upstream responded with no content-type header, status code 400")); + EXPECT_CALL( + decoder_callbacks_, + sendLocalReply( + Http::Code::OK, + "envoy reverse bridge: upstream responded with no content-type header, status code 400", + _, absl::make_optional(static_cast(Grpc::Status::Unknown)), _)); + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, _)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->encodeHeaders(headers, false)); } // Tests that a gRPC is downgraded to application/x-protobuf and that if the response @@ -583,9 +579,6 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponse) { buffer.add("abcdefgh", 8); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); EXPECT_EQ("fgh", buffer.toString()); - EXPECT_CALL(decoder_callbacks_, streamInfo()); - EXPECT_CALL(decoder_callbacks_.stream_info_, - setResponseCodeDetails(absl::string_view("grpc_bridge_content_type_wrong"))); } { @@ -601,13 +594,15 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponse) { Http::TestResponseHeaderMapImpl headers( {{":status", "400"}, {"content-type", "application/json"}}); - EXPECT_EQ(Http::FilterHeadersStatus::ContinueAndEndStream, - filter_->encodeHeaders(headers, false)); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Status, "200")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().GrpcStatus, "2")); - EXPECT_THAT(headers, 
HeaderValueOf(Http::Headers::get().GrpcMessage, - "envoy reverse bridge: upstream responded with unsupported " - "content-type application/json, status code 400")); + EXPECT_CALL( + decoder_callbacks_, + sendLocalReply( + Http::Code::OK, + "envoy reverse bridge: upstream responded with unsupported " + "content-type application/json, status code 400", + _, absl::make_optional(static_cast(Grpc::Status::Unknown)), _)); + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, _)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->encodeHeaders(headers, false)); } // Tests that the filter passes a GRPC request through without modification because it is disabled diff --git a/test/extensions/filters/http/grpc_json_transcoder/BUILD b/test/extensions/filters/http/grpc_json_transcoder/BUILD index 0798152a20f7..7ff14b7ed650 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/test/extensions/filters/http/grpc_json_transcoder/BUILD @@ -60,6 +60,8 @@ envoy_extension_cc_test( "//test/proto:bookstore_proto_descriptor", ], extension_name = "envoy.filters.http.grpc_json_transcoder", + # TODO(envoyproxy/windows-dev): diagnose clang-cl build test failure + tags = ["fails_on_windows"], deps = [ "//source/common/grpc:codec_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index c0384a71dc94..473d9bfa655f 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -120,23 +120,23 @@ class GrpcJsonTranscoderIntegrationTest response->waitForEndStream(); EXPECT_TRUE(response->complete()); - if (response->headers().get(Http::LowerCaseString("transfer-encoding")) == nullptr || + if (response->headers().get(Http::LowerCaseString("transfer-encoding")).empty() || !absl::StartsWith(response->headers() - .get(Http::LowerCaseString("transfer-encoding")) + .get(Http::LowerCaseString("transfer-encoding"))[0] ->value() .getStringView(), "chunked")) { - EXPECT_EQ(response->headers().get(Http::LowerCaseString("trailer")), nullptr); + EXPECT_TRUE(response->headers().get(Http::LowerCaseString("trailer")).empty()); } response_headers.iterate( [response = response.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; if (entry.value() == UnexpectedHeaderValue) { - EXPECT_FALSE(response->headers().get(lower_key)); + EXPECT_TRUE(response->headers().get(lower_key).empty()); } else { EXPECT_EQ(entry.value().getStringView(), - response->headers().get(lower_key)->value().getStringView()); + response->headers().get(lower_key)[0]->value().getStringView()); } return Http::HeaderMap::Iterate::Continue; }); @@ -427,7 +427,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyFragmented) { EXPECT_EQ(response->body(), http_body.data()); // As well as content-type header auto content_type = response->headers().get(Http::LowerCaseString("content-type")); - EXPECT_EQ("text/plain", content_type->value().getStringView()); + EXPECT_EQ("text/plain", content_type[0]->value().getStringView()); } TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryEchoHttpBody) { @@ -744,6 +744,14 @@ std::string jsonStrToPbStrucStr(std::string json) { } TEST_P(GrpcJsonTranscoderIntegrationTest, DeepStruct) { + // 
Lower the timeout for the 408 response. + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + auto* virtual_host = hcm.mutable_route_config()->mutable_virtual_hosts(0); + virtual_host->mutable_routes(0)->mutable_route()->mutable_idle_timeout()->set_seconds(5); + }); + HttpIntegrationTest::initialize(); // Due to the limit of protobuf util, we can only compare to level 32. std::string deepJson = createDeepJson(32, true); @@ -757,14 +765,13 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, DeepStruct) { R"({"content":)" + deepJson + R"(})"); // The valid deep struct is parsed successfully. - // Since we didn't set the response, it return 503. - // Response body is empty (not a valid JSON), so content type should be application/grpc. + // Since we didn't set a response, it will time out. + // Response body is empty (not a valid JSON), so the error response is plaintext. testTranscoding( Http::TestRequestHeaderMapImpl{ {":method", "POST"}, {":path", "/echoStruct"}, {":authority", "host"}}, createDeepJson(100, true), {}, {}, Status(), - Http::TestResponseHeaderMapImpl{{":status", "503"}, {"content-type", "application/grpc"}}, - ""); + Http::TestResponseHeaderMapImpl{{":status", "408"}, {"content-type", "text/plain"}}, ""); // The invalid deep struct is detected. testTranscoding( diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 08a37dd9671e..1ec37be541b4 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -657,7 +657,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryError) { EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(request_data, true)); EXPECT_EQ(0, request_data.length()); - EXPECT_EQ(decoder_callbacks_.details_, "grpc_json_transcode_failure{INVALID_ARGUMENT}"); + EXPECT_EQ(decoder_callbacks_.details(), "grpc_json_transcode_failure{INVALID_ARGUMENT}"); } TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryTimeout) { diff --git a/test/extensions/filters/http/grpc_stats/config_test.cc b/test/extensions/filters/http/grpc_stats/config_test.cc index 68bf0bde27f0..5a405dbfe847 100644 --- a/test/extensions/filters/http/grpc_stats/config_test.cc +++ b/test/extensions/filters/http/grpc_stats/config_test.cc @@ -415,6 +415,7 @@ TEST_F(GrpcStatsFilterConfigTest, MessageCounts) { data.serializeAsProto().get()); EXPECT_EQ(2U, filter_object.request_message_count()); EXPECT_EQ(3U, filter_object.response_message_count()); + EXPECT_EQ("2,3", data.serializeAsString().value()); } TEST_F(GrpcStatsFilterConfigTest, UpstreamStats) { diff --git a/test/extensions/filters/http/grpc_web/BUILD b/test/extensions/filters/http/grpc_web/BUILD index bb92594cd687..831e596ffb14 100644 --- a/test/extensions/filters/http/grpc_web/BUILD +++ b/test/extensions/filters/http/grpc_web/BUILD @@ -16,7 +16,6 @@ envoy_extension_cc_test( srcs = ["grpc_web_filter_test.cc"], extension_name = "envoy.filters.http.grpc_web", deps = [ - "//source/common/stats:fake_symbol_table_lib", "//source/extensions/filters/http/grpc_web:grpc_web_filter_lib", "//test/mocks/http:http_mocks", "//test/test_common:global_lib", diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index 
dfd6bbe69024..2a35daa2d94a 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -7,7 +7,7 @@ #include "common/http/codes.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "extensions/filters/http/grpc_web/grpc_web_filter.h" @@ -177,7 +177,7 @@ TEST_F(GrpcWebFilterTest, InvalidBase64) { request_buffer.add(&INVALID_B64_MESSAGE, INVALID_B64_MESSAGE_SIZE); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(request_buffer, true)); - EXPECT_EQ(decoder_callbacks_.details_, "grpc_base_64_decode_failed"); + EXPECT_EQ(decoder_callbacks_.details(), "grpc_base_64_decode_failed"); } TEST_F(GrpcWebFilterTest, Base64NoPadding) { @@ -192,7 +192,7 @@ TEST_F(GrpcWebFilterTest, Base64NoPadding) { request_buffer.add(&B64_MESSAGE_NO_PADDING, B64_MESSAGE_NO_PADDING_SIZE); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(request_buffer, true)); - EXPECT_EQ(decoder_callbacks_.details_, "grpc_base_64_decode_failed_bad_size"); + EXPECT_EQ(decoder_callbacks_.details(), "grpc_base_64_decode_failed_bad_size"); } TEST_P(GrpcWebFilterTest, StatsNoCluster) { diff --git a/test/extensions/filters/http/gzip/BUILD b/test/extensions/filters/http/gzip/BUILD index b4b638ff40d1..649baaa34546 100644 --- a/test/extensions/filters/http/gzip/BUILD +++ b/test/extensions/filters/http/gzip/BUILD @@ -16,6 +16,7 @@ envoy_extension_cc_test( srcs = ["gzip_filter_test.cc"], extension_name = "envoy.filters.http.gzip", deps = [ + "//source/common/json:json_loader_lib", "//source/common/protobuf:utility_lib", "//source/extensions/compression/gzip/compressor:compressor_lib", "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", diff --git a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc index 8996e2aa0684..50ad7f30422a 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc @@ -36,10 +36,10 @@ class GzipIntegrationTest : public testing::TestWithParambodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) != nullptr); + ASSERT_FALSE(response->headers().get(Http::CustomHeaders::get().ContentEncoding).empty()); EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, response->headers() - .get(Http::CustomHeaders::get().ContentEncoding) + .get(Http::CustomHeaders::get().ContentEncoding)[0] ->value() .getStringView()); ASSERT_TRUE(response->headers().TransferEncoding() != nullptr); @@ -63,7 +63,7 @@ class GzipIntegrationTest : public testing::TestWithParambodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding).empty()); ASSERT_EQ(content_length, response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); } @@ -209,9 +209,10 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(UpstreamResponseAlreadyEncod EXPECT_EQ(0U, upstream_request_->bodyLength()); 
EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ( - "br", - response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("br", response->headers() + .get(Http::CustomHeaders::get().ContentEncoding)[0] + ->value() + .getStringView()); EXPECT_EQ(128U, response->body().size()); } @@ -235,7 +236,7 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotEnoughContentLength)) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding).empty()); EXPECT_EQ(10U, response->body().size()); } @@ -258,7 +259,7 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(EmptyResponse)) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("204", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding).empty()); EXPECT_EQ(0U, response->body().size()); } @@ -313,9 +314,10 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigChunkedR EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ( - "gzip", - response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("gzip", response->headers() + .get(Http::CustomHeaders::get().ContentEncoding)[0] + ->value() + .getStringView()); ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); } @@ -339,10 +341,11 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVaryHead EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ( - "gzip", - response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("gzip", response->headers() + .get(Http::CustomHeaders::get().ContentEncoding)[0] + ->value() + .getStringView()); ASSERT_EQ("Cookie, Accept-Encoding", - response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView()); + response->headers().get(Http::CustomHeaders::get().Vary)[0]->value().getStringView()); } } // namespace Envoy diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index 7f92d1c06e46..b7877bc183aa 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -3,6 +3,7 @@ #include "envoy/extensions/filters/http/gzip/v3/gzip.pb.h" #include "common/common/hex.h" +#include "common/json/json_loader.h" #include "common/protobuf/utility.h" #include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" diff --git a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index fc9c81431ae1..9a88da9ee050 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -109,6 +109,24 @@ 
TEST_F(HeaderToMetadataTest, BasicRequestTest) { filter_->onDestroy(); } +// Verify concatenation works. +TEST_F(HeaderToMetadataTest, BasicRequestDoubleHeadersTest) { + initializeFilter(request_config_yaml); + Http::TestRequestHeaderMapImpl incoming_headers{{"X-VERSION", "foo"}, {"X-VERSION", "bar"}}; + std::map expected = {{"version", "foo,bar"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(incoming_headers, false)); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + Buffer::OwnedImpl data("data"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); + Http::TestRequestTrailerMapImpl incoming_trailers; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(incoming_trailers)); + filter_->onDestroy(); +} + TEST_F(HeaderToMetadataTest, PerRouteOverride) { // Global config is empty. initializeFilter("{}"); diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index c8f4da639e5e..d67f3a1fb54c 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -127,7 +127,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_ok", callbacks_.details_); + EXPECT_EQ("health_check_ok", callbacks_.details()); } { Http::TestResponseHeaderMapImpl health_check_response{{":status", "503"}}; @@ -135,7 +135,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_failed", callbacks_.details_); + EXPECT_EQ("health_check_failed", callbacks_.details()); } // Test non-pass-through health checks with upstream cluster minimum health specified. @@ -154,7 +154,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_ok_cluster_healthy", callbacks_.details_); + EXPECT_EQ("health_check_ok_cluster_healthy", callbacks_.details()); } { // This should fail, because one upstream cluster has too few healthy servers. @@ -168,7 +168,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_failed_cluster_unhealthy", callbacks_.details_); + EXPECT_EQ("health_check_failed_cluster_unhealthy", callbacks_.details()); } { // This should fail, because one upstream cluster has no servers at all. 
@@ -182,7 +182,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_failed_cluster_empty", callbacks_.details_); + EXPECT_EQ("health_check_failed_cluster_empty", callbacks_.details()); } // Test the cases where an upstream cluster is empty, or has no healthy servers, but // the minimum required percent healthy is zero. The health check should return a 200. @@ -199,7 +199,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_ok_cluster_healthy", callbacks_.details_); + EXPECT_EQ("health_check_ok_cluster_healthy", callbacks_.details()); } { // This should succeed, because each cluster has degraded + healthy hosts greater than the @@ -214,7 +214,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_ok_cluster_healthy", callbacks_.details_); + EXPECT_EQ("health_check_ok_cluster_healthy", callbacks_.details()); } } @@ -325,7 +325,7 @@ TEST_F(HealthCheckFilterCachingTest, CachedOkCallbackNotCalled) { EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ("health_check_cached", callbacks_.details_); + EXPECT_EQ("health_check_cached", callbacks_.details()); } TEST_F(HealthCheckFilterCachingTest, All) { diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index 3b1cf0812b09..e6af79c63d78 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -149,6 +149,8 @@ envoy_extension_cc_test( name = "group_verifier_test", srcs = ["group_verifier_test.cc"], extension_name = "envoy.filters.http.jwt_authn", + # TODO(envoyproxy/windows-dev): Diagnose msvc-cl fastbuild test failure + tags = ["fails_on_windows"], deps = [ ":mock_lib", ":test_common_lib", diff --git a/test/extensions/filters/http/jwt_authn/extractor_test.cc b/test/extensions/filters/http/jwt_authn/extractor_test.cc index d91f2c7dfee1..07fce913059f 100644 --- a/test/extensions/filters/http/jwt_authn/extractor_test.cc +++ b/test/extensions/filters/http/jwt_authn/extractor_test.cc @@ -167,7 +167,15 @@ TEST_F(ExtractorTest, TestCustomHeaderToken) { // Test token remove tokens[0]->removeJwt(headers); - EXPECT_FALSE(headers.get(Http::LowerCaseString("token-header"))); + EXPECT_FALSE(headers.has(Http::LowerCaseString("token-header"))); +} + +// Make sure a double custom header concatenates the token +TEST_F(ExtractorTest, TestDoubleCustomHeaderToken) { + auto headers = TestRequestHeaderMapImpl{{"token-header", "jwt_token"}, {"token-header", "foo"}}; + auto tokens = extractor_->extract(headers); + EXPECT_EQ(tokens.size(), 1); + EXPECT_EQ(tokens[0]->token(), "jwt_token,foo"); } // Test extracting token from the custom header: "prefix-header" @@ -195,7 +203,7 @@ TEST_F(ExtractorTest, TestPrefixHeaderMatch) { // Test token remove tokens[0]->removeJwt(headers); - 
EXPECT_FALSE(headers.get(Http::LowerCaseString("prefix-header"))); + EXPECT_FALSE(headers.has(Http::LowerCaseString("prefix-header"))); } // Test extracting token from the custom header: "prefix-header" diff --git a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc index 5b72bb7e2adc..4c71a7bef275 100644 --- a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc @@ -31,10 +31,10 @@ class HeaderToFilterStateFilter : public Http::PassThroughDecoderFilter { : header_(header), state_(state) {} Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override { - const Http::HeaderEntry* entry = headers.get(header_); - if (entry) { + const auto entry = headers.get(header_); + if (!entry.empty()) { decoder_callbacks_->streamInfo().filterState()->setData( - state_, std::make_unique(entry->value().getStringView()), + state_, std::make_unique(entry[0]->value().getStringView()), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain); } @@ -109,12 +109,12 @@ TEST_P(LocalJwksIntegrationTest, WithGoodToken) { }); waitForNextUpstreamRequest(); - const auto* payload_entry = + const auto payload_entry = upstream_request_->headers().get(Http::LowerCaseString("sec-istio-auth-userinfo")); - EXPECT_TRUE(payload_entry != nullptr); - EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue); + EXPECT_FALSE(payload_entry.empty()); + EXPECT_EQ(payload_entry[0]->value().getStringView(), ExpectedPayloadValue); // Verify the token is removed. - EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization)); + EXPECT_TRUE(upstream_request_->headers().get(Http::CustomHeaders::get().Authorization).empty()); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); @@ -309,8 +309,7 @@ class RemoteJwksIntegrationTest : public HttpProtocolIntegrationTest { void createUpstreams() override { HttpProtocolIntegrationTest::createUpstreams(); // for Jwks upstream. - fake_upstreams_.emplace_back( - new FakeUpstream(0, GetParam().upstream_protocol, version_, timeSystem())); + addFakeUpstream(GetParam().upstream_protocol); } void initializeFilter(bool add_cluster) { @@ -387,12 +386,12 @@ TEST_P(RemoteJwksIntegrationTest, WithGoodToken) { waitForNextUpstreamRequest(); - const auto* payload_entry = + const auto payload_entry = upstream_request_->headers().get(Http::LowerCaseString("sec-istio-auth-userinfo")); - EXPECT_TRUE(payload_entry != nullptr); - EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue); + EXPECT_FALSE(payload_entry.empty()); + EXPECT_EQ(payload_entry[0]->value().getStringView(), ExpectedPayloadValue); // Verify the token is removed. 
- EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization)); + EXPECT_TRUE(upstream_request_->headers().get(Http::CustomHeaders::get().Authorization).empty()); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index 0f8f1ff9c549..059f9f6656b8 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -179,7 +179,8 @@ TEST_F(FilterTest, InlineUnauthorizedFailure) { Buffer::OwnedImpl data(""); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_)); - EXPECT_EQ("jwt_authn_access_denied", filter_callbacks_.details_); + EXPECT_EQ(filter_callbacks_.details(), "jwt_authn_access_denied{Jwt is not in the form of " + "Header.Payload.Signature with two dots and 3 sections}"); } // This test verifies Verifier::Callback is called inline with a failure(403 Forbidden) status. @@ -200,7 +201,8 @@ TEST_F(FilterTest, InlineForbiddenFailure) { Buffer::OwnedImpl data(""); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_)); - EXPECT_EQ("jwt_authn_access_denied", filter_callbacks_.details_); + EXPECT_EQ(filter_callbacks_.details(), + "jwt_authn_access_denied{Audiences in Jwt are not allowed}"); } // This test verifies Verifier::Callback is called with OK status after verify(). diff --git a/test/extensions/filters/http/jwt_authn/matcher_test.cc b/test/extensions/filters/http/jwt_authn/matcher_test.cc index a7559685d8b5..e64245b6cfdb 100644 --- a/test/extensions/filters/http/jwt_authn/matcher_test.cc +++ b/test/extensions/filters/http/jwt_authn/matcher_test.cc @@ -162,6 +162,36 @@ TEST_F(MatcherTest, TestMatchPathAndHeader) { EXPECT_FALSE(matcher->matches(headers)); } +TEST_F(MatcherTest, TestMatchConnect) { + const char config[] = R"(match: + connect_matcher: {})"; + RequirementRule rule; + TestUtility::loadFromYaml(config, rule); + MatcherConstPtr matcher = Matcher::create(rule); + auto headers = TestRequestHeaderMapImpl{{":method", "CONNECT"}}; + EXPECT_TRUE(matcher->matches(headers)); + headers = TestRequestHeaderMapImpl{{":method", "GET"}}; + EXPECT_FALSE(matcher->matches(headers)); +} + +TEST_F(MatcherTest, TestMatchConnectQuery) { + const char config[] = R"(match: + connect_matcher: {} + query_parameters: + - name: foo + string_match: + exact: "bar")"; + RequirementRule rule; + TestUtility::loadFromYaml(config, rule); + MatcherConstPtr matcher = Matcher::create(rule); + auto headers = TestRequestHeaderMapImpl{{":method", "CONNECT"}, {":path", "/boo?foo=bar"}}; + EXPECT_TRUE(matcher->matches(headers)); + headers = TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/boo?foo=bar"}}; + EXPECT_FALSE(matcher->matches(headers)); + headers = TestRequestHeaderMapImpl{{":method", "CONNECT"}, {":path", "/boo?ok=bye"}}; + EXPECT_FALSE(matcher->matches(headers)); +} + } // namespace } // namespace JwtAuthn } // namespace HttpFilters diff --git a/test/extensions/filters/http/jwt_authn/mock.h b/test/extensions/filters/http/jwt_authn/mock.h index 84d7a35892ca..990353cf2971 100644 --- a/test/extensions/filters/http/jwt_authn/mock.h +++ b/test/extensions/filters/http/jwt_authn/mock.h @@ -72,7 +72,7 @@ class MockUpstream { 
Http::ResponseMessagePtr response_message( new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response_message->body() = std::make_unique(response_body_); + response_message->body().add(response_body_); cb.onSuccess(request_, std::move(response_message)); called_count_++; return &request_; diff --git a/test/extensions/filters/http/local_ratelimit/BUILD b/test/extensions/filters/http/local_ratelimit/BUILD new file mode 100644 index 000000000000..38cd85098ee7 --- /dev/null +++ b/test/extensions/filters/http/local_ratelimit/BUILD @@ -0,0 +1,34 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "filter_test", + srcs = ["filter_test.cc"], + extension_name = "envoy.filters.http.local_ratelimit", + deps = [ + "//source/extensions/filters/http/local_ratelimit:local_ratelimit_lib", + "//test/common/stream_info:test_util", + "//test/mocks/http:http_mocks", + "@envoy_api//envoy/extensions/filters/http/local_ratelimit/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.filters.http.local_ratelimit", + deps = [ + "//source/extensions/filters/http/local_ratelimit:config", + "//test/mocks/server:server_mocks", + ], +) diff --git a/test/extensions/filters/http/local_ratelimit/config_test.cc b/test/extensions/filters/http/local_ratelimit/config_test.cc new file mode 100644 index 000000000000..3bbf10ad1ee1 --- /dev/null +++ b/test/extensions/filters/http/local_ratelimit/config_test.cc @@ -0,0 +1,131 @@ +#include "extensions/filters/http/local_ratelimit/config.h" +#include "extensions/filters/http/local_ratelimit/local_ratelimit.h" + +#include "test/mocks/server/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace LocalRateLimitFilter { + +TEST(Factory, GlobalEmptyConfig) { + const std::string yaml = R"( +stat_prefix: test + )"; + + LocalRateLimitFilterConfig factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + NiceMock context; + + EXPECT_CALL(context.dispatcher_, createTimer_(_)).Times(0); + auto callback = factory.createFilterFactoryFromProto(*proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + callback(filter_callback); +} + +TEST(Factory, RouteSpecificFilterConfig) { + const std::string config_yaml = R"( +stat_prefix: test +token_bucket: + max_tokens: 1 + tokens_per_fill: 1 + fill_interval: 1000s +filter_enabled: + runtime_key: test_enabled + default_value: + numerator: 100 + denominator: HUNDRED +filter_enforced: + runtime_key: test_enforced + default_value: + numerator: 100 + denominator: HUNDRED +response_headers_to_add: + - append: false + header: + key: x-test-rate-limit + value: 'true' + )"; + + LocalRateLimitFilterConfig factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(config_yaml, *proto_config); + + NiceMock context; + + EXPECT_CALL(context.dispatcher_, createTimer_(_)).Times(1); + const auto route_config = factory.createRouteSpecificFilterConfig( + *proto_config, context, 
ProtobufMessage::getNullValidationVisitor()); + const auto* config = dynamic_cast(route_config.get()); + EXPECT_TRUE(config->requestAllowed()); +} + +TEST(Factory, EnabledEnforcedDisabledByDefault) { + const std::string config_yaml = R"( +stat_prefix: test +token_bucket: + max_tokens: 1 + tokens_per_fill: 1 + fill_interval: 1000s + )"; + + LocalRateLimitFilterConfig factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(config_yaml, *proto_config); + + NiceMock context; + + EXPECT_CALL(context.dispatcher_, createTimer_(_)).Times(1); + const auto route_config = factory.createRouteSpecificFilterConfig( + *proto_config, context, ProtobufMessage::getNullValidationVisitor()); + const auto* config = dynamic_cast(route_config.get()); + EXPECT_FALSE(config->enabled()); + EXPECT_FALSE(config->enforced()); +} + +TEST(Factory, PerRouteConfigNoTokenBucket) { + const std::string config_yaml = R"( +stat_prefix: test + )"; + + LocalRateLimitFilterConfig factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(config_yaml, *proto_config); + + NiceMock context; + EXPECT_THROW(factory.createRouteSpecificFilterConfig(*proto_config, context, + ProtobufMessage::getNullValidationVisitor()), + EnvoyException); +} + +TEST(Factory, FillTimerTooLow) { + const std::string config_yaml = R"( +stat_prefix: test +token_bucket: + max_tokens: 1 + tokens_per_fill: 1 + fill_interval: 0.040s + )"; + + LocalRateLimitFilterConfig factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(config_yaml, *proto_config); + + NiceMock context; + + EXPECT_CALL(context.dispatcher_, createTimer_(_)).Times(1); + EXPECT_THROW(factory.createRouteSpecificFilterConfig(*proto_config, context, + ProtobufMessage::getNullValidationVisitor()), + EnvoyException); +} + +} // namespace LocalRateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/local_ratelimit/filter_test.cc b/test/extensions/filters/http/local_ratelimit/filter_test.cc new file mode 100644 index 000000000000..9662f9a783e1 --- /dev/null +++ b/test/extensions/filters/http/local_ratelimit/filter_test.cc @@ -0,0 +1,146 @@ +#include "envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.pb.h" + +#include "extensions/filters/http/local_ratelimit/local_ratelimit.h" + +#include "test/mocks/http/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace LocalRateLimitFilter { + +static const std::string config_yaml = R"( +stat_prefix: test +token_bucket: + max_tokens: {} + tokens_per_fill: 1 + fill_interval: 1000s +filter_enabled: + runtime_key: test_enabled + default_value: + numerator: 100 + denominator: HUNDRED +filter_enforced: + runtime_key: test_enforced + default_value: + numerator: 100 + denominator: HUNDRED +response_headers_to_add: + - append: false + header: + key: x-test-rate-limit + value: 'true' + )"; + +class FilterTest : public testing::Test { +public: + FilterTest() = default; + + void setup(const std::string& yaml, const bool enabled = true, const bool enforced = true) { + EXPECT_CALL( + runtime_.snapshot_, + featureEnabled(absl::string_view("test_enabled"), + testing::Matcher(Percent(100)))) + .WillRepeatedly(testing::Return(enabled)); + EXPECT_CALL( + runtime_.snapshot_, + 
featureEnabled(absl::string_view("test_enforced"), + testing::Matcher(Percent(100)))) + .WillRepeatedly(testing::Return(enforced)); + + envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit config; + TestUtility::loadFromYaml(yaml, config); + config_ = std::make_shared(config, dispatcher_, stats_, runtime_); + filter_ = std::make_shared(config_); + filter_->setDecoderFilterCallbacks(decoder_callbacks_); + } + + uint64_t findCounter(const std::string& name) { + const auto counter = TestUtility::findCounter(stats_, name); + return counter != nullptr ? counter->value() : 0; + } + + Http::Code toErrorCode(const uint64_t code) { return config_->toErrorCode(code); } + + Stats::IsolatedStoreImpl stats_; + testing::NiceMock decoder_callbacks_; + NiceMock dispatcher_; + NiceMock runtime_; + std::shared_ptr config_; + std::shared_ptr filter_; +}; + +TEST_F(FilterTest, Runtime) { + setup(fmt::format(config_yaml, "1"), false, false); + EXPECT_EQ(&runtime_, &(config_->runtime())); +} + +TEST_F(FilterTest, ToErrorCode) { + setup(fmt::format(config_yaml, "1"), false, false); + EXPECT_EQ(Http::Code::BadRequest, toErrorCode(400)); +} + +TEST_F(FilterTest, Disabled) { + setup(fmt::format(config_yaml, "1"), false, false); + auto headers = Http::TestRequestHeaderMapImpl(); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(0U, findCounter("test.http_local_rate_limit.enabled")); + EXPECT_EQ(0U, findCounter("test.http_local_rate_limit.enforced")); +} + +TEST_F(FilterTest, RequestOk) { + setup(fmt::format(config_yaml, "1")); + auto headers = Http::TestRequestHeaderMapImpl(); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.enabled")); + EXPECT_EQ(0U, findCounter("test.http_local_rate_limit.enforced")); + EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.ok")); +} + +TEST_F(FilterTest, RequestRateLimited) { + setup(fmt::format(config_yaml, "0")); + + EXPECT_CALL(decoder_callbacks_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _)) + .WillOnce(Invoke([](Http::Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, + absl::string_view details) { + EXPECT_EQ(Http::Code::TooManyRequests, code); + EXPECT_EQ("local_rate_limited", body); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + modify_headers(response_headers); + EXPECT_EQ("true", response_headers.get(Http::LowerCaseString("x-test-rate-limit"))[0] + ->value() + .getStringView()); + + EXPECT_EQ(grpc_status, absl::nullopt); + EXPECT_EQ(details, "local_rate_limited"); + })); + + auto headers = Http::TestRequestHeaderMapImpl(); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.enabled")); + EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.enforced")); + EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.rate_limited")); +} + +TEST_F(FilterTest, RequestRateLimitedButNotEnforced) { + setup(fmt::format(config_yaml, "0"), true, false); + + EXPECT_CALL(decoder_callbacks_, sendLocalReply(Http::Code::TooManyRequests, _, _, _, _)).Times(0); + + auto headers = Http::TestRequestHeaderMapImpl(); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.enabled")); + EXPECT_EQ(0U, findCounter("test.http_local_rate_limit.enforced")); + 
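+  // With max_tokens formatted to 0 the bucket is already exhausted, so the rate_limited
+  // counter below still increments even though enforcement is off and decodeHeaders()
+  // returned Continue above.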
EXPECT_EQ(1U, findCounter("test.http_local_rate_limit.rate_limited")); +} + +} // namespace LocalRateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index 645591b2304e..9980083db8a4 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -15,6 +15,8 @@ envoy_extension_cc_test( name = "lua_filter_test", srcs = ["lua_filter_test.cc"], extension_name = "envoy.filters.http.lua", + # TODO(envoyproxy/windows-dev): diagnose clang-cl build test failure + tags = ["fails_on_windows"], deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:lua_filter_lib", diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index 13455f81e8d4..310bafc36b39 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -790,7 +790,10 @@ TEST_F(LuaHttpFilterTest, HttpCall) { for key, value in pairs(headers) do request_handle:logTrace(key .. " " .. value) end + request_handle:logTrace(string.len(body)) request_handle:logTrace(body) + request_handle:logTrace(string.byte(body, 5)) + request_handle:logTrace(string.sub(body, 6, 8)) end )EOF"}; @@ -806,13 +809,14 @@ TEST_F(LuaHttpFilterTest, HttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":method", "POST"}, + {":path", "/"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -828,9 +832,13 @@ TEST_F(LuaHttpFilterTest, HttpCall) { Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response_message->body() = std::make_unique("response"); + const char response[8] = {'r', 'e', 's', 'p', '\0', 'n', 's', 'e'}; + response_message->body().add(response, 8); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("response"))); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("8"))); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("resp"))); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("0"))); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("nse"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); callbacks->onBeforeFinalizeUpstreamSpan(child_span_, &response_message->headers()); callbacks->onSuccess(request, std::move(response_message)); @@ -870,13 +878,14 @@ TEST_F(LuaHttpFilterTest, HttpCallAsyncFalse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", 
"POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -892,7 +901,7 @@ TEST_F(LuaHttpFilterTest, HttpCallAsyncFalse) { Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response_message->body() = std::make_unique("response"); + response_message->body().add("response"); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("response"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); @@ -929,13 +938,14 @@ TEST_F(LuaHttpFilterTest, HttpCallAsynchronous) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -997,11 +1007,11 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1011,7 +1021,7 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - response_message->body() = std::make_unique("response"); + response_message->body().add("response"); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("response"))); EXPECT_CALL(cluster_manager_, get(Eq("cluster2"))); @@ -1020,9 +1030,9 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{ - {":path", "/bar"}, {":method", "GET"}, {":authority", "foo"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", 
"/bar"}, {":method", "GET"}, {":authority", "foo"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1077,9 +1087,9 @@ TEST_F(LuaHttpFilterTest, HttpCallNoBody) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{ - {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1135,9 +1145,9 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateResponse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{ - {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1864,6 +1874,8 @@ TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnection) { request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():tlsVersion()) request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificate()) request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificateChain()) + + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():sessionId()) end end )EOF"}; @@ -1952,6 +1964,10 @@ TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnection) { .WillOnce(ReturnRef(peer_cert_chain)); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_chain))); + const std::string id = "12345"; + EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(id)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(id))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); } @@ -2127,7 +2143,7 @@ TEST_F(LuaHttpFilterTest, LuaFilterDisabled) { Http::TestRequestHeaderMapImpl request_headers_2{{":path", "/"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_2, true)); - EXPECT_EQ(nullptr, request_headers_2.get(Http::LowerCaseString("hello"))); + EXPECT_FALSE(request_headers_2.has("hello")); } // Test whether the route can directly reuse the Lua code in the global configuration. 
@@ -2191,7 +2207,7 @@ TEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodeNotExist) { Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); - EXPECT_EQ(nullptr, request_headers.get(Http::LowerCaseString("hello"))); + EXPECT_TRUE(request_headers.get(Http::LowerCaseString("hello")).empty()); } TEST_F(LuaHttpFilterTest, LuaFilterBase64Escape) { @@ -2204,6 +2220,11 @@ function envoy_on_response(response_handle) local base64Encoded = response_handle:base64Escape("barfoo") response_handle:logTrace(base64Encoded) + + local resp_body_buf = response_handle:body() + local resp_body = resp_body_buf:getBytes(0, resp_body_buf:length()) + local b64_resp_body = response_handle:base64Escape(resp_body) + response_handle:logTrace(b64_resp_body) end )EOF"}; @@ -2216,9 +2237,69 @@ EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("YmFyZm9v"))); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + + // Base64 encoding should also work for binary data. + uint8_t buffer[34] = {31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 202, 72, 205, 201, 201, 47, 207, + 47, 202, 73, 1, 4, 0, 0, 255, 255, 173, 32, 235, 249, 10, 0, 0, 0}; + Buffer::OwnedImpl response_body(buffer, 34); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, + StrEq("H4sIAAAAAAAA/8pIzcnJL88vykkBBAAA//+tIOv5CgAAAA=="))); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); +} + +TEST_F(LuaHttpFilterTest, LuaFilterSetResponseBuffer) { + const std::string SCRIPT{R"EOF( + function envoy_on_response(response_handle) + local content_length = response_handle:body():setBytes("1234") + response_handle:logTrace(content_length) + + -- It is possible to replace an entry in headers after overriding encoding buffer.
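+      -- setBytes() returns the new body length (logged above as "4"), so the same value
+      -- can be reused here to keep content-length consistent with the rewritten body.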
+ response_handle:headers():replace("content-length", content_length) + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + Buffer::OwnedImpl response_body("1234567890"); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("4"))); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); + EXPECT_EQ(4, encoder_callbacks_.buffer_->length()); +} + +TEST_F(LuaHttpFilterTest, LuaFilterSetResponseBufferChunked) { + const std::string SCRIPT{R"EOF( + function envoy_on_response(response_handle) + local last + for chunk in response_handle:bodyChunks() do + chunk:setBytes("") + last = chunk + end + response_handle:logTrace(last:setBytes("1234")) + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl response_body("1234567890"); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("4"))); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); } } // namespace diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index fa0e474eed8d..7c99faea6df1 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -20,13 +20,10 @@ class LuaIntegrationTest : public testing::TestWithParamstartRequest(request_headers); + Http::StreamEncoder& encoder = encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + Buffer::OwnedImpl request_data("done"); + encoder.encodeData(request_data, true); + + waitForNextUpstreamRequest(); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"foo", "bar"}}; + upstream_request_->encodeHeaders(response_headers, false); + Buffer::OwnedImpl response_data1("good"); + upstream_request_->encodeData(response_data1, false); + Buffer::OwnedImpl response_data2("bye"); + upstream_request_->encodeData(response_data2, true); + + response->waitForEndStream(); + + EXPECT_EQ("2", response->headers() + .get(Http::LowerCaseString("content-length"))[0] + ->value() + .getStringView()); + EXPECT_EQ("ok", response->body()); + cleanup(); + } + void cleanup() { codec_client_->close(); if (fake_lua_connection_ != nullptr) { @@ -266,31 +297,31 @@ name: lua waitForNextUpstreamRequest(); EXPECT_EQ("10", upstream_request_->headers() - .get(Http::LowerCaseString("request_body_size")) + .get(Http::LowerCaseString("request_body_size"))[0] ->value() .getStringView()); EXPECT_EQ("bar", upstream_request_->headers() - .get(Http::LowerCaseString("request_metadata_foo")) + .get(Http::LowerCaseString("request_metadata_foo"))[0] ->value() .getStringView()); EXPECT_EQ("bat", upstream_request_->headers() - .get(Http::LowerCaseString("request_metadata_baz")) + .get(Http::LowerCaseString("request_metadata_baz"))[0] ->value() .getStringView()); EXPECT_EQ("false", 
upstream_request_->headers() - .get(Http::LowerCaseString("request_secure")) + .get(Http::LowerCaseString("request_secure"))[0] ->value() .getStringView()); EXPECT_EQ("HTTP/1.1", upstream_request_->headers() - .get(Http::LowerCaseString("request_protocol")) + .get(Http::LowerCaseString("request_protocol"))[0] ->value() .getStringView()); EXPECT_EQ("bar", upstream_request_->headers() - .get(Http::LowerCaseString("request_dynamic_metadata_value")) + .get(Http::LowerCaseString("request_dynamic_metadata_value"))[0] ->value() .getStringView()); @@ -304,21 +335,22 @@ name: lua response->waitForEndStream(); EXPECT_EQ("7", response->headers() - .get(Http::LowerCaseString("response_body_size")) + .get(Http::LowerCaseString("response_body_size"))[0] ->value() .getStringView()); EXPECT_EQ("bar", response->headers() - .get(Http::LowerCaseString("response_metadata_foo")) + .get(Http::LowerCaseString("response_metadata_foo"))[0] ->value() .getStringView()); EXPECT_EQ("bat", response->headers() - .get(Http::LowerCaseString("response_metadata_baz")) + .get(Http::LowerCaseString("response_metadata_baz"))[0] ->value() .getStringView()); - EXPECT_EQ( - "HTTP/1.1", - response->headers().get(Http::LowerCaseString("request_protocol"))->value().getStringView()); - EXPECT_EQ(nullptr, response->headers().get(Http::LowerCaseString("foo"))); + EXPECT_EQ("HTTP/1.1", response->headers() + .get(Http::LowerCaseString("request_protocol"))[0] + ->value() + .getStringView()); + EXPECT_TRUE(response->headers().get(Http::LowerCaseString("foo")).empty()); cleanup(); } @@ -367,11 +399,11 @@ name: lua waitForNextUpstreamRequest(); EXPECT_EQ("bar", upstream_request_->headers() - .get(Http::LowerCaseString("upstream_foo")) + .get(Http::LowerCaseString("upstream_foo"))[0] ->value() .getStringView()); EXPECT_EQ("4", upstream_request_->headers() - .get(Http::LowerCaseString("upstream_body_size")) + .get(Http::LowerCaseString("upstream_body_size"))[0] ->value() .getStringView()); @@ -632,12 +664,12 @@ name: lua waitForNextUpstreamRequest(); EXPECT_EQ("approved", upstream_request_->headers() - .get(Http::LowerCaseString("signature_verification")) + .get(Http::LowerCaseString("signature_verification"))[0] ->value() .getStringView()); EXPECT_EQ("done", upstream_request_->headers() - .get(Http::LowerCaseString("verification")) + .get(Http::LowerCaseString("verification"))[0] ->value() .getStringView()); @@ -774,11 +806,11 @@ TEST_P(LuaIntegrationTest, BasicTestOfLuaPerRoute) { auto response = codec_client_->makeHeaderOnlyRequest(request_headers); waitForNextUpstreamRequest(1); - auto* entry = upstream_request_->headers().get(Http::LowerCaseString("code")); + auto entry = upstream_request_->headers().get(Http::LowerCaseString("code")); if (!expected_value.empty()) { - EXPECT_EQ(expected_value, entry->value().getStringView()); + EXPECT_EQ(expected_value, entry[0]->value().getStringView()); } else { - EXPECT_EQ(nullptr, entry); + EXPECT_TRUE(entry.empty()); } upstream_request_->encodeHeaders(default_response_headers_, true); @@ -858,11 +890,11 @@ TEST_P(LuaIntegrationTest, RdsTestOfLuaPerRoute) { auto response = codec_client_->makeHeaderOnlyRequest(request_headers); waitForNextUpstreamRequest(1); - auto* entry = upstream_request_->headers().get(Http::LowerCaseString("code")); + auto entry = upstream_request_->headers().get(Http::LowerCaseString("code")); if (!expected_value.empty()) { - EXPECT_EQ(expected_value, entry->value().getStringView()); + EXPECT_EQ(expected_value, entry[0]->value().getStringView()); } else { - EXPECT_EQ(nullptr, 
entry); + EXPECT_TRUE(entry.empty()); } upstream_request_->encodeHeaders(default_response_headers_, true); @@ -900,5 +932,46 @@ TEST_P(LuaIntegrationTest, RdsTestOfLuaPerRoute) { #endif } +// Rewrite response buffer. +TEST_P(LuaIntegrationTest, RewriteResponseBuffer) { + const std::string FILTER_AND_CODE = + R"EOF( +name: lua +typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua + inline_code: | + function envoy_on_response(response_handle) + local content_length = response_handle:body():setBytes("ok") + response_handle:logTrace(content_length) + + response_handle:headers():replace("content-length", content_length) + end +)EOF"; + + testRewriteResponse(FILTER_AND_CODE); +} + +// Rewrite chunked response body. +TEST_P(LuaIntegrationTest, RewriteChunkedBody) { + const std::string FILTER_AND_CODE = + R"EOF( +name: lua +typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua + inline_code: | + function envoy_on_response(response_handle) + response_handle:headers():replace("content-length", 2) + local last + for chunk in response_handle:bodyChunks() do + chunk:setBytes("") + last = chunk + end + last:setBytes("ok") + end +)EOF"; + + testRewriteResponse(FILTER_AND_CODE); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index 7ccf297bf343..990016db3f15 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -44,6 +44,10 @@ TEST_F(LuaHeaderMapWrapperTest, Methods) { for key, value in pairs(object) do testPrint(string.format("'%s' '%s'", key, value)) end + + object:add("header3", "foo") + object:add("header3", "bar") + testPrint(object:get("header3")) end )EOF"}; @@ -58,6 +62,7 @@ TEST_F(LuaHeaderMapWrapperTest, Methods) { EXPECT_CALL(printer_, testPrint("'header2' 'foo'")); EXPECT_CALL(printer_, testPrint("'hello' 'WORLD'")); EXPECT_CALL(printer_, testPrint("'header2' 'foo'")); + EXPECT_CALL(printer_, testPrint("foo,bar")); start("callMe"); } @@ -288,7 +293,6 @@ TEST_F(LuaStreamInfoWrapperTest, SetGetAndIterateDynamicMetadata) { end )EOF"}; - InSequence s; setup(SCRIPT); StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem()); diff --git a/test/extensions/filters/http/oauth2/config_test.cc b/test/extensions/filters/http/oauth2/config_test.cc index 4ac59da9fe96..4be857a21d0a 100644 --- a/test/extensions/filters/http/oauth2/config_test.cc +++ b/test/extensions/filters/http/oauth2/config_test.cc @@ -22,33 +22,102 @@ namespace Oauth2 { using testing::NiceMock; using testing::Return; +namespace { + +// This loads one of the secrets in credentials, and fails the other one. 
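+// Only the secret that is expected to resolve gets a static provider; the secret named by
+// failed_secret_name is left without one, so filter creation throws exception_message.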
+void expectInvalidSecretConfig(const std::string& failed_secret_name, + const std::string& exception_message) { + const std::string yaml = R"EOF( +config: + token_endpoint: + cluster: foo + uri: oauth.com/token + timeout: 3s + credentials: + client_id: "secret" + token_secret: + name: token + hmac_secret: + name: hmac + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + )EOF"; + + OAuth2Config factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + NiceMock context; + + auto& secret_manager = context.cluster_manager_.cluster_manager_factory_.secretManager(); + ON_CALL(secret_manager, + findStaticGenericSecretProvider(failed_secret_name == "token" ? "hmac" : "token")) + .WillByDefault(Return(std::make_shared( + envoy::extensions::transport_sockets::tls::v3::GenericSecret()))); + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException, exception_message); +} + +} // namespace + TEST(ConfigTest, CreateFilter) { const std::string yaml = R"EOF( config: - token_endpoint: - cluster: foo - uri: oauth.com/token - timeout: 3s - authorization_endpoint: https://oauth.com/oauth/authorize/ - redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" - signout_path: - path: - exact: /signout + token_endpoint: + cluster: foo + uri: oauth.com/token + timeout: 3s + credentials: + client_id: "secret" + token_secret: + name: token + hmac_secret: + name: hmac + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout )EOF"; - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2 proto_config; - MessageUtil::loadFromYaml(yaml, proto_config, ProtobufMessage::getStrictValidationVisitor()); - NiceMock factory_context; - auto& secret_manager = factory_context.cluster_manager_.cluster_manager_factory_.secretManager(); + OAuth2Config factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + Server::Configuration::MockFactoryContext context; + + // This returns non-nullptr for token_secret and hmac_secret. 
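+  // With both providers resolvable, createFilterFactoryFromProto() below succeeds; the
+  // expectInvalidSecretConfig() helper above exercises the failure paths.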
+ auto& secret_manager = context.cluster_manager_.cluster_manager_factory_.secretManager(); ON_CALL(secret_manager, findStaticGenericSecretProvider(_)) .WillByDefault(Return(std::make_shared( envoy::extensions::transport_sockets::tls::v3::GenericSecret()))); - OAuth2Config config; - auto cb = config.createFilterFactoryFromProtoTyped(proto_config, "whatever", factory_context); + EXPECT_CALL(context, messageValidationVisitor()); + EXPECT_CALL(context, clusterManager()); + EXPECT_CALL(context, scope()); + EXPECT_CALL(context, timeSource()); + EXPECT_CALL(context, api()); + EXPECT_CALL(context, getTransportSocketFactoryContext()); + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)); + cb(filter_callback); +} + +TEST(ConfigTest, InvalidTokenSecret) { + expectInvalidSecretConfig("token", "invalid token secret configuration"); +} - NiceMock filter_callbacks; - cb(filter_callbacks); +TEST(ConfigTest, InvalidHmacSecret) { + expectInvalidSecretConfig("hmac", "invalid HMAC secret configuration"); } TEST(ConfigTest, CreateFilterMissingConfig) { @@ -65,4 +134,4 @@ TEST(ConfigTest, CreateFilterMissingConfig) { } // namespace Oauth2 } // namespace HttpFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/extensions/filters/http/oauth2/oauth_test.cc b/test/extensions/filters/http/oauth2/oauth_test.cc index 66f55ba14eec..5e5c4b78e6c3 100644 --- a/test/extensions/filters/http/oauth2/oauth_test.cc +++ b/test/extensions/filters/http/oauth2/oauth_test.cc @@ -73,7 +73,7 @@ TEST_F(OAuth2ClientTest, RequestAccessTokenSuccess) { }}; Http::ResponseMessagePtr mock_response( new Http::ResponseMessageImpl(std::move(mock_response_headers))); - mock_response->body() = std::make_unique(json); + mock_response->body().add(json); EXPECT_CALL(cm_.async_client_, send_(_, _, _)) .WillRepeatedly( @@ -104,7 +104,7 @@ TEST_F(OAuth2ClientTest, RequestAccessTokenIncompleteResponse) { }}; Http::ResponseMessagePtr mock_response( new Http::ResponseMessageImpl(std::move(mock_response_headers))); - mock_response->body() = std::make_unique(json); + mock_response->body().add(json); EXPECT_CALL(cm_.async_client_, send_(_, _, _)) .WillRepeatedly( @@ -160,7 +160,7 @@ TEST_F(OAuth2ClientTest, RequestAccessTokenInvalidResponse) { }}; Http::ResponseMessagePtr mock_response( new Http::ResponseMessageImpl(std::move(mock_response_headers))); - mock_response->body() = std::make_unique(json); + mock_response->body().add(json); EXPECT_CALL(cm_.async_client_, send_(_, _, _)) .WillRepeatedly( diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index f0b19bd43806..e20d573b4a92 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -31,8 +31,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara void createUpstreams() override { HttpIntegrationTest::createUpstreams(); - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } void initialize() override { @@ -47,6 +46,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara 
TestUtility::loadFromYaml(base_filter_config_, proto_config_); proto_config_.set_failure_mode_deny(failure_mode_deny_); proto_config_.set_enable_x_ratelimit_headers(enable_x_ratelimit_headers_); + proto_config_.set_disable_x_envoy_ratelimited_header(disable_x_envoy_ratelimited_header_); setGrpcService(*proto_config_.mutable_rate_limit_service()->mutable_grpc_service(), "ratelimit", fake_upstreams_.back()->localAddress()); proto_config_.mutable_rate_limit_service()->set_transport_api_version(apiVersion()); @@ -193,6 +193,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara bool failure_mode_deny_ = false; envoy::extensions::filters::http::ratelimit::v3::RateLimit::XRateLimitHeadersRFCVersion enable_x_ratelimit_headers_ = envoy::extensions::filters::http::ratelimit::v3::RateLimit::OFF; + bool disable_x_envoy_ratelimited_header_ = false; envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config_{}; const std::string base_filter_config_ = R"EOF( domain: some_domain @@ -215,12 +216,24 @@ class RatelimitFilterHeadersEnabledIntegrationTest : public RatelimitIntegration } }; +// Test verifies that disabling X-Envoy-RateLimited response header works. +class RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest + : public RatelimitIntegrationTest { +public: + RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest() { + disable_x_envoy_ratelimited_header_ = true; + } +}; + INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFailureModeIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFilterHeadersEnabledIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, + RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest, + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); TEST_P(RatelimitIntegrationTest, Ok) { basicFlow(); } @@ -238,14 +251,14 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { ratelimit_response_headers.iterate( [response = response_.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); + EXPECT_EQ(entry.value(), response->headers().get(lower_key)[0]->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }); request_headers_to_add.iterate([upstream = upstream_request_.get()]( const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value(), upstream->headers().get(lower_key)->value().getStringView()); + EXPECT_EQ(entry.value(), upstream->headers().get(lower_key)[0]->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }); @@ -262,6 +275,11 @@ TEST_P(RatelimitIntegrationTest, OverLimit) { sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); + + EXPECT_THAT(response_.get()->headers(), + Http::HeaderValueOf(Http::Headers::get().EnvoyRateLimited, + Http::Headers::get().EnvoyRateLimitedValues.True)); + cleanup(); EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); @@ -281,10 +299,14 @@ TEST_P(RatelimitIntegrationTest, 
OverLimitWithHeaders) { ratelimit_response_headers.iterate( [response = response_.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); + EXPECT_EQ(entry.value(), response->headers().get(lower_key)[0]->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }); + EXPECT_THAT(response_.get()->headers(), + Http::HeaderValueOf(Http::Headers::get().EnvoyRateLimited, + Http::Headers::get().EnvoyRateLimitedValues.True)); + cleanup(); EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); @@ -437,5 +459,23 @@ TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OverLimitWithFilterHeaders) EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); } +TEST_P(RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest, + OverLimitWithoutEnvoyRatelimitedHeader) { + initiateClientConnection(); + waitForRatelimitRequest(); + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); + waitForFailedUpstreamResponse(429); + + EXPECT_THAT(response_.get()->headers(), + ::testing::Not(Http::HeaderValueOf(Http::Headers::get().EnvoyRateLimited, _))); + + cleanup(); + + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ratelimit.over_limit")->value()); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 87c0e7925409..4eff42850721 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -10,6 +10,7 @@ #include "common/http/headers.h" #include "extensions/filters/http/ratelimit/ratelimit.h" +#include "extensions/filters/http/well_known_names.h" #include "test/extensions/filters/common/ratelimit/mocks.h" #include "test/extensions/filters/common/ratelimit/utils.h" @@ -79,6 +80,11 @@ class HttpRateLimitFilterTest : public testing::Test { enable_x_ratelimit_headers: DRAFT_VERSION_03 )EOF"; + const std::string disable_x_envoy_ratelimited_header_config_ = R"EOF( + domain: foo + disable_x_envoy_ratelimited_header: true + )EOF"; + const std::string filter_config_ = R"EOF( domain: foo )EOF"; @@ -106,6 +112,7 @@ class HttpRateLimitFilterTest : public testing::Test { NiceMock route_rate_limit_; NiceMock vh_rate_limit_; std::vector descriptor_{{{{"descriptor_key", "descriptor_value"}}}}; + std::vector descriptor_two_{{{{"key", "value"}}}}; NiceMock local_info_; Http::ContextImpl http_context_; }; @@ -146,7 +153,7 @@ TEST_F(HttpRateLimitFilterTest, NoApplicableRateLimit) { SetUpTest(filter_config_); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear(); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -162,7 +169,7 @@ TEST_F(HttpRateLimitFilterTest, NoDescriptor) { 
EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1); EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -205,7 +212,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -252,7 +259,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -310,7 +317,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithFilterHeaders) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -363,7 +370,7 @@ TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, @@ -394,7 +401,7 @@ TEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, @@ -426,7 +433,7 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponse) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -460,7 +467,7 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -483,7 +490,7 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { ->statsScope() .counterFromStatName(ratelimit_failure_mode_allowed_) .value()); - EXPECT_EQ("rate_limiter_error", filter_callbacks_.details_); + EXPECT_EQ("rate_limiter_error", filter_callbacks_.details()); } TEST_F(HttpRateLimitFilterTest, LimitResponse) { @@ -492,7 +499,7 @@ 
TEST_F(HttpRateLimitFilterTest, LimitResponse) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -523,7 +530,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponse) { EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); - EXPECT_EQ("request_rate_limited", filter_callbacks_.details_); + EXPECT_EQ("request_rate_limited", filter_callbacks_.details()); } TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { @@ -532,7 +539,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -584,7 +591,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -630,13 +637,51 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) { filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); } +TEST_F(HttpRateLimitFilterTest, LimitResponseWithoutEnvoyRateLimitedHeader) { + SetUpTest(disable_x_envoy_ratelimited_header_config_); + InSequence s; + + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + EXPECT_CALL(*client_, limit(_, _, _, _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "429"}}; + EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); + + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromStatName(ratelimit_over_limit_) + .value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); + EXPECT_EQ("request_rate_limited", filter_callbacks_.details()); +} + TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { SetUpTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, 
_, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -678,7 +723,7 @@ TEST_F(HttpRateLimitFilterTest, ResetDuringCall) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -699,7 +744,7 @@ TEST_F(HttpRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) { .WillByDefault(Return(false)); EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -719,7 +764,7 @@ TEST_F(HttpRateLimitFilterTest, VirtualHostRateLimitDisabledForRouteKey) { .WillByDefault(Return(false)); EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -742,7 +787,7 @@ TEST_F(HttpRateLimitFilterTest, IncorrectRequestType) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -763,7 +808,7 @@ TEST_F(HttpRateLimitFilterTest, IncorrectRequestType) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -798,7 +843,7 @@ TEST_F(HttpRateLimitFilterTest, InternalRequestType) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, @@ -844,7 +889,7 @@ TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, 
nullptr, nullptr, @@ -865,13 +910,16 @@ TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } -TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { +TEST_F(HttpRateLimitFilterTest, DEPRECATED_FEATURE_TEST(ExcludeVirtualHost)) { std::string external_filter_config = R"EOF( { "domain": "foo" } )EOF"; SetUpTest(external_filter_config); + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute vh_settings; + vh_settings.clear_vh_rate_limits(); + FilterConfigPerRoute per_route_config_(vh_settings); InSequence s; EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); @@ -880,6 +928,265 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, + perFilterConfig(HttpFilterNames::get().RateLimit)) + .WillOnce(Return(&per_route_config_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(0); + + EXPECT_CALL(*client_, limit(_, "foo", + testing::ContainerEq(std::vector{ + {{{"descriptor_key", "descriptor_value"}}}}), + _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); + }))); + + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + +// Tests that the route rate limit is used when VhRateLimitsOptions::OVERRIDE and route rate limit +// is set +TEST_F(HttpRateLimitFilterTest, OverrideVHRateLimitOptionWithRouteRateLimitSet) { + SetUpTest(filter_config_); + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings; + settings.set_vh_rate_limits( + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::OVERRIDE); + FilterConfigPerRoute per_route_config_(settings); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, + perFilterConfig(HttpFilterNames::get().RateLimit)) + .WillOnce(Return(&per_route_config_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, 
empty()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(0); + + EXPECT_CALL(*client_, limit(_, "foo", + testing::ContainerEq(std::vector{ + {{{"descriptor_key", "descriptor_value"}}}}), + _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); + }))); + + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + +// Tests that the virtual host rate limit is used when VhRateLimitsOptions::OVERRIDE is set and +// route rate limit is empty +TEST_F(HttpRateLimitFilterTest, OverrideVHRateLimitOptionWithoutRouteRateLimit) { + SetUpTest(filter_config_); + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings; + settings.set_vh_rate_limits( + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::OVERRIDE); + FilterConfigPerRoute per_route_config_(settings); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, + perFilterConfig(HttpFilterNames::get().RateLimit)) + .WillOnce(Return(&per_route_config_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty()) + .WillOnce(Return(true)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(1); + + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + + EXPECT_CALL(*client_, limit(_, "foo", + testing::ContainerEq(std::vector{ + {{{"descriptor_key", "descriptor_value"}}}}), + _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); + }))); + + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, 
filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + +// Tests that the virtual host rate limit is used when VhRateLimitsOptions::INCLUDE is set and route +// rate limit is empty +TEST_F(HttpRateLimitFilterTest, IncludeVHRateLimitOptionWithOnlyVHRateLimitSet) { + SetUpTest(filter_config_); + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings; + settings.set_vh_rate_limits( + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::INCLUDE); + FilterConfigPerRoute per_route_config_(settings); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, + perFilterConfig(HttpFilterNames::get().RateLimit)) + .WillOnce(Return(&per_route_config_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(1); + + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_two_)); + + EXPECT_CALL(*client_, + limit(_, "foo", + testing::ContainerEq(std::vector{{{{"key", "value"}}}}), + _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); + }))); + + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + +// Tests that the virtual host rate limit is used when VhRateLimitsOptions::INCLUDE and route rate +// limit is set +TEST_F(HttpRateLimitFilterTest, IncludeVHRateLimitOptionWithRouteAndVHRateLimitSet) { + SetUpTest(filter_config_); + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings; + settings.set_vh_rate_limits( + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::INCLUDE); + FilterConfigPerRoute per_route_config_(settings); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, + perFilterConfig(HttpFilterNames::get().RateLimit)) + .WillOnce(Return(&per_route_config_)); + + 
EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(1); + + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_two_)); + + EXPECT_CALL(*client_, + limit(_, "foo", + testing::ContainerEq(std::vector{{{{"key", "value"}}}}), + _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); + }))); + + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + +// Tests that the route rate limit is used when VhRateLimitsOptions::IGNORE and route rate limit is +// set +TEST_F(HttpRateLimitFilterTest, IgnoreVHRateLimitOptionWithRouteRateLimitSet) { + SetUpTest(filter_config_); + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings; + settings.set_vh_rate_limits( + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::IGNORE); + FilterConfigPerRoute per_route_config_(settings); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, + perFilterConfig(HttpFilterNames::get().RateLimit)) + .WillOnce(Return(&per_route_config_)); + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(0); @@ -887,7 +1194,7 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, @@ -908,6 +1215,42 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } +// Tests that no rate limit is used when VhRateLimitsOptions::IGNORE is set and route rate limit +// empty +TEST_F(HttpRateLimitFilterTest, IgnoreVHRateLimitOptionWithOutRouteRateLimit) { + SetUpTest(filter_config_); + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute settings; + settings.set_vh_rate_limits( + envoy::extensions::filters::http::ratelimit::v3::RateLimitPerRoute::IGNORE); + FilterConfigPerRoute per_route_config_(settings); + + 
EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) + .WillOnce(Return(false)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, + perFilterConfig(HttpFilterNames::get().RateLimit)) + .WillOnce(Return(&per_route_config_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(0); + + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_EQ( + 0, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + TEST_F(HttpRateLimitFilterTest, ConfigValueTest) { std::string stage_filter_config = R"EOF( { diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index b7fcd3ebcbb7..89b73bbef081 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -23,6 +23,20 @@ name: rbac - any: true )EOF"; +const std::string RBAC_CONFIG_WITH_DENY_ACTION = R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.rbac.v2.RBAC + rules: + action: DENY + policies: + "deny policy": + permissions: + - header: { name: ":method", exact_match: "GET" } + principals: + - any: true +)EOF"; + const std::string RBAC_CONFIG_WITH_PREFIX_MATCH = R"EOF( name: rbac typed_config: @@ -78,6 +92,33 @@ name: rbac - any: true )EOF"; +const std::string RBAC_CONFIG_HEADER_MATCH_CONDITION = R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + policies: + foo: + permissions: + - any: true + principals: + - any: true + condition: + call_expr: + function: _==_ + args: + - select_expr: + operand: + select_expr: + operand: + ident_expr: + name: request + field: headers + field: xxx + - const_expr: + string_value: {} +)EOF"; + using RBACIntegrationTest = HttpProtocolIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest, @@ -85,6 +126,7 @@ INSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest, HttpProtocolIntegrationTest::protocolTestParamsToString); TEST_P(RBACIntegrationTest, Allowed) { + useAccessLog("%RESPONSE_CODE_DETAILS%"); config_helper_.addFilter(RBAC_CONFIG); initialize(); @@ -105,9 +147,11 @@ TEST_P(RBACIntegrationTest, Allowed) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_THAT(waitForAccessLog(access_log_name_), testing::HasSubstr("via_upstream")); } TEST_P(RBACIntegrationTest, Denied) { + useAccessLog("%RESPONSE_CODE_DETAILS%"); config_helper_.addFilter(RBAC_CONFIG); initialize(); @@ -125,6 +169,32 @@ 
TEST_P(RBACIntegrationTest, Denied) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); EXPECT_EQ("403", response->headers().getStatusValue()); + EXPECT_THAT(waitForAccessLog(access_log_name_), + testing::HasSubstr("rbac_access_denied_matched_policy[none]")); +} + +TEST_P(RBACIntegrationTest, DeniedWithDenyAction) { + useAccessLog("%RESPONSE_CODE_DETAILS%"); + config_helper_.addFilter(RBAC_CONFIG_WITH_DENY_ACTION); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); + // Note the whitespace in the policy id is replaced by '_'. + EXPECT_THAT(waitForAccessLog(access_log_name_), + testing::HasSubstr("rbac_access_denied_matched_policy[deny_policy]")); } TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { @@ -314,5 +384,78 @@ TEST_P(RBACIntegrationTest, LogConnectionAllow) { EXPECT_EQ("200", response->headers().getStatusValue()); } +// Basic CEL match on a header value. +TEST_P(RBACIntegrationTest, HeaderMatchCondition) { + config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy")); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, + {":path", "/path"}, + {":scheme", "http"}, + {":authority", "host"}, + {"xxx", "yyy"}, + }, + 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// CEL match on a header value in which the header is a duplicate. Verifies we handle string +// copying correctly inside the CEL expression. +TEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderNoMatch) { + config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy")); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, + {":path", "/path"}, + {":scheme", "http"}, + {":authority", "host"}, + {"xxx", "yyy"}, + {"xxx", "zzz"}, + }, + 1024); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); +} + +// CEL match on a header value in which the header is a duplicate. Verifies we handle string +// copying correctly inside the CEL expression. 
+TEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderMatch) { + config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy,zzz")); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, + {":path", "/path"}, + {":scheme", "http"}, + {":authority", "host"}, + {"xxx", "yyy"}, + {"xxx", "zzz"}, + }, + 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/rbac/rbac_filter_test.cc b/test/extensions/filters/http/rbac/rbac_filter_test.cc index 519a49126bbb..018ebd9319b8 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_test.cc @@ -186,7 +186,7 @@ TEST_F(RoleBasedAccessControlFilterTest, Denied) { auto filter_meta = req_info_.dynamicMetadata().filter_metadata().at(HttpFilterNames::get().Rbac); EXPECT_EQ("allowed", filter_meta.fields().at("shadow_engine_result").string_value()); EXPECT_EQ("bar", filter_meta.fields().at("shadow_effective_policy_id").string_value()); - EXPECT_EQ("rbac_access_denied", callbacks_.details_); + EXPECT_EQ("rbac_access_denied_matched_policy[none]", callbacks_.details()); checkAccessLogMetadata(LogResult::Undecided); } diff --git a/test/extensions/filters/http/router/auto_sni_integration_test.cc b/test/extensions/filters/http/router/auto_sni_integration_test.cc index 5cd543805b5d..17cd646d9270 100644 --- a/test/extensions/filters/http/router/auto_sni_integration_test.cc +++ b/test/extensions/filters/http/router/auto_sni_integration_test.cc @@ -38,8 +38,7 @@ class AutoSniIntegrationTest : public testing::TestWithParambody() = std::make_unique(body); + msg->body().add(body); popPendingCallback()->onSuccess(request_, std::move(msg)); } @@ -412,6 +412,19 @@ TEST_F(SquashFilterTest, InvalidJsonForGetAttachment) { completeRequest("200", "This is not a JSON object"); } +TEST_F(SquashFilterTest, InvalidResponseWithNoBody) { + doDownstreamRequest(); + // Expect the get attachment request + expectAsyncClientSend(); + completeCreateRequest(); + + auto retry_timer = new NiceMock(&filter_callbacks_.dispatcher_); + EXPECT_CALL(*retry_timer, enableTimer(config_->attachmentPollPeriod(), _)); + Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-length", "0"}}})); + popPendingCallback()->onSuccess(request_, std::move(msg)); +} + TEST_F(SquashFilterTest, DestroyedInFlight) { doDownstreamRequest(); diff --git a/test/extensions/filters/http/wasm/BUILD b/test/extensions/filters/http/wasm/BUILD new file mode 100644 index 000000000000..c36e60799cf6 --- /dev/null +++ b/test/extensions/filters/http/wasm/BUILD @@ -0,0 +1,62 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//bazel:envoy_select.bzl", + "envoy_select_wasm", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "wasm_filter_test", + size = "enormous", # For WAVM without precompilation. TODO: add precompilation. 
+ srcs = ["wasm_filter_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/filters/http/wasm/test_data:async_call_rust.wasm", + "//test/extensions/filters/http/wasm/test_data:body_rust.wasm", + "//test/extensions/filters/http/wasm/test_data:headers_rust.wasm", + "//test/extensions/filters/http/wasm/test_data:metadata_rust.wasm", + "//test/extensions/filters/http/wasm/test_data:shared_data_rust.wasm", + "//test/extensions/filters/http/wasm/test_data:shared_queue_rust.wasm", + "//test/extensions/filters/http/wasm/test_data:test_cpp.wasm", + ]), + extension_name = "envoy.filters.http.wasm", + deps = [ + "//source/common/http:message_lib", + "//source/extensions/filters/http/wasm:wasm_filter_lib", + "//test/extensions/filters/http/wasm/test_data:test_cpp_plugin", + "//test/mocks/network:connection_mocks", + "//test/mocks/router:router_mocks", + "//test/test_common:wasm_lib", + ], +) + +envoy_extension_cc_test( + name = "config_test", + size = "enormous", # For WAVM without precompilation. TODO: add precompilation. + srcs = ["config_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/filters/http/wasm/test_data:test_cpp.wasm", + ]), + extension_name = "envoy.filters.http.wasm", + deps = [ + "//source/common/common:base64_lib", + "//source/common/common:hex_lib", + "//source/common/crypto:utility_lib", + "//source/common/http:message_lib", + "//source/extensions/common/crypto:utility_lib", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/filters/http/wasm:config", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + "@envoy_api//envoy/extensions/filters/http/wasm/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/http/wasm/config_test.cc b/test/extensions/filters/http/wasm/config_test.cc new file mode 100644 index 000000000000..6b41185f7913 --- /dev/null +++ b/test/extensions/filters/http/wasm/config_test.cc @@ -0,0 +1,833 @@ +#include + +#include "envoy/extensions/filters/http/wasm/v3/wasm.pb.validate.h" + +#include "common/common/base64.h" +#include "common/common/hex.h" +#include "common/crypto/utility.h" +#include "common/http/message_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/filters/http/wasm/config.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { + +using Common::Wasm::WasmException; + +namespace HttpFilters { +namespace Wasm { + +#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM) +class WasmFilterConfigTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam { +protected: + WasmFilterConfigTest() : api_(Api::createApiForTest(stats_store_)) { + ON_CALL(context_, api()).WillByDefault(ReturnRef(*api_)); + ON_CALL(context_, scope()).WillByDefault(ReturnRef(stats_store_)); + ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_)); + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager_)); + ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + } + + void SetUp() override { Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); } + + void initializeForRemote() { + retry_timer_ = new Event::MockTimer(); + + EXPECT_CALL(dispatcher_, 
createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) { + retry_timer_cb_ = timer_cb; + return retry_timer_; + })); + } + + NiceMock context_; + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + envoy::config::core::v3::Metadata listener_metadata_; + Init::ManagerImpl init_manager_{"init_manager"}; + NiceMock cluster_manager_; + Init::ExpectableWatcherImpl init_watcher_; + NiceMock dispatcher_; + Event::MockTimer* retry_timer_; + Event::TimerCb retry_timer_cb_; +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8" +#endif +#if defined(ENVOY_WASM_V8) && defined(ENVOY_WASM_WAVM) + , +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm" +#endif +); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmFilterConfigTest, testing_values); + +TEST_P(WasmFilterConfigTest, JsonLoadFromFileWasm) { + const std::string json = TestEnvironment::substitute(absl::StrCat(R"EOF( + { + "config" : { + "vm_config": { + "runtime": "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(", + "configuration": { + "@type": "type.googleapis.com/google.protobuf.StringValue", + "value": "some configuration" + }, + "code": { + "local": { + "filename": "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm" + } + }, + }}} + )EOF")); + + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromJson(json, proto_config); + WasmFilterConfig factory; + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasm) { + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: "some configuration" + code: + local: + filename: "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm" + )EOF")); + + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasmFailOpenOk) { + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + fail_open: true + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: "some configuration" + code: + local: + filename: "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm" + )EOF")); + + 
envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); +} + +TEST_P(WasmFilterConfigTest, YamlLoadInlineWasm) { + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + EXPECT_FALSE(code.empty()); + const std::string yaml = absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + local: { inline_bytes: ")EOF", + Base64::encode(code.data(), code.size()), R"EOF(" } + )EOF"); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); +} + +TEST_P(WasmFilterConfigTest, YamlLoadInlineBadCode) { + const std::string yaml = absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + local: + inline_string: "bad code" + )EOF"); + + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context_), + WasmException, "Unable to create Wasm HTTP filter "); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWasm) { + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillOnce(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ 
+ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return &request; + })); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWasmFailOnUncachedThenSucceed) { + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + nack_on_code_cache_miss: true + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillOnce(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return &request; + })); + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context_), + WasmException, "Unable to create Wasm HTTP filter "); + + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + Init::ManagerImpl init_manager2{"init_manager2"}; + Init::ExpectableWatcherImpl init_watcher2; + + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager2)); + + auto cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + + EXPECT_CALL(init_watcher2, ready()); + init_manager2.initialize(init_watcher2); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + + cb(filter_callback); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWasmFailCachedThenSucceed) { + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + const std::string sha256 = Hex::encode( + 
Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + nack_on_code_cache_miss: true + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + retry_policy: + num_retries: 0 + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillRepeatedly(ReturnRef(cluster_manager_.async_client_)); + + Http::AsyncClient::Callbacks* async_callbacks = nullptr; + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillRepeatedly( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + // Store the callback the first time through for delayed call. + if (!async_callbacks) { + async_callbacks = &callbacks; + } else { + // Subsequent send()s happen inline. + callbacks.onSuccess( + request, + Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); + } + return &request; + })); + + // Case 1: fail and fetch in the background, got 503, cache failure. + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context_), + WasmException, "Unable to create Wasm HTTP filter "); + // Fail a second time because we are in-progress. + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context_), + WasmException, "Unable to create Wasm HTTP filter "); + async_callbacks->onSuccess( + request, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); + + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + // Case 2: fail immediately with negatively cached result. + Init::ManagerImpl init_manager2{"init_manager2"}; + Init::ExpectableWatcherImpl init_watcher2; + + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager2)); + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context_), + WasmException, "Unable to create Wasm HTTP filter "); + + EXPECT_CALL(init_watcher2, ready()); + init_manager2.initialize(init_watcher2); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + // Wait for negative cache to timeout. + ::Envoy::Extensions::Common::Wasm::setTimeOffsetForCodeCacheForTesting(std::chrono::seconds(10)); + + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillRepeatedly( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return &request; + })); + + // Case 3: fail and fetch in the background, got 200, cache success. 
+ Init::ManagerImpl init_manager3{"init_manager3"}; + Init::ExpectableWatcherImpl init_watcher3; + + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager3)); + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context_), + WasmException, "Unable to create Wasm HTTP filter "); + + EXPECT_CALL(init_watcher3, ready()); + init_manager3.initialize(init_watcher3); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + // Case 4: success from cache. + Init::ManagerImpl init_manager4{"init_manager4"}; + Init::ExpectableWatcherImpl init_watcher4; + + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager4)); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + + EXPECT_CALL(init_watcher4, ready()); + init_manager4.initialize(init_watcher4); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + + cb(filter_callback); + + // Wait for cache to timeout. + ::Envoy::Extensions::Common::Wasm::setTimeOffsetForCodeCacheForTesting( + std::chrono::seconds(10 + 24 * 3600)); + + // Case 5: flush the stale cache. + const std::string sha256_2 = sha256 + "new"; + const std::string yaml2 = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + nack_on_code_cache_miss: true + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + retry_policy: + num_retries: 0 + sha256: )EOF", + sha256_2)); + + envoy::extensions::filters::http::wasm::v3::Wasm proto_config2; + TestUtility::loadFromYaml(yaml2, proto_config2); + + Init::ManagerImpl init_manager5{"init_manager4"}; + Init::ExpectableWatcherImpl init_watcher5; + + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager5)); + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config2, "stats", context_), + WasmException, "Unable to create Wasm HTTP filter "); + + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + // Case 6: fail and fetch in the background, got 200, cache success. + Init::ManagerImpl init_manager6{"init_manager6"}; + Init::ExpectableWatcherImpl init_watcher6; + + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager6)); + + factory.createFilterFactoryFromProto(proto_config, "stats", context_); + + EXPECT_CALL(init_watcher6, ready()); + init_manager6.initialize(init_watcher6); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + // Case 7: success from cache. 
+ Init::ManagerImpl init_manager7{"init_manager7"}; + Init::ExpectableWatcherImpl init_watcher7; + + EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager7)); + + Http::FilterFactoryCb cb2 = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + + EXPECT_CALL(init_watcher7, ready()); + init_manager7.initialize(init_watcher7); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + + Http::MockFilterChainFactoryCallbacks filter_callback2; + EXPECT_CALL(filter_callback2, addStreamFilter(_)); + EXPECT_CALL(filter_callback2, addAccessLogHandler(_)); + + cb2(filter_callback2); + + dispatcher_.clearDeferredDeleteList(); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteConnectionReset) { + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + retry_policy: + num_retries: 0 + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillOnce(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + callbacks.onFailure(request, Envoy::Http::AsyncClient::FailureReason::Reset); + return &request; + })); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessWith503) { + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + retry_policy: + num_retries: 0 + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillOnce(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + callbacks.onSuccess( + request, + 
Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); + return &request; + })); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessIncorrectSha256) { + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + retry_policy: + num_retries: 0 + sha256: xxxx )EOF")); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillOnce(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return &request; + })); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteMultipleRetries) { + initializeForRemote(); + const std::string code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + retry_policy: + num_retries: 3 + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + int num_retries = 3; + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillRepeatedly(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .Times(num_retries) + .WillRepeatedly( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new 
Http::TestResponseHeaderMapImpl{{":status", "503"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return &request; + })); + + EXPECT_CALL(*retry_timer_, enableTimer(_, _)) + .WillRepeatedly(Invoke([&](const std::chrono::milliseconds&, const ScopeTrackedObject*) { + if (--num_retries == 0) { + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce(Invoke( + [&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return &request; + })); + } + + retry_timer_cb_(); + })); + EXPECT_CALL(*retry_timer_, disableTimer()); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessBadcode) { + const std::string code = "foo"; + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillOnce(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return nullptr; + })); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + + // Fail closed. 
+ Http::MockFilterChainFactoryCallbacks filter_callback; + Extensions::Common::Wasm::ContextSharedPtr context; + EXPECT_CALL(filter_callback, addStreamFilter(_)) + .WillOnce(Invoke([&context](Http::StreamFilterSharedPtr filter) { + context = std::static_pointer_cast(filter); + })); + EXPECT_CALL(filter_callback, addAccessLogHandler(_)); + cb(filter_callback); + EXPECT_EQ(context->wasm(), nullptr); + EXPECT_TRUE(context->isFailed()); + + Http::MockStreamDecoderFilterCallbacks decoder_callbacks; + NiceMock stream_info; + + context->setDecoderFilterCallbacks(decoder_callbacks); + EXPECT_CALL(decoder_callbacks, streamInfo()).WillRepeatedly(ReturnRef(stream_info)); + EXPECT_CALL(stream_info, setResponseCodeDetails("wasm_fail_stream")); + EXPECT_CALL(decoder_callbacks, resetStream()); + + EXPECT_EQ(context->onRequestHeaders(10, false), proxy_wasm::FilterHeadersStatus::StopIteration); +} + +TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessBadcodeFailOpen) { + const std::string code = "foo"; + const std::string sha256 = Hex::encode( + Envoy::Common::Crypto::UtilitySingleton::get().getSha256Digest(Buffer::OwnedImpl(code))); + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + fail_open: true + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + remote: + http_uri: + uri: https://example.com/data + cluster: cluster_1 + timeout: 5s + sha256: )EOF", + sha256)); + envoy::extensions::filters::http::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); + + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) + .WillOnce(ReturnRef(cluster_manager_.async_client_)); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + Http::ResponseMessagePtr response( + new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response->body().add(code); + callbacks.onSuccess(request, std::move(response)); + return nullptr; + })); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + Http::MockFilterChainFactoryCallbacks filter_callback; + // The filter is not registered. 
+ cb(filter_callback); +} +#endif + +} // namespace Wasm +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/wasm/test_data/BUILD b/test/extensions/filters/http/wasm/test_data/BUILD new file mode 100644 index 000000000000..81741d0bb37c --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/BUILD @@ -0,0 +1,145 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) +load("//bazel/wasm:wasm.bzl", "envoy_wasm_cc_binary", "wasm_rust_binary") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +wasm_rust_binary( + name = "async_call_rust.wasm", + srcs = ["async_call_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + "//bazel/external/cargo:proxy_wasm", + ], +) + +wasm_rust_binary( + name = "body_rust.wasm", + srcs = ["body_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + "//bazel/external/cargo:proxy_wasm", + ], +) + +wasm_rust_binary( + name = "headers_rust.wasm", + srcs = ["headers_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + "//bazel/external/cargo:proxy_wasm", + ], +) + +wasm_rust_binary( + name = "metadata_rust.wasm", + srcs = ["metadata_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + "//bazel/external/cargo:proxy_wasm", + ], +) + +wasm_rust_binary( + name = "shared_data_rust.wasm", + srcs = ["shared_data_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + "//bazel/external/cargo:proxy_wasm", + ], +) + +wasm_rust_binary( + name = "shared_queue_rust.wasm", + srcs = ["shared_queue_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + "//bazel/external/cargo:proxy_wasm", + ], +) + +envoy_cc_library( + name = "test_cpp_plugin", + srcs = [ + "test_async_call_cpp.cc", + "test_body_cpp.cc", + "test_cpp.cc", + "test_cpp_null_plugin.cc", + "test_grpc_call_cpp.cc", + "test_grpc_stream_cpp.cc", + "test_shared_data_cpp.cc", + "test_shared_queue_cpp.cc", + ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + ":test_cc_proto", + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/common/wasm:well_known_names", + "//source/extensions/common/wasm/ext:envoy_null_plugin", + "@proxy_wasm_cpp_sdk//contrib:contrib_lib", + ], +) + +envoy_wasm_cc_binary( + name = "test_cpp.wasm", + srcs = [ + "test_async_call_cpp.cc", + "test_body_cpp.cc", + "test_cpp.cc", + "test_grpc_call_cpp.cc", + "test_grpc_stream_cpp.cc", + "test_shared_data_cpp.cc", + "test_shared_queue_cpp.cc", + ], + deps = [ + ":test_cc_proto", + "//source/extensions/common/wasm/ext:declare_property_cc_proto", + "//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_lib", + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_lite", + "@proxy_wasm_cpp_sdk//contrib:contrib_lib", + ], +) + +# NB: this target is compiled both to native code and to Wasm. Hence the generic rule. +proto_library( + name = "test_proto", + srcs = ["test.proto"], + deps = [ + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:timestamp_proto", + ], +) + +# NB: this target is compiled both to native code and to Wasm. Hence the generic rule. 
+cc_proto_library( + name = "test_cc_proto", + deps = [":test_proto"], +) + +# TODO: FIXME +# +#filegroup( +# name = "wavm_binary", +# srcs = ["//bazel/foreign_cc:wavm"], +# output_group = "wavm", +#) +# +#genrule( +# name = "test_cpp_wavm_compile", +# srcs = [":test_cpp.wasm"], +# outs = ["test_cpp.wavm_compiled.wasm"], +# cmd = "./$(location wavm_binary) compile $(location test_cpp.wasm) $(location test_cpp.wavm_compiled.wasm)", +# tools = [ +# ":test_cpp.wasm", +# ":wavm_binary", +# ], +#) diff --git a/test/extensions/filters/http/wasm/test_data/async_call_rust/Cargo.toml b/test/extensions/filters/http/wasm/test_data/async_call_rust/Cargo.toml new file mode 100644 index 000000000000..b5c503a6356c --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/async_call_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm async call test" +name = "async_call_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git a/test/extensions/filters/http/wasm/test_data/async_call_rust/src/lib.rs b/test/extensions/filters/http/wasm/test_data/async_call_rust/src/lib.rs new file mode 100644 index 000000000000..0cb7833c4437 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/async_call_rust/src/lib.rs @@ -0,0 +1,45 @@ +use log::{debug, info, warn}; +use proxy_wasm::traits::{Context, HttpContext}; +use proxy_wasm::types::*; +use std::time::Duration; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +} + +struct TestStream; + +impl HttpContext for TestStream { + fn on_http_request_headers(&mut self, _: usize) -> Action { + self.dispatch_http_call( + "cluster", + vec![(":method", "POST"), (":path", "/"), (":authority", "foo")], + Some(b"hello world"), + vec![("trail", "cow")], + Duration::from_secs(5), + ) + .unwrap(); + info!("onRequestHeaders"); + Action::Pause + } +} + +impl Context for TestStream { + fn on_http_call_response(&mut self, _: u32, _: usize, body_size: usize, _: usize) { + if body_size == 0 { + info!("async_call failed"); + return; + } + for (name, value) in &self.get_http_call_response_headers() { + info!("{} -> {}", name, value); + } + if let Some(body) = self.get_http_call_response_body(0, body_size) { + debug!("{}", String::from_utf8(body).unwrap()); + } + for (name, value) in &self.get_http_call_response_trailers() { + warn!("{} -> {}", name, value); + } + } +} diff --git a/test/extensions/filters/http/wasm/test_data/body_rust/Cargo.toml b/test/extensions/filters/http/wasm/test_data/body_rust/Cargo.toml new file mode 100644 index 000000000000..bded4bb904f9 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/body_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm HTTP body test" +name = "body_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git 
a/test/extensions/filters/http/wasm/test_data/body_rust/src/lib.rs b/test/extensions/filters/http/wasm/test_data/body_rust/src/lib.rs new file mode 100644 index 000000000000..4c577e64695f --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/body_rust/src/lib.rs @@ -0,0 +1,208 @@ +use log::error; +use proxy_wasm::traits::{Context, HttpContext}; +use proxy_wasm::types::*; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|_, _| -> Box { + Box::new(TestStream { + test: None, + body_chunks: 0, + }) + }); +} + +struct TestStream { + test: Option, + body_chunks: usize, +} + +impl HttpContext for TestStream { + fn on_http_request_headers(&mut self, _: usize) -> Action { + self.test = self.get_http_request_header("x-test-operation"); + self.body_chunks = 0; + Action::Continue + } + + fn on_http_request_body(&mut self, body_size: usize, end_of_stream: bool) -> Action { + match self.test.as_deref() { + Some("ReadBody") => { + let body = self.get_http_request_body(0, body_size).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + Action::Continue + } + Some("PrependAndAppendToBody") => { + self.set_http_request_body(0, 0, b"prepend."); + self.set_http_request_body(0xffffffff, 0, b".append"); + let body = self.get_http_request_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + Action::Continue + } + Some("ReplaceBody") => { + self.set_http_request_body(0, 0xffffffff, b"replace"); + let body = self.get_http_request_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + Action::Continue + } + Some("RemoveBody") => { + self.set_http_request_body(0, 0xffffffff, b""); + if let Some(body) = self.get_http_request_body(0, 0xffffffff) { + error!("onBody {}", String::from_utf8(body).unwrap()); + } else { + error!("onBody "); + } + Action::Continue + } + Some("BufferBody") => { + let body = self.get_http_request_body(0, body_size).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("PrependAndAppendToBufferedBody") => { + self.set_http_request_body(0, 0, b"prepend."); + self.set_http_request_body(0xffffffff, 0, b".append"); + let body = self.get_http_request_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("ReplaceBufferedBody") => { + self.set_http_request_body(0, 0xffffffff, b"replace"); + let body = self.get_http_request_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("RemoveBufferedBody") => { + self.set_http_request_body(0, 0xffffffff, b""); + if let Some(body) = self.get_http_request_body(0, 0xffffffff) { + error!("onBody {}", String::from_utf8(body).unwrap()); + } else { + error!("onBody "); + } + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("BufferTwoBodies") => { + if let Some(body) = self.get_http_request_body(0, body_size) { + error!("onBody {}", String::from_utf8(body).unwrap()); + } + self.body_chunks += 1; + if end_of_stream || self.body_chunks > 2 { + Action::Continue + } else { + Action::Pause + } + } + _ => Action::Continue, + } + } + + fn on_http_response_headers(&mut self, _: usize) -> Action { + self.test = self.get_http_response_header("x-test-operation"); + 
Action::Continue + } + + fn on_http_response_body(&mut self, body_size: usize, end_of_stream: bool) -> Action { + match self.test.as_deref() { + Some("ReadBody") => { + let body = self.get_http_response_body(0, body_size).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + Action::Continue + } + Some("PrependAndAppendToBody") => { + self.set_http_response_body(0, 0, b"prepend."); + self.set_http_response_body(0xffffffff, 0, b".append"); + let body = self.get_http_response_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + Action::Continue + } + Some("ReplaceBody") => { + self.set_http_response_body(0, 0xffffffff, b"replace"); + let body = self.get_http_response_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + Action::Continue + } + Some("RemoveBody") => { + self.set_http_response_body(0, 0xffffffff, b""); + if let Some(body) = self.get_http_response_body(0, 0xffffffff) { + error!("onBody {}", String::from_utf8(body).unwrap()); + } else { + error!("onBody "); + } + Action::Continue + } + Some("BufferBody") => { + let body = self.get_http_response_body(0, body_size).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("PrependAndAppendToBufferedBody") => { + self.set_http_response_body(0, 0, b"prepend."); + self.set_http_response_body(0xffffffff, 0, b".append"); + let body = self.get_http_response_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("ReplaceBufferedBody") => { + self.set_http_response_body(0, 0xffffffff, b"replace"); + let body = self.get_http_response_body(0, 0xffffffff).unwrap(); + error!("onBody {}", String::from_utf8(body).unwrap()); + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("RemoveBufferedBody") => { + self.set_http_response_body(0, 0xffffffff, b""); + if let Some(body) = self.get_http_response_body(0, 0xffffffff) { + error!("onBody {}", String::from_utf8(body).unwrap()); + } else { + error!("onBody "); + } + if end_of_stream { + Action::Continue + } else { + Action::Pause + } + } + Some("BufferTwoBodies") => { + if let Some(body) = self.get_http_response_body(0, body_size) { + error!("onBody {}", String::from_utf8(body).unwrap()); + } + self.body_chunks += 1; + if end_of_stream || self.body_chunks > 2 { + Action::Continue + } else { + Action::Pause + } + } + _ => Action::Continue, + } + } +} + +impl Context for TestStream {} diff --git a/test/extensions/filters/http/wasm/test_data/headers_rust/Cargo.toml b/test/extensions/filters/http/wasm/test_data/headers_rust/Cargo.toml new file mode 100644 index 000000000000..4d03b9e6358a --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/headers_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm HTTP headers test" +name = "headers_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git a/test/extensions/filters/http/wasm/test_data/headers_rust/src/lib.rs b/test/extensions/filters/http/wasm/test_data/headers_rust/src/lib.rs 
new file mode 100644 index 000000000000..6d9fc94a7a9c --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/headers_rust/src/lib.rs @@ -0,0 +1,55 @@ +use log::{debug, error, info, warn}; +use proxy_wasm::traits::{Context, HttpContext}; +use proxy_wasm::types::*; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|context_id, _| -> Box { + Box::new(TestStream { context_id }) + }); +} + +struct TestStream { + context_id: u32, +} + +impl HttpContext for TestStream { + fn on_http_request_headers(&mut self, _: usize) -> Action { + debug!("onRequestHeaders {} headers", self.context_id); + if let Some(path) = self.get_http_request_header(":path") { + info!("header path {}", path); + } + let action = match self.get_http_request_header("server").as_deref() { + Some("envoy-wasm-pause") => Action::Pause, + _ => Action::Continue, + }; + self.set_http_request_header("newheader", Some("newheadervalue")); + self.set_http_request_header("server", Some("envoy-wasm")); + action + } + + fn on_http_request_body(&mut self, body_size: usize, _: bool) -> Action { + if let Some(body) = self.get_http_request_body(0, body_size) { + error!("onBody {}", String::from_utf8(body).unwrap()); + } + Action::Continue + } + + fn on_http_response_trailers(&mut self, _: usize) -> Action { + Action::Pause + } + + fn on_log(&mut self) { + if let Some(path) = self.get_http_request_header(":path") { + warn!("onLog {} {}", self.context_id, path); + } + } +} + +impl Context for TestStream { + fn on_done(&mut self) -> bool { + warn!("onDone {}", self.context_id); + true + } +} diff --git a/test/extensions/filters/http/wasm/test_data/metadata_rust/Cargo.toml b/test/extensions/filters/http/wasm/test_data/metadata_rust/Cargo.toml new file mode 100644 index 000000000000..e070d6454be3 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/metadata_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm metadata test" +name = "metadata_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git a/test/extensions/filters/http/wasm/test_data/metadata_rust/src/lib.rs b/test/extensions/filters/http/wasm/test_data/metadata_rust/src/lib.rs new file mode 100644 index 000000000000..3b708b2d3599 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/metadata_rust/src/lib.rs @@ -0,0 +1,93 @@ +use log::{debug, error, info, trace}; +use proxy_wasm::traits::{Context, HttpContext, RootContext}; +use proxy_wasm::types::*; +use std::convert::TryFrom; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { Box::new(TestRoot) }); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +} + +struct TestRoot; + +impl Context for TestRoot {} + +impl RootContext for TestRoot { + fn on_tick(&mut self) { + if let Some(value) = self.get_property(vec!["node", "metadata", "wasm_node_get_key"]) { + debug!("onTick {}", String::from_utf8(value).unwrap()); + } else { + debug!("missing node metadata"); + } + } +} + +struct TestStream; + +impl Context for TestStream {} + +impl HttpContext for TestStream { + fn on_http_request_headers(&mut self, _: usize) -> 
Action { + if self + .get_property(vec!["node", "metadata", "wasm_node_get_key"]) + .is_none() + { + debug!("missing node metadata"); + } + + self.set_property( + vec!["wasm_request_set_key"], + Some(b"wasm_request_set_value"), + ); + + if let Some(path) = self.get_http_request_header(":path") { + info!("header path {}", path); + } + self.set_http_request_header("newheader", Some("newheadervalue")); + self.set_http_request_header("server", Some("envoy-wasm")); + + if let Some(value) = self.get_property(vec!["request", "duration"]) { + info!( + "duration is {}", + u64::from_le_bytes(<[u8; 8]>::try_from(&value[0..8]).unwrap()) + ); + } else { + error!("failed to get request duration"); + } + Action::Continue + } + + fn on_http_request_body(&mut self, _: usize, _: bool) -> Action { + if let Some(value) = self.get_property(vec!["node", "metadata", "wasm_node_get_key"]) { + error!("onBody {}", String::from_utf8(value).unwrap()); + } else { + debug!("missing node metadata"); + } + let key1 = self.get_property(vec![ + "metadata", + "filter_metadata", + "envoy.filters.http.wasm", + "wasm_request_get_key", + ]); + if key1.is_none() { + debug!("missing request metadata"); + } + let key2 = self.get_property(vec![ + "metadata", + "filter_metadata", + "envoy.filters.http.wasm", + "wasm_request_get_key", + ]); + if key2.is_none() { + debug!("missing request metadata"); + } + trace!( + "Struct {} {}", + String::from_utf8(key1.unwrap()).unwrap(), + String::from_utf8(key2.unwrap()).unwrap() + ); + Action::Continue + } +} diff --git a/test/extensions/filters/http/wasm/test_data/shared_data_rust/Cargo.toml b/test/extensions/filters/http/wasm/test_data/shared_data_rust/Cargo.toml new file mode 100644 index 000000000000..795905cb03e7 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/shared_data_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm shared key-value store test" +name = "shared_data_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git a/test/extensions/filters/http/wasm/test_data/shared_data_rust/src/lib.rs b/test/extensions/filters/http/wasm/test_data/shared_data_rust/src/lib.rs new file mode 100644 index 000000000000..8a19c684abc5 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/shared_data_rust/src/lib.rs @@ -0,0 +1,49 @@ +use log::{debug, info, warn}; +use proxy_wasm::traits::{Context, RootContext}; +use proxy_wasm::types::*; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { Box::new(TestRoot) }); +} + +struct TestRoot; + +impl Context for TestRoot {} + +impl RootContext for TestRoot { + fn on_tick(&mut self) { + if self.get_shared_data("shared_data_key_bad") == (None, None) { + debug!("get of bad key not found"); + } + self.set_shared_data("shared_data_key1", Some(b"shared_data_value0"), None) + .unwrap(); + self.set_shared_data("shared_data_key1", Some(b"shared_data_value1"), None) + .unwrap(); + self.set_shared_data("shared_data_key2", Some(b"shared_data_value2"), None) + .unwrap(); + if let (_, Some(cas)) = self.get_shared_data("shared_data_key2") { + match self.set_shared_data( + "shared_data_key2", + Some(b"shared_data_value3"), + 
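+                // Passing cas + 1 instead of the current cas on purpose; the host is expected
+                // to reject this write with Status::CasMismatch.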
Some(cas + 1), + ) { + Err(Status::CasMismatch) => info!("set CasMismatch"), + _ => panic!(), + }; + } + } + + fn on_queue_ready(&mut self, _: u32) { + if self.get_shared_data("shared_data_key_bad") == (None, None) { + debug!("second get of bad key not found"); + } + if let (Some(value), _) = self.get_shared_data("shared_data_key1") { + debug!("get 1 {}", String::from_utf8(value).unwrap()); + } + if let (Some(value), _) = self.get_shared_data("shared_data_key2") { + warn!("get 2 {}", String::from_utf8(value).unwrap()); + } + } +} diff --git a/test/extensions/filters/http/wasm/test_data/shared_queue_rust/Cargo.toml b/test/extensions/filters/http/wasm/test_data/shared_queue_rust/Cargo.toml new file mode 100644 index 000000000000..0ba3e9070c32 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/shared_queue_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm shared queue test" +name = "shared_queue_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git a/test/extensions/filters/http/wasm/test_data/shared_queue_rust/src/lib.rs b/test/extensions/filters/http/wasm/test_data/shared_queue_rust/src/lib.rs new file mode 100644 index 000000000000..1269f37f9cff --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/shared_queue_rust/src/lib.rs @@ -0,0 +1,61 @@ +use log::{debug, info, warn}; +use proxy_wasm::traits::{Context, HttpContext, RootContext}; +use proxy_wasm::types::*; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { + Box::new(TestRoot { queue_id: None }) + }); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +} + +struct TestRoot { + queue_id: Option, +} + +impl Context for TestRoot {} + +impl RootContext for TestRoot { + fn on_vm_start(&mut self, _: usize) -> bool { + self.queue_id = Some(self.register_shared_queue("my_shared_queue")); + true + } + + fn on_queue_ready(&mut self, queue_id: u32) { + if Some(queue_id) == self.queue_id { + info!("onQueueReady"); + match self.dequeue_shared_queue(9999999 /* bad queue_id */) { + Err(Status::NotFound) => warn!("onQueueReady bad token not found"), + _ => (), + } + if let Some(value) = self.dequeue_shared_queue(queue_id).unwrap() { + debug!("data {} Ok", String::from_utf8(value).unwrap()); + } + if self.dequeue_shared_queue(queue_id).unwrap().is_none() { + warn!("onQueueReady extra data not found"); + } + } + } +} + +struct TestStream; + +impl Context for TestStream {} + +impl HttpContext for TestStream { + fn on_http_request_headers(&mut self, _: usize) -> Action { + if self + .resolve_shared_queue("vm_id", "bad_shared_queue") + .is_none() + { + warn!("onRequestHeaders not found bad_shared_queue"); + } + if let Some(queue_id) = self.resolve_shared_queue("vm_id", "my_shared_queue") { + self.enqueue_shared_queue(queue_id, Some(b"data1")).unwrap(); + warn!("onRequestHeaders enqueue Ok"); + } + Action::Continue + } +} diff --git a/test/extensions/filters/http/wasm/test_data/test.proto b/test/extensions/filters/http/wasm/test_data/test.proto new file mode 100644 index 000000000000..1b055c7ca760 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test.proto @@ -0,0 +1,18 
@@ +syntax = "proto3"; + +package wasmtest; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +message TestProto { + uint64 i = 1; + double j = 2; + bool k = 3; + string s = 4; + google.protobuf.Timestamp t = 5; + google.protobuf.Any a = 6; + TestProto b = 7; + repeated string l = 8; + map m = 9; +}; diff --git a/test/extensions/filters/http/wasm/test_data/test_async_call_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_async_call_cpp.cc new file mode 100644 index 000000000000..8075ef63b578 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_async_call_cpp.cc @@ -0,0 +1,76 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_lite.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(HttpWasmTestCpp) + +class AsyncCallContext : public Context { +public: + explicit AsyncCallContext(uint32_t id, RootContext* root) : Context(id, root) {} + + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; +}; + +class AsyncCallRootContext : public RootContext { +public: + explicit AsyncCallRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {} +}; + +static RegisterContextFactory register_AsyncCallContext(CONTEXT_FACTORY(AsyncCallContext), + ROOT_FACTORY(AsyncCallRootContext), + "async_call"); + +FilterHeadersStatus AsyncCallContext::onRequestHeaders(uint32_t, bool end_of_stream) { + auto context_id = id(); + auto callback = [context_id](uint32_t, size_t body_size, uint32_t) { + if (body_size == 0) { + logInfo("async_call failed"); + return; + } + auto response_headers = getHeaderMapPairs(WasmHeaderMapType::HttpCallResponseHeaders); + // Switch context after getting headers, but before getting body to exercise both code paths. 
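+    // setEffectiveContext() re-points subsequent host calls at the stream context that started
+    // the httpCall, since the callback is initially delivered on the root context.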
+ getContext(context_id)->setEffectiveContext(); + auto body = getBufferBytes(WasmBufferType::HttpCallResponseBody, 0, body_size); + auto response_trailers = getHeaderMapPairs(WasmHeaderMapType::HttpCallResponseTrailers); + for (auto& p : response_headers->pairs()) { + logInfo(std::string(p.first) + std::string(" -> ") + std::string(p.second)); + } + logDebug(std::string(body->view())); + for (auto& p : response_trailers->pairs()) { + logWarn(std::string(p.first) + std::string(" -> ") + std::string(p.second)); + } + }; + if (end_of_stream) { + if (root()->httpCall("cluster", {{":method", "POST"}, {":path", "/"}, {":authority", "foo"}}, + "hello world", {{"trail", "cow"}}, 1000, callback) == WasmResult::Ok) { + logError("expected failure did not"); + } + return FilterHeadersStatus::Continue; + } + if (root()->httpCall("bogus cluster", + {{":method", "POST"}, {":path", "/"}, {":authority", "foo"}}, "hello world", + {{"trail", "cow"}}, 1000, callback) == WasmResult::Ok) { + logError("bogus cluster found error"); + } + if (root()->httpCall("cluster", {{":method", "POST"}, {":path", "/"}, {":authority", "foo"}}, + "hello world", {{"trail", "cow"}}, 0xFFFFFFFF, callback) == WasmResult::Ok) { + logError("bogus timeout accepted error"); + } + if (root()->httpCall("cluster", {{":method", "POST"}, {":authority", "foo"}}, "hello world", + {{"trail", "cow"}}, 1000, callback) == WasmResult::Ok) { + logError("emissing path accepted error"); + } + root()->httpCall("cluster", {{":method", "POST"}, {":path", "/"}, {":authority", "foo"}}, + "hello world", {{"trail", "cow"}}, 1000, callback); + logInfo("onRequestHeaders"); + return FilterHeadersStatus::StopIteration; +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/http/wasm/test_data/test_body_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_body_cpp.cc new file mode 100644 index 000000000000..27d0e0626ff4 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_body_cpp.cc @@ -0,0 +1,136 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_lite.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(HttpWasmTestCpp) + +class BodyRootContext : public RootContext { +public: + explicit BodyRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {} +}; + +class BodyContext : public Context { +public: + explicit BodyContext(uint32_t id, RootContext* root) : Context(id, root) {} + + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; + FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) override; + FilterHeadersStatus onResponseHeaders(uint32_t, bool) override; + FilterDataStatus onResponseBody(size_t body_buffer_length, bool end_of_stream) override; + +private: + BodyRootContext* root() { return static_cast(Context::root()); } + static void logBody(WasmBufferType type); + FilterDataStatus onBody(WasmBufferType type, size_t buffer_length, bool end); + std::string body_op_; + int num_chunks_ = 0; +}; + +static RegisterContextFactory register_BodyContext(CONTEXT_FACTORY(BodyContext), + ROOT_FACTORY(BodyRootContext), "body"); + +void BodyContext::logBody(WasmBufferType type) { + size_t buffered_size; + uint32_t flags; + getBufferStatus(type, &buffered_size, &flags); + auto body = getBufferBytes(type, 0, buffered_size); + logError(std::string("onBody ") + std::string(body->view())); +} + +FilterDataStatus BodyContext::onBody(WasmBufferType type, size_t buffer_length, + bool 
end_of_stream) { + size_t size; + uint32_t flags; + if (body_op_ == "ReadBody") { + auto body = getBufferBytes(type, 0, buffer_length); + logError("onBody " + std::string(body->view())); + + } else if (body_op_ == "PrependAndAppendToBody") { + setBuffer(WasmBufferType::HttpRequestBody, 0, 0, "prepend."); + getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags); + setBuffer(WasmBufferType::HttpRequestBody, size, 0, ".append"); + getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags); + auto updated = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size); + logError("onBody " + std::string(updated->view())); + return FilterDataStatus::StopIterationNoBuffer; + } else if (body_op_ == "ReplaceBody") { + setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, "replace"); + getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags); + auto replaced = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size); + logError("onBody " + std::string(replaced->view())); + return FilterDataStatus::StopIterationAndWatermark; + } else if (body_op_ == "RemoveBody") { + setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, ""); + getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags); + auto erased = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size); + logError("onBody " + std::string(erased->view())); + + } else if (body_op_ == "BufferBody") { + logBody(type); + return end_of_stream ? FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer; + + } else if (body_op_ == "PrependAndAppendToBufferedBody") { + setBuffer(WasmBufferType::HttpRequestBody, 0, 0, "prepend."); + getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags); + setBuffer(WasmBufferType::HttpRequestBody, size, 0, ".append"); + logBody(type); + return end_of_stream ? FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer; + + } else if (body_op_ == "ReplaceBufferedBody") { + setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, "replace"); + getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags); + auto replaced = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size); + logBody(type); + return end_of_stream ? FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer; + + } else if (body_op_ == "RemoveBufferedBody") { + setBuffer(WasmBufferType::HttpRequestBody, 0, buffer_length, ""); + getBufferStatus(WasmBufferType::HttpRequestBody, &size, &flags); + auto erased = getBufferBytes(WasmBufferType::HttpRequestBody, 0, size); + logBody(type); + return end_of_stream ? FilterDataStatus::Continue : FilterDataStatus::StopIterationAndBuffer; + + } else if (body_op_ == "BufferTwoBodies") { + logBody(type); + num_chunks_++; + if (end_of_stream || num_chunks_ > 2) { + return FilterDataStatus::Continue; + } + return FilterDataStatus::StopIterationAndBuffer; + + } else { + // This is a test and the test was configured incorrectly. 
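+    // Abort loudly so a typo in x-test-operation shows up as a crash rather than a silent pass.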
+ logError("Invalid body test op " + body_op_); + abort(); + } + return FilterDataStatus::Continue; +} + +FilterHeadersStatus BodyContext::onRequestHeaders(uint32_t, bool) { + body_op_ = getRequestHeader("x-test-operation")->toString(); + setRequestHeaderPairs({{"a", "a"}, {"b", "b"}}); + return FilterHeadersStatus::Continue; +} + +FilterHeadersStatus BodyContext::onResponseHeaders(uint32_t, bool) { + body_op_ = getResponseHeader("x-test-operation")->toString(); + CHECK_RESULT(replaceResponseHeader("x-test-operation", body_op_)); + return FilterHeadersStatus::Continue; +} + +FilterDataStatus BodyContext::onRequestBody(size_t body_buffer_length, bool end_of_stream) { + return onBody(WasmBufferType::HttpRequestBody, body_buffer_length, end_of_stream); +} + +FilterDataStatus BodyContext::onResponseBody(size_t body_buffer_length, bool end_of_stream) { + return onBody(WasmBufferType::HttpResponseBody, body_buffer_length, end_of_stream); +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/http/wasm/test_data/test_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_cpp.cc new file mode 100644 index 000000000000..3e009a3da8c3 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_cpp.cc @@ -0,0 +1,756 @@ +// NOLINT(namespace-envoy) +#include +#include +#include +#include "test/extensions/filters/http/wasm/test_data/test.pb.h" + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_lite.h" +#include "source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h" +#include "source/extensions/common/wasm/ext/declare_property.pb.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#include "absl/base/casts.h" +#endif + +START_WASM_PLUGIN(HttpWasmTestCpp) + +#include "contrib/proxy_expr.h" + +class TestRootContext : public RootContext { +public: + explicit TestRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {} + + bool onStart(size_t configuration_size) override; + void onTick() override; + bool onConfigure(size_t) override; + + std::string test_; + uint32_t stream_context_id_; +}; + +class TestContext : public Context { +public: + explicit TestContext(uint32_t id, RootContext* root) : Context(id, root) {} + + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; + FilterTrailersStatus onRequestTrailers(uint32_t) override; + FilterTrailersStatus onResponseTrailers(uint32_t) override; + FilterDataStatus onRequestBody(size_t body_buffer_length, bool end_of_stream) override; + void onLog() override; + void onDone() override; + +private: + TestRootContext* root() { return static_cast(Context::root()); } +}; + +static RegisterContextFactory register_TestContext(CONTEXT_FACTORY(TestContext), + ROOT_FACTORY(TestRootContext)); + +bool TestRootContext::onStart(size_t configuration_size) { + test_ = getBufferBytes(WasmBufferType::VmConfiguration, 0, configuration_size)->toString(); + return true; +} + +bool TestRootContext::onConfigure(size_t) { + if (test_ == "property") { + { + // Many properties are not available in the root context. + const std::vector properties = { + "string_state", "metadata", "request", "response", "connection", + "connection_id", "upstream", "source", "destination", "cluster_name", + "cluster_metadata", "route_name", "route_metadata", + }; + for (const auto& property : properties) { + if (getProperty({property}).has_value()) { + logWarn("getProperty should not return a value in the root context"); + } + } + } + { + // Some properties are defined in the root context. 
+ std::vector, std::string>> properties = { + {{"plugin_name"}, "plugin_name"}, + {{"plugin_vm_id"}, "vm_id"}, + {{"listener_direction"}, std::string("\x1\0\0\0\0\0\0\0\0", 8)}, // INBOUND + {{"listener_metadata"}, ""}, + }; + for (const auto& property : properties) { + std::string value; + if (!getValue(property.first, &value)) { + logWarn("getValue should provide a value in the root context: " + property.second); + } + if (value != property.second) { + logWarn("getValue returned " + value + ", expect " + property.second); + } + } + } + } + return true; +} + +FilterHeadersStatus TestContext::onRequestHeaders(uint32_t, bool) { + root()->stream_context_id_ = id(); + auto test = root()->test_; + if (test == "headers") { + logDebug(std::string("onRequestHeaders ") + std::to_string(id()) + std::string(" ") + test); + auto path = getRequestHeader(":path"); + logInfo(std::string("header path ") + std::string(path->view())); + std::string protocol; + addRequestHeader("newheader", "newheadervalue"); + auto server = getRequestHeader("server"); + replaceRequestHeader("server", "envoy-wasm"); + auto r = addResponseHeader("bad", "bad"); + if (r != WasmResult::BadArgument) { + logWarn("unexpected success of addResponseHeader"); + } + if (addResponseTrailer("bad", "bad") != WasmResult::BadArgument) { + logWarn("unexpected success of addResponseTrailer"); + } + if (removeResponseTrailer("bad") != WasmResult::BadArgument) { + logWarn("unexpected success of remoteResponseTrailer"); + } + size_t size; + if (getRequestHeaderSize(&size) != WasmResult::Ok) { + logWarn("unexpected failure of getRequestHeaderMapSize"); + } + if (getResponseHeaderSize(&size) != WasmResult::BadArgument) { + logWarn("unexpected success of getResponseHeaderMapSize"); + } + if (server->view() == "envoy-wasm-pause") { + return FilterHeadersStatus::StopIteration; + } else if (server->view() == "envoy-wasm-end-stream") { + return FilterHeadersStatus::ContinueAndEndStream; + } else if (server->view() == "envoy-wasm-stop-buffer") { + return FilterHeadersStatus::StopAllIterationAndBuffer; + } else if (server->view() == "envoy-wasm-stop-watermark") { + return FilterHeadersStatus::StopAllIterationAndWatermark; + } else { + return FilterHeadersStatus::Continue; + } + } else if (test == "metadata") { + std::string value; + if (!getValue({"node", "metadata", "wasm_node_get_key"}, &value)) { + logDebug("missing node metadata"); + } + auto r = setFilterStateStringValue("wasm_request_set_key", "wasm_request_set_value"); + if (r != WasmResult::Ok) { + logDebug(toString(r)); + } + auto path = getRequestHeader(":path"); + logInfo(std::string("header path ") + path->toString()); + addRequestHeader("newheader", "newheadervalue"); + replaceRequestHeader("server", "envoy-wasm"); + + { + const std::string expr = R"("server is " + request.headers["server"])"; + uint32_t token = 0; + if (WasmResult::Ok != createExpression(expr, &token)) { + logError("expr_create error"); + } else { + std::string eval_result; + if (!evaluateExpression(token, &eval_result)) { + logError("expr_eval error"); + } else { + logInfo(eval_result); + } + if (WasmResult::Ok != exprDelete(token)) { + logError("failed to delete an expression"); + } + } + } + + { + // Validate a valid CEL expression + const std::string expr = R"( + envoy.api.v2.core.GrpcService{ + envoy_grpc: envoy.api.v2.core.GrpcService.EnvoyGrpc { + cluster_name: "test" + } + })"; + uint32_t token = 0; + if (WasmResult::Ok != createExpression(expr, &token)) { + logError("expr_create error"); + } else { + GrpcService 
eval_result; + if (!evaluateMessage(token, &eval_result)) { + logError("expr_eval error"); + } else { + logInfo("grpc service: " + eval_result.envoy_grpc().cluster_name()); + } + if (WasmResult::Ok != exprDelete(token)) { + logError("failed to delete an expression"); + } + } + } + + { + // Create a syntactically wrong CEL expression + uint32_t token = 0; + if (createExpression("/ /", &token) != WasmResult::BadArgument) { + logError("expect an error on a syntactically wrong expressions"); + } + } + + { + // Create an invalid CEL expression + uint32_t token = 0; + if (createExpression("_&&_(a, b, c)", &token) != WasmResult::BadArgument) { + logError("expect an error on invalid expressions"); + } + } + + { + // Evaluate a bad token + std::string result; + uint64_t token = 0; + if (evaluateExpression(token, &result)) { + logError("expect an error on invalid token in evaluate"); + } + } + + { + // Evaluate a missing token + std::string result; + uint32_t token = 0xFFFFFFFF; + if (evaluateExpression(token, &result)) { + logError("expect an error on unknown token in evaluate"); + } + // Delete a missing token + if (exprDelete(token) != WasmResult::Ok) { + logError("expect no error on unknown token in delete expression"); + } + } + + { + // Evaluate two expressions to an error + uint32_t token1 = 0; + if (createExpression("1/0", &token1) != WasmResult::Ok) { + logError("unexpected error on division by zero expression"); + } + uint32_t token2 = 0; + if (createExpression("request.duration.size", &token2) != WasmResult::Ok) { + logError("unexpected error on integer field access expression"); + } + std::string result; + if (evaluateExpression(token1, &result)) { + logError("expect an error on division by zero"); + } + if (evaluateExpression(token2, &result)) { + logError("expect an error on integer field access expression"); + } + if (exprDelete(token1) != WasmResult::Ok) { + logError("failed to delete an expression"); + } + if (exprDelete(token2) != WasmResult::Ok) { + logError("failed to delete an expression"); + } + } + + { + int64_t dur; + if (getValue({"request", "duration"}, &dur)) { + logInfo("duration is " + std::to_string(dur)); + } else { + logError("failed to get request duration"); + } + } + + return FilterHeadersStatus::Continue; + } + return FilterHeadersStatus::Continue; +} + +FilterTrailersStatus TestContext::onRequestTrailers(uint32_t) { + auto request_trailer = getRequestTrailer("bogus-trailer"); + if (request_trailer && request_trailer->view() != "") { + logWarn("request bogus-trailer found"); + } + CHECK_RESULT(replaceRequestTrailer("new-trailer", "value")); + CHECK_RESULT(removeRequestTrailer("x")); + // Not available yet. 
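+  // Response trailers do not exist while request trailers are being processed, so the result
+  // of this call is intentionally not checked.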
+ replaceResponseTrailer("new-trailer", "value"); + auto response_trailer = getResponseTrailer("bogus-trailer"); + if (response_trailer && response_trailer->view() != "") { + logWarn("request bogus-trailer found"); + } + return FilterTrailersStatus::Continue; +} + +FilterTrailersStatus TestContext::onResponseTrailers(uint32_t) { + auto value = getResponseTrailer("bogus-trailer"); + if (value && value->view() != "") { + logWarn("response bogus-trailer found"); + } + CHECK_RESULT(replaceResponseTrailer("new-trailer", "value")); + return FilterTrailersStatus::StopIteration; +} + +FilterDataStatus TestContext::onRequestBody(size_t body_buffer_length, bool) { + auto test = root()->test_; + if (test == "headers") { + auto body = getBufferBytes(WasmBufferType::HttpRequestBody, 0, body_buffer_length); + logError(std::string("onBody ") + std::string(body->view())); + } else if (test == "metadata") { + std::string value; + if (!getValue({"node", "metadata", "wasm_node_get_key"}, &value)) { + logDebug("missing node metadata"); + } + logError(std::string("onBody ") + value); + std::string request_string; + std::string request_string2; + if (!getValue( + {"metadata", "filter_metadata", "envoy.filters.http.wasm", "wasm_request_get_key"}, + &request_string)) { + logDebug("missing request metadata"); + } + if (!getValue( + {"metadata", "filter_metadata", "envoy.filters.http.wasm", "wasm_request_get_key"}, + &request_string2)) { + logDebug("missing request metadata"); + } + logTrace(std::string("Struct ") + request_string + " " + request_string2); + return FilterDataStatus::Continue; + } + return FilterDataStatus::Continue; +} + +void TestContext::onLog() { + auto test = root()->test_; + if (test == "headers") { + auto path = getRequestHeader(":path"); + logWarn("onLog " + std::to_string(id()) + " " + std::string(path->view())); + auto response_header = getResponseHeader("bogus-header"); + if (response_header && response_header->view() != "") { + logWarn("response bogus-header found"); + } + auto response_trailer = getResponseTrailer("bogus-trailer"); + if (response_trailer && response_trailer->view() != "") { + logWarn("response bogus-trailer found"); + } + } else if (test == "cluster_metadata") { + std::string cluster_metadata; + if (getValue({"cluster_metadata", "filter_metadata", "namespace", "key"}, &cluster_metadata)) { + logWarn("cluster metadata: " + cluster_metadata); + } + } else if (test == "property") { + setFilterState("wasm_state", "wasm_value"); + auto path = getRequestHeader(":path"); + if (path->view() == "/test_context") { + logWarn("request.path: " + getProperty({"request", "path"}).value()->toString()); + logWarn("node.metadata: " + + getProperty({"node", "metadata", "istio.io/metadata"}).value()->toString()); + logWarn("metadata: " + getProperty({"metadata", "filter_metadata", "envoy.filters.http.wasm", + "wasm_request_get_key"}) + .value() + ->toString()); + int64_t responseCode; + if (getValue({"response", "code"}, &responseCode)) { + logWarn("response.code: " + std::to_string(responseCode)); + } + std::string upstream_host_metadata; + if (getValue({"upstream_host_metadata", "filter_metadata", "namespace", "key"}, &upstream_host_metadata)) { + logWarn("upstream host metadata: " + upstream_host_metadata); + } + logWarn("state: " + getProperty({"wasm_state"}).value()->toString()); + } else { + logWarn("onLog " + std::to_string(id()) + " " + std::string(path->view())); + } + + // Wasm state property set and read validation for {i: 1337} + // Generated using the following input.json: + 
// { + // "i": 1337 + // } + // flatc -b schema.fbs input.json + { + static const char data[24] = {0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, + 0x0c, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x39, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + if (WasmResult::Ok != setFilterState("structured_state", std::string_view(data, 24))) { + logWarn("setProperty(structured_state) failed"); + } + int64_t value = 0; + if (!getValue({"structured_state", "i"}, &value)) { + logWarn("getProperty(structured_state) failed"); + } + if (value != 1337) { + logWarn("getProperty(structured_state) returned " + std::to_string(value)); + } + std::string buffer; + if (!getValue({"structured_state"}, &buffer)) { + logWarn("getValue for structured_state should not fail"); + } + if (buffer.size() != 24) { + logWarn("getValue for structured_state should return the buffer"); + } + } + { + if (setFilterState("string_state", "unicorns") != WasmResult::Ok) { + logWarn("setProperty(string_state) failed"); + } + std::string value; + if (!getValue({"string_state"}, &value)) { + logWarn("getProperty(string_state) failed"); + } + if (value != "unicorns") { + logWarn("getProperty(string_state) returned " + value); + } + } + { + // access via "filter_state" property + std::string value; + if (!getValue({"filter_state", "wasm.string_state"}, &value)) { + logWarn("accessing via filter_state failed"); + } + if (value != "unicorns") { + logWarn("unexpected value: " + value); + } + } + { + // attempt to write twice for a read only wasm state + if (setFilterState("string_state", "ponies") == WasmResult::Ok) { + logWarn("expected second setProperty(string_state) to fail"); + } + std::string value; + if (!getValue({"string_state"}, &value)) { + logWarn("getProperty(string_state) failed"); + } + if (value != "unicorns") { + logWarn("getProperty(string_state) returned " + value); + } + } + { + if (setFilterState("bytes_state", "ponies") != WasmResult::Ok) { + logWarn("setProperty(bytes_state) failed"); + } + std::string value; + if (!getValue({"bytes_state"}, &value)) { + logWarn("getProperty(bytes_state) failed"); + } + if (value != "ponies") { + logWarn("getProperty(bytes_state) returned " + value); + } + } + { + wasmtest::TestProto test_proto; + uint32_t i = 53; + test_proto.set_i(i); + double j = 13.0; + test_proto.set_j(j); + bool k = true; + test_proto.set_k(k); + std::string s = "centaur"; + test_proto.set_s(s); + test_proto.mutable_t()->set_seconds(2); + test_proto.mutable_t()->set_nanos(3); + test_proto.add_l("abc"); + test_proto.add_l("xyz"); + (*test_proto.mutable_m())["a"] = "b"; + + // validate setting a filter state + std::string in; + test_proto.SerializeToString(&in); + if (setFilterState("protobuf_state", in) != WasmResult::Ok) { + logWarn("setProperty(protobuf_state) failed"); + } + // validate uint field + uint64_t i2; + if (!getValue({"protobuf_state", "i"}, &i2) || i2 != i) { + logWarn("uint field returned " + std::to_string(i2)); + } + + // validate double field + double j2; + if (!getValue({"protobuf_state", "j"}, &j2) || j2 != j) { + logWarn("double field returned " + std::to_string(j2)); + } + + // validate bool field + bool k2; + if (!getValue({"protobuf_state", "k"}, &k2) || k2 != k) { + logWarn("bool field returned " + std::to_string(k2)); + } + + // validate string field + std::string s2; + if (!getValue({"protobuf_state", "s"}, &s2) || s2 != s) { + logWarn("string field returned " + s2); + } + + // validate timestamp field + int64_t t; + if (!getValue({"protobuf_state", "t"}, &t) || t != 2000000003ull) { 
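+      // The timestamp set above (2s, 3ns) flattens to 2000000003 nanoseconds.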
+ logWarn("timestamp field returned " + std::to_string(t)); + } + + // validate malformed field + std::string a; + if (getValue({"protobuf_state", "a"}, &a)) { + logWarn("expect serialization error for malformed type_url string, got " + a); + } + + // validate null field + std::string b; + if (!getValue({"protobuf_state", "b"}, &b) || b != "") { + logWarn("null field returned " + b); + } + + // validate list field + auto l = getProperty({"protobuf_state", "l"}); + if (l.has_value()) { + auto pairs = l.value()->pairs(); + if (pairs.size() != 2 || pairs[0].first != "abc" || pairs[1].first != "xyz") { + logWarn("list field did not return the expected value"); + } + } else { + logWarn("list field returned none"); + } + + // validate map field + auto m = getProperty({"protobuf_state", "m"}); + if (m.has_value()) { + auto pairs = m.value()->pairs(); + if (pairs.size() != 1 || pairs[0].first != "a" || pairs[0].second != "b") { + logWarn("map field did not return the expected value: " + std::to_string(pairs.size())); + } + } else { + logWarn("map field returned none"); + } + + // validate entire message + std::string buffer; + if (!getValue({"protobuf_state"}, &buffer)) { + logWarn("getValue for protobuf_state should not fail"); + } + if (buffer.size() != in.size()) { + logWarn("getValue for protobuf_state should return the buffer"); + } + } + { + // Some properties are not available in the stream context. + const std::vector properties = {"xxx", "request", "route_name", "node"}; + for (const auto& property : properties) { + if (getProperty({property, "xxx"}).has_value()) { + logWarn("getProperty should not return a value in the root context"); + } + } + } + { + // Some properties are defined in the stream context. + std::vector, std::string>> properties = { + {{"plugin_name"}, "plugin_name"}, + {{"plugin_vm_id"}, "vm_id"}, + {{"listener_direction"}, std::string("\x1\0\0\0\0\0\0\0\0", 8)}, // INBOUND + {{"listener_metadata"}, ""}, + {{"route_name"}, "route12"}, + {{"cluster_name"}, "fake_cluster"}, + {{"connection_id"}, std::string("\x4\0\0\0\0\0\0\0\0", 8)}, + {{"connection", "requested_server_name"}, "w3.org"}, + {{"source", "address"}, "127.0.0.1:0"}, + {{"destination", "address"}, "127.0.0.2:0"}, + {{"upstream", "address"}, "10.0.0.1:443"}, + {{"route_metadata"}, ""}, + }; + for (const auto& property : properties) { + std::string value; + if (!getValue(property.first, &value)) { + logWarn("getValue should provide a value in the root context: " + property.second); + } + if (value != property.second) { + logWarn("getValue returned " + value + ", expect " + property.second); + } + } + } + } +} + +void TestContext::onDone() { + auto test = root()->test_; + if (test == "headers") { + logWarn("onDone " + std::to_string(id())); + } +} + +void TestRootContext::onTick() { + if (test_ == "headers") { + getContext(stream_context_id_)->setEffectiveContext(); + replaceRequestHeader("server", "envoy-wasm-continue"); + continueRequest(); + if (getBufferBytes(WasmBufferType::PluginConfiguration, 0, 1)->view() != "") { + logDebug("unexpectd success of getBufferBytes PluginConfiguration"); + } + } else if (test_ == "metadata") { + std::string value; + if (!getValue({"node", "metadata", "wasm_node_get_key"}, &value)) { + logDebug("missing node metadata"); + } + logDebug(std::string("onTick ") + value); + + std::string list_value; + if (!getValue({"node", "metadata", "wasm_node_list_key", "0"}, &list_value)) { + logDebug("missing node metadata list value"); + } + if (list_value != "wasm_node_get_value") { + 
logWarn("unexpected list value: " + list_value); + } + if (getValue({"node", "metadata", "wasm_node_list_key", "bad_key"}, &list_value)) { + logDebug("unexpected list value for a bad_key"); + } + if (getValue({"node", "metadata", "wasm_node_list_key", "1"}, &list_value)) { + logDebug("unexpected list value outside the range"); + } + } else if (test_ == "property") { + uint64_t t; + if (WasmResult::Ok != proxy_get_current_time_nanoseconds(&t)) { + logError(std::string("bad proxy_get_current_time_nanoseconds result")); + } + std::string function = "declare_property"; + { + envoy::source::extensions::common::wasm::DeclarePropertyArguments args; + args.set_name("structured_state"); + args.set_type(envoy::source::extensions::common::wasm::WasmType::FlatBuffers); + args.set_span(envoy::source::extensions::common::wasm::LifeSpan::DownstreamConnection); + // Reflection flatbuffer for a simple table {i : int64}. + // Generated using the following schema.fbs: + // + // namespace Wasm.Common; + // table T { + // i: int64; + // } + // root_type T; + // + // flatc --cpp --bfbs-gen-embed schema.fbs + static const char bfbsData[192] = { + 0x18, 0x00, 0x00, 0x00, 0x42, 0x46, 0x42, 0x53, 0x10, 0x00, 0x1C, 0x00, 0x04, 0x00, 0x08, + 0x00, 0x0C, 0x00, 0x10, 0x00, 0x14, 0x00, 0x18, 0x00, 0x10, 0x00, 0x00, 0x00, 0x30, 0x00, + 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x34, + 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x10, 0x00, 0x04, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, + 0x0D, 0x00, 0x00, 0x00, 0x57, 0x61, 0x73, 0x6D, 0x2E, 0x43, 0x6F, 0x6D, 0x6D, 0x6F, 0x6E, + 0x2E, 0x54, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x12, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00, + 0x06, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x0C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x09, 0x01, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00}; + args.set_schema(bfbsData, 192); + std::string in; + args.SerializeToString(&in); + char* out = nullptr; + size_t out_size = 0; + if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(), + in.size(), &out, &out_size)) { + logError("declare_property failed for flatbuffers"); + } + ::free(out); + } + { + envoy::source::extensions::common::wasm::DeclarePropertyArguments args; + args.set_name("string_state"); + args.set_type(envoy::source::extensions::common::wasm::WasmType::String); + args.set_span(envoy::source::extensions::common::wasm::LifeSpan::FilterChain); + args.set_readonly(true); + std::string in; + args.SerializeToString(&in); + char* out = nullptr; + size_t out_size = 0; + if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(), + in.size(), &out, &out_size)) { + logError("declare_property failed for strings"); + } + ::free(out); + } + { + envoy::source::extensions::common::wasm::DeclarePropertyArguments args; + args.set_name("bytes_state"); + args.set_type(envoy::source::extensions::common::wasm::WasmType::Bytes); + args.set_span(envoy::source::extensions::common::wasm::LifeSpan::DownstreamRequest); + std::string in; + 
args.SerializeToString(&in); + char* out = nullptr; + size_t out_size = 0; + if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(), + in.size(), &out, &out_size)) { + logError("declare_property failed for bytes"); + } + ::free(out); + } + { + // double declaration of "bytes_state" should return BAD_ARGUMENT + envoy::source::extensions::common::wasm::DeclarePropertyArguments args; + args.set_name("bytes_state"); + std::string in; + args.SerializeToString(&in); + char* out = nullptr; + size_t out_size = 0; + if (WasmResult::BadArgument != proxy_call_foreign_function(function.data(), function.size(), + in.data(), in.size(), &out, + &out_size)) { + logError("declare_property must fail for double declaration"); + } + ::free(out); + } + { + envoy::source::extensions::common::wasm::DeclarePropertyArguments args; + args.set_name("protobuf_state"); + args.set_type(envoy::source::extensions::common::wasm::WasmType::Protobuf); + args.set_span(envoy::source::extensions::common::wasm::LifeSpan::DownstreamRequest); + args.set_schema("type.googleapis.com/wasmtest.TestProto"); + std::string in; + args.SerializeToString(&in); + char* out = nullptr; + size_t out_size = 0; + if (WasmResult::Ok != proxy_call_foreign_function(function.data(), function.size(), in.data(), + in.size(), &out, &out_size)) { + logError("declare_property failed for protobuf"); + } + ::free(out); + } + { + char* out = nullptr; + size_t out_size = 0; + if (WasmResult::Ok == proxy_call_foreign_function(function.data(), function.size(), + function.data(), function.size(), &out, + &out_size)) { + logError("expected declare_property to fail"); + } + ::free(out); + } + { + // setting a filter state in root context returns NOT_FOUND + if (setFilterState("string_state", "unicorns") != WasmResult::NotFound) { + logWarn("setProperty(string_state) should fail in root context"); + } + } + } +} + +class Context1 : public Context { +public: + Context1(uint32_t id, RootContext* root) : Context(id, root) {} + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; +}; + +class Context2 : public Context { +public: + Context2(uint32_t id, RootContext* root) : Context(id, root) {} + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; +}; + +static RegisterContextFactory register_Context1(CONTEXT_FACTORY(Context1), "context1"); +static RegisterContextFactory register_Contxt2(CONTEXT_FACTORY(Context2), "context2"); + +FilterHeadersStatus Context1::onRequestHeaders(uint32_t, bool) { + logDebug(std::string("onRequestHeaders1 ") + std::to_string(id())); + return FilterHeadersStatus::Continue; +} + +FilterHeadersStatus Context2::onRequestHeaders(uint32_t, bool) { + logDebug(std::string("onRequestHeaders2 ") + std::to_string(id())); + CHECK_RESULT(sendLocalResponse(200, "ok", "body", {{"foo", "bar"}})); + return FilterHeadersStatus::Continue; +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/http/wasm/test_data/test_cpp_null_plugin.cc b/test/extensions/filters/http/wasm/test_data/test_cpp_null_plugin.cc new file mode 100644 index 000000000000..38f1e82c78b3 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_cpp_null_plugin.cc @@ -0,0 +1,15 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace HttpWasmTestCpp { +NullPluginRegistry* context_registry_; +} // namespace HttpWasmTestCpp + +RegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin("HttpWasmTestCpp", []() { + return 
std::make_unique(HttpWasmTestCpp::context_registry_); +}); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc new file mode 100644 index 000000000000..0abdc79ce7c2 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc @@ -0,0 +1,83 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_lite.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(HttpWasmTestCpp) + +class MyGrpcCallHandler : public GrpcCallHandler { +public: + MyGrpcCallHandler() : GrpcCallHandler() {} + void onSuccess(size_t body_size) override { + auto response = getBufferBytes(WasmBufferType::GrpcReceiveBuffer, 0, body_size); + logDebug(response->proto().string_value()); + cancel(); + } + void onFailure(GrpcStatus) override { + auto p = getStatus(); + logDebug(std::string("failure ") + std::string(p.second->view())); + } +}; + +class GrpcCallRootContext : public RootContext { +public: + explicit GrpcCallRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {} + + void onQueueReady(uint32_t op) override { + if (op == 0) { + handler_->cancel(); + } else { + grpcClose(handler_->token()); + } + } + + MyGrpcCallHandler* handler_ = nullptr; +}; + +class GrpcCallContext : public Context { +public: + explicit GrpcCallContext(uint32_t id, RootContext* root) : Context(id, root) {} + + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; + + GrpcCallRootContext* root() { return static_cast(Context::root()); } +}; + +static RegisterContextFactory register_GrpcCallContext(CONTEXT_FACTORY(GrpcCallContext), + ROOT_FACTORY(GrpcCallRootContext), + "grpc_call"); + +FilterHeadersStatus GrpcCallContext::onRequestHeaders(uint32_t, bool end_of_stream) { + GrpcService grpc_service; + grpc_service.mutable_envoy_grpc()->set_cluster_name("cluster"); + std::string grpc_service_string; + grpc_service.SerializeToString(&grpc_service_string); + google::protobuf::Value value; + value.set_string_value("request"); + HeaderStringPairs initial_metadata; + root()->handler_ = new MyGrpcCallHandler(); + if (end_of_stream) { + if (root()->grpcCallHandler(grpc_service_string, "service", "method", initial_metadata, value, + 1000, std::unique_ptr(root()->handler_)) == + WasmResult::Ok) { + logError("expected failure did not occur"); + } + return FilterHeadersStatus::Continue; + } + root()->grpcCallHandler(grpc_service_string, "service", "method", initial_metadata, value, 1000, + std::unique_ptr(root()->handler_)); + if (root()->grpcCallHandler( + "bogus grpc_service", "service", "method", initial_metadata, value, 1000, + std::unique_ptr(new MyGrpcCallHandler())) == WasmResult::Ok) { + logError("bogus grpc_service accepted error"); + } + return FilterHeadersStatus::StopIteration; +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc new file mode 100644 index 000000000000..6a357de65b87 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc @@ -0,0 +1,94 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_lite.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(HttpWasmTestCpp) + +class 
GrpcStreamContext : public Context { +public: + explicit GrpcStreamContext(uint32_t id, RootContext* root) : Context(id, root) {} + + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; +}; + +class GrpcStreamRootContext : public RootContext { +public: + explicit GrpcStreamRootContext(uint32_t id, std::string_view root_id) + : RootContext(id, root_id) {} +}; + +static RegisterContextFactory register_GrpcStreamContext(CONTEXT_FACTORY(GrpcStreamContext), + ROOT_FACTORY(GrpcStreamRootContext), + "grpc_stream"); +class MyGrpcStreamHandler + : public GrpcStreamHandler { +public: + MyGrpcStreamHandler() : GrpcStreamHandler() {} + void onReceiveInitialMetadata(uint32_t) override { + auto h = getHeaderMapValue(WasmHeaderMapType::GrpcReceiveInitialMetadata, "test"); + if (h->view() == "reset") { + reset(); + return; + } + // Not Found. + h = getHeaderMapValue(WasmHeaderMapType::HttpCallResponseHeaders, "foo"); + h = getHeaderMapValue(WasmHeaderMapType::HttpCallResponseTrailers, "foo"); + addHeaderMapValue(WasmHeaderMapType::GrpcReceiveInitialMetadata, "foo", "bar"); + } + void onReceive(size_t body_size) override { + auto response = getBufferBytes(WasmBufferType::GrpcReceiveBuffer, 0, body_size); + auto response_string = response->proto().string_value(); + google::protobuf::Value message; + if (response_string == "close") { + close(); + } else { + send(message, false); + } + logDebug(std::string("response ") + response_string); + } + void onReceiveTrailingMetadata(uint32_t) override { + auto h = getHeaderMapValue(WasmHeaderMapType::GrpcReceiveTrailingMetadata, "foo"); + addHeaderMapValue(WasmHeaderMapType::GrpcReceiveTrailingMetadata, "foo", "bar"); + } + void onRemoteClose(GrpcStatus) override { + auto p = getStatus(); + logDebug(std::string("close ") + std::string(p.second->view())); + if (p.second->view() == "close") { + close(); + } else if (p.second->view() == "ok") { + return; + } else { + reset(); + } + } +}; + +FilterHeadersStatus GrpcStreamContext::onRequestHeaders(uint32_t, bool) { + GrpcService grpc_service; + grpc_service.mutable_envoy_grpc()->set_cluster_name("cluster"); + std::string grpc_service_string; + grpc_service.SerializeToString(&grpc_service_string); + HeaderStringPairs initial_metadata; + if (root()->grpcStreamHandler("bogus service string", "service", "method", initial_metadata, + std::unique_ptr( + new MyGrpcStreamHandler())) != WasmResult::ParseFailure) { + logError("unexpected bogus service string OK"); + } + if (root()->grpcStreamHandler(grpc_service_string, "service", "bad method", initial_metadata, + std::unique_ptr( + new MyGrpcStreamHandler())) != WasmResult::InternalFailure) { + logError("unexpected bogus method OK"); + } + root()->grpcStreamHandler(grpc_service_string, "service", "method", initial_metadata, + std::unique_ptr(new MyGrpcStreamHandler())); + return FilterHeadersStatus::StopIteration; +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/http/wasm/test_data/test_shared_data_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_shared_data_cpp.cc new file mode 100644 index 000000000000..6ecf802903d9 --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_shared_data_cpp.cc @@ -0,0 +1,55 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_lite.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(HttpWasmTestCpp) + +class SharedDataRootContext : public RootContext { +public: + explicit 
SharedDataRootContext(uint32_t id, std::string_view root_id) + : RootContext(id, root_id) {} + + void onTick() override; + void onQueueReady(uint32_t) override; +}; + +static RegisterContextFactory register_SharedDataRootContext(ROOT_FACTORY(SharedDataRootContext), + "shared_data"); + +void SharedDataRootContext::onTick() { + setHeaderMapPairs(WasmHeaderMapType::GrpcReceiveInitialMetadata, {}); + setRequestHeaderPairs({{"foo", "bar"}}); + WasmDataPtr value0; + if (getSharedData("shared_data_key_bad", &value0) == WasmResult::NotFound) { + logDebug("get of bad key not found"); + } + CHECK_RESULT(setSharedData("shared_data_key1", "shared_data_value0")); + CHECK_RESULT(setSharedData("shared_data_key1", "shared_data_value1")); + CHECK_RESULT(setSharedData("shared_data_key2", "shared_data_value2")); + uint32_t cas = 0; + auto value2 = getSharedDataValue("shared_data_key2", &cas); + if (WasmResult::CasMismatch == + setSharedData("shared_data_key2", "shared_data_value3", cas + 1)) { // Bad cas. + logInfo("set CasMismatch"); + } +} + +void SharedDataRootContext::onQueueReady(uint32_t) { + WasmDataPtr value0; + if (getSharedData("shared_data_key_bad", &value0) == WasmResult::NotFound) { + logDebug("second get of bad key not found"); + } + auto value1 = getSharedDataValue("shared_data_key1"); + logDebug("get 1 " + value1->toString()); + auto value2 = getSharedDataValue("shared_data_key2"); + logCritical("get 2 " + value2->toString()); +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/http/wasm/test_data/test_shared_queue_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_shared_queue_cpp.cc new file mode 100644 index 000000000000..ea171e251bff --- /dev/null +++ b/test/extensions/filters/http/wasm/test_data/test_shared_queue_cpp.cc @@ -0,0 +1,69 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics_lite.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(HttpWasmTestCpp) + +class SharedQueueContext : public Context { +public: + explicit SharedQueueContext(uint32_t id, RootContext* root) : Context(id, root) {} + + FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; +}; + +class SharedQueueRootContext : public RootContext { +public: + explicit SharedQueueRootContext(uint32_t id, std::string_view root_id) + : RootContext(id, root_id) {} + + bool onStart(size_t) override; + void onQueueReady(uint32_t) override; + + uint32_t shared_queue_token_; +}; + +static RegisterContextFactory register_SharedQueueContext(CONTEXT_FACTORY(SharedQueueContext), + ROOT_FACTORY(SharedQueueRootContext), + "shared_queue"); + +bool SharedQueueRootContext::onStart(size_t) { + CHECK_RESULT(registerSharedQueue("my_shared_queue", &shared_queue_token_)); + return true; +} + +FilterHeadersStatus SharedQueueContext::onRequestHeaders(uint32_t, bool) { + uint32_t token; + if (resolveSharedQueue("vm_id", "bad_shared_queue", &token) == WasmResult::NotFound) { + logWarn("onRequestHeaders not found bad_shared_queue"); + } + CHECK_RESULT(resolveSharedQueue("vm_id", "my_shared_queue", &token)); + if (enqueueSharedQueue(token, "data1") == WasmResult::Ok) { + logWarn("onRequestHeaders enqueue Ok"); + } + return FilterHeadersStatus::Continue; +} + +void SharedQueueRootContext::onQueueReady(uint32_t token) { + if (token == shared_queue_token_) { + logInfo("onQueueReady"); + } + std::unique_ptr data; + if (dequeueSharedQueue(9999999 /* bad token */, &data) == WasmResult::NotFound) { + logWarn("onQueueReady bad 
token not found"); + } + if (dequeueSharedQueue(token, &data) == WasmResult::Ok) { + logDebug("data " + data->toString() + " Ok"); + } + if (dequeueSharedQueue(token, &data) == WasmResult::Empty) { + logWarn("onQueueReady extra data not found"); + } +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/http/wasm/wasm_filter_test.cc b/test/extensions/filters/http/wasm/wasm_filter_test.cc new file mode 100644 index 000000000000..538481f36f6c --- /dev/null +++ b/test/extensions/filters/http/wasm/wasm_filter_test.cc @@ -0,0 +1,1445 @@ +#include "common/http/message_impl.h" + +#include "extensions/filters/http/wasm/wasm_filter.h" + +#include "test/mocks/network/connection.h" +#include "test/mocks/router/mocks.h" +#include "test/test_common/wasm_base.h" + +using testing::Eq; +using testing::Invoke; +using testing::Return; +using testing::ReturnRef; + +MATCHER_P(MapEq, rhs, "") { + const Envoy::ProtobufWkt::Struct& obj = arg; + EXPECT_TRUE(rhs.size() > 0); + for (auto const& entry : rhs) { + EXPECT_EQ(obj.fields().at(entry.first).string_value(), entry.second); + } + return true; +} + +using BufferFunction = std::function; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Wasm { + +using Envoy::Extensions::Common::Wasm::CreateContextFn; +using Envoy::Extensions::Common::Wasm::Plugin; +using Envoy::Extensions::Common::Wasm::PluginSharedPtr; +using Envoy::Extensions::Common::Wasm::Wasm; +using Envoy::Extensions::Common::Wasm::WasmHandleSharedPtr; +using proxy_wasm::ContextBase; +using GrpcService = envoy::config::core::v3::GrpcService; +using WasmFilterConfig = envoy::extensions::filters::http::wasm::v3::Wasm; + +class TestFilter : public Envoy::Extensions::Common::Wasm::Context { +public: + TestFilter(Wasm* wasm, uint32_t root_context_id, + Envoy::Extensions::Common::Wasm::PluginSharedPtr plugin) + : Envoy::Extensions::Common::Wasm::Context(wasm, root_context_id, plugin) {} + MOCK_CONTEXT_LOG_; +}; + +class TestRoot : public Envoy::Extensions::Common::Wasm::Context { +public: + TestRoot(Wasm* wasm, const std::shared_ptr& plugin) : Context(wasm, plugin) {} + MOCK_CONTEXT_LOG_; +}; + +class WasmHttpFilterTest : public Common::Wasm::WasmHttpFilterTestBase< + testing::TestWithParam>> { +public: + WasmHttpFilterTest() = default; + ~WasmHttpFilterTest() override = default; + + CreateContextFn createContextFn() { + return [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + return new TestRoot(wasm, plugin); + }; + } + + void setup(const std::string& code, std::string root_id = "", std::string vm_configuration = "") { + setupBase(std::get<0>(GetParam()), code, createContextFn(), root_id, vm_configuration); + } + void setupTest(std::string root_id = "", std::string vm_configuration = "") { + std::string code; + if (std::get<0>(GetParam()) == "null") { + code = "HttpWasmTestCpp"; + } else { + if (std::get<1>(GetParam()) == "cpp") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath( + "test/extensions/filters/http/wasm/test_data/test_cpp.wasm")); + } else { + auto filename = !root_id.empty() ? 
root_id : vm_configuration;
+        const auto basic_path = TestEnvironment::runfilesPath(
+            absl::StrCat("test/extensions/filters/http/wasm/test_data/", filename));
+        code = TestEnvironment::readFileToStringForTest(basic_path + "_rust.wasm");
+      }
+    }
+    setupBase(std::get<0>(GetParam()), code, createContextFn(), root_id, vm_configuration);
+  }
+  void setupFilter(const std::string root_id = "") { setupFilterBase(root_id); }
+
+  void setupGrpcStreamTest(Grpc::RawAsyncStreamCallbacks*& callbacks);
+
+  TestRoot& rootContext() { return *static_cast<TestRoot*>(root_context_); }
+  TestFilter& filter() { return *static_cast<TestFilter*>(context_.get()); }
+
+protected:
+  NiceMock<Grpc::MockAsyncStream> async_stream_;
+  Grpc::MockAsyncClientManager async_client_manager_;
+};
+
+// NB: this is required by VC++ which can not handle the use of macros in the macro definitions
+// used by INSTANTIATE_TEST_SUITE_P.
+auto testing_values = testing::Values(
+#if defined(ENVOY_WASM_V8)
+    std::make_tuple("v8", "cpp"), std::make_tuple("v8", "rust"),
+#endif
+#if defined(ENVOY_WASM_WAVM)
+    std::make_tuple("wavm", "cpp"), std::make_tuple("wavm", "rust"),
+#endif
+    std::make_tuple("null", "cpp"));
+INSTANTIATE_TEST_SUITE_P(RuntimesAndLanguages, WasmHttpFilterTest, testing_values);
+
+// Bad code in initial config.
+TEST_P(WasmHttpFilterTest, BadCode) {
+  setup("bad code");
+  EXPECT_EQ(wasm_, nullptr);
+}
+
+// Script touching headers only, request that is headers only.
+TEST_P(WasmHttpFilterTest, HeadersOnlyRequestHeadersOnly) {
+  setupTest("", "headers");
+  setupFilter();
+  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));
+  EXPECT_CALL(filter(),
+              log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers"))));
+  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /"))));
+  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2"))));
+  Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, {"server", "envoy"}};
+  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true));
+  EXPECT_THAT(request_headers.get_("newheader"), Eq("newheadervalue"));
+  EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm"));
+  // Test some errors.
+  EXPECT_EQ(filter().continueStream(static_cast<proxy_wasm::WasmStreamType>(9999)),
+            proxy_wasm::WasmResult::BadArgument);
+  EXPECT_EQ(filter().closeStream(static_cast<proxy_wasm::WasmStreamType>(9999)),
+            proxy_wasm::WasmResult::BadArgument);
+  Http::TestResponseHeaderMapImpl response_headers;
+  EXPECT_EQ(filter().encode100ContinueHeaders(response_headers),
+            Http::FilterHeadersStatus::Continue);
+  filter().onDestroy();
+}
+
+TEST_P(WasmHttpFilterTest, AllHeadersAndTrailers) {
+  setupTest("", "headers");
+  setupFilter();
+  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));
+  EXPECT_CALL(filter(),
+              log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers"))));
+  EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /"))));
+  EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2"))));
+  Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, {"server", "envoy"}};
+  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false));
+  EXPECT_THAT(request_headers.get_("newheader"), Eq("newheadervalue"));
+  EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm"));
+  Http::TestRequestTrailerMapImpl request_trailers{};
+  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter().decodeTrailers(request_trailers));
+  Http::MetadataMap request_metadata{};
+  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().decodeMetadata(request_metadata));
+  Http::TestResponseHeaderMapImpl response_headers{};
+  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false));
+  Http::TestResponseTrailerMapImpl response_trailers{};
+  EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter().encodeTrailers(response_trailers));
+  Http::MetadataMap response_metadata{};
+  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().encodeMetadata(response_metadata));
+  filter().onDestroy();
+}
+
+TEST_P(WasmHttpFilterTest, AllHeadersAndTrailersNotStarted) {
+  setupTest("", "headers");
+  setupFilter();
+  EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_));
+  Http::TestRequestTrailerMapImpl request_trailers{};
+  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter().decodeTrailers(request_trailers));
+  Http::MetadataMap request_metadata{};
+  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().decodeMetadata(request_metadata));
+  Http::TestResponseHeaderMapImpl response_headers{};
+  EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false));
+  Http::TestResponseTrailerMapImpl response_trailers{};
+  EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter().encodeTrailers(response_trailers));
+  Http::MetadataMap response_metadata{};
+  EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().encodeMetadata(response_metadata));
+  Buffer::OwnedImpl data("data");
+  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, false));
+  EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data, false));
+  filter().onDestroy();
+}
+
+// Script touching headers only, request that is headers only.
+TEST_P(WasmHttpFilterTest, HeadersOnlyRequestHeadersAndBody) { + setupTest("", "headers"); + setupFilter(); + EXPECT_CALL(filter(), + log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers")))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /")))); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + EXPECT_FALSE(filter().endOfStream(proxy_wasm::WasmStreamType::Request)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + filter().onDestroy(); +} + +TEST_P(WasmHttpFilterTest, HeadersStopAndContinue) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK. + return; + } + setupTest("", "headers"); + setupFilter(); + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + EXPECT_CALL(filter(), + log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers")))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, {"server", "envoy-wasm-pause"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, true)); + root_context_->onTick(0); + filter().clearRouteCache(); + EXPECT_THAT(request_headers.get_("newheader"), Eq("newheadervalue")); + EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm-continue")); + filter().onDestroy(); +} + +#if 0 +TEST_P(WasmHttpFilterTest, HeadersStopAndEndStream) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK. + return; + } + setupTest("", "headers"); + setupFilter(); + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + EXPECT_CALL(filter(), + log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers")))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"server", "envoy-wasm-end-stream"}}; + EXPECT_EQ(Http::FilterHeadersStatus::ContinueAndEndStream, + filter().decodeHeaders(request_headers, true)); + root_context_->onTick(0); + EXPECT_THAT(request_headers.get_("newheader"), Eq("newheadervalue")); + EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm-continue")); + filter().onDestroy(); +} +#endif + +TEST_P(WasmHttpFilterTest, HeadersStopAndBuffer) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK. 
+ return; + } + setupTest("", "headers"); + setupFilter(); + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + EXPECT_CALL(filter(), + log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers")))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"server", "envoy-wasm-stop-buffer"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer, + filter().decodeHeaders(request_headers, true)); + root_context_->onTick(0); + EXPECT_THAT(request_headers.get_("newheader"), Eq("newheadervalue")); + EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm-continue")); + filter().onDestroy(); +} + +TEST_P(WasmHttpFilterTest, HeadersStopAndWatermark) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): This hand off is not currently possible in the Rust SDK. + return; + } + setupTest("", "headers"); + setupFilter(); + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + EXPECT_CALL(filter(), + log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers")))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"server", "envoy-wasm-stop-watermark"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter().decodeHeaders(request_headers, true)); + root_context_->onTick(0); + EXPECT_THAT(request_headers.get_("newheader"), Eq("newheadervalue")); + EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm-continue")); + filter().onDestroy(); +} + +// Script that reads the body. +TEST_P(WasmHttpFilterTest, BodyRequestReadBody) { + setupTest("body"); + setupFilter("body"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, {"x-test-operation", "ReadBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + filter().onDestroy(); +} + +// Script that prepends and appends to the body. +TEST_P(WasmHttpFilterTest, BodyRequestPrependAndAppendToBody) { + setupTest("body"); + setupFilter("body"); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("onBody prepend.hello.append")))); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("onBody prepend.prepend.hello.append.append")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"x-test-operation", "PrependAndAppendToBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + if (std::get<1>(GetParam()) == "rust") { + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data, true)); + } else { + // This status is not available in the rust SDK. + // TODO: update all SDKs to the new revision of the spec and update the tests accordingly. 
+ EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter().decodeData(data, true)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter().encodeData(data, true)); + } + filter().onDestroy(); +} + +// Script that replaces the body. +TEST_P(WasmHttpFilterTest, BodyRequestReplaceBody) { + setupTest("body"); + setupFilter("body"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody replace")))).Times(2); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"x-test-operation", "ReplaceBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + if (std::get<1>(GetParam()) == "rust") { + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data, true)); + } else { + // This status is not available in the rust SDK. + // TODO: update all SDKs to the new revision of the spec and update the tests accordingly. + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter().decodeData(data, true)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter().encodeData(data, true)); + } + filter().onDestroy(); +} + +// Script that removes the body. +TEST_P(WasmHttpFilterTest, BodyRequestRemoveBody) { + setupTest("body"); + setupFilter("body"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody ")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"x-test-operation", "RemoveBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + filter().onDestroy(); +} + +// Script that buffers the body. +TEST_P(WasmHttpFilterTest, BodyRequestBufferBody) { + setupTest("body"); + setupFilter("body"); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"x-test-operation", "BufferBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + + Buffer::OwnedImpl bufferedBody; + EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(&bufferedBody)); + EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer(_)) + .WillRepeatedly(Invoke([&bufferedBody](BufferFunction f) { f(bufferedBody); })); + + Buffer::OwnedImpl data1("hello"); + bufferedBody.add(data1); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello")))).Times(1); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data1, false)); + + Buffer::OwnedImpl data2(" again "); + bufferedBody.add(data2); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello again ")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data2, false)); + + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello again hello")))) + .Times(1); + Buffer::OwnedImpl data3("hello"); + bufferedBody.add(data3); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data3, true)); + + // Verify that the response still works even though we buffered the request. 
+ Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"x-test-operation", "ReadBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false)); + // Should not buffer this time + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello")))).Times(2); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data1, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data1, true)); + + filter().onDestroy(); +} + +// Script that prepends and appends to the buffered body. +TEST_P(WasmHttpFilterTest, BodyRequestPrependAndAppendToBufferedBody) { + setupTest("body"); + setupFilter("body"); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("onBody prepend.hello.append")))); + Http::TestRequestHeaderMapImpl request_headers{ + {":path", "/"}, {"x-test-operation", "PrependAndAppendToBufferedBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + filter().onDestroy(); +} + +// Script that replaces the buffered body. +TEST_P(WasmHttpFilterTest, BodyRequestReplaceBufferedBody) { + setupTest("body"); + setupFilter("body"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody replace")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"x-test-operation", "ReplaceBufferedBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + filter().onDestroy(); +} + +// Script that removes the buffered body. 
+TEST_P(WasmHttpFilterTest, BodyRequestRemoveBufferedBody) { + setupTest("body"); + setupFilter("body"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody ")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, + {"x-test-operation", "RemoveBufferedBody"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + filter().onDestroy(); +} + +// Script that buffers the first part of the body and streams the rest +TEST_P(WasmHttpFilterTest, BodyRequestBufferThenStreamBody) { + setupTest("body"); + setupFilter("body"); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + + Buffer::OwnedImpl bufferedBody; + EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(&bufferedBody)); + EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer(_)) + .WillRepeatedly(Invoke([&bufferedBody](BufferFunction f) { f(bufferedBody); })); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"x-test-operation", "BufferTwoBodies"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl data1("hello"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello")))).Times(1); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data1, false)); + bufferedBody.add(data1); + + Buffer::OwnedImpl data2(", there, "); + bufferedBody.add(data2); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello, there, ")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().decodeData(data2, false)); + + // Previous callbacks returned "Buffer" so we have buffered so far + Buffer::OwnedImpl data3("world!"); + bufferedBody.add(data3); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("onBody hello, there, world!")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data3, false)); + + // Last callback returned "continue" so we just see individual chunks. 
+ Buffer::OwnedImpl data4("So it's "); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody So it's ")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data4, false)); + + Buffer::OwnedImpl data5("goodbye, then!"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody goodbye, then!")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data5, true)); + + filter().onDestroy(); +} + +// Script that buffers the first part of the body and streams the rest +TEST_P(WasmHttpFilterTest, BodyResponseBufferThenStreamBody) { + setupTest("body"); + setupFilter("body"); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + + Buffer::OwnedImpl bufferedBody; + EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer(_)) + .WillRepeatedly(Invoke([&bufferedBody](BufferFunction f) { f(bufferedBody); })); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"x-test-operation", "BufferTwoBodies"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl data1("hello"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello")))).Times(1); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().encodeData(data1, false)); + bufferedBody.add(data1); + + Buffer::OwnedImpl data2(", there, "); + bufferedBody.add(data2); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody hello, there, ")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter().encodeData(data2, false)); + + // Previous callbacks returned "Buffer" so we have buffered so far + Buffer::OwnedImpl data3("world!"); + bufferedBody.add(data3); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("onBody hello, there, world!")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data3, false)); + + // Last callback returned "continue" so we just see individual chunks. + Buffer::OwnedImpl data4("So it's "); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody So it's ")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data4, false)); + + Buffer::OwnedImpl data5("goodbye, then!"); + EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("onBody goodbye, then!")))) + .Times(1); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(data5, true)); + + filter().onDestroy(); +} + +// Script testing AccessLog::Instance::log. 
+TEST_P(WasmHttpFilterTest, AccessLog) { + setupTest("", "headers"); + setupFilter(); + EXPECT_CALL(filter(), + log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders 2 headers")))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onLog 2 /")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2")))); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + Http::TestResponseHeaderMapImpl response_headers{}; + Http::TestResponseTrailerMapImpl response_trailers{}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + filter().continueStream(proxy_wasm::WasmStreamType::Response); + filter().closeStream(proxy_wasm::WasmStreamType::Response); + StreamInfo::MockStreamInfo log_stream_info; + filter().log(&request_headers, &response_headers, &response_trailers, log_stream_info); + filter().onDestroy(); +} + +TEST_P(WasmHttpFilterTest, AccessLogCreate) { + setupTest("", "headers"); + setupFilter(); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onLog 2 /")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("onDone 2")))); + + StreamInfo::MockStreamInfo log_stream_info; + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + Http::TestResponseHeaderMapImpl response_headers{}; + Http::TestResponseTrailerMapImpl response_trailers{}; + filter().log(&request_headers, &response_headers, &response_trailers, log_stream_info); + filter().onDestroy(); +} + +TEST_P(WasmHttpFilterTest, AsyncCall) { + setupTest("async_call"); + setupFilter("async_call"); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + Http::MockAsyncClientRequest request(&cluster_manager_.async_client_); + Http::AsyncClient::Callbacks* callbacks = nullptr; + EXPECT_CALL(cluster_manager_, get(Eq("cluster"))).Times(testing::AtLeast(1)); + EXPECT_CALL(cluster_manager_, get(Eq("bogus cluster"))).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster")); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/"}, + {":authority", "foo"}, + {"content-length", "11"}}), + message->headers()); + EXPECT_EQ((Http::TestRequestTrailerMapImpl{{"trail", "cow"}}), *message->trailers()); + callbacks = &cb; + return &request; + })); + + EXPECT_CALL(filter(), log_(spdlog::level::debug, Eq("response"))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(":status -> 200"))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq("onRequestHeaders"))) + .WillOnce(Invoke([&](uint32_t, absl::string_view) -> proxy_wasm::WasmResult { + Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response_message->body().add("response"); + NiceMock span; + Http::TestResponseHeaderMapImpl response_header{{":status", "200"}}; + callbacks->onBeforeFinalizeUpstreamSpan(span, &response_header); + callbacks->onSuccess(request, std::move(response_message)); + return proxy_wasm::WasmResult::Ok; + })); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + 
filter().decodeHeaders(request_headers, false)); + + EXPECT_NE(callbacks, nullptr); +} + +TEST_P(WasmHttpFilterTest, AsyncCallBadCall) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): The Rust SDK does not support end_of_stream in on_http_request_headers. + return; + } + setupTest("async_call"); + setupFilter("async_call"); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + Http::MockAsyncClientRequest request(&cluster_manager_.async_client_); + EXPECT_CALL(cluster_manager_, get(Eq("cluster"))).Times(testing::AtLeast(1)); + EXPECT_CALL(cluster_manager_, get(Eq("bogus cluster"))).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster")); + // Just fail the send. + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks&, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + return nullptr; + })); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); +} + +TEST_P(WasmHttpFilterTest, AsyncCallFailure) { + setupTest("async_call"); + setupFilter("async_call"); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + Http::MockAsyncClientRequest request(&cluster_manager_.async_client_); + Http::AsyncClient::Callbacks* callbacks = nullptr; + EXPECT_CALL(cluster_manager_, get(Eq("cluster"))).Times(testing::AtLeast(1)); + EXPECT_CALL(cluster_manager_, get(Eq("bogus cluster"))).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster")); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/"}, + {":authority", "foo"}, + {"content-length", "11"}}), + message->headers()); + EXPECT_EQ((Http::TestRequestTrailerMapImpl{{"trail", "cow"}}), *message->trailers()); + callbacks = &cb; + return &request; + })); + + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq("onRequestHeaders"))) + .WillOnce(Invoke([&](uint32_t, absl::string_view) -> proxy_wasm::WasmResult { + callbacks->onFailure(request, Http::AsyncClient::FailureReason::Reset); + return proxy_wasm::WasmResult::Ok; + })); + // TODO(PiotrSikora): RootContext handling is incomplete in the Rust SDK. 
+ if (std::get<1>(GetParam()) == "rust") { + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq("async_call failed"))); + } else { + EXPECT_CALL(rootContext(), log_(spdlog::level::info, Eq("async_call failed"))); + } + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + EXPECT_NE(callbacks, nullptr); +} + +TEST_P(WasmHttpFilterTest, AsyncCallAfterDestroyed) { + setupTest("async_call"); + setupFilter("async_call"); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + Http::MockAsyncClientRequest request(&cluster_manager_.async_client_); + Http::AsyncClient::Callbacks* callbacks = nullptr; + EXPECT_CALL(cluster_manager_, get(Eq("cluster"))).Times(testing::AtLeast(1)); + EXPECT_CALL(cluster_manager_, get(Eq("bogus cluster"))).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster")); + EXPECT_CALL(cluster_manager_.async_client_, send_(_, _, _)) + .WillOnce( + Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/"}, + {":authority", "foo"}, + {"content-length", "11"}}), + message->headers()); + EXPECT_EQ((Http::TestRequestTrailerMapImpl{{"trail", "cow"}}), *message->trailers()); + callbacks = &cb; + return &request; + })); + + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq("onRequestHeaders"))); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + EXPECT_CALL(request, cancel()).WillOnce([&]() { callbacks = nullptr; }); + + // Destroy the Context, Plugin and VM. + context_.reset(); + plugin_.reset(); + wasm_.reset(); + + Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + response_message->body().add("response"); + + // (Don't) Make the callback on the destroyed VM. + EXPECT_EQ(callbacks, nullptr); + if (callbacks) { + callbacks->onSuccess(request, std::move(response_message)); + } +} + +TEST_P(WasmHttpFilterTest, GrpcCall) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. 
+ return; + } + setupTest("grpc_call"); + setupFilter("grpc_call"); + NiceMock request; + Grpc::RawAsyncRequestCallbacks* callbacks = nullptr; + Grpc::MockAsyncClientManager client_manager; + auto client_factory = std::make_unique(); + auto async_client = std::make_unique(); + Tracing::Span* parent_span{}; + EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _)) + .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name, + Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb, + Tracing::Span& span, const Http::AsyncClient::RequestOptions& options) + -> Grpc::AsyncRequest* { + EXPECT_EQ(service_full_name, "service"); + EXPECT_EQ(method_name, "method"); + ProtobufWkt::Value value; + EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length())); + EXPECT_EQ(value.string_value(), "request"); + callbacks = &cb; + parent_span = &span; + EXPECT_EQ(options.timeout->count(), 1000); + return &request; + })); + EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr { + return std::move(async_client); + })); + EXPECT_CALL(cluster_manager_, grpcAsyncClientManager()) + .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; })); + EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr { + return std::move(client_factory); + })); + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("response"))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_NE(callbacks, nullptr); + NiceMock span; + if (callbacks) { + callbacks->onCreateInitialMetadata(request_headers); + callbacks->onSuccessRaw(std::move(response), span); + } +} + +TEST_P(WasmHttpFilterTest, GrpcCallBadCall) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. 
+ return; + } + setupTest("grpc_call"); + setupFilter("grpc_call"); + Grpc::MockAsyncClientManager client_manager; + auto client_factory = std::make_unique(); + auto async_client = std::make_unique(); + EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _)) + .WillOnce(Invoke([&](absl::string_view, absl::string_view, Buffer::InstancePtr&&, + Grpc::RawAsyncRequestCallbacks&, Tracing::Span&, + const Http::AsyncClient::RequestOptions&) -> Grpc::AsyncRequest* { + return nullptr; + })); + EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr { + return std::move(async_client); + })); + EXPECT_CALL(cluster_manager_, grpcAsyncClientManager()) + .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; })); + EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr { + return std::move(client_factory); + })); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); +} + +TEST_P(WasmHttpFilterTest, GrpcCallFailure) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. + return; + } + setupTest("grpc_call"); + setupFilter("grpc_call"); + NiceMock request; + Grpc::RawAsyncRequestCallbacks* callbacks = nullptr; + Grpc::MockAsyncClientManager client_manager; + auto client_factory = std::make_unique(); + auto async_client = std::make_unique(); + Tracing::Span* parent_span{}; + EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _)) + .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name, + Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb, + Tracing::Span& span, const Http::AsyncClient::RequestOptions& options) + -> Grpc::AsyncRequest* { + EXPECT_EQ(service_full_name, "service"); + EXPECT_EQ(method_name, "method"); + ProtobufWkt::Value value; + EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length())); + EXPECT_EQ(value.string_value(), "request"); + callbacks = &cb; + parent_span = &span; + EXPECT_EQ(options.timeout->count(), 1000); + return &request; + })); + EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr { + return std::move(async_client); + })); + EXPECT_CALL(cluster_manager_, grpcAsyncClientManager()) + .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; })); + EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr { + return std::move(client_factory); + })); + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("failure bad"))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + // Test some additional error paths. 
+ EXPECT_EQ(filter().grpcSend(99999, "", false), proxy_wasm::WasmResult::BadArgument); + EXPECT_EQ(filter().grpcSend(10000, "", false), proxy_wasm::WasmResult::NotFound); + EXPECT_EQ(filter().grpcCancel(9999), proxy_wasm::WasmResult::NotFound); + EXPECT_EQ(filter().grpcCancel(10000), proxy_wasm::WasmResult::NotFound); + EXPECT_EQ(filter().grpcClose(9999), proxy_wasm::WasmResult::NotFound); + EXPECT_EQ(filter().grpcClose(10000), proxy_wasm::WasmResult::NotFound); + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_NE(callbacks, nullptr); + NiceMock span; + if (callbacks) { + callbacks->onFailure(Grpc::Status::WellKnownGrpcStatus::Canceled, "bad", span); + } +} + +TEST_P(WasmHttpFilterTest, GrpcCallCancel) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. + return; + } + setupTest("grpc_call"); + setupFilter("grpc_call"); + NiceMock request; + Grpc::RawAsyncRequestCallbacks* callbacks = nullptr; + Grpc::MockAsyncClientManager client_manager; + auto client_factory = std::make_unique(); + auto async_client = std::make_unique(); + Tracing::Span* parent_span{}; + EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _)) + .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name, + Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb, + Tracing::Span& span, const Http::AsyncClient::RequestOptions& options) + -> Grpc::AsyncRequest* { + EXPECT_EQ(service_full_name, "service"); + EXPECT_EQ(method_name, "method"); + ProtobufWkt::Value value; + EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length())); + EXPECT_EQ(value.string_value(), "request"); + callbacks = &cb; + parent_span = &span; + EXPECT_EQ(options.timeout->count(), 1000); + return &request; + })); + EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr { + return std::move(async_client); + })); + EXPECT_CALL(cluster_manager_, grpcAsyncClientManager()) + .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; })); + EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr { + return std::move(client_factory); + })); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + rootContext().onQueueReady(0); +} + +TEST_P(WasmHttpFilterTest, GrpcCallClose) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. 
+ return; + } + setupTest("grpc_call"); + setupFilter("grpc_call"); + NiceMock request; + Grpc::RawAsyncRequestCallbacks* callbacks = nullptr; + Grpc::MockAsyncClientManager client_manager; + auto client_factory = std::make_unique(); + auto async_client = std::make_unique(); + Tracing::Span* parent_span{}; + EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _)) + .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name, + Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb, + Tracing::Span& span, const Http::AsyncClient::RequestOptions& options) + -> Grpc::AsyncRequest* { + EXPECT_EQ(service_full_name, "service"); + EXPECT_EQ(method_name, "method"); + ProtobufWkt::Value value; + EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length())); + EXPECT_EQ(value.string_value(), "request"); + callbacks = &cb; + parent_span = &span; + EXPECT_EQ(options.timeout->count(), 1000); + return &request; + })); + EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr { + return std::move(async_client); + })); + EXPECT_CALL(cluster_manager_, grpcAsyncClientManager()) + .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; })); + EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr { + return std::move(client_factory); + })); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + rootContext().onQueueReady(1); +} + +TEST_P(WasmHttpFilterTest, GrpcCallAfterDestroyed) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. 
+ return; + } + setupTest("grpc_call"); + setupFilter("grpc_call"); + Grpc::MockAsyncRequest request; + Grpc::RawAsyncRequestCallbacks* callbacks = nullptr; + Grpc::MockAsyncClientManager client_manager; + auto client_factory = std::make_unique(); + auto async_client = std::make_unique(); + Tracing::Span* parent_span{}; + EXPECT_CALL(*async_client, sendRaw(_, _, _, _, _, _)) + .WillOnce(Invoke([&](absl::string_view service_full_name, absl::string_view method_name, + Buffer::InstancePtr&& message, Grpc::RawAsyncRequestCallbacks& cb, + Tracing::Span& span, const Http::AsyncClient::RequestOptions& options) + -> Grpc::AsyncRequest* { + EXPECT_EQ(service_full_name, "service"); + EXPECT_EQ(method_name, "method"); + ProtobufWkt::Value value; + EXPECT_TRUE(value.ParseFromArray(message->linearize(message->length()), message->length())); + EXPECT_EQ(value.string_value(), "request"); + callbacks = &cb; + parent_span = &span; + EXPECT_EQ(options.timeout->count(), 1000); + return &request; + })); + EXPECT_CALL(*client_factory, create).WillOnce(Invoke([&]() -> Grpc::RawAsyncClientPtr { + return std::move(async_client); + })); + EXPECT_CALL(cluster_manager_, grpcAsyncClientManager()) + .WillOnce(Invoke([&]() -> Grpc::AsyncClientManager& { return client_manager; })); + EXPECT_CALL(client_manager, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr { + return std::move(client_factory); + })); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + EXPECT_CALL(request, cancel()).WillOnce([&]() { callbacks = nullptr; }); + + // Destroy the Context, Plugin and VM. + context_.reset(); + plugin_.reset(); + wasm_.reset(); + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_EQ(callbacks, nullptr); + NiceMock span; + if (callbacks) { + callbacks->onSuccessRaw(std::move(response), span); + } +} + +void WasmHttpFilterTest::setupGrpcStreamTest(Grpc::RawAsyncStreamCallbacks*& callbacks) { + setupTest("grpc_stream"); + setupFilter("grpc_stream"); + + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, _)) + .WillRepeatedly( + Invoke([&](const GrpcService&, Stats::Scope&, bool) -> Grpc::AsyncClientFactoryPtr { + auto client_factory = std::make_unique(); + EXPECT_CALL(*client_factory, create) + .WillRepeatedly(Invoke([&]() -> Grpc::RawAsyncClientPtr { + auto async_client = std::make_unique(); + EXPECT_CALL(*async_client, startRaw(_, _, _, _)) + .WillRepeatedly(Invoke( + [&](absl::string_view service_full_name, absl::string_view method_name, + Grpc::RawAsyncStreamCallbacks& cb, + const Http::AsyncClient::StreamOptions&) -> Grpc::RawAsyncStream* { + EXPECT_EQ(service_full_name, "service"); + if (method_name != "method") { + return nullptr; + } + callbacks = &cb; + return &async_stream_; + })); + return async_client; + })); + return client_factory; + })); + EXPECT_CALL(cluster_manager_, grpcAsyncClientManager()) + .WillRepeatedly(Invoke([&]() -> Grpc::AsyncClientManager& { return async_client_manager_; })); +} + +TEST_P(WasmHttpFilterTest, GrpcStream) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. 
+ return; + } + Grpc::RawAsyncStreamCallbacks* callbacks = nullptr; + setupGrpcStreamTest(callbacks); + + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("response response"))); + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("close done"))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_NE(callbacks, nullptr); + if (callbacks) { + Http::TestRequestHeaderMapImpl create_initial_metadata{{"test", "create_initial_metadata"}}; + callbacks->onCreateInitialMetadata(create_initial_metadata); + callbacks->onReceiveInitialMetadata(std::make_unique()); + callbacks->onReceiveMessageRaw(std::move(response)); + callbacks->onReceiveTrailingMetadata(std::make_unique()); + callbacks->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, "done"); + } +} + +// Local close followed by remote close. +TEST_P(WasmHttpFilterTest, GrpcStreamCloseLocal) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. + return; + } + Grpc::RawAsyncStreamCallbacks* callbacks = nullptr; + setupGrpcStreamTest(callbacks); + + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("response close"))); + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("close ok"))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + ProtobufWkt::Value value; + value.set_string_value("close"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_NE(callbacks, nullptr); + if (callbacks) { + Http::TestRequestHeaderMapImpl create_initial_metadata{{"test", "create_initial_metadata"}}; + callbacks->onCreateInitialMetadata(create_initial_metadata); + callbacks->onReceiveInitialMetadata(std::make_unique()); + callbacks->onReceiveMessageRaw(std::move(response)); + callbacks->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, "ok"); + } +} + +// Remote close followed by local close. +TEST_P(WasmHttpFilterTest, GrpcStreamCloseRemote) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. 
+ return; + } + Grpc::RawAsyncStreamCallbacks* callbacks = nullptr; + setupGrpcStreamTest(callbacks); + + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("response response"))); + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("close close"))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_NE(callbacks, nullptr); + if (callbacks) { + Http::TestRequestHeaderMapImpl create_initial_metadata{{"test", "create_initial_metadata"}}; + callbacks->onCreateInitialMetadata(create_initial_metadata); + callbacks->onReceiveInitialMetadata(std::make_unique()); + callbacks->onReceiveMessageRaw(std::move(response)); + callbacks->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, "close"); + } +} + +TEST_P(WasmHttpFilterTest, GrpcStreamCancel) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. + return; + } + Grpc::RawAsyncStreamCallbacks* callbacks = nullptr; + setupGrpcStreamTest(callbacks); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_NE(callbacks, nullptr); + NiceMock span; + if (callbacks) { + Http::TestRequestHeaderMapImpl create_initial_metadata{{"test", "create_initial_metadata"}}; + callbacks->onCreateInitialMetadata(create_initial_metadata); + callbacks->onReceiveInitialMetadata(std::make_unique( + Http::TestResponseHeaderMapImpl{{"test", "reset"}})); + } +} + +TEST_P(WasmHttpFilterTest, GrpcStreamOpenAtShutdown) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): gRPC call outs not yet supported in the Rust SDK. + return; + } + Grpc::RawAsyncStreamCallbacks* callbacks = nullptr; + setupGrpcStreamTest(callbacks); + + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq("response response"))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter().decodeHeaders(request_headers, false)); + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); + EXPECT_NE(callbacks, nullptr); + NiceMock span; + if (callbacks) { + Http::TestRequestHeaderMapImpl create_initial_metadata{{"test", "create_initial_metadata"}}; + callbacks->onCreateInitialMetadata(create_initial_metadata); + callbacks->onReceiveInitialMetadata(std::make_unique()); + callbacks->onReceiveMessageRaw(std::move(response)); + callbacks->onReceiveTrailingMetadata(std::make_unique()); + } + + // Destroy the Context, Plugin and VM. + context_.reset(); + plugin_.reset(); + wasm_.reset(); +} + +// Test metadata access including CEL expressions. +// TODO: re-enable this on Windows if and when the CEL `Antlr` parser compiles on Windows. 
+#if defined(ENVOY_WASM_V8) || defined(ENVOY_WASM_WAVM) +TEST_P(WasmHttpFilterTest, Metadata) { + setupTest("", "metadata"); + setupFilter(); + envoy::config::core::v3::Node node_data; + ProtobufWkt::Value node_val; + node_val.set_string_value("wasm_node_get_value"); + (*node_data.mutable_metadata()->mutable_fields())["wasm_node_get_key"] = node_val; + (*node_data.mutable_metadata()->mutable_fields())["wasm_node_list_key"] = + ValueUtil::listValue({node_val}); + EXPECT_CALL(local_info_, node()).WillRepeatedly(ReturnRef(node_data)); + EXPECT_CALL(rootContext(), + log_(spdlog::level::debug, Eq(absl::string_view("onTick wasm_node_get_value")))); + + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("onBody wasm_node_get_value")))); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("header path /")))); + EXPECT_CALL(filter(), + log_(spdlog::level::trace, + Eq(absl::string_view("Struct wasm_request_get_value wasm_request_get_value")))); + if (std::get<1>(GetParam()) != "rust") { + // TODO(PiotrSikora): not yet supported in the Rust SDK. + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("server is envoy-wasm")))); + } + + request_stream_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + HttpFilters::HttpFilterNames::get().Wasm, + MessageUtil::keyValueStruct("wasm_request_get_key", "wasm_request_get_value"))); + + rootContext().onTick(0); + + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + absl::optional dur = std::chrono::nanoseconds(15000000); + EXPECT_CALL(request_stream_info_, requestComplete()).WillRepeatedly(Return(dur)); + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("duration is 15000000")))); + if (std::get<1>(GetParam()) != "rust") { + // TODO(PiotrSikora): not yet supported in the Rust SDK. + EXPECT_CALL(filter(), log_(spdlog::level::info, Eq(absl::string_view("grpc service: test")))); + } + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, {"biz", "baz"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(data, true)); + + StreamInfo::MockStreamInfo log_stream_info; + filter().log(&request_headers, nullptr, nullptr, log_stream_info); + + const auto& result = request_stream_info_.filterState()->getDataReadOnly( + "wasm.wasm_request_set_key"); + EXPECT_EQ("wasm_request_set_value", result.value()); + + filter().onDestroy(); + filter().onDestroy(); // Does nothing. +} +#endif + +TEST_P(WasmHttpFilterTest, Property) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): test not yet implemented using Rust SDK. 
+ return; + } + setupTest("", "property"); + setupFilter(); + envoy::config::core::v3::Node node_data; + ProtobufWkt::Value node_val; + node_val.set_string_value("sample_data"); + (*node_data.mutable_metadata()->mutable_fields())["istio.io/metadata"] = node_val; + EXPECT_CALL(local_info_, node()).WillRepeatedly(ReturnRef(node_data)); + + request_stream_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + HttpFilters::HttpFilterNames::get().Wasm, + MessageUtil::keyValueStruct("wasm_request_get_key", "wasm_request_get_value"))); + EXPECT_CALL(request_stream_info_, responseCode()).WillRepeatedly(Return(403)); + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + + // test outputs should match inputs + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("request.path: /test_context")))); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("node.metadata: sample_data")))); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("metadata: wasm_request_get_value")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("response.code: 403")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("state: wasm_value")))); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("upstream host metadata: endpoint")))); + + root_context_->onTick(0); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/test_context"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); + StreamInfo::MockStreamInfo log_stream_info; + request_stream_info_.route_name_ = "route12"; + request_stream_info_.requested_server_name_ = "w3.org"; + NiceMock connection; + EXPECT_CALL(connection, id()).WillRepeatedly(Return(4)); + EXPECT_CALL(encoder_callbacks_, connection()).WillRepeatedly(Return(&connection)); + NiceMock route_entry; + EXPECT_CALL(request_stream_info_, routeEntry()).WillRepeatedly(Return(&route_entry)); + std::shared_ptr> host_description( + new NiceMock()); + auto metadata = std::make_shared( + TestUtility::parseYaml( + R"EOF( + filter_metadata: + namespace: + key: endpoint + )EOF")); + EXPECT_CALL(*host_description, metadata()).WillRepeatedly(Return(metadata)); + EXPECT_CALL(request_stream_info_, upstreamHost()).WillRepeatedly(Return(host_description)); + filter().log(&request_headers, nullptr, nullptr, log_stream_info); +} + +TEST_P(WasmHttpFilterTest, ClusterMetadata) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): test not yet implemented using Rust SDK. 
+ return; + } + setupTest("", "cluster_metadata"); + setupFilter(); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("cluster metadata: cluster")))); + auto cluster = std::make_shared>(); + auto cluster_metadata = std::make_shared( + TestUtility::parseYaml( + R"EOF( + filter_metadata: + namespace: + key: cluster + )EOF")); + + std::shared_ptr> host_description( + new NiceMock()); + StreamInfo::MockStreamInfo log_stream_info; + Http::TestRequestHeaderMapImpl request_headers{{}}; + + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + EXPECT_CALL(*cluster, metadata()).WillRepeatedly(ReturnRef(*cluster_metadata)); + EXPECT_CALL(*host_description, cluster()).WillRepeatedly(ReturnRef(*cluster)); + EXPECT_CALL(request_stream_info_, upstreamHost()).WillRepeatedly(Return(host_description)); + filter().log(&request_headers, nullptr, nullptr, log_stream_info); + + // If upstream host is empty, fallback to upstream cluster info for cluster metadata. + EXPECT_CALL(request_stream_info_, upstreamHost()).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(request_stream_info_, upstreamClusterInfo()).WillRepeatedly(Return(cluster)); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("cluster metadata: cluster")))); + filter().log(&request_headers, nullptr, nullptr, log_stream_info); +} + +TEST_P(WasmHttpFilterTest, SharedData) { + setupTest("shared_data"); + EXPECT_CALL(rootContext(), log_(spdlog::level::info, Eq(absl::string_view("set CasMismatch")))); + EXPECT_CALL(rootContext(), + log_(spdlog::level::debug, Eq(absl::string_view("get 1 shared_data_value1")))); + if (std::get<1>(GetParam()) == "rust") { + EXPECT_CALL(rootContext(), + log_(spdlog::level::warn, Eq(absl::string_view("get 2 shared_data_value2")))); + } else { + EXPECT_CALL(rootContext(), + log_(spdlog::level::critical, Eq(absl::string_view("get 2 shared_data_value2")))); + } + EXPECT_CALL(rootContext(), + log_(spdlog::level::debug, Eq(absl::string_view("get of bad key not found")))); + EXPECT_CALL(rootContext(), + log_(spdlog::level::debug, Eq(absl::string_view("second get of bad key not found")))); + rootContext().onTick(0); + rootContext().onQueueReady(0); +} + +TEST_P(WasmHttpFilterTest, SharedQueue) { + setupTest("shared_queue"); + setupFilter("shared_queue"); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("onRequestHeaders enqueue Ok")))); + EXPECT_CALL(filter(), log_(spdlog::level::warn, + Eq(absl::string_view("onRequestHeaders not found bad_shared_queue")))); + EXPECT_CALL(rootContext(), + log_(spdlog::level::warn, Eq(absl::string_view("onQueueReady bad token not found")))) + .Times(2); + EXPECT_CALL(rootContext(), + log_(spdlog::level::warn, Eq(absl::string_view("onQueueReady extra data not found")))) + .Times(2); + EXPECT_CALL(rootContext(), log_(spdlog::level::info, Eq(absl::string_view("onQueueReady")))) + .Times(2); + EXPECT_CALL(rootContext(), log_(spdlog::level::debug, Eq(absl::string_view("data data1 Ok")))); + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); + auto token = proxy_wasm::resolveQueueForTest("vm_id", "my_shared_queue"); + root_context_->onQueueReady(token); +} + +// Script using a root_id which is not registered. 
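+// With no context registered for that root_id the test sets no log expectations and only
+// checks that decodeHeaders() still returns Continue, i.e. the filter effectively no-ops.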
+TEST_P(WasmHttpFilterTest, RootIdNotRegistered) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): proxy_get_property("root_id") is not yet supported in the Rust SDK. + return; + } + setupTest(); + setupFilter(); + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); +} + +// Script using an explicit root_id which is registered. +TEST_P(WasmHttpFilterTest, RootId1) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): proxy_get_property("root_id") is not yet supported in the Rust SDK. + return; + } + setupTest("context1"); + setupFilter("context1"); + EXPECT_CALL(filter(), log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders1 2")))); + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); +} + +// Script using an explicit root_id which is registered. +TEST_P(WasmHttpFilterTest, RootId2) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): proxy_get_property("root_id") is not yet supported in the Rust SDK. + return; + } + setupTest("context2"); + setupFilter("context2"); + EXPECT_CALL(filter(), log_(spdlog::level::debug, Eq(absl::string_view("onRequestHeaders2 2")))); + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); +} + +} // namespace Wasm +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc index 896ef07321af..95e3974fe626 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc @@ -87,6 +87,8 @@ Api::SysCallIntResult FakeConnectionSocket::getSocketOption(int level, int, void #endif } +absl::optional FakeConnectionSocket::lastRoundTripTime() { return {}; } + } // namespace ListenerFilters } // namespace Extensions } // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h index f6cc0b6e31d6..ee3ede59681f 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h @@ -49,6 +49,8 @@ class FakeConnectionSocket : public Network::MockConnectionSocket { Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override; + absl::optional lastRoundTripTime() override; + private: const Network::IoHandlePtr io_handle_; Network::Address::InstanceConstSharedPtr local_address_; diff --git a/test/extensions/filters/listener/proxy_protocol/BUILD b/test/extensions/filters/listener/proxy_protocol/BUILD index b9c84edf4046..e62c3ecd352c 100644 --- a/test/extensions/filters/listener/proxy_protocol/BUILD +++ b/test/extensions/filters/listener/proxy_protocol/BUILD @@ -17,7 +17,6 @@ envoy_extension_cc_test( name = "proxy_protocol_test", srcs = ["proxy_protocol_test.cc"], extension_name = "envoy.filters.listener.proxy_protocol", - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_includes", diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc 
b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 9cf36ab76c83..e51ebd62fbde 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -60,8 +60,8 @@ class ProxyProtocolTest : public testing::TestWithParamallocateDispatcher("test_thread")), socket_(std::make_shared( Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)), - connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_)), name_("proxy"), - filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), + connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), + name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), init_manager_(nullptr) { EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress())); @@ -88,6 +88,9 @@ class ProxyProtocolTest : public testing::TestWithParam os_calls(&os_sys_calls); + + // TODO(davinci26): Mocking should not be used to provide real system calls. EXPECT_CALL(os_sys_calls, connect(_, _, _)) .Times(AnyNumber()) .WillRepeatedly(Invoke([this](os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) { @@ -348,6 +353,12 @@ TEST_P(ProxyProtocolTest, ErrorRecv_2) { EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) { return os_sys_calls_actual_.close(fd); })); + EXPECT_CALL(os_sys_calls, accept(_, _, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke( + [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult { + return os_sys_calls_actual_.accept(sockfd, addr, addrlen); + })); connect(false); write(buffer, sizeof(buffer)); @@ -363,6 +374,8 @@ TEST_P(ProxyProtocolTest, ErrorRecv_1) { 'r', 'e', ' ', 'd', 'a', 't', 'a'}; Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + + // TODO(davinci26): Mocking should not be used to provide real system calls. EXPECT_CALL(os_sys_calls, recv(_, _, _, _)) .Times(AnyNumber()) .WillRepeatedly(Return(Api::SysCallSizeResult{-1, 0})); @@ -400,6 +413,12 @@ TEST_P(ProxyProtocolTest, ErrorRecv_1) { EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) { return os_sys_calls_actual_.close(fd); })); + EXPECT_CALL(os_sys_calls, accept(_, _, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke( + [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult { + return os_sys_calls_actual_.accept(sockfd, addr, addrlen); + })); connect(false); write(buffer, sizeof(buffer)); @@ -580,6 +599,7 @@ TEST_P(ProxyProtocolTest, V2ParseExtensionsRecvError) { Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + // TODO(davinci26): Mocking should not be used to provide real system calls. 
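+ // Until then the mock forwards most socket calls (e.g. close() and accept() below) to
+ // os_sys_calls_actual_ and only intercepts recv() to inject the failure under test.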
EXPECT_CALL(os_sys_calls, recv(_, _, _, _)) .Times(AnyNumber()) .WillRepeatedly(Invoke([this](os_fd_t fd, void* buf, size_t n, int flags) { @@ -624,6 +644,12 @@ TEST_P(ProxyProtocolTest, V2ParseExtensionsRecvError) { EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) { return os_sys_calls_actual_.close(fd); })); + EXPECT_CALL(os_sys_calls, accept(_, _, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke( + [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult { + return os_sys_calls_actual_.accept(sockfd, addr, addrlen); + })); connect(false); write(buffer, sizeof(buffer)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -724,6 +750,7 @@ TEST_P(ProxyProtocolTest, V2Fragmented3Error) { Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + // TODO(davinci26): Mocking should not be used to provide real system calls. EXPECT_CALL(os_sys_calls, recv(_, _, _, _)) .Times(AnyNumber()) .WillRepeatedly(Invoke([this](os_fd_t fd, void* buf, size_t len, int flags) { @@ -771,6 +798,12 @@ TEST_P(ProxyProtocolTest, V2Fragmented3Error) { EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) { return os_sys_calls_actual_.close(fd); })); + EXPECT_CALL(os_sys_calls, accept(_, _, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke( + [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult { + return os_sys_calls_actual_.accept(sockfd, addr, addrlen); + })); connect(false); write(buffer, 17); @@ -788,6 +821,7 @@ TEST_P(ProxyProtocolTest, V2Fragmented4Error) { Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + // TODO(davinci26): Mocking should not be used to provide real system calls. 
EXPECT_CALL(os_sys_calls, recv(_, _, _, _)) .Times(AnyNumber()) .WillRepeatedly(Invoke([this](os_fd_t fd, void* buf, size_t len, int flags) { @@ -835,6 +869,12 @@ TEST_P(ProxyProtocolTest, V2Fragmented4Error) { EXPECT_CALL(os_sys_calls, close(_)).Times(AnyNumber()).WillRepeatedly(Invoke([this](os_fd_t fd) { return os_sys_calls_actual_.close(fd); })); + EXPECT_CALL(os_sys_calls, accept(_, _, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke( + [this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallSocketResult { + return os_sys_calls_actual_.accept(sockfd, addr, addrlen); + })); connect(false); write(buffer, 10); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -1248,8 +1288,8 @@ class WildcardProxyProtocolTest : public testing::TestWithParamlocalAddress()->ip()->port())), - connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_)), name_("proxy"), - filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), + connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), + name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), init_manager_(nullptr) { EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress())); @@ -1286,6 +1326,9 @@ class WildcardProxyProtocolTest : public testing::TestWithParambody() = std::make_unique( - api_->fileSystem().fileReadToEnd(TestEnvironment::runfilesPath( - "test/extensions/filters/network/client_ssl_auth/test_data/vpn_response_1.json"))); + message->body().add(api_->fileSystem().fileReadToEnd(TestEnvironment::runfilesPath( + "test/extensions/filters/network/client_ssl_auth/test_data/vpn_response_1.json"))); callbacks_->onSuccess(request_, std::move(message)); EXPECT_EQ(1U, stats_store_ @@ -237,7 +236,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { EXPECT_CALL(*interval_timer_, enableTimer(_, _)); message = std::make_unique( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}}); - message->body() = std::make_unique("bad_json"); + message->body().add("bad_json"); callbacks_->onSuccess(request_, std::move(message)); // Interval timer fires. 
diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc index 20f1484c2712..ab0c6492627c 100644 --- a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc @@ -119,9 +119,9 @@ void UberFilterFuzzer::fuzz( break; } case test::extensions::filters::network::Action::kAdvanceTime: { - time_source_.advanceTimeAsync( - std::chrono::milliseconds(action.advance_time().milliseconds())); - factory_context_.dispatcher().run(Event::Dispatcher::RunType::NonBlock); + time_source_.advanceTimeAndRun( + std::chrono::milliseconds(action.advance_time().milliseconds()), + factory_context_.dispatcher(), Event::Dispatcher::RunType::NonBlock); break; } default: { diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc index 517429a1dd4b..b1c99c4fd846 100644 --- a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc @@ -102,9 +102,9 @@ void UberWriteFilterFuzzer::fuzz( break; } case test::extensions::filters::network::WriteAction::kAdvanceTime: { - time_source_.advanceTimeAsync( - std::chrono::milliseconds(action.advance_time().milliseconds())); - factory_context_.dispatcher().run(Event::Dispatcher::RunType::NonBlock); + time_source_.advanceTimeAndRun( + std::chrono::milliseconds(action.advance_time().milliseconds()), + factory_context_.dispatcher(), Event::Dispatcher::RunType::NonBlock); break; } default: { diff --git a/test/extensions/filters/network/common/fuzz/utils/fakes.h b/test/extensions/filters/network/common/fuzz/utils/fakes.h index 035dcb3e29ca..a0b0db525470 100644 --- a/test/extensions/filters/network/common/fuzz/utils/fakes.h +++ b/test/extensions/filters/network/common/fuzz/utils/fakes.h @@ -16,7 +16,6 @@ class FakeFactoryContext : public MockFactoryContext { Init::Manager& initManager() override { return init_manager_; } ServerLifecycleNotifier& lifecycleNotifier() override { return lifecycle_notifier_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } - Envoy::Random::RandomGenerator& random() override { return random_; } Envoy::Runtime::Loader& runtime() override { return runtime_loader_; } Stats::Scope& scope() override { return scope_; } Singleton::Manager& singletonManager() override { return *singleton_manager_; } diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc index c2e816c748d5..a78571199e7e 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc @@ -83,7 +83,7 @@ DEFINE_PROTO_FUZZER(const envoy::extensions::filters::network::ext_authz::ExtAut case envoy::extensions::filters::network::ext_authz::Action::kOnData: { // Optional input field to set default authorization check result for the following "onData()" if (action.on_data().has_result()) { - ON_CALL(*client, check(_, _, _, _)) + ON_CALL(*client, check(_, _, _, _, _)) .WillByDefault(WithArgs<0>( Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse( diff --git a/test/extensions/filters/network/ext_authz/ext_authz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_test.cc index 9f50f576ac42..e3dab5cdd2f2 100644 --- 
a/test/extensions/filters/network/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_test.cc @@ -37,11 +37,9 @@ namespace ExtAuthz { class ExtAuthzFilterTest : public testing::Test { public: - ExtAuthzFilterTest() { initialize(); } - - void initialize() { + void initialize(std::string yaml) { envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{}; - TestUtility::loadFromYaml(default_yaml_string_, proto_config); + TestUtility::loadFromYaml(yaml, proto_config); config_ = std::make_shared(proto_config, stats_store_); client_ = new Filters::Common::ExtAuthz::MockClient(); filter_ = std::make_unique(config_, Filters::Common::ExtAuthz::ClientPtr{client_}); @@ -67,6 +65,63 @@ class ExtAuthzFilterTest : public testing::Test { } } + void expectOKWithOnData() { + EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); + // Confirm that the invocation of onNewConnection did NOT increment the active or total count! + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.total").value()); + EXPECT_EQ( + 0U, + stats_store_.gauge("ext_authz.name.active", Stats::Gauge::ImportMode::Accumulate).value()); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); + // Confirm that the invocation of onData does increment the active and total count! + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); + EXPECT_EQ( + 1U, + stats_store_.gauge("ext_authz.name.active", Stats::Gauge::ImportMode::Accumulate).value()); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + + auto* fields = response.dynamic_metadata.mutable_fields(); + (*fields)["foo"] = ValueUtil::stringValue("ok"); + (*fields)["bar"] = ValueUtil::numberValue(1); + + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([&response](const std::string& ns, + const ProtobufWkt::Struct& returned_dynamic_metadata) { + EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization); + EXPECT_TRUE( + TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata)); + })); + + EXPECT_CALL(filter_callbacks_, continueReading()); + request_callbacks_->onComplete(std::make_unique(response)); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); + + EXPECT_CALL(*client_, cancel()).Times(0); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.ok").value()); + EXPECT_EQ(0U, 
stats_store_.counter("ext_authz.name.cx_closed").value()); + } + Stats::TestUtil::TestStore stats_store_; ConfigSharedPtr config_; Filters::Common::ExtAuthz::MockClient* client_; @@ -82,6 +137,20 @@ class ExtAuthzFilterTest : public testing::Test { failure_mode_allow: true stat_prefix: name )EOF"; + const std::string metadata_yaml_string_ = R"EOF( +grpc_service: + envoy_grpc: + cluster_name: ext_authz_server +failure_mode_allow: true +stat_prefix: name +filter_enabled_metadata: + filter: "abc.xyz" + path: + - key: "k1" + value: + string_match: + exact: "check" + )EOF"; }; TEST_F(ExtAuthzFilterTest, BadExtAuthzConfig) { @@ -100,65 +169,17 @@ stat_prefix: name } TEST_F(ExtAuthzFilterTest, OKWithOnData) { - EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, testing::A(), _)) - .WillOnce( - WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { - request_callbacks_ = &callbacks; - }))); - - EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); - // Confirm that the invocation of onNewConnection did NOT increment the active or total count! - EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.total").value()); - EXPECT_EQ( - 0U, - stats_store_.gauge("ext_authz.name.active", Stats::Gauge::ImportMode::Accumulate).value()); - Buffer::OwnedImpl data("hello"); - EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); - // Confirm that the invocation of onData does increment the active and total count! - EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); - EXPECT_EQ( - 1U, - stats_store_.gauge("ext_authz.name.active", Stats::Gauge::ImportMode::Accumulate).value()); - - Filters::Common::ExtAuthz::Response response{}; - response.status = Filters::Common::ExtAuthz::CheckStatus::OK; - response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; - - auto* fields = response.dynamic_metadata.mutable_fields(); - (*fields)["foo"] = ValueUtil::stringValue("ok"); - (*fields)["bar"] = ValueUtil::numberValue(1); - - EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)) - .WillOnce(Invoke([&response](const std::string& ns, - const ProtobufWkt::Struct& returned_dynamic_metadata) { - EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization); - EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata)); - })); - - EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->onComplete(std::make_unique(response)); - - EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); - - EXPECT_CALL(*client_, cancel()).Times(0); - filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); - - EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); - EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.error").value()); - EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); - EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); - EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.ok").value()); - EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.cx_closed").value()); + initialize(default_yaml_string_); + expectOKWithOnData(); } TEST_F(ExtAuthzFilterTest, DeniedWithOnData) { + initialize(default_yaml_string_); InSequence s; EXPECT_CALL(filter_callbacks_.connection_, 
remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -184,8 +205,10 @@ TEST_F(ExtAuthzFilterTest, DeniedWithOnData) { EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); @@ -193,11 +216,12 @@ TEST_F(ExtAuthzFilterTest, DeniedWithOnData) { } TEST_F(ExtAuthzFilterTest, FailOpen) { + initialize(default_yaml_string_); InSequence s; EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -214,8 +238,10 @@ TEST_F(ExtAuthzFilterTest, FailOpen) { EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); @@ -223,13 +249,14 @@ TEST_F(ExtAuthzFilterTest, FailOpen) { } TEST_F(ExtAuthzFilterTest, FailClose) { + initialize(default_yaml_string_); InSequence s; // Explicitly set the failure_mode_allow to false. 
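+ // With fail-close semantics an Error response must not unblock the request, hence the
+ // continueReading() expectation of zero calls and the untouched failure_mode_allowed counter.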
config_->setFailModeAllow(false); EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -243,8 +270,10 @@ TEST_F(ExtAuthzFilterTest, FailClose) { EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error)); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); @@ -254,11 +283,12 @@ TEST_F(ExtAuthzFilterTest, FailClose) { // Test to verify that when callback from the authorization service has completed the filter // does not invoke Cancel on RemoteClose event. TEST_F(ExtAuthzFilterTest, DoNotCallCancelonRemoteClose) { + initialize(default_yaml_string_); InSequence s; EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -276,8 +306,10 @@ TEST_F(ExtAuthzFilterTest, DoNotCallCancelonRemoteClose) { EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); @@ -287,11 +319,12 @@ TEST_F(ExtAuthzFilterTest, DoNotCallCancelonRemoteClose) { // Test to verify that Cancel is invoked when a RemoteClose event occurs while the call // to the authorization service was in progress. 
TEST_F(ExtAuthzFilterTest, VerifyCancelOnRemoteClose) { + initialize(default_yaml_string_); InSequence s; EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -304,8 +337,10 @@ TEST_F(ExtAuthzFilterTest, VerifyCancelOnRemoteClose) { EXPECT_CALL(*client_, cancel()); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); @@ -315,12 +350,13 @@ TEST_F(ExtAuthzFilterTest, VerifyCancelOnRemoteClose) { // Test to verify that on stack response from the authorization service does NOT // result in calling cancel. TEST_F(ExtAuthzFilterTest, ImmediateOK) { + initialize(default_yaml_string_); InSequence s; EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK)); @@ -334,8 +370,10 @@ TEST_F(ExtAuthzFilterTest, ImmediateOK) { EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.ok").value()); @@ -345,12 +383,13 @@ TEST_F(ExtAuthzFilterTest, ImmediateOK) { // Test to verify that on stack denied response from the authorization service does // result in stoppage of the filter chain. 
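+// The denied result is delivered synchronously from inside check(), so onData() returns
+// StopIteration right away and the denied counter (rather than ok or error) is expected to move.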
TEST_F(ExtAuthzFilterTest, ImmediateNOK) { + initialize(default_yaml_string_); InSequence s; EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Denied)); @@ -360,8 +399,10 @@ TEST_F(ExtAuthzFilterTest, ImmediateNOK) { Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); @@ -371,12 +412,13 @@ TEST_F(ExtAuthzFilterTest, ImmediateNOK) { // Test to verify that on stack Error response when failure_mode_allow is configured // result in request being allowed. TEST_F(ExtAuthzFilterTest, ImmediateErrorFailOpen) { + initialize(default_yaml_string_); InSequence s; EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, check(_, _, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error)); @@ -390,14 +432,101 @@ TEST_F(ExtAuthzFilterTest, ImmediateErrorFailOpen) { EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.cx_closed").value()); } +// Test to verify that a timed-out authorization check increments the proper timeout stat.
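+// The mocked client reports CheckStatus::Error with ErrorKind::Timedout, so both the error and
+// timeout counters are expected to increase while failure_mode_allow still lets the data through.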
+TEST_F(ExtAuthzFilterTest, TimeoutError) { + initialize(default_yaml_string_); + InSequence s; + + EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + auto resp = makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error); + resp->error_kind = Filters::Common::ExtAuthz::ErrorKind::Timedout; + callbacks.onComplete(std::move(resp)); + }))); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); + + EXPECT_CALL(*client_, cancel()).Times(0); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.total").value()); + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.timeout").value()); + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.cx_closed").value()); +} + +// Test to verify the filter is disabled with metadata. +TEST_F(ExtAuthzFilterTest, DisabledWithMetadata) { + initialize(metadata_yaml_string_); + + // Disable in filter_enabled_metadata. + const std::string yaml = R"EOF( + filter_metadata: + abc.xyz: + k1: skip + )EOF"; + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(yaml, metadata); + ON_CALL(filter_callbacks_.connection_.stream_info_, dynamicMetadata()) + .WillByDefault(ReturnRef(metadata)); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); + + EXPECT_CALL(*client_, check(_, _, _, _, _)).Times(0); + EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0); + EXPECT_CALL(*client_, cancel()).Times(0); + + EXPECT_EQ(1U, stats_store_.counter("ext_authz.name.disabled").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.total").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.error").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.timeout").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.failure_mode_allowed").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.denied").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.ok").value()); + EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.cx_closed").value()); +} + +// Test to verify the filter is enabled with metadata. +TEST_F(ExtAuthzFilterTest, EnabledWithMetadata) { + initialize(metadata_yaml_string_); + + // Enable in filter_enabled_metadata. 
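+ // The dynamic metadata below sets k1 to "check", which matches the exact string matcher in
+ // metadata_yaml_string_, so the filter stays enabled and the full OK flow is exercised.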
+ const std::string yaml = R"EOF( + filter_metadata: + abc.xyz: + k1: check + )EOF"; + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(yaml, metadata); + ON_CALL(filter_callbacks_.connection_.stream_info_, dynamicMetadata()) + .WillByDefault(ReturnRef(metadata)); + + expectOKWithOnData(); +} + } // namespace ExtAuthz } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_test.cc b/test/extensions/filters/network/local_ratelimit/local_ratelimit_test.cc index 395f4c0a891c..d764a1bab8ec 100644 --- a/test/extensions/filters/network/local_ratelimit/local_ratelimit_test.cc +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_test.cc @@ -30,12 +30,11 @@ class LocalRateLimitTestBase : public testing::Test { fill_timer_ = new Event::MockTimer(&dispatcher_); if (expect_timer_create) { EXPECT_CALL(*fill_timer_, enableTimer(_, nullptr)); + EXPECT_CALL(*fill_timer_, disableTimer()); } config_ = std::make_shared(proto_config, dispatcher_, stats_store_, runtime_); } - Thread::ThreadSynchronizer& synchronizer() { return config_->synchronizer_; } - NiceMock dispatcher_; Stats::IsolatedStoreImpl stats_store_; NiceMock runtime_; @@ -43,168 +42,6 @@ class LocalRateLimitTestBase : public testing::Test { ConfigSharedPtr config_; }; -// Make sure we fail with a fill rate this is too fast. -TEST_F(LocalRateLimitTestBase, TooFastFillRate) { - EXPECT_THROW_WITH_MESSAGE(initialize(R"EOF( -stat_prefix: local_rate_limit_stats -token_bucket: - max_tokens: 1 - fill_interval: 0.049s -)EOF", - false), - EnvoyException, - "local rate limit token bucket fill timer must be >= 50ms"); -} - -// Verify various token bucket CAS edge cases. -TEST_F(LocalRateLimitTestBase, CasEdgeCases) { - // This tests the case in which a connection creation races with the fill timer. - { - initialize(R"EOF( - stat_prefix: local_rate_limit_stats - token_bucket: - max_tokens: 1 - fill_interval: 0.05s - )EOF"); - - synchronizer().enable(); - - // Start a thread and start the fill callback. This will wait pre-CAS. - synchronizer().waitOn("on_fill_timer_pre_cas"); - std::thread t1([&] { - EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); - fill_timer_->invokeCallback(); - }); - // Wait until the thread is actually waiting. - synchronizer().barrierOn("on_fill_timer_pre_cas"); - - // Create a connection. This should succeed. - EXPECT_TRUE(config_->canCreateConnection()); - - // Now signal the thread to continue which should cause a CAS failure and the loop to repeat. - synchronizer().signal("on_fill_timer_pre_cas"); - t1.join(); - - // 1 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); - } - - // This tests the case in which two connection creations race. - { - initialize(R"EOF( - stat_prefix: local_rate_limit_stats - token_bucket: - max_tokens: 1 - fill_interval: 0.2s - )EOF"); - - synchronizer().enable(); - - // Start a thread and see if we can create a connection. This will wait pre-CAS. - synchronizer().waitOn("can_create_connection_pre_cas"); - std::thread t1([&] { EXPECT_FALSE(config_->canCreateConnection()); }); - // Wait until the thread is actually waiting. - synchronizer().barrierOn("can_create_connection_pre_cas"); - - // Create the connection on this thread, which should cause the CAS to fail on the other thread. 
- EXPECT_TRUE(config_->canCreateConnection()); - synchronizer().signal("can_create_connection_pre_cas"); - t1.join(); - } -} - -// Verify token bucket functionality with a single token. -TEST_F(LocalRateLimitTestBase, TokenBucket) { - initialize(R"EOF( -stat_prefix: local_rate_limit_stats -token_bucket: - max_tokens: 1 - fill_interval: 0.2s -)EOF"); - - // 1 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); - - // 0 -> 1 tokens - EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); - fill_timer_->invokeCallback(); - - // 1 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); - - // 0 -> 1 tokens - EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); - fill_timer_->invokeCallback(); - - // 1 -> 1 tokens - EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); - fill_timer_->invokeCallback(); - - // 1 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); -} - -// Verify token bucket functionality with max tokens and tokens per fill > 1. -TEST_F(LocalRateLimitTestBase, TokenBucketMultipleTokensPerFill) { - initialize(R"EOF( -stat_prefix: local_rate_limit_stats -token_bucket: - max_tokens: 2 - tokens_per_fill: 2 - fill_interval: 0.2s -)EOF"); - - // 2 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); - - // 0 -> 2 tokens - EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); - fill_timer_->invokeCallback(); - - // 2 -> 1 tokens - EXPECT_TRUE(config_->canCreateConnection()); - - // 1 -> 2 tokens - EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); - fill_timer_->invokeCallback(); - - // 2 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); -} - -// Verify token bucket functionality with max tokens > tokens per fill. 
-TEST_F(LocalRateLimitTestBase, TokenBucketMaxTokensGreaterThanTokensPerFill) { - initialize(R"EOF( -stat_prefix: local_rate_limit_stats -token_bucket: - max_tokens: 2 - tokens_per_fill: 1 - fill_interval: 0.2s -)EOF"); - - // 2 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); - - // 0 -> 1 tokens - EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(200), nullptr)); - fill_timer_->invokeCallback(); - - // 1 -> 0 tokens - EXPECT_TRUE(config_->canCreateConnection()); - EXPECT_FALSE(config_->canCreateConnection()); -} - class LocalRateLimitFilterTest : public LocalRateLimitTestBase { public: struct ActiveFilter { diff --git a/test/extensions/filters/network/mongo_proxy/config_test.cc b/test/extensions/filters/network/mongo_proxy/config_test.cc index 35f46fad6cbc..9df9c5530cf6 100644 --- a/test/extensions/filters/network/mongo_proxy/config_test.cc +++ b/test/extensions/filters/network/mongo_proxy/config_test.cc @@ -30,6 +30,9 @@ TEST(MongoFilterConfigTest, CorrectConfigurationNoFaults) { const std::string yaml_string = R"EOF( stat_prefix: my_stat_prefix access_log: path/to/access/log + commands: + - foo + - bar )EOF"; envoy::extensions::filters::network::mongo_proxy::v3::MongoProxy proto_config; @@ -47,6 +50,8 @@ TEST(MongoFilterConfigTest, ValidProtoConfigurationNoFaults) { config.set_access_log("path/to/access/log"); config.set_stat_prefix("my_stat_prefix"); + config.add_commands("foo"); + config.add_commands("bar"); NiceMock context; MongoProxyFilterConfigFactory factory; @@ -64,6 +69,8 @@ TEST(MongoFilterConfigTest, MongoFilterWithEmptyProto) { factory.createEmptyConfigProto().get()); config.set_access_log("path/to/access/log"); config.set_stat_prefix("my_stat_prefix"); + config.add_commands("foo"); + config.add_commands("bar"); Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context); Network::MockConnection connection; @@ -89,7 +96,7 @@ TEST(MongoFilterConfigTest, InvalidExtraProperty) { TEST(MongoFilterConfigTest, EmptyConfig) { handleInvalidConfiguration( - "{}", R"(StatPrefix: \["value length must be at least " '\\x01' " bytes"\])"); + "{}", R"(StatPrefix: \["value length must be at least " '\\x01' " runes"\])"); } TEST(MongoFilterConfigTest, InvalidFaultsEmptyConfig) { diff --git a/test/extensions/filters/network/mongo_proxy/proxy_test.cc b/test/extensions/filters/network/mongo_proxy/proxy_test.cc index 71df3fb3d7d7..5b20f6cd5106 100644 --- a/test/extensions/filters/network/mongo_proxy/proxy_test.cc +++ b/test/extensions/filters/network/mongo_proxy/proxy_test.cc @@ -59,7 +59,11 @@ class TestProxyFilter : public ProxyFilter { class MongoProxyFilterTest : public testing::Test { public: - MongoProxyFilterTest() : mongo_stats_(std::make_shared(store_, "test")) { setup(); } + MongoProxyFilterTest() + : mongo_stats_(std::make_shared(store_, "test", + std::vector{"insert", "count"})) { + setup(); + } void setup() { ON_CALL(runtime_.snapshot_, featureEnabled("mongo.proxy_enabled", 100)) diff --git a/test/extensions/filters/network/mongo_proxy/utility_test.cc b/test/extensions/filters/network/mongo_proxy/utility_test.cc index ad28e35e9cc1..db1ad741325c 100644 --- a/test/extensions/filters/network/mongo_proxy/utility_test.cc +++ b/test/extensions/filters/network/mongo_proxy/utility_test.cc @@ -189,13 +189,18 @@ TEST(QueryMessageInfoTest, Command) { EXPECT_THROW((QueryMessageInfo(q)), EnvoyException); } - { + std::vector> test_cases = { + {"collstats", 
"collStats"}, {"dbstats", "dbStats"}, + {"findandmodify", "findAndModify"}, {"getlasterror", "getLastError"}, + {"ismaster", "isMaster"}, + }; + for (const auto& test : test_cases) { QueryMessageImpl q(0, 0); q.fullCollectionName("db.$cmd"); q.query(Bson::DocumentImpl::create()->addDocument( - "$query", Bson::DocumentImpl::create()->addInt32("ismaster", 1))); + "$query", Bson::DocumentImpl::create()->addInt32(test.first, 1))); QueryMessageInfo info(q); - EXPECT_EQ("ismaster", info.command()); + EXPECT_EQ(test.second, info.command()); } } diff --git a/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml b/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml index d65f3e3aea42..237076f98045 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml +++ b/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml @@ -27,10 +27,10 @@ static_resources: - filters: - name: mysql typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy stat_prefix: mysql_stats - name: tcp typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp_stats cluster: cluster_0 diff --git a/test/extensions/filters/network/postgres_proxy/BUILD b/test/extensions/filters/network/postgres_proxy/BUILD index 6fb11396556e..e95a2cffe158 100644 --- a/test/extensions/filters/network/postgres_proxy/BUILD +++ b/test/extensions/filters/network/postgres_proxy/BUILD @@ -35,6 +35,18 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "postgres_message_tests", + srcs = [ + "postgres_message_test.cc", + ], + extension_name = "envoy.filters.network.postgres_proxy", + deps = [ + "//source/common/buffer:buffer_lib", + "//source/extensions/filters/network/postgres_proxy:filter", + ], +) + envoy_extension_cc_test( name = "postgres_filter_tests", srcs = [ diff --git a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc index aa2d9ff2c7b7..e787a18f2d5b 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc @@ -242,6 +242,9 @@ TEST_P(PostgresProxyFrontendDecoderTest, FrontendInc) { EXPECT_CALL(callbacks_, incMessagesFrontend()).Times(1); createPostgresMsg(data_, GetParam(), "SELECT 1;"); decoder_->onData(data_, true); + + // Make sure that decoder releases memory used during message processing. + ASSERT_TRUE(decoder_->getMessage().empty()); } // Run the above test for each frontend message. 
@@ -507,6 +510,89 @@ TEST_P(PostgresProxyFrontendEncrDecoderTest, EncyptedTraffic) { INSTANTIATE_TEST_SUITE_P(FrontendEncryptedMessagesTests, PostgresProxyFrontendEncrDecoderTest, ::testing::Values(80877103, 80877104)); +class FakeBuffer : public Buffer::Instance { +public: + MOCK_METHOD(void, addDrainTracker, (std::function), (override)); + MOCK_METHOD(void, add, (const void*, uint64_t), (override)); + MOCK_METHOD(void, addBufferFragment, (Buffer::BufferFragment&), (override)); + MOCK_METHOD(void, add, (absl::string_view), (override)); + MOCK_METHOD(void, add, (const Instance&), (override)); + MOCK_METHOD(void, prepend, (absl::string_view), (override)); + MOCK_METHOD(void, prepend, (Instance&), (override)); + MOCK_METHOD(void, commit, (Buffer::RawSlice*, uint64_t), (override)); + MOCK_METHOD(void, copyOut, (size_t, uint64_t, void*), (const, override)); + MOCK_METHOD(void, drain, (uint64_t), (override)); + MOCK_METHOD(Buffer::RawSliceVector, getRawSlices, (absl::optional), (const, override)); + MOCK_METHOD(Buffer::SliceDataPtr, extractMutableFrontSlice, (), (override)); + MOCK_METHOD(uint64_t, length, (), (const, override)); + MOCK_METHOD(void*, linearize, (uint32_t), (override)); + MOCK_METHOD(void, move, (Instance&), (override)); + MOCK_METHOD(void, move, (Instance&, uint64_t), (override)); + MOCK_METHOD(uint64_t, reserve, (uint64_t, Buffer::RawSlice*, uint64_t), (override)); + MOCK_METHOD(ssize_t, search, (const void*, uint64_t, size_t, size_t), (const, override)); + MOCK_METHOD(bool, startsWith, (absl::string_view), (const, override)); + MOCK_METHOD(std::string, toString, (), (const, override)); +}; + +// Test verifies that decoder calls Buffer::linearize method +// for messages which have associated 'action'. +TEST_F(PostgresProxyDecoderTest, Linearize) { + testing::NiceMock fake_buf; + uint8_t body[] = "test\0"; + + decoder_->setStartup(false); + + // Simulate that decoder reads message which needs processing. + // Query 'Q' message's body is just string. + // Message header is 5 bytes and body will contain string "test\0". + EXPECT_CALL(fake_buf, length).WillRepeatedly(testing::Return(10)); + // The decoder will first ask for 1-byte message type + // Then for length and finally for message body. + EXPECT_CALL(fake_buf, copyOut) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 0); + ASSERT_THAT(size, 1); + *(static_cast(data)) = 'Q'; + }) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 1); + ASSERT_THAT(size, 4); + *(static_cast(data)) = htonl(9); + }) + .WillRepeatedly([=](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 0); + ASSERT_THAT(size, 5); + memcpy(data, body, 5); + }); + + // It should call "Buffer::linearize". + EXPECT_CALL(fake_buf, linearize).WillOnce([&](uint32_t) -> void* { return body; }); + + decoder_->onData(fake_buf, false); + + // Simulate that decoder reads message which does not need processing. + // BindComplete message has type '2' and empty body. + // Total message length is equal to length of header (5 bytes). + EXPECT_CALL(fake_buf, length).WillRepeatedly(testing::Return(5)); + // The decoder will first ask for 1-byte message type and next for length. 
+ EXPECT_CALL(fake_buf, copyOut) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 0); + ASSERT_THAT(size, 1); + *(static_cast(data)) = '2'; + }) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 1); + ASSERT_THAT(size, 4); + *(static_cast(data)) = htonl(4); + }); + + // Make sure that decoder does not call linearize. + EXPECT_CALL(fake_buf, linearize).Times(0); + + decoder_->onData(fake_buf, false); +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/postgres_proxy/postgres_message_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_message_test.cc new file mode 100644 index 000000000000..3500083442f4 --- /dev/null +++ b/test/extensions/filters/network/postgres_proxy/postgres_message_test.cc @@ -0,0 +1,932 @@ +#include +#include + +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/network/postgres_proxy/postgres_message.h" + +#include "fmt/printf.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace PostgresProxy { + +// Tests for individual types used in Postgres messages. +// +// Integer types. + +// Fixture class for testing Integer types. +template class IntTest : public testing::Test { +public: + T field_; + Buffer::OwnedImpl data_; +}; + +using IntTypes = ::testing::Types; +TYPED_TEST_SUITE(IntTest, IntTypes); + +TYPED_TEST(IntTest, BasicRead) { + this->data_.template writeBEInt().get())>(12); + uint64_t pos = 0; + uint64_t left = this->data_.length(); + ASSERT_TRUE(this->field_.read(this->data_, pos, left)); + + ASSERT_THAT(this->field_.toString(), "[12]"); + // pos should be moved forward by the number of bytes read. + ASSERT_THAT(pos, sizeof(TypeParam)); + ASSERT_THAT(12, this->field_.get()); + + // Make sure that all bytes have been read from the buffer. + ASSERT_THAT(left, 0); +} + +TYPED_TEST(IntTest, ReadWithLeftovers) { + this->data_.template writeBEInt().get())>(12); + // Write 1 byte more. + this->data_.template writeBEInt(11); + uint64_t pos = 0; + uint64_t left = this->data_.length(); + ASSERT_TRUE(this->field_.read(this->data_, pos, left)); + ASSERT_THAT(this->field_.toString(), "[12]"); + // pos should be moved forward by the number of bytes read. + ASSERT_THAT(pos, sizeof(TypeParam)); + + // Make sure that all bytes have been read from the buffer. + ASSERT_THAT(left, 1); +} + +TYPED_TEST(IntTest, ReadAtOffset) { + // write 1 byte before the actual value. + this->data_.template writeBEInt(11); + this->data_.template writeBEInt().get())>(12); + uint64_t pos = 1; + uint64_t left = this->data_.length() - 1; + ASSERT_TRUE(this->field_.read(this->data_, pos, left)); + ASSERT_THAT(this->field_.toString(), "[12]"); + // pos should be moved forward by the number of bytes read. + ASSERT_THAT(pos, 1 + sizeof(TypeParam)); + // Nothing should be left to read. + ASSERT_THAT(left, 0); +} + +TYPED_TEST(IntTest, NotEnoughData) { + this->data_.template writeBEInt().get())>(12); + // Start from offset 1. There is not enough data in the buffer for the required type. + uint64_t pos = 1; + uint64_t left = this->data_.length() - pos; + ASSERT_FALSE(this->field_.read(this->data_, pos, left)); +} + +// Byte1 should format content as char. 
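+// Reading the single byte 'I' should consume exactly one byte and render via toString() as "[I]".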
+TEST(Byte1, Formatting) { + Byte1 field; + + Buffer::OwnedImpl data; + data.add("I"); + + uint64_t pos = 0; + uint64_t left = 1; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 1); + ASSERT_THAT(left, 0); + + ASSERT_THAT(field.toString(), "[I]"); +} + +// Tests for String type. +TEST(StringType, SingleString) { + String field; + + Buffer::OwnedImpl data; + data.add("test"); + data.writeBEInt(0); + uint64_t pos = 0; + uint64_t left = 5; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 5); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_THAT(out, "[test]"); +} + +TEST(StringType, MultipleStrings) { + String field; + + // Add 3 strings. + Buffer::OwnedImpl data; + data.add("test1"); + data.writeBEInt(0); + data.add("test2"); + data.writeBEInt(0); + data.add("test3"); + data.writeBEInt(0); + uint64_t pos = 0; + uint64_t left = 3 * 6; + + // Read the first string. + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 1 * 6); + ASSERT_THAT(left, 2 * 6); + auto out = field.toString(); + ASSERT_THAT(out, "[test1]"); + + // Read the second string. + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 2 * 6); + ASSERT_THAT(left, 1 * 6); + out = field.toString(); + ASSERT_THAT(out, "[test2]"); + + // Read the third string. + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 3 * 6); + ASSERT_THAT(left, 0); + out = field.toString(); + ASSERT_THAT(out, "[test3]"); +} + +TEST(StringType, NoTerminatingByte) { + String field; + + Buffer::OwnedImpl data; + data.add("test"); + uint64_t pos = 0; + uint64_t left = 4; + ASSERT_FALSE(field.read(data, pos, left)); +} + +// ByteN type is always placed at the end of Postgres message. +// There is no explicit message length. Length must be deduced from +// "length" field on Postgres message. +TEST(ByteN, BasicTest) { + ByteN field; + + Buffer::OwnedImpl data; + // Write 11 bytes. We will read only 10 to make sure + // that len is used, not buffer's length. + for (auto i = 0; i < 11; i++) { + data.writeBEInt(i); + } + uint64_t pos = 0; + uint64_t left = 10; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 10); + // One byte should be left in the buffer. + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_THAT(out, "[0 1 2 3 4 5 6 7 8 9]"); +} + +TEST(ByteN, NotEnoughData) { + ByteN field; + + Buffer::OwnedImpl data; + // Write 10 bytes, but set message length to be 11. + for (auto i = 0; i < 10; i++) { + data.writeBEInt(i); + } + uint64_t pos = 0; + uint64_t left = 11; + ASSERT_FALSE(field.read(data, pos, left)); +} + +TEST(ByteN, Empty) { + ByteN field; + + Buffer::OwnedImpl data; + // Write nothing to data buffer. + uint64_t pos = 0; + uint64_t left = 0; + ASSERT_TRUE(field.read(data, pos, left)); + + auto out = field.toString(); + ASSERT_THAT(out, "[]"); +} + +// VarByteN type. It contains 4 bytes length field with value which follows. +TEST(VarByteN, BasicTest) { + VarByteN field; + + Buffer::OwnedImpl data; + // Write VarByteN with length equal to zero. No value follows. + data.writeBEInt(0); + + // Write value with 5 bytes. + data.writeBEInt(5); + for (auto i = 0; i < 5; i++) { + data.writeBEInt(10 + i); + } + + // Write special case value with length -1. No value follows. + data.writeBEInt(-1); + + uint64_t pos = 0; + uint64_t left = 4 + 4 + 5 + 4; + uint64_t expected_left = left; + + // Read the first value. 
+ ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 4); + expected_left -= 4; + ASSERT_THAT(left, expected_left); + auto out = field.toString(); + ASSERT_TRUE(out.find("0 bytes") != std::string::npos); + + // Read the second value. + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 4 + 4 + 5); + expected_left -= (4 + 5); + ASSERT_THAT(left, expected_left); + out = field.toString(); + ASSERT_TRUE(out.find("5 bytes") != std::string::npos); + ASSERT_TRUE(out.find("10 11 12 13 14") != std::string::npos); + + // Read the third value. + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 4 + 4 + 5 + 4); + expected_left -= 4; + ASSERT_THAT(left, expected_left); + out = field.toString(); + ASSERT_TRUE(out.find("-1 bytes") != std::string::npos); +} + +TEST(VarByteN, NotEnoughLengthData) { + VarByteN field; + + Buffer::OwnedImpl data; + // Write 3 bytes. Minimum for this type is 4 bytes of length. + data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + + uint64_t pos = 0; + uint64_t left = 3; + ASSERT_FALSE(field.read(data, pos, left)); +} + +TEST(VarByteN, NotEnoughValueData) { + VarByteN field; + + Buffer::OwnedImpl data; + // Write length of the value to be 5 bytes, but supply only 4 bytes. + data.writeBEInt(5); + data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + data.writeBEInt(3); + + uint64_t pos = 0; + uint64_t left = 5 + 4; + ASSERT_FALSE(field.read(data, pos, left)); +} + +// Array composite type tests. +TEST(Array, SingleInt) { + Array field; + + Buffer::OwnedImpl data; + // Write the number of elements in the array. + data.writeBEInt(1); + data.writeBEInt(123); + + uint64_t pos = 0; + uint64_t left = 2 + 4; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 6); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("Array of 1") != std::string::npos); + ASSERT_TRUE(out.find("123") != std::string::npos); +} + +TEST(Array, MultipleInts) { + Array field; + + Buffer::OwnedImpl data; + // Write 3 elements into array. + data.writeBEInt(3); + data.writeBEInt(211); + data.writeBEInt(212); + data.writeBEInt(213); + + uint64_t pos = 0; + uint64_t left = 2 + 3 * 1; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 5); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("Array of 3") != std::string::npos); + ASSERT_TRUE(out.find("211") != std::string::npos); + ASSERT_TRUE(out.find("212") != std::string::npos); + ASSERT_TRUE(out.find("213") != std::string::npos); +} + +TEST(Array, Empty) { + Array field; + + Buffer::OwnedImpl data; + // Write 0 elements into array. + data.writeBEInt(0); + + uint64_t pos = 0; + uint64_t left = 2; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 2); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("Array of 0") != std::string::npos); +} + +// Test situation when there is not enough data to read the length of the Array. +TEST(Array, NotEnoughDataForLength) { + Array field; + + Buffer::OwnedImpl data; + // Data field is 2 bytes long. Write just one byte. + data.writeBEInt(1); + + uint64_t pos = 0; + uint64_t left = 1; + ASSERT_FALSE(field.read(data, pos, left)); +} + +// Test situation when there is not enough data in the buffer to read one of the elements +// in the array. +TEST(Array, NotEnoughDataForValues) { + Array field; + + Buffer::OwnedImpl data; + // There will be 2 elements in the array. + // The first element is 4 bytes long. 
+ // The second element should be 4 bytes long but is only 2 bytes long. + data.writeBEInt(2); + data.writeBEInt(101); + data.writeBEInt(102); + + uint64_t pos = 0; + uint64_t left = 2 + 4 + 2; + ASSERT_FALSE(field.read(data, pos, left)); +} + +// Repeated composite type tests. +TEST(Repeated, BasicTestWithStrings) { + Repeated field; + + Buffer::OwnedImpl data; + // Write some data to simulate message header. + // It will be ignored. + data.writeBEInt(101); + data.writeBEInt(102); + // Now write 3 strings. Each terminated by zero byte. + data.add("test1"); + data.writeBEInt(0); + data.add("test2"); + data.writeBEInt(0); + data.add("test3"); + data.writeBEInt(0); + uint64_t pos = 5; + uint64_t left = 3 * 6; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 5 + 3 * 6); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("test1") != std::string::npos); + ASSERT_TRUE(out.find("test2") != std::string::npos); + ASSERT_TRUE(out.find("test3") != std::string::npos); +} + +// Test verifies that read fails when there is less +// bytes in the buffer than bytes needed to read to the end of the message. +TEST(Repeated, NotEnoughData) { + Repeated field; + + Buffer::OwnedImpl data; + // Write some data to simulate message header. + // It will be ignored. + data.writeBEInt(101); + data.writeBEInt(102); + data.add("test"); + + // "test" with terminating zero is 5 bytes. + // Set "left" to indicate that 6 bytes are needed. + uint64_t pos = 5; + uint64_t left = 5 + 6; + ASSERT_FALSE(field.read(data, pos, left)); +} + +// Test verifies that entire read fails when one of +// subordinate reads fails. +TEST(Repeated, NotEnoughDataForSecondString) { + Repeated field; + + Buffer::OwnedImpl data; + // Write some data to simulate message header. + // It will be ignored. + data.writeBEInt(101); + data.writeBEInt(102); + // Now write 3 strings. Each terminated by zero byte. + data.add("test1"); + data.writeBEInt(0); + data.add("test2"); + // Do not write terminating zero. + // Read should fail here. + uint64_t pos = 5; + uint64_t left = 6 + 5; + ASSERT_FALSE(field.read(data, pos, left)); +} + +// Sequence composite type tests. 
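Sequence appears to be the composite that reads each of its template-parameter fields in declaration order and fails as a whole if any element cannot be read completely. The template arguments were stripped from this diff, so the instantiation in the sketch below is an assumption chosen only to mirror the tests that follow:

// Hedged usage sketch; Sequence<Int32, String> is a hypothetical parameter list.
Sequence<Int32, String> field;
Buffer::OwnedImpl data;
data.writeBEInt<uint32_t>(101); // 4-byte integer element
data.add("test");
data.writeBEInt<uint8_t>(0);    // String elements are null-terminated
uint64_t pos = 0;
uint64_t left = 4 + 5;
// pos and left are advanced across all elements; a short buffer makes read() return false.
ASSERT_TRUE(field.read(data, pos, left));
ASSERT_EQ(pos, 9);
ASSERT_EQ(left, 0);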
+TEST(Sequence, Int32SingleValue) { + Sequence field; + + Buffer::OwnedImpl data; + data.writeBEInt(101); + + uint64_t pos = 0; + uint64_t left = 4; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 4); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("101") != std::string::npos); +} + +TEST(Sequence, Int16SingleValue) { + Sequence field; + + Buffer::OwnedImpl data; + data.writeBEInt(101); + + uint64_t pos = 0; + uint64_t left = 2; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 2); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("101") != std::string::npos); +} + +TEST(Sequence, BasicMultipleValues1) { + Sequence field; + + Buffer::OwnedImpl data; + data.writeBEInt(101); + data.add("test"); + data.writeBEInt(0); + + uint64_t pos = 0; + uint64_t left = 4 + 5; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, 4 + 5); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("101") != std::string::npos); + ASSERT_TRUE(out.find("test") != std::string::npos); +} + +TEST(Sequence, BasicMultipleValues2) { + Sequence field; + + Buffer::OwnedImpl data; + data.writeBEInt(100); + data.writeBEInt(101); + + uint64_t pos = 0; + uint64_t left = 4 + 2; + uint64_t expected_pos = left; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, expected_pos); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("100") != std::string::npos); + ASSERT_TRUE(out.find("101") != std::string::npos); +} + +TEST(Sequence, BasicMultipleValues3) { + Sequence field; + + Buffer::OwnedImpl data; + data.writeBEInt(100); + data.writeBEInt(101); + data.writeBEInt(102); + data.writeBEInt(103); + + uint64_t pos = 0; + uint64_t left = 4 + 2 + 4 + 2; + uint64_t expected_pos = left; + ASSERT_TRUE(field.read(data, pos, left)); + ASSERT_THAT(pos, expected_pos); + ASSERT_THAT(left, 0); + + auto out = field.toString(); + ASSERT_TRUE(out.find("100") != std::string::npos); + ASSERT_TRUE(out.find("101") != std::string::npos); + ASSERT_TRUE(out.find("102") != std::string::npos); + ASSERT_TRUE(out.find("103") != std::string::npos); +} + +// Test versifies that read fails when reading of one element +// in Sequence fails. +TEST(Sequence, NotEnoughData) { + Sequence field; + + Buffer::OwnedImpl data; + data.writeBEInt(101); + // Do not write terminating zero for the string. + data.add("test"); + + uint64_t pos = 0; + uint64_t left = 4 + 4; + ASSERT_FALSE(field.read(data, pos, left)); +} + +// Tests for Message interface and helper function createMsgBodyReader. 
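createMsgBodyReader appears to package a list of field types into a Message object that reads an entire message body of a declared length. Its template arguments are also stripped in this diff, so the sketch below assumes a single Int32 field purely for illustration, mirroring the first test that follows:

// Hedged usage sketch; createMsgBodyReader<Int32> is a hypothetical parameter list.
std::unique_ptr<Message> msg = createMsgBodyReader<Int32>();
Buffer::OwnedImpl data;
data.writeBEInt<uint32_t>(12);
// read() is handed the declared body length and fails if the buffer holds fewer bytes;
// toString() concatenates the per-field dumps.
ASSERT_TRUE(msg->read(data, 4));
ASSERT_EQ(msg->toString(), "[12]");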
+TEST(PostgresMessage, SingleFieldInt32) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(12); + ASSERT_TRUE(msg->read(data, 4)); + auto out = msg->toString(); + ASSERT_THAT(out, "[12]"); +} + +TEST(PostgresMessage, SingleFieldInt16) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(12); + ASSERT_TRUE(msg->read(data, 2)); + auto out = msg->toString(); + ASSERT_THAT(out, "[12]"); +} + +TEST(PostgresMessage, SingleByteN) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + data.writeBEInt(3); + data.writeBEInt(4); + ASSERT_TRUE(msg->read(data, 5 * 1)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("0") != std::string::npos); // NOLINT + ASSERT_TRUE(out.find("1") != std::string::npos); // NOLINT + ASSERT_TRUE(out.find("2") != std::string::npos); // NOLINT + ASSERT_TRUE(out.find("3") != std::string::npos); // NOLINT + ASSERT_TRUE(out.find("4") != std::string::npos); // NOLINT +} + +TEST(PostgresMessage, MultipleValues1) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(12); + data.writeBEInt(13); + ASSERT_TRUE(msg->read(data, 4 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("12") != std::string::npos); + ASSERT_TRUE(out.find("13") != std::string::npos); +} + +TEST(PostgresMessage, MultipleValues2) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + ASSERT_TRUE(msg->read(data, 2 + 4 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("14") != std::string::npos); + ASSERT_TRUE(out.find("15") != std::string::npos); +} + +TEST(PostgresMessage, MultipleValues3) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(12); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + ASSERT_TRUE(msg->read(data, 4 + 2 + 4 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("12") != std::string::npos); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("14") != std::string::npos); + ASSERT_TRUE(out.find("15") != std::string::npos); +} + +TEST(PostgresMessage, MultipleValues4) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + data.writeBEInt(16); + data.writeBEInt(17); + ASSERT_TRUE(msg->read(data, 2 + 4 + 2 + 4 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("14") != std::string::npos); + ASSERT_TRUE(out.find("15") != std::string::npos); + ASSERT_TRUE(out.find("16") != std::string::npos); + ASSERT_TRUE(out.find("17") != std::string::npos); +} + +TEST(PostgresMessage, MultipleValues5) { + std::unique_ptr msg = createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.writeBEInt(12); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + data.writeBEInt(16); + data.writeBEInt(17); + ASSERT_TRUE(msg->read(data, 4 + 2 + 4 + 2 + 4 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("12") != std::string::npos); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("14") != std::string::npos); + ASSERT_TRUE(out.find("15") != std::string::npos); + ASSERT_TRUE(out.find("16") != std::string::npos); + ASSERT_TRUE(out.find("17") != 
std::string::npos); +} + +TEST(PostgresMessage, MultipleValues6) { + std::unique_ptr msg = + createMsgBodyReader(); + + Buffer::OwnedImpl data; + data.add("test"); + data.writeBEInt(0); + data.writeBEInt(12); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + data.writeBEInt(16); + data.writeBEInt(17); + ASSERT_TRUE(msg->read(data, 5 + 4 + 2 + 4 + 2 + 4 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("test") != std::string::npos); + ASSERT_TRUE(out.find("12") != std::string::npos); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("14") != std::string::npos); + ASSERT_TRUE(out.find("15") != std::string::npos); + ASSERT_TRUE(out.find("16") != std::string::npos); + ASSERT_TRUE(out.find("17") != std::string::npos); +} + +TEST(PostgresMessage, MultipleValues7) { + std::unique_ptr msg = createMsgBodyReader>(); + + Buffer::OwnedImpl data; + data.add("test"); + data.writeBEInt(0); + + // Array of 3 elements. + data.writeBEInt(3); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + ASSERT_TRUE(msg->read(data, 5 + 2 + 3 * 4)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("test") != std::string::npos); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("14") != std::string::npos); + ASSERT_TRUE(out.find("15") != std::string::npos); +} + +TEST(PostgresMessage, ArraySet1) { + std::unique_ptr msg = createMsgBodyReader>(); + + Buffer::OwnedImpl data; + // There will be 3 elements in the array. + data.writeBEInt(3); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + ASSERT_TRUE(msg->read(data, 2 + 3 * 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("14") != std::string::npos); + ASSERT_TRUE(out.find("15") != std::string::npos); +} + +TEST(PostgresMessage, ArraySet2) { + std::unique_ptr msg = createMsgBodyReader, Int16>(); + + Buffer::OwnedImpl data; + // Array of 1 element of VarByteN. + data.writeBEInt(1); + // VarByteN of 5 bytes long. + data.writeBEInt(5); + data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + data.writeBEInt(3); + data.writeBEInt(114); + + // 16-bits value. + data.writeBEInt(115); + + ASSERT_TRUE(msg->read(data, 2 + 4 + 5 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("114") != std::string::npos); + ASSERT_TRUE(out.find("115") != std::string::npos); +} + +TEST(PostgresMessage, ArraySet3) { + std::unique_ptr msg = createMsgBodyReader, Array, Int16>(); + + Buffer::OwnedImpl data; + // There will be 3 elements in the array. + data.writeBEInt(3); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + + // Array of 1 element of VarByteN. + data.writeBEInt(1); + // VarByteN of 5 bytes long. + data.writeBEInt(5); + data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + data.writeBEInt(3); + data.writeBEInt(4); + + // 16-bits value. + data.writeBEInt(115); + + ASSERT_TRUE(msg->read(data, 2 + 3 * 2 + 2 + 4 + 5 + 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("115") != std::string::npos); +} + +TEST(PostgresMessage, ArraySet4) { + std::unique_ptr msg = createMsgBodyReader, Array>(); + + Buffer::OwnedImpl data; + // Array of 1 element of VarByteN. + data.writeBEInt(1); + // VarByteN of 5 bytes long. + data.writeBEInt(5); + data.writeBEInt(0); + data.writeBEInt(111); + data.writeBEInt(2); + data.writeBEInt(3); + data.writeBEInt(4); + + // Array of 2 elements in the second array. 
+ data.writeBEInt(2); + data.writeBEInt(113); + data.writeBEInt(114); + + ASSERT_TRUE(msg->read(data, 2 + 4 + 5 + 2 + 2 * 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("111") != std::string::npos); + ASSERT_TRUE(out.find("114") != std::string::npos); +} + +TEST(PostgresMessage, ArraySet5) { + std::unique_ptr msg = createMsgBodyReader, Array, Array>(); + + Buffer::OwnedImpl data; + // There will be 3 elements in the first array. + data.writeBEInt(3); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + + // Array of 1 element of VarByteN. + data.writeBEInt(1); + // VarByteN of 5 bytes long. + data.writeBEInt(5); + data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + data.writeBEInt(3); + data.writeBEInt(4); + + // Array of 2 elements in the third array. + data.writeBEInt(2); + data.writeBEInt(113); + data.writeBEInt(114); + + ASSERT_TRUE(msg->read(data, 2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("114") != std::string::npos); +} + +TEST(PostgresMessage, ArraySet6) { + std::unique_ptr msg = + createMsgBodyReader, Array, Array>(); + + Buffer::OwnedImpl data; + // Write string. + data.add("test"); + data.writeBEInt(0); + + // There will be 3 elements in the first array. + data.writeBEInt(3); + data.writeBEInt(13); + data.writeBEInt(14); + data.writeBEInt(15); + + // Array of 1 element of VarByteN. + data.writeBEInt(1); + // VarByteN of 5 bytes long. + data.writeBEInt(5); + data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + data.writeBEInt(3); + data.writeBEInt(4); + + // Array of 2 elements in the third array. + data.writeBEInt(2); + data.writeBEInt(113); + data.writeBEInt(114); + + ASSERT_TRUE(msg->read(data, 5 + 2 + 3 * 2 + 2 + 4 + 5 + 2 + 2 * 2)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("test") != std::string::npos); + ASSERT_TRUE(out.find("13") != std::string::npos); + ASSERT_TRUE(out.find("114") != std::string::npos); +} + +TEST(PostgresMessage, Repeated1) { + std::unique_ptr msg = createMsgBodyReader>(); + + Buffer::OwnedImpl data; + // Write 3 strings. + data.add("test1"); + data.writeBEInt(0); + data.add("test2"); + data.writeBEInt(0); + data.add("test3"); + data.writeBEInt(0); + + ASSERT_TRUE(msg->read(data, 3 * 6)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("test1") != std::string::npos); + ASSERT_TRUE(out.find("test2") != std::string::npos); + ASSERT_TRUE(out.find("test3") != std::string::npos); +} + +TEST(PostgresMessage, Repeated2) { + std::unique_ptr msg = createMsgBodyReader>(); + + Buffer::OwnedImpl data; + data.writeBEInt(115); + // Write 3 strings. + data.add("test1"); + data.writeBEInt(0); + data.add("test2"); + data.writeBEInt(0); + data.add("test3"); + data.writeBEInt(0); + + ASSERT_TRUE(msg->read(data, 4 + 3 * 6)); + auto out = msg->toString(); + ASSERT_TRUE(out.find("115") != std::string::npos); + ASSERT_TRUE(out.find("test1") != std::string::npos); + ASSERT_TRUE(out.find("test2") != std::string::npos); + ASSERT_TRUE(out.find("test3") != std::string::npos); +} + +TEST(PostgresMessage, NotEnoughData) { + std::unique_ptr msg = createMsgBodyReader(); + Buffer::OwnedImpl data; + // Write only 3 bytes into the buffer. 
+ data.writeBEInt(0); + data.writeBEInt(1); + data.writeBEInt(2); + + ASSERT_FALSE(msg->read(data, 3)); +} + +} // namespace PostgresProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/ratelimit/ratelimit_test.cc index ac64a1d6d108..f2255e356cd2 100644 --- a/test/extensions/filters/network/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/ratelimit/ratelimit_test.cc @@ -102,7 +102,7 @@ TEST_F(RateLimitFilterTest, OK) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"hello", "world"}, {"foo", "bar"}}}, {{{"foo2", "bar2"}}}}), - testing::A())) + testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -130,7 +130,7 @@ TEST_F(RateLimitFilterTest, OverLimit) { InSequence s; SetUpTest(filter_config_); - EXPECT_CALL(*client_, limit(_, "foo", _, _)) + EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -156,7 +156,7 @@ TEST_F(RateLimitFilterTest, OverLimitNotEnforcing) { InSequence s; SetUpTest(filter_config_); - EXPECT_CALL(*client_, limit(_, "foo", _, _)) + EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -185,7 +185,7 @@ TEST_F(RateLimitFilterTest, Error) { InSequence s; SetUpTest(filter_config_); - EXPECT_CALL(*client_, limit(_, "foo", _, _)) + EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -213,7 +213,7 @@ TEST_F(RateLimitFilterTest, Disconnect) { InSequence s; SetUpTest(filter_config_); - EXPECT_CALL(*client_, limit(_, "foo", _, _)) + EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -234,7 +234,7 @@ TEST_F(RateLimitFilterTest, ImmediateOK) { SetUpTest(filter_config_); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, limit(_, "foo", _, _)) + EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, @@ -258,7 +258,7 @@ TEST_F(RateLimitFilterTest, ImmediateError) { SetUpTest(filter_config_); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, limit(_, "foo", _, _)) + EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, @@ -284,7 +284,7 @@ TEST_F(RateLimitFilterTest, RuntimeDisable) { EXPECT_CALL(runtime_.snapshot_, featureEnabled("ratelimit.tcp_filter_enabled", 100)) .WillOnce(Return(false)); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); @@ -295,7 +295,7 @@ TEST_F(RateLimitFilterTest, 
ErrorResponseWithFailureModeAllowOff) { InSequence s; SetUpTest(fail_close_config_); - EXPECT_CALL(*client_, limit(_, "foo", _, _)) + EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; diff --git a/test/extensions/filters/network/rbac/integration_test.cc b/test/extensions/filters/network/rbac/integration_test.cc index 7f90867687b5..9b2f70bab6ba 100644 --- a/test/extensions/filters/network/rbac/integration_test.cc +++ b/test/extensions/filters/network/rbac/integration_test.cc @@ -131,6 +131,33 @@ name: rbac EXPECT_EQ(0U, test_server_->counter("tcp.rbac.shadow_denied")->value()); } +TEST_P(RoleBasedAccessControlNetworkFilterIntegrationTest, DeniedWithDenyAction) { + useListenerAccessLog("%CONNECTION_TERMINATION_DETAILS%"); + initializeFilter(R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC + stat_prefix: tcp. + rules: + action: DENY + policies: + "deny all": + permissions: + - any: true + principals: + - any: true +)EOF"); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); + ASSERT_TRUE(tcp_client->write("hello", false, false)); + tcp_client->waitForDisconnect(); + + EXPECT_EQ(0U, test_server_->counter("tcp.rbac.allowed")->value()); + EXPECT_EQ(1U, test_server_->counter("tcp.rbac.denied")->value()); + // Note the whitespace in the policy id is replaced by '_'. + EXPECT_THAT(waitForAccessLog(listener_access_log_name_), + testing::HasSubstr("rbac_access_denied_matched_policy[deny_all]")); +} + } // namespace RBAC } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index 11ceaf6a5e8a..d7345dbac788 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -748,8 +748,7 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, RedirectToUnknownServer) { auto endpoint = Network::Utility::parseInternetAddress(Network::Test::getAnyAddressString(version_), 0); - FakeUpstreamPtr target_server{ - new FakeUpstream(endpoint, upstreamProtocol(), timeSystem(), enable_half_close_)}; + FakeUpstreamPtr target_server{createFakeUpstream(endpoint, upstreamProtocol())}; std::stringstream redirection_error; redirection_error << "-MOVED 1111 " << redisAddressAndPort(target_server) << "\r\n"; diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index 66d033ebfb87..88a98919f2a2 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -33,6 +33,8 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.network.sni_dynamic_forward_proxy", + # TODO(envoyproxy/windows-dev): diagnose clang-cl build test failure + tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/dynamic_forward_proxy:cluster", "//source/extensions/filters/listener/tls_inspector:config", diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc index 39a57e7781c7..1559f7e5aa61 100644 --- 
a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -82,9 +82,9 @@ name: envoy.clusters.dynamic_forward_proxy } void createUpstreams() override { - fake_upstreams_.emplace_back(new FakeUpstream( + addFakeUpstream( Ssl::createFakeUpstreamSslContext(upstream_cert_name_, context_manager_, factory_context_), - 0, FakeHttpConnection::Type::HTTP1, version_, timeSystem())); + FakeHttpConnection::Type::HTTP1); } Network::ClientConnectionPtr diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index 47a49f9bad32..9798db6e2e9c 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -1366,9 +1366,9 @@ TEST_F(ThriftConnectionManagerTest, DecoderFiltersModifyRequests) { })); EXPECT_CALL(*decoder_filter_, transportBegin(_)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> FilterStatus { - const Http::HeaderEntry* header = metadata->headers().get(key); - EXPECT_NE(nullptr, header); - EXPECT_EQ("value", header->value().getStringView()); + const auto header = metadata->headers().get(key); + EXPECT_FALSE(header.empty()); + EXPECT_EQ("value", header[0]->value().getStringView()); return FilterStatus::Continue; })); diff --git a/test/extensions/filters/network/thrift_proxy/driver/generate_bindings.sh b/test/extensions/filters/network/thrift_proxy/driver/generate_bindings.sh index 6b65871512c0..04d23939005b 100755 --- a/test/extensions/filters/network/thrift_proxy/driver/generate_bindings.sh +++ b/test/extensions/filters/network/thrift_proxy/driver/generate_bindings.sh @@ -3,7 +3,7 @@ # Generates the thrift bindings for example.thrift. Requires that # apache-thrift's thrift generator is installed and on the path. -DIR=$(cd `dirname $0` && pwd) -cd "${DIR}" +DIR=$(cd "$(dirname "$0")" && pwd) +cd "${DIR}" || exit 1 thrift --gen py --out ./generated example.thrift diff --git a/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh b/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh index 74ed8a9cceaa..a4c58cb798d8 100755 --- a/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh +++ b/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh @@ -18,7 +18,7 @@ mkdir -p "${FIXTURE_DIR}" DRIVER_DIR="${TEST_SRCDIR}/envoy/test/extensions/filters/network/thrift_proxy/driver" if [[ -z "${TEST_UDSDIR}" ]]; then - TEST_UDSDIR=`mktemp -d /tmp/envoy_test_thrift.XXXXXX` + TEST_UDSDIR=$(mktemp -d /tmp/envoy_test_thrift.XXXXXX) fi MODE="$1" @@ -86,7 +86,7 @@ fi "${DRIVER_DIR}/server" "${SERVICE_FLAGS[@]}" & SERVER_PID="$!" -trap "kill ${SERVER_PID}" EXIT; +trap 'kill ${SERVER_PID}' EXIT; while [[ ! 
-a "${SOCKET}" ]]; do sleep 0.1 diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc index d4862b1a378d..d0b8031938ab 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc @@ -176,7 +176,7 @@ TEST_F(ThriftRateLimitFilterTest, NoApplicableRateLimit) { setupTest(filter_config_); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear(); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_)); } @@ -185,7 +185,7 @@ TEST_F(ThriftRateLimitFilterTest, NoDescriptor) { setupTest(filter_config_); EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_)); } @@ -212,7 +212,7 @@ TEST_F(ThriftRateLimitFilterTest, OkResponse) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -243,7 +243,7 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, @@ -267,7 +267,7 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) { EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), - _)) + _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, @@ -291,7 +291,7 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponse) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -322,7 +322,7 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -355,7 +355,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponse) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = 
&callbacks; @@ -386,7 +386,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -419,7 +419,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -444,7 +444,7 @@ TEST_F(ThriftRateLimitFilterTest, ResetDuringCall) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); - EXPECT_CALL(*client_, limit(_, _, _, _)) + EXPECT_CALL(*client_, limit(_, _, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -464,7 +464,7 @@ TEST_F(ThriftRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) { .WillByDefault(Return(false)); EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); - EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); + EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_)); } diff --git a/test/extensions/filters/network/wasm/BUILD b/test/extensions/filters/network/wasm/BUILD new file mode 100644 index 000000000000..d21eba6c0853 --- /dev/null +++ b/test/extensions/filters/network/wasm/BUILD @@ -0,0 +1,54 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//bazel:envoy_select.bzl", + "envoy_select_wasm", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/filters/network/wasm/test_data:test_cpp.wasm", + ]), + extension_name = "envoy.filters.network.wasm", + deps = [ + "//source/common/common:base64_lib", + "//source/common/common:hex_lib", + "//source/common/crypto:utility_lib", + "//source/extensions/common/crypto:utility_lib", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/filters/network/wasm:config", + "//test/extensions/filters/network/wasm/test_data:test_cpp_plugin", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + "@envoy_api//envoy/extensions/filters/network/wasm/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "wasm_filter_test", + srcs = ["wasm_filter_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/filters/network/wasm/test_data:logging_rust.wasm", + "//test/extensions/filters/network/wasm/test_data:test_cpp.wasm", + ]), + extension_name = "envoy.filters.network.wasm", + deps = [ + "//source/extensions/filters/network/wasm:wasm_filter_lib", + "//test/extensions/filters/network/wasm/test_data:test_cpp_plugin", + "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:wasm_lib", + ], +) diff --git a/test/extensions/filters/network/wasm/config_test.cc 
b/test/extensions/filters/network/wasm/config_test.cc new file mode 100644 index 000000000000..58d17c177fb7 --- /dev/null +++ b/test/extensions/filters/network/wasm/config_test.cc @@ -0,0 +1,190 @@ +#include "envoy/extensions/filters/network/wasm/v3/wasm.pb.validate.h" + +#include "common/common/base64.h" +#include "common/common/hex.h" +#include "common/crypto/utility.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/filters/network/wasm/config.h" +#include "extensions/filters/network/wasm/wasm_filter.h" + +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Wasm { + +class WasmNetworkFilterConfigTest : public testing::TestWithParam { +protected: + WasmNetworkFilterConfigTest() : api_(Api::createApiForTest(stats_store_)) { + ON_CALL(context_, api()).WillByDefault(ReturnRef(*api_)); + ON_CALL(context_, scope()).WillByDefault(ReturnRef(stats_store_)); + ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_)); + ON_CALL(context_, initManager()).WillByDefault(ReturnRef(init_manager_)); + ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + } + + void SetUp() override { Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); } + + void initializeForRemote() { + retry_timer_ = new Event::MockTimer(); + + EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) { + retry_timer_cb_ = timer_cb; + return retry_timer_; + })); + } + + NiceMock context_; + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + envoy::config::core::v3::Metadata listener_metadata_; + Init::ManagerImpl init_manager_{"init_manager"}; + NiceMock cluster_manager_; + Init::ExpectableWatcherImpl init_watcher_; + NiceMock dispatcher_; + Event::MockTimer* retry_timer_; + Event::TimerCb retry_timer_cb_; +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8", +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm", +#endif + "null"); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmNetworkFilterConfigTest, testing_values); + +TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromFileWasm) { + if (GetParam() == "null") { + return; + } + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + local: + filename: "{{ test_rundir }}/test/extensions/filters/network/wasm/test_data/test_cpp.wasm" + )EOF")); + + envoy::extensions::filters::network::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Network::MockConnection connection; + EXPECT_CALL(connection, addFilter(_)); + cb(connection); +} + +TEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineWasm) { + const std::string code = + GetParam() != "null" + ? 
TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/filters/network/wasm/test_data/test_cpp.wasm")) + : "NetworkTestCpp"; + EXPECT_FALSE(code.empty()); + const std::string yaml = absl::StrCat(R"EOF( + config: + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + local: { inline_bytes: ")EOF", + Base64::encode(code.data(), code.size()), R"EOF(" } + )EOF"); + + envoy::extensions::filters::network::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); + EXPECT_CALL(init_watcher_, ready()); + context_.initManager().initialize(init_watcher_); + EXPECT_EQ(context_.initManager().state(), Init::Manager::State::Initialized); + Network::MockConnection connection; + EXPECT_CALL(connection, addFilter(_)); + cb(connection); +} + +TEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineBadCode) { + const std::string yaml = absl::StrCat(R"EOF( + config: + name: "test" + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + local: { inline_string: "bad code" } + )EOF"); + + envoy::extensions::filters::network::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, context_), + Extensions::Common::Wasm::WasmException, + "Unable to create Wasm network filter test"); +} + +TEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineBadCodeFailOpenNackConfig) { + const std::string yaml = absl::StrCat(R"EOF( + config: + name: "test" + fail_open: true + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + local: { inline_string: "bad code" } + )EOF"); + + envoy::extensions::filters::network::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + WasmFilterConfig factory; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, context_), + Extensions::Common::Wasm::WasmException, + "Unable to create Wasm network filter test"); +} + +TEST_P(WasmNetworkFilterConfigTest, FilterConfigFailOpen) { + if (GetParam() == "null") { + return; + } + const std::string yaml = TestEnvironment::substitute(absl::StrCat(R"EOF( + config: + fail_open: true + vm_config: + runtime: "envoy.wasm.runtime.)EOF", + GetParam(), R"EOF(" + code: + local: + filename: "{{ test_rundir }}/test/extensions/filters/network/wasm/test_data/test_cpp.wasm" + )EOF")); + + envoy::extensions::filters::network::wasm::v3::Wasm proto_config; + TestUtility::loadFromYaml(yaml, proto_config); + NetworkFilters::Wasm::FilterConfig filter_config(proto_config, context_); + filter_config.wasm()->fail(proxy_wasm::FailState::RuntimeError, ""); + EXPECT_EQ(filter_config.createFilter(), nullptr); +} + +} // namespace Wasm +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/wasm/test_data/BUILD b/test/extensions/filters/network/wasm/test_data/BUILD new file mode 100644 index 000000000000..a33d53447d03 --- /dev/null +++ b/test/extensions/filters/network/wasm/test_data/BUILD @@ -0,0 +1,44 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) +load("//bazel/wasm:wasm.bzl", "envoy_wasm_cc_binary", "wasm_rust_binary") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +wasm_rust_binary( + name = "logging_rust.wasm", + srcs = 
["logging_rust/src/lib.rs"], + deps = [ + "//bazel/external/cargo:log", + "//bazel/external/cargo:proxy_wasm", + ], +) + +envoy_cc_library( + name = "test_cpp_plugin", + srcs = [ + "test_cpp.cc", + "test_cpp_null_plugin.cc", + ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + "//external:abseil_node_hash_map", + "//source/common/common:assert_lib", + "//source/common/common:c_smart_ptr_lib", + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/common/wasm:well_known_names", + ], +) + +envoy_wasm_cc_binary( + name = "test_cpp.wasm", + srcs = ["test_cpp.cc"], + deps = [ + "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", + ], +) diff --git a/test/extensions/filters/network/wasm/test_data/logging_rust/Cargo.toml b/test/extensions/filters/network/wasm/test_data/logging_rust/Cargo.toml new file mode 100644 index 000000000000..a82aed3df58d --- /dev/null +++ b/test/extensions/filters/network/wasm/test_data/logging_rust/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "Proxy-Wasm logging test" +name = "logging_rust" +version = "0.0.1" +authors = ["Piotr Sikora "] +edition = "2018" + +[dependencies] +proxy-wasm = "0.1" +log = "0.4" + +[lib] +crate-type = ["cdylib"] +path = "src/*.rs" + +[profile.release] +lto = true +opt-level = 3 +panic = "abort" + +[raze] +workspace_path = "//bazel/external/cargo" +genmode = "Remote" + +[raze.crates.log.'0.4.11'] +additional_flags = ["--cfg=atomic_cas"] diff --git a/test/extensions/filters/network/wasm/test_data/logging_rust/src/lib.rs b/test/extensions/filters/network/wasm/test_data/logging_rust/src/lib.rs new file mode 100644 index 000000000000..d03230863f29 --- /dev/null +++ b/test/extensions/filters/network/wasm/test_data/logging_rust/src/lib.rs @@ -0,0 +1,68 @@ +use log::trace; +use proxy_wasm::hostcalls; +use proxy_wasm::traits::{Context, StreamContext}; +use proxy_wasm::types::*; + +#[no_mangle] +pub fn _start() { + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_stream_context(|context_id, _| -> Box { + Box::new(TestStream { context_id }) + }); +} + +struct TestStream { + context_id: u32, +} + +impl Context for TestStream {} + +impl StreamContext for TestStream { + fn on_new_connection(&mut self) -> Action { + trace!("onNewConnection {}", self.context_id); + Action::Continue + } + + fn on_downstream_data(&mut self, data_size: usize, end_of_stream: bool) -> Action { + if let Some(data) = self.get_downstream_data(0, data_size) { + trace!( + "onDownstreamData {} len={} end_stream={}\n{}", + self.context_id, + data_size, + end_of_stream as u32, + String::from_utf8(data).unwrap() + ); + } + hostcalls::set_buffer(BufferType::DownstreamData, 0, data_size, b"write").unwrap(); + Action::Continue + } + + fn on_upstream_data(&mut self, data_size: usize, end_of_stream: bool) -> Action { + if let Some(data) = self.get_upstream_data(0, data_size) { + trace!( + "onUpstreamData {} len={} end_stream={}\n{}", + self.context_id, + data_size, + end_of_stream as u32, + String::from_utf8(data).unwrap() + ); + } + Action::Continue + } + + fn on_downstream_close(&mut self, peer_type: PeerType) { + trace!( + "onDownstreamConnectionClose {} {}", + self.context_id, + peer_type as u32, + ); + } + + fn on_upstream_close(&mut self, peer_type: PeerType) { + trace!( + "onUpstreamConnectionClose {} {}", + self.context_id, + peer_type as u32, + ); + } +} diff --git a/test/extensions/filters/network/wasm/test_data/test_cpp.cc b/test/extensions/filters/network/wasm/test_data/test_cpp.cc new file mode 100644 index 
000000000000..644b52eb6174 --- /dev/null +++ b/test/extensions/filters/network/wasm/test_data/test_cpp.cc @@ -0,0 +1,63 @@ +// NOLINT(namespace-envoy) +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#else +#include "include/proxy-wasm/null_plugin.h" +#endif + +START_WASM_PLUGIN(NetworkTestCpp) + +static int* badptr = nullptr; + +class ExampleContext : public Context { +public: + explicit ExampleContext(uint32_t id, RootContext* root) : Context(id, root) {} + + FilterStatus onNewConnection() override; + FilterStatus onDownstreamData(size_t data_length, bool end_stream) override; + FilterStatus onUpstreamData(size_t data_length, bool end_stream) override; + void onForeignFunction(uint32_t, uint32_t) override; + void onDownstreamConnectionClose(CloseType close_type) override; + void onUpstreamConnectionClose(CloseType close_type) override; +}; +static RegisterContextFactory register_ExampleContext(CONTEXT_FACTORY(ExampleContext)); + +FilterStatus ExampleContext::onNewConnection() { + logTrace("onNewConnection " + std::to_string(id())); + return FilterStatus::Continue; +} + +FilterStatus ExampleContext::onDownstreamData(size_t data_length, bool end_stream) { + WasmDataPtr data = getBufferBytes(WasmBufferType::NetworkDownstreamData, 0, data_length); + logTrace("onDownstreamData " + std::to_string(id()) + " len=" + std::to_string(data_length) + + " end_stream=" + std::to_string(end_stream) + "\n" + std::string(data->view())); + setBuffer(WasmBufferType::NetworkDownstreamData, 0, 5, "write"); + return FilterStatus::Continue; +} + +FilterStatus ExampleContext::onUpstreamData(size_t data_length, bool end_stream) { + WasmDataPtr data = getBufferBytes(WasmBufferType::NetworkUpstreamData, 0, data_length); + logTrace("onUpstreamData " + std::to_string(id()) + " len=" + std::to_string(data_length) + + " end_stream=" + std::to_string(end_stream) + "\n" + std::string(data->view())); + return FilterStatus::Continue; +} + +void ExampleContext::onForeignFunction(uint32_t, uint32_t) { + logTrace("before segv"); + *badptr = 1; + logTrace("after segv"); +} + +void ExampleContext::onDownstreamConnectionClose(CloseType close_type) { + logTrace("onDownstreamConnectionClose " + std::to_string(id()) + " " + + std::to_string(static_cast(close_type))); +} + +void ExampleContext::onUpstreamConnectionClose(CloseType close_type) { + logTrace("onUpstreamConnectionClose " + std::to_string(id()) + " " + + std::to_string(static_cast(close_type))); +} + +END_WASM_PLUGIN diff --git a/test/extensions/filters/network/wasm/test_data/test_cpp_null_plugin.cc b/test/extensions/filters/network/wasm/test_data/test_cpp_null_plugin.cc new file mode 100644 index 000000000000..d626a15f607e --- /dev/null +++ b/test/extensions/filters/network/wasm/test_data/test_cpp_null_plugin.cc @@ -0,0 +1,15 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace NetworkTestCpp { +NullPluginRegistry* context_registry_; +} // namespace NetworkTestCpp + +RegisterNullVmPluginFactory register_common_wasm_test_cpp_plugin("NetworkTestCpp", []() { + return std::make_unique(NetworkTestCpp::context_registry_); +}); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/filters/network/wasm/wasm_filter_test.cc b/test/extensions/filters/network/wasm/wasm_filter_test.cc new file mode 100644 index 000000000000..6bf1ca8151e6 --- /dev/null +++ b/test/extensions/filters/network/wasm/wasm_filter_test.cc @@ -0,0 +1,208 @@ +#include 
"envoy/server/lifecycle_notifier.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/filters/network/wasm/wasm_filter.h" + +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/wasm_base.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Eq; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Wasm { + +using Envoy::Extensions::Common::Wasm::Context; +using Envoy::Extensions::Common::Wasm::Plugin; +using Envoy::Extensions::Common::Wasm::PluginSharedPtr; +using Envoy::Extensions::Common::Wasm::Wasm; +using proxy_wasm::ContextBase; + +class TestFilter : public Context { +public: + TestFilter(Wasm* wasm, uint32_t root_context_id, PluginSharedPtr plugin) + : Context(wasm, root_context_id, plugin) {} + MOCK_CONTEXT_LOG_; + + void testClose() { onCloseTCP(); } +}; + +class TestRoot : public Context { +public: + TestRoot(Wasm* wasm, const std::shared_ptr& plugin) : Context(wasm, plugin) {} + MOCK_CONTEXT_LOG_; +}; + +class WasmNetworkFilterTest : public Common::Wasm::WasmNetworkFilterTestBase< + testing::TestWithParam>> { +public: + WasmNetworkFilterTest() = default; + ~WasmNetworkFilterTest() override = default; + + void setupConfig(const std::string& code, std::string vm_configuration, bool fail_open = false) { + if (code.empty()) { + setupWasmCode(vm_configuration); + } else { + code_ = code; + } + setupBase( + std::get<0>(GetParam()), code_, + [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + return new TestRoot(wasm, plugin); + }, + "" /* root_id */, "" /* vm_configuration */, fail_open); + } + + void setupFilter() { setupFilterBase(""); } + + TestFilter& filter() { return *static_cast(context_.get()); } + +private: + void setupWasmCode(std::string vm_configuration) { + if (std::get<0>(GetParam()) == "null") { + code_ = "NetworkTestCpp"; + } else { + if (std::get<1>(GetParam()) == "cpp") { + code_ = TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath( + "test/extensions/filters/network/wasm/test_data/test_cpp.wasm")); + } else { + code_ = TestEnvironment::readFileToStringForTest(TestEnvironment::runfilesPath(absl::StrCat( + "test/extensions/filters/network/wasm/test_data/", vm_configuration + "_rust.wasm"))); + } + } + EXPECT_FALSE(code_.empty()); + } + +protected: + std::string code_; +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + std::make_tuple("v8", "cpp"), std::make_tuple("v8", "rust"), +#endif +#if defined(ENVOY_WASM_WAVM) + std::make_tuple("wavm", "cpp"), std::make_tuple("wavm", "rust"), +#endif + std::make_tuple("null", "cpp")); +INSTANTIATE_TEST_SUITE_P(RuntimesAndLanguages, WasmNetworkFilterTest, testing_values); + +// Bad code in initial config. +TEST_P(WasmNetworkFilterTest, BadCode) { + setupConfig("bad code", ""); + EXPECT_EQ(wasm_, nullptr); + setupFilter(); + filter().isFailed(); + EXPECT_CALL(read_filter_callbacks_.connection_, + close(Envoy::Network::ConnectionCloseType::FlushWrite)); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter().onNewConnection()); +} + +TEST_P(WasmNetworkFilterTest, BadCodeFailOpen) { + setupConfig("bad code", "", true); + EXPECT_EQ(wasm_, nullptr); + setupFilter(); + filter().isFailed(); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection()); +} + +// Test happy path. 
+TEST_P(WasmNetworkFilterTest, HappyPath) { + setupConfig("", "logging"); + setupFilter(); + + EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view("onNewConnection 2")))); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection()); + + Buffer::OwnedImpl fake_downstream_data("Fake"); + EXPECT_CALL(filter(), log_(spdlog::level::trace, + Eq(absl::string_view("onDownstreamData 2 len=4 end_stream=0\nFake")))); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onData(fake_downstream_data, false)); + EXPECT_EQ(fake_downstream_data.toString(), "write"); + + Buffer::OwnedImpl fake_upstream_data("Done"); + EXPECT_CALL(filter(), log_(spdlog::level::trace, + Eq(absl::string_view("onUpstreamData 2 len=4 end_stream=1\nDone")))); + EXPECT_CALL(filter(), + log_(spdlog::level::trace, Eq(absl::string_view("onUpstreamConnectionClose 2 0")))); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onWrite(fake_upstream_data, true)); + filter().onAboveWriteBufferHighWatermark(); + filter().onBelowWriteBufferLowWatermark(); + + EXPECT_CALL(filter(), + log_(spdlog::level::trace, Eq(absl::string_view("onDownstreamConnectionClose 2 1")))); + read_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite); + // Noop. + read_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite); + filter().testClose(); +} + +TEST_P(WasmNetworkFilterTest, CloseDownstreamFirst) { + setupConfig("", "logging"); + setupFilter(); + + EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view("onNewConnection 2")))); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection()); + + EXPECT_CALL(filter(), + log_(spdlog::level::trace, Eq(absl::string_view("onDownstreamConnectionClose 2 1")))); + write_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite); + read_filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite); +} + +TEST_P(WasmNetworkFilterTest, CloseStream) { + setupConfig("", "logging"); + setupFilter(); + + // No Context, does nothing. + filter().onEvent(Network::ConnectionEvent::RemoteClose); + Buffer::OwnedImpl fake_upstream_data("Done"); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onWrite(fake_upstream_data, true)); + Buffer::OwnedImpl fake_downstream_data("Fake"); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onData(fake_downstream_data, false)); + + // Create context. + EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view("onNewConnection 2")))); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection()); + EXPECT_CALL(filter(), + log_(spdlog::level::trace, Eq(absl::string_view("onDownstreamConnectionClose 2 1")))); + EXPECT_CALL(filter(), + log_(spdlog::level::trace, Eq(absl::string_view("onDownstreamConnectionClose 2 2")))); + + filter().onEvent(static_cast(9999)); // Does nothing. 
+ filter().onEvent(Network::ConnectionEvent::RemoteClose); + filter().closeStream(proxy_wasm::WasmStreamType::Downstream); + filter().closeStream(proxy_wasm::WasmStreamType::Upstream); +} + +TEST_P(WasmNetworkFilterTest, SegvFailOpen) { + if (std::get<0>(GetParam()) != "v8" || std::get<1>(GetParam()) != "cpp") { + return; + } + setupConfig("", "logging", true); + EXPECT_TRUE(plugin_->fail_open_); + setupFilter(); + + EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view("onNewConnection 2")))); + EXPECT_EQ(Network::FilterStatus::Continue, filter().onNewConnection()); + + EXPECT_CALL(filter(), log_(spdlog::level::trace, Eq(absl::string_view("before segv")))); + filter().onForeignFunction(0, 0); + EXPECT_TRUE(wasm_->wasm()->isFailed()); + + Buffer::OwnedImpl fake_downstream_data("Fake"); + // No logging expected. + EXPECT_EQ(Network::FilterStatus::Continue, filter().onData(fake_downstream_data, false)); +} + +} // namespace Wasm +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/3d6e5d07c6ce8f5996f0ac001fed89f0962e50d7 b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/3d6e5d07c6ce8f5996f0ac001fed89f0962e50d7 new file mode 100644 index 000000000000..813da23ee246 Binary files /dev/null and b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/3d6e5d07c6ce8f5996f0ac001fed89f0962e50d7 differ diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index 96d8f7508ef2..d07e2e955a11 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -40,7 +40,7 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime public: DnsFilterTest() : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")), - api_(Api::createApiForTest()), + api_(Api::createApiForTest(random_)), counters_(mock_query_buffer_underflow_, mock_record_name_overflow_, query_parsing_failure_) { udp_response_.addresses_.local_ = listener_address_; @@ -93,6 +93,7 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime } const Network::Address::InstanceConstSharedPtr listener_address_; + NiceMock random_; Api::ApiPtr api_; DnsFilterEnvoyConfigSharedPtr config_; NiceMock mock_query_buffer_underflow_; @@ -105,7 +106,6 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime Network::UdpRecvData udp_response_; NiceMock file_system_; NiceMock histogram_; - NiceMock random_; NiceMock listener_factory_; Stats::IsolatedStoreImpl stats_store_; std::shared_ptr resolver_; @@ -250,6 +250,10 @@ external_retry_count: 3 address_list: address: - "10.0.0.1" + - name: "www.external_foo1.com" + endpoint: + address_list: + address: - "10.0.0.2" - name: "www.external_foo2.com" endpoint: @@ -263,6 +267,70 @@ external_retry_count: 3 - "10.0.3.1" )EOF"; + const std::string max_records_table_yaml = R"EOF( +external_retry_count: 3 +known_suffixes: + - suffix: "ermac.com" +virtual_domains: + - name: "one.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.1" ] } + - name: "two.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.2" ] } + - name: "three.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.3" ] } + - name: "four.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.4" ] } + - name: "five.web.ermac.com" + endpoint: + 
address_list: { address: [ "10.0.17.5" ] } + - name: "six.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.6" ] } + - name: "seven.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.7" ] } + - name: "eight.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.8" ] } + - name: "nine.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.9" ] } + - name: "ten.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.10" ] } + - name: "eleven.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.11" ] } + - name: "twelve.web.ermac.com" + endpoint: + address_list: { address: [ "10.0.17.12" ] } + - name: "web.ermac.com" + endpoint: + service_list: + services: + - service_name: "http" + protocol: { number: 6 } + ttl: 86400s + targets: [ + { host_name: "one.web.ermac.com" , weight: 120, priority: 10, port: 80 }, + { host_name: "two.web.ermac.com", weight: 110, priority: 10, port: 80 }, + { host_name: "three.web.ermac.com", weight: 100, priority: 10, port: 80 }, + { host_name: "four.web.ermac.com", weight: 90, priority: 10, port: 80 }, + { host_name: "five.web.ermac.com" , weight: 80, priority: 10, port: 80 }, + { host_name: "six.web.ermac.com", weight: 70, priority: 10, port: 80 }, + { host_name: "seven.web.ermac.com", weight: 60, priority: 10, port: 80 }, + { host_name: "eight.web.ermac.com", weight: 50, priority: 10, port: 80 }, + { host_name: "nine.web.ermac.com" , weight: 40, priority: 10, port: 80 }, + { host_name: "ten.web.ermac.com", weight: 30, priority: 10, port: 80 }, + { host_name: "eleven.web.ermac.com", weight: 20, priority: 10, port: 80 }, + { host_name: "twelve.web.ermac.com", weight: 10, priority: 10, port: 80 } + ] +)EOF"; + const std::string external_dns_table_services_yaml = R"EOF( external_retry_count: 3 known_suffixes: @@ -277,6 +345,9 @@ external_retry_count: 3 - name: "backup.voip.subzero.com" endpoint: address_list: { address: [ "10.0.3.3" ] } + - name: "emergency.voip.subzero.com" + endpoint: + address_list: { address: [ "2200:823f::cafe:beef" ] } - name: "voip.subzero.com" endpoint: service_list: @@ -287,7 +358,8 @@ external_retry_count: 3 targets: [ { host_name: "primary.voip.subzero.com" , weight: 30, priority: 10, port: 5060 }, { host_name: "secondary.voip.subzero.com", weight: 20, priority: 10, port: 5061 }, - { host_name: "backup.voip.subzero.com", weight: 10, priority: 10, port: 5062 } + { host_name: "backup.voip.subzero.com", weight: 10, priority: 10, port: 5062 }, + { host_name: "emergency.voip.subzero.com", weight: 40, priority: 10, port: 5063 } ] - name: "web.subzero.com" endpoint: @@ -1111,6 +1183,183 @@ TEST_F(DnsFilterTest, InvalidAnswerNameTest) { 0x00, 0x00, 0x01, 0x19, // Answer TTL 0x00, 0x04, // Answer Data Length 0x42, 0xdc, 0x02, 0x4b, // Answer IP Address + 0x00, // Additional RR + 0x00, 0x29, 0x10, 0x00, // UDP Payload Size (4096) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + + Network::UdpRecvData data{}; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort("10.0.0.1:1000"); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(dns_request, count); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + + query_ctx_ = response_parser_->createQueryContext(data, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + // We should have zero parsed answers + EXPECT_TRUE(query_ctx_->answers_.empty()); +} + +TEST_F(DnsFilterTest, InvalidAnswerTypeTest) { + 
InSequence s; + + // In this buffer the answer type is incorrect for the given query. The answer is a NS + // type when an A record was requested. This should not happen on the wire. + constexpr unsigned char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x81, 0x80, // Flags + 0x00, 0x01, // Questions + 0x00, 0x01, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x01, // Additional RRs + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Record Type + 0x00, 0x01, // Record Class + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x02, // Answer Record Type + 0x00, 0x01, // Answer Record Class + 0x00, 0x00, 0x01, 0x19, // Answer TTL + 0x00, 0x04, // Answer Data Length + 0x42, 0xdc, 0x02, 0x4b, // Answer IP Address + 0x00, // Additional RR + 0x00, 0x29, 0x10, 0x00, // UDP Payload Size (4096) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + + Network::UdpRecvData data{}; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort("10.0.0.1:1000"); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(dns_request, count); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + + query_ctx_ = response_parser_->createQueryContext(data, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + // We should have zero parsed answers + EXPECT_TRUE(query_ctx_->answers_.empty()); +} + +TEST_F(DnsFilterTest, InvalidAnswerClassTest) { + InSequence s; + + // In this buffer the answer class is incorrect for the given query. The answer is a CH + // class when an IN class was requested. This should not happen on the wire. + constexpr unsigned char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x81, 0x80, // Flags + 0x00, 0x01, // Questions + 0x00, 0x01, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x01, // Additional RRs + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Record Type + 0x00, 0x01, // Record Class + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Answer Record Type + 0x00, 0x03, // Answer Record Class + 0x00, 0x00, 0x01, 0x19, // Answer TTL + 0x00, 0x04, // Answer Data Length + 0x42, 0xdc, 0x02, 0x4b, // Answer IP Address + 0x00, // Additional RR + 0x00, 0x29, 0x10, 0x00, // UDP Payload Size (4096) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + + Network::UdpRecvData data{}; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort("10.0.0.1:1000"); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(dns_request, count); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + + query_ctx_ = response_parser_->createQueryContext(data, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + // We should have zero parsed answers + EXPECT_TRUE(query_ctx_->answers_.empty()); +} + +TEST_F(DnsFilterTest, InvalidAnswerAddressTest) { + InSequence s; + + // In this buffer the address in the answer record is invalid. The IP should + // fail to parse. The class suggests it's an IPv6 address but there are only 4 + // bytes available. 
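// Editorial aside (not part of these tests): every hand-written dns_request buffer in this
// file begins with the fixed 12-byte DNS header and encodes query/answer names as
// length-prefixed labels. A rough standard-C++ sketch of both, for readers decoding the hex:
#include <cstdint>
#include <string>
#include <vector>

// The 12 bytes that open each buffer (all fields big-endian on the wire).
struct DnsHeader {
  uint16_t transaction_id; // 0x366b in these buffers
  uint16_t flags;          // 0x8180 in the response buffers: QR=1, RD=1, RA=1
  uint16_t questions;
  uint16_t answers;
  uint16_t authority_rrs;
  uint16_t additional_rrs;
};

// Illustration-only helper: each label is prefixed by its length and the name ends with a
// zero octet, so "ipv6.he.net" becomes 0x04 'i' 'p' 'v' '6' 0x02 'h' 'e' 0x03 'n' 'e' 't' 0x00,
// the 13 bytes repeated in the query and answer sections of the buffers above and below.
std::vector<uint8_t> encodeDnsName(const std::string& name) {
  std::vector<uint8_t> out;
  size_t start = 0;
  while (start < name.size()) {
    size_t dot = name.find('.', start);
    if (dot == std::string::npos) {
      dot = name.size();
    }
    out.push_back(static_cast<uint8_t>(dot - start)); // label length
    out.insert(out.end(), name.begin() + start, name.begin() + dot);
    start = dot + 1;
  }
  out.push_back(0); // root label terminates the name
  return out;
}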
+ constexpr unsigned char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x81, 0x80, // Flags + 0x00, 0x01, // Questions + 0x00, 0x01, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x01, // Additional RRs + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Record Type + 0x00, 0x01, // Record Class + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x1c, // Answer Record Type + 0x00, 0x01, // Answer Record Class + 0x00, 0x00, 0x01, 0x19, // Answer TTL + 0x00, 0x10, // Answer Data Length + 0x42, 0xdc, 0x02, 0x4b, // Answer IP Address + 0x00, // Additional RR + 0x00, 0x29, 0x10, 0x00, // UDP Payload Size (4096) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + + Network::UdpRecvData data{}; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort("10.0.0.1:1000"); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(dns_request, count); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + + setup(forward_query_off_config); + query_ctx_ = response_parser_->createQueryContext(data, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + // We should have one parsed query + EXPECT_FALSE(query_ctx_->queries_.empty()); + + // We should have zero parsed answers due to the IP parsing failure + EXPECT_TRUE(query_ctx_->answers_.empty()); +} + +TEST_F(DnsFilterTest, InvalidAnswerDataLengthTest) { + InSequence s; + + // In this buffer the answer data length is invalid (zero). This should not + // occur in data on the wire. + constexpr unsigned char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x81, 0x80, // Flags + 0x00, 0x01, // Questions + 0x00, 0x01, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x01, // Additional RRs + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Record Type + 0x00, 0x01, // Record Class + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Answer Record Type + 0x00, 0x01, // Answer Record Class + 0x00, 0x00, 0x01, 0x19, // Answer TTL + 0x00, 0x00, // Answer Data Length + 0x42, 0xdc, 0x02, 0x4b, // Answer IP Address 0x00, // Additional RR (we do not parse this) 0x00, 0x29, 0x10, 0x00, // UDP Payload Size (4096) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1131,6 +1380,82 @@ TEST_F(DnsFilterTest, InvalidAnswerNameTest) { EXPECT_TRUE(query_ctx_->answers_.empty()); } +TEST_F(DnsFilterTest, TruncatedAnswerRecordTest) { + InSequence s; + + // In this buffer the answer record is truncated. 
The filter should indicate + // a parsing failure + constexpr unsigned char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x81, 0x80, // Flags + 0x00, 0x01, // Questions + 0x00, 0x01, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Record Type + 0x00, 0x01, // Record Class + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Answer record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01, // Answer Record Type + 0x00, 0x01, // Answer Record Class + 0x00, 0x00, 0x01, 0x19, // Answer TTL + // Remaining data is truncated + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + + Network::UdpRecvData data{}; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort("10.0.0.1:1000"); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(dns_request, count); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + + setup(forward_query_off_config); + query_ctx_ = response_parser_->createQueryContext(data, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + // We should have one parsed query + EXPECT_FALSE(query_ctx_->queries_.empty()); + + // We should have zero parsed answers due to the IP parsing failure + EXPECT_TRUE(query_ctx_->answers_.empty()); +} + +TEST_F(DnsFilterTest, TruncatedQueryBufferTest) { + InSequence s; + + // In this buffer the query record is truncated. The filter should indicate + // a parsing failure + constexpr unsigned char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x01, // Additional RRs + 0x04, 0x69, 0x70, 0x76, 0x36, 0x02, 0x68, // Query record for + 0x65, 0x03, 0x6e, 0x65, 0x74, 0x00, // ipv6.he.net + 0x00, 0x01 // Record Type + // Truncated bytes here + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + + Network::UdpRecvData data{}; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort("10.0.0.1:1000"); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(dns_request, count); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + + query_ctx_ = response_parser_->createQueryContext(data, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + + // We should have zero parsed answers + EXPECT_TRUE(query_ctx_->answers_.empty()); +} + TEST_F(DnsFilterTest, InvalidQueryClassAndAnswerTypeTest) { InSequence s; @@ -1471,12 +1796,14 @@ TEST_F(DnsFilterTest, ConsumeExternalTableWithServicesTest) { {10, "backup.voip.subzero.com"}, {20, "secondary.voip.subzero.com"}, {30, "primary.voip.subzero.com"}, + {40, "emergency.voip.subzero.com"}, }; std::map validation_port_map = { {5062, "backup.voip.subzero.com"}, {5061, "secondary.voip.subzero.com"}, {5060, "primary.voip.subzero.com"}, + {5063, "emergency.voip.subzero.com"}, }; // Validate the weight for each SRV record. The TTL and priority are the same value for each @@ -1503,25 +1830,31 @@ TEST_F(DnsFilterTest, ConsumeExternalTableWithServicesTest) { EXPECT_EQ(expected_target, port_entry); } - // Validate additional records from the SRV query - const std::map target_map = { + // Validate additional records from the SRV query. 
Remove a matching + // entry to ensure that we are getting unique addresses in the additional + // records + std::map target_map = { {"primary.voip.subzero.com", "10.0.3.1"}, {"secondary.voip.subzero.com", "10.0.3.2"}, {"backup.voip.subzero.com", "10.0.3.3"}, + {"emergency.voip.subzero.com", "2200:823f::cafe:beef"}, }; + const size_t target_size = target_map.size(); - EXPECT_EQ(3, query_ctx_->additional_.size()); - for (const auto& answer : query_ctx_->additional_) { - const auto& entry = target_map.find(answer.first); + EXPECT_EQ(target_map.size(), query_ctx_->additional_.size()); + for (const auto& [hostname, address] : query_ctx_->additional_) { + const auto& entry = target_map.find(hostname); EXPECT_NE(entry, target_map.end()); - Utils::verifyAddress({entry->second}, answer.second); + Utils::verifyAddress({entry->second}, address); + target_map.erase(hostname); } // Validate stats EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); - EXPECT_EQ(3, config_->stats().local_srv_record_answers_.value()); - EXPECT_EQ(3, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(target_size, config_->stats().local_srv_record_answers_.value()); + EXPECT_EQ(target_size - 1, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().local_aaaa_record_answers_.value()); EXPECT_EQ(1, config_->stats().srv_record_queries_.value()); } @@ -1533,18 +1866,23 @@ TEST_F(DnsFilterTest, SrvTargetResolution) { std::string config_to_use = fmt::format(external_dns_table_config, temp_path); setup(config_to_use); - const std::map target_map = { - {"primary.voip.subzero.com", "10.0.3.1"}, - {"secondary.voip.subzero.com", "10.0.3.2"}, - {"backup.voip.subzero.com", "10.0.3.3"}, + struct RecordProperties { + uint16_t type; + std::string address; }; - for (const auto& target : target_map) { - const std::string& domain = target.first; - const std::string& ip = target.second; + const std::map target_map = { + {"primary.voip.subzero.com", {DNS_RECORD_TYPE_A, "10.0.3.1"}}, + {"secondary.voip.subzero.com", {DNS_RECORD_TYPE_A, "10.0.3.2"}}, + {"backup.voip.subzero.com", {DNS_RECORD_TYPE_A, "10.0.3.3"}}, + {"emergency.voip.subzero.com", {DNS_RECORD_TYPE_AAAA, "2200:823f::cafe:beef"}}, + }; - const std::string query = - Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + for (const auto& [domain, properties] : target_map) { + const uint16_t address_type = properties.type; + const std::string& ip = properties.address; + + const std::string query = Utils::buildQueryForDomain(domain, address_type, DNS_RECORD_CLASS_IN); ASSERT_FALSE(query.empty()); sendQueryFromClient("10.0.0.1:1000", query); @@ -1560,8 +1898,10 @@ TEST_F(DnsFilterTest, SrvTargetResolution) { // Validate stats EXPECT_EQ(target_map.size(), config_->stats().downstream_rx_queries_.value()); EXPECT_EQ(target_map.size(), config_->stats().known_domain_queries_.value()); - EXPECT_EQ(target_map.size(), config_->stats().local_a_record_answers_.value()); - EXPECT_EQ(target_map.size(), config_->stats().a_record_queries_.value()); + EXPECT_EQ(target_map.size() - 1, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().local_aaaa_record_answers_.value()); + EXPECT_EQ(target_map.size() - 1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); } TEST_F(DnsFilterTest, NonExistentClusterServiceLookup) { @@ -1634,6 +1974,57 @@ TEST_F(DnsFilterTest, SrvRecordQuery) { 
EXPECT_EQ(1, config_->stats().srv_record_queries_.value()); } +TEST_F(DnsFilterTest, SrvQueryMaxRecords) { + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.yaml", max_records_table_yaml); + std::string config_to_use = fmt::format(external_dns_table_config, temp_path); + setup(config_to_use); + + const std::string service{"_http._tcp.web.ermac.com"}; + const std::string query = + Utils::buildQueryForDomain(service, DNS_RECORD_TYPE_SRV, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + + // We can only serialize 7 records before reaching the 512 byte limit + EXPECT_LT(query_ctx_->answers_.size(), MAX_RETURNED_RECORDS); + EXPECT_LT(query_ctx_->additional_.size(), MAX_RETURNED_RECORDS); + + const std::list hosts{ + "one.web.ermac.com", "two.web.ermac.com", "three.web.ermac.com", "four.web.ermac.com", + "five.web.ermac.com", "six.web.ermac.com", "seven.web.ermac.com", + }; + + // Verify the service name and targets are sufficiently randomized + size_t exact_matches = 0; + auto host = hosts.begin(); + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.second->type_, DNS_RECORD_TYPE_SRV); + DnsSrvRecord* srv_rec = dynamic_cast(answer.second.get()); + + EXPECT_STREQ(service.c_str(), srv_rec->name_.c_str()); + + const auto target = srv_rec->targets_.begin(); + const auto target_name = target->first; + exact_matches += (target_name.compare(*host++) == 0); + } + EXPECT_LT(exact_matches, hosts.size()); + + // Verify that the additional records are not in the same order as the configuration + exact_matches = 0; + host = hosts.begin(); + for (const auto& answer : query_ctx_->additional_) { + exact_matches += (answer.first.compare(*host++) == 0); + } + EXPECT_LT(exact_matches, hosts.size()); +} + } // namespace } // namespace DnsFilter } // namespace UdpFilters diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc index b57a769f62a2..f891e0f8402f 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc @@ -49,9 +49,6 @@ TEST_F(DnsFilterUtilsTest, ServiceNameSynthesisTest) { {"name2", "_proto2", "test2.com", "_name2._proto2.test2.com"}, {"_name3", "proto3", "test3.com", "_name3._proto3.test3.com"}, {"name4", "proto4", "_sites.test4.com", "_name4._proto4._sites.test4.com"}, - {"_name5", "", "test5.com", ""}, - {"", "proto6", "test6.com", ""}, - {"name7", "proto7", "", ""}, }; for (auto& ptr : service_data) { diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc index c7676e7ebc8b..d396126c1dcc 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc @@ -21,6 +21,7 @@ using testing::AtLeast; using testing::ByMove; +using testing::DoAll; using testing::InSequence; using testing::InvokeWithoutArgs; using testing::Return; diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc index 0bdd2269ef29..b54620d614ab 100644 --- a/test/extensions/health_checkers/redis/config_test.cc +++ 
b/test/extensions/health_checkers/redis/config_test.cc @@ -167,8 +167,8 @@ TEST(HealthCheckerFactoryTest, CreateRedisViaUpstreamHealthCheckerFactory) { EXPECT_NE(nullptr, dynamic_cast( Upstream::HealthCheckerFactory::create( - Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, random, - dispatcher, log_manager, ProtobufMessage::getStrictValidationVisitor(), api) + Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, dispatcher, + log_manager, ProtobufMessage::getStrictValidationVisitor(), api) .get())); } } // namespace diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index bde3bfc54f05..a751b8be6428 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -59,7 +59,7 @@ class RedisHealthCheckerTest health_check_config, ProtobufMessage::getStrictValidationVisitor()); health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } @@ -95,7 +95,7 @@ class RedisHealthCheckerTest EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options)); health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } @@ -119,7 +119,7 @@ class RedisHealthCheckerTest health_check_config, ProtobufMessage::getStrictValidationVisitor()); health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } @@ -143,7 +143,7 @@ class RedisHealthCheckerTest health_check_config, ProtobufMessage::getStrictValidationVisitor()); health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } @@ -180,7 +180,7 @@ class RedisHealthCheckerTest EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options)); health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } @@ -203,7 +203,7 @@ class RedisHealthCheckerTest health_check_config, ProtobufMessage::getStrictValidationVisitor()); health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } @@ -227,7 +227,7 @@ class RedisHealthCheckerTest health_check_config, ProtobufMessage::getStrictValidationVisitor()); health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } @@ -286,7 +286,6 @@ class RedisHealthCheckerTest std::shared_ptr 
cluster_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; Upstream::MockHealthCheckEventLogger* event_logger_{}; Event::MockTimer* timeout_timer_{}; Event::MockTimer* interval_timer_{}; diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index bb259455a7c1..92aad3fe9a2e 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -27,11 +27,7 @@ envoy_cc_test( name = "envoy_quic_writer_test", srcs = ["envoy_quic_writer_test.cc"], external_deps = ["quiche_quic_platform"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ "//source/common/network:io_socket_error_lib", "//source/common/network:udp_packet_writer_handler_lib", @@ -74,11 +70,7 @@ envoy_cc_test( envoy_cc_test( name = "envoy_quic_server_stream_test", srcs = ["envoy_quic_server_stream_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ ":quic_test_utils_for_envoy_lib", ":test_utils_lib", @@ -99,11 +91,7 @@ envoy_cc_test( envoy_cc_test( name = "envoy_quic_client_stream_test", srcs = ["envoy_quic_client_stream_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ ":quic_test_utils_for_envoy_lib", ":test_utils_lib", @@ -123,11 +111,7 @@ envoy_cc_test( envoy_cc_test( name = "envoy_quic_server_session_test", srcs = ["envoy_quic_server_session_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ ":quic_test_utils_for_envoy_lib", ":test_proof_source_lib", @@ -156,11 +140,7 @@ envoy_cc_test( envoy_cc_test( name = "envoy_quic_client_session_test", srcs = ["envoy_quic_client_session_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ ":quic_test_utils_for_envoy_lib", ":test_utils_lib", @@ -182,10 +162,9 @@ envoy_cc_test( envoy_cc_test( name = "active_quic_listener_test", srcs = ["active_quic_listener_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows tags = [ + "fails_on_windows", "nofips", - "skip_on_windows", ], deps = [ ":quic_test_utils_for_envoy_lib", @@ -207,11 +186,7 @@ envoy_cc_test( envoy_cc_test( name = "envoy_quic_dispatcher_test", srcs = ["envoy_quic_dispatcher_test.cc"], - # quic_sent_packet_manager.cc does not compile error: warning C4715: 'quic::QuicSentPacketManager::OnRetransmissionTimeout': not all control paths return a value - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ ":quic_test_utils_for_envoy_lib", ":test_proof_source_lib", @@ -278,11 +253,7 @@ envoy_cc_test( envoy_cc_test( name = "envoy_quic_utils_test", srcs = ["envoy_quic_utils_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ ":quic_test_utils_for_envoy_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib", @@ -294,11 +265,7 @@ envoy_cc_test( envoy_cc_test( name = "active_quic_listener_config_test", srcs = 
["active_quic_listener_config_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = [ "//source/common/config:utility_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", @@ -309,11 +276,7 @@ envoy_cc_test( envoy_cc_test( name = "envoy_quic_simulated_watermark_buffer_test", srcs = ["envoy_quic_simulated_watermark_buffer_test.cc"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows - tags = [ - "nofips", - "skip_on_windows", - ], + tags = ["nofips"], deps = ["//source/extensions/quic_listeners/quiche:envoy_quic_simulated_watermark_buffer_lib"], ) diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index 5d7cfdea89d8..8677ca8252b7 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -1,40 +1,40 @@ #include - -#pragma GCC diagnostic push -// QUICHE allows unused parameters. -#pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). -#pragma GCC diagnostic ignored "-Winvalid-offsetof" - #include -#include "common/runtime/runtime_impl.h" - #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/base.pb.validate.h" #include "envoy/network/exception.h" +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif + #include "quiche/quic/core/crypto/crypto_protocol.h" #include "quiche/quic/test_tools/crypto_test_utils.h" #include "quiche/quic/test_tools/quic_dispatcher_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/quic/test_tools/quic_crypto_server_config_peer.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "server/configuration_impl.h" #include "common/common/logger.h" #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_factory.h" +#include "common/network/udp_packet_writer_handler_impl.h" +#include "common/runtime/runtime_impl.h" #include "extensions/quic_listeners/quiche/active_quic_listener.h" #include "test/extensions/quic_listeners/quiche/test_utils.h" #include "test/extensions/quic_listeners/quiche/test_proof_source.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/environment.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/instance.h" - #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/utility.h" #include "test/test_common/network_utility.h" #include "absl/time/time.h" @@ -81,14 +81,13 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { : version_(GetParam().first), api_(Api::createApiForTest(simulated_time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), clock_(*dispatcher_), local_address_(Network::Test::getCanonicalLoopbackAddress(version_)), - connection_handler_(*dispatcher_), quic_version_([]() { + connection_handler_(*dispatcher_, absl::nullopt), quic_version_([]() { if (GetParam().second == QuicVersionType::GquicQuicCrypto) { return quic::CurrentSupportedVersionsWithQuicCrypto(); } bool use_http3 = GetParam().second == QuicVersionType::Iquic; SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); 
SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); - SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); }()[0]) {} @@ -112,23 +111,27 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { ON_CALL(listener_config_, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(listen_socket_)); - // Use UdpGsoBatchWriter to perform non-batched writes for the purpose of this test + // Use UdpGsoBatchWriter to perform non-batched writes for the purpose of this test, if it is + // supported. ON_CALL(listener_config_, udpPacketWriterFactory()) .WillByDefault(Return( std::reference_wrapper(udp_packet_writer_factory_))); ON_CALL(udp_packet_writer_factory_, createUdpPacketWriter(_, _)) .WillByDefault(Invoke( [&](Network::IoHandle& io_handle, Stats::Scope& scope) -> Network::UdpPacketWriterPtr { - Network::UdpPacketWriterPtr udp_packet_writer = - std::make_unique(io_handle, scope); - return udp_packet_writer; +#if UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT + return std::make_unique(io_handle, scope); +#else + UNREFERENCED_PARAMETER(scope); + return std::make_unique(io_handle); +#endif })); listener_factory_ = createQuicListenerFactory(yamlForQuicConfig()); EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager_)); quic_listener_ = staticUniquePointerCast(listener_factory_->createActiveUdpListener( - connection_handler_, *dispatcher_, listener_config_)); + 0, connection_handler_, *dispatcher_, listener_config_)); quic_dispatcher_ = ActiveQuicListenerPeer::quicDispatcher(*quic_listener_); quic::QuicCryptoServerConfig& crypto_config = ActiveQuicListenerPeer::cryptoConfig(*quic_listener_); @@ -136,7 +139,14 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { auto proof_source = std::make_unique(); filter_chain_ = &proof_source->filterChain(); crypto_config_peer.ResetProofSource(std::move(proof_source)); - simulated_time_system_.advanceTimeAsync(std::chrono::milliseconds(100)); + simulated_time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); + + // The state of whether client hellos can be buffered or not is different before and after + // the first packet processed by the listener. This only matters in tests. Force an event + // to get it into a consistent state. + dispatcher_->post([this]() { quic_listener_->onReadReady(); }); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } @@ -201,6 +211,14 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { auto send_rc = Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), slice.data(), 1, nullptr, *listen_socket_->localAddress()); ASSERT_EQ(slice[0].len_, send_rc.rc_); + +#if defined(__APPLE__) + // This sleep makes the tests pass more reliably. Some debugging showed that without this, + // no packet is received when the event loop is running. + // TODO(ggreenway): make tests more reliable, and handle packet loss during the tests, possibly + // by retransmitting on a timer. 
+ ::usleep(1000); +#endif } void readFromClientSockets() { @@ -212,7 +230,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { do { Api::IoCallUint64Result result = - result_buffer->read(client_socket->ioHandle(), bytes_to_read - bytes_read); + client_socket->ioHandle().read(*result_buffer, bytes_to_read - bytes_read); if (result.ok()) { bytes_read += result.rc_; @@ -231,7 +249,9 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { } void TearDown() override { - quic_listener_->onListenerShutdown(); + if (quic_listener_ != nullptr) { + quic_listener_->onListenerShutdown(); + } // Trigger alarm to fire before listener destruction. dispatcher_->run(Event::Dispatcher::RunType::NonBlock); Runtime::LoaderSingleton::clear(); @@ -293,10 +313,11 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { .WillOnce(Return(false)); auto options = std::make_shared>(); options->emplace_back(std::move(option)); + quic_listener_.reset(); EXPECT_THROW_WITH_REGEX( - std::make_unique( - *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, - options, + (void)std::make_unique( + 0, 1, *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, + options, false, ActiveQuicListenerFactoryPeer::runtimeEnabled( static_cast(listener_factory_.get()))), Network::CreateListenerException, "Failed to apply socket options."); @@ -316,50 +337,46 @@ TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { quic::QuicBufferedPacketStore* const buffered_packets = quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); - maybeConfigureMocks(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2); + const uint32_t count = (ActiveQuicListener::kNumSessionsToCreatePerLoop * 2) + 1; + maybeConfigureMocks(count); // Generate one more CHLO than can be processed immediately. - for (size_t i = 1; i <= ActiveQuicListener::kNumSessionsToCreatePerLoop + 1; ++i) { + for (size_t i = 1; i <= count; ++i) { sendCHLO(quic::test::TestConnectionId(i)); } dispatcher_->run(Event::Dispatcher::RunType::NonBlock); - // The first kNumSessionsToCreatePerLoop CHLOs are processed, - // the last one is buffered. - for (size_t i = 1; i <= ActiveQuicListener::kNumSessionsToCreatePerLoop; ++i) { - EXPECT_FALSE(buffered_packets->HasBufferedPackets(quic::test::TestConnectionId(i))); - } - EXPECT_TRUE(buffered_packets->HasBufferedPackets( - quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 1))); - EXPECT_TRUE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(quic_dispatcher_->session_map().empty()); + // The first kNumSessionsToCreatePerLoop were processed immediately, the next + // kNumSessionsToCreatePerLoop were buffered for the next run of the event loop, and the last one + // was buffered to the subsequent event loop. + EXPECT_EQ(2, quic_listener_->eventLoopsWithBufferedChlosForTest()); - // Generate more data to trigger a socket read during the next event loop. - sendCHLO(quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); - - // The socket read results in processing all CHLOs. 
- for (size_t i = 1; i <= ActiveQuicListener::kNumSessionsToCreatePerLoop + 2; ++i) { + for (size_t i = 1; i <= count; ++i) { EXPECT_FALSE(buffered_packets->HasBufferedPackets(quic::test::TestConnectionId(i))); } EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); readFromClientSockets(); } TEST_P(ActiveQuicListenerTest, QuicProcessingDisabledAndEnabled) { + maybeConfigureMocks(/* connection_count = */ 2); EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); - Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " false"}}); sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_EQ(quic_dispatcher_->session_map().size(), 1); + + Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " false"}}); + sendCHLO(quic::test::TestConnectionId(2)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // If listener was enabled, there should have been session created for active connection. - EXPECT_TRUE(quic_dispatcher_->session_map().empty()); + EXPECT_EQ(quic_dispatcher_->session_map().size(), 1); EXPECT_FALSE(ActiveQuicListenerPeer::enabled(*quic_listener_)); + Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " true"}}); - maybeConfigureMocks(/* connection_count = */ 1); - sendCHLO(quic::test::TestConnectionId(1)); + sendCHLO(quic::test::TestConnectionId(2)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); - EXPECT_FALSE(quic_dispatcher_->session_map().empty()); + EXPECT_EQ(quic_dispatcher_->session_map().size(), 2); EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); } diff --git a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc index cafdce0c6227..3ed00db00274 100644 --- a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc +++ b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc @@ -3,19 +3,17 @@ // This file defines platform dependent test utility functions which is declared // in quiche/quic/test_tools/crypto_test_utils.h. -#ifdef __GNUC__ +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" #pragma GCC diagnostic ignored "-Wtype-limits" +#endif #include "quiche/quic/test_tools/crypto_test_utils.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop -#else -#include "quiche/quic/test_tools/crypto_test_utils.h" #endif #include diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc index 0e0ab28bb48d..cdbae27f9ba1 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc @@ -36,8 +36,8 @@ class EnvoyQuicAlarmTest : public ::testing::Test { alarm_factory_(*dispatcher_, clock_) {} void advanceMsAndLoop(int64_t delay_ms) { - time_system_.advanceTimeAsync(std::chrono::milliseconds(delay_ms)); - dispatcher_->run(Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(delay_ms), *dispatcher_, + Dispatcher::RunType::NonBlock); } protected: diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index ef50798517da..e2d90d916469 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -1,14 +1,16 @@ +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/crypto/null_encrypter.h" #include "quiche/quic/test_tools/crypto_test_utils.h" #include "quiche/quic/test_tools/quic_test_utils.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_client_session.h" #include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h" @@ -97,7 +99,6 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -215,9 +216,13 @@ TEST_P(EnvoyQuicClientSessionTest, OnGoAwayFrame) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; - quic::QuicGoAwayFrame goaway; EXPECT_CALL(http_connection_callbacks_, onGoAway(Http::GoAwayErrorCode::NoError)); - quic_connection_->OnGoAwayFrame(goaway); + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + envoy_quic_session_.OnHttp3GoAway(4u); + } else { + quic::QuicGoAwayFrame goaway; + quic_connection_->OnGoAwayFrame(goaway); + } } TEST_P(EnvoyQuicClientSessionTest, ConnectionClose) { diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index 9784c7231ff2..ac82239db0bb 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -27,7 +27,6 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { 
alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -157,8 +156,8 @@ TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { .WillOnce(Invoke([](const Http::ResponseTrailerMapPtr& headers) { Http::LowerCaseString key1("key1"); Http::LowerCaseString key2(":final-offset"); - EXPECT_EQ("value1", headers->get(key1)->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(key2)); + EXPECT_EQ("value1", headers->get(key1)[0]->value().getStringView()); + EXPECT_TRUE(headers->get(key2).empty()); })); quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_); } @@ -206,8 +205,8 @@ TEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) { .WillOnce(Invoke([](const Http::ResponseTrailerMapPtr& headers) { Http::LowerCaseString key1("key1"); Http::LowerCaseString key2(":final-offset"); - EXPECT_EQ("value1", headers->get(key1)->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(key2)); + EXPECT_EQ("value1", headers->get(key1)[0]->value().getStringView()); + EXPECT_TRUE(headers->get(key2).empty()); })); quic_stream_->OnStreamFrame(frame); } @@ -231,30 +230,30 @@ TEST_P(EnvoyQuicClientStreamTest, WatermarkSendBuffer) { quic_stream_->encodeData(buffer, false); EXPECT_EQ(0u, buffer.length()); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); // Receive a WINDOW_UPDATE frame not large enough to drain half of the send // buffer. quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), 16 * 1024 + 8 * 1024); quic_stream_->OnWindowUpdateFrame(window_update1); - EXPECT_FALSE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_FALSE(quic_stream_->IsFlowControlBlocked()); quic_session_.OnCanWrite(); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); // Receive another WINDOW_UPDATE frame to drain the send buffer till below low // watermark. quic::QuicWindowUpdateFrame window_update2(quic::kInvalidControlFrameId, quic_stream_->id(), 16 * 1024 + 8 * 1024 + 1024); quic_stream_->OnWindowUpdateFrame(window_update2); - EXPECT_FALSE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_FALSE(quic_stream_->IsFlowControlBlocked()); EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([this]() { std::string rest_request(1, 'a'); Buffer::OwnedImpl buffer(rest_request); quic_stream_->encodeData(buffer, true); })); quic_session_.OnCanWrite(); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); quic::QuicWindowUpdateFrame window_update3(quic::kInvalidControlFrameId, quic_stream_->id(), 32 * 1024 + 1024); @@ -309,7 +308,7 @@ TEST_P(EnvoyQuicClientStreamTest, HeadersContributeToWatermarkIquic) { })); EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()); quic_session_.OnCanWrite(); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); // Update flow control window to write all the buffered data. 
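// Editorial note on the assertion changes above: the trailer checks track a header-map API
// in which get() returns a (possibly empty) set of matching entries rather than a single
// pointer, so the first match is read as headers->get(key)[0]->value() and absence is
// asserted with headers->get(key).empty(). Likewise, the flow-control assertions move from
// quic_stream_->flow_controller()->IsBlocked() to the stream's own IsFlowControlBlocked().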
quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index 9a9098ea9334..67ddffd4d411 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -1,18 +1,20 @@ #include +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/quic_dispatcher.h" #include "quiche/quic/test_tools/quic_dispatcher_peer.h" #include "quiche/quic/test_tools/crypto_test_utils.h" - #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/common/platform/api/quiche_text_utils.h" + +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include @@ -67,7 +69,6 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, bool use_http3 = GetParam().second == QuicVersionType::Iquic; SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); - SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); }()), quic_version_(version_manager_.GetSupportedVersions()[0]), @@ -77,7 +78,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, per_worker_stats_({ALL_PER_HANDLER_LISTENER_STATS( POOL_COUNTER_PREFIX(listener_config_.listenerScope(), "worker."), POOL_GAUGE_PREFIX(listener_config_.listenerScope(), "worker."))}), - connection_handler_(*dispatcher_), + connection_handler_(*dispatcher_, absl::nullopt), envoy_quic_dispatcher_( &crypto_config_, quic_config_, &version_manager_, std::make_unique(*dispatcher_), @@ -93,8 +94,8 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, void SetUp() override { // Advance time a bit because QuicTime regards 0 as uninitialized timestamp. - time_system_.advanceTimeAsync(std::chrono::milliseconds(100)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(listener_config_, perConnectionBufferLimitBytes()) .WillRepeatedly(Return(1024 * 1024)); } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 13ac84877a46..05307c6b9b7c 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -1,8 +1,8 @@ +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/crypto/null_encrypter.h" #include "quiche/quic/core/quic_crypto_server_stream.h" @@ -13,7 +13,9 @@ #include "quiche/quic/test_tools/quic_server_session_base_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include @@ -90,7 +92,7 @@ class ProofSourceDetailsSetter { virtual void setProofSourceDetails(std::unique_ptr details) = 0; }; -class TestQuicCryptoServerStream : public EnvoyQuicCryptoServerStream, +class TestQuicCryptoServerStream : public quic::QuicCryptoServerStream, public ProofSourceDetailsSetter { public: ~TestQuicCryptoServerStream() override = default; @@ -99,11 +101,11 @@ class TestQuicCryptoServerStream : public EnvoyQuicCryptoServerStream, quic::QuicCompressedCertsCache* compressed_certs_cache, quic::QuicSession* session, quic::QuicCryptoServerStreamBase::Helper* helper) - : EnvoyQuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} + : quic::QuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} bool encryption_established() const override { return true; } - const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + const EnvoyQuicProofSourceDetails* ProofSourceDetails() const override { return details_.get(); } void setProofSourceDetails(std::unique_ptr details) override { details_ = std::move(details); @@ -113,20 +115,20 @@ class TestQuicCryptoServerStream : public EnvoyQuicCryptoServerStream, std::unique_ptr details_; }; -class TestEnvoyQuicTlsServerHandshaker : public EnvoyQuicTlsServerHandshaker, +class TestEnvoyQuicTlsServerHandshaker : public quic::TlsServerHandshaker, public ProofSourceDetailsSetter { public: ~TestEnvoyQuicTlsServerHandshaker() override = default; TestEnvoyQuicTlsServerHandshaker(quic::QuicSession* session, const quic::QuicCryptoServerConfig& crypto_config) - : EnvoyQuicTlsServerHandshaker(session, crypto_config), + : quic::TlsServerHandshaker(session, crypto_config), params_(new quic::QuicCryptoNegotiatedParameters) { params_->cipher_suite = 1; } bool encryption_established() const override { return true; } - const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + const EnvoyQuicProofSourceDetails* ProofSourceDetails() const override { return details_.get(); } void setProofSourceDetails(std::unique_ptr details) override { details_ = std::move(details); } @@ -147,7 +149,6 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), quic_connection_(new TestEnvoyQuicServerConnection( @@ -167,8 +168,8 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { // Advance time and trigger update of Dispatcher::approximateMonotonicTime() // because zero QuicTime is considered uninitialized. 
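// Editorial note on the recurring simulated-time change in these QUIC tests: the two-step
//   time_system_.advanceTimeAsync(std::chrono::milliseconds(n));
//   dispatcher_->run(Event::Dispatcher::RunType::NonBlock);
// sequence is replaced throughout by the combined helper
//   time_system_.advanceTimeAndRun(std::chrono::milliseconds(n), *dispatcher_,
//                                  Event::Dispatcher::RunType::NonBlock);
// which advances the simulated clock and runs the event loop in one call.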
- time_system_.advanceTimeAsync(std::chrono::milliseconds(1)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(1), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); connection_helper_.GetClock()->Now(); ON_CALL(writer_, WritePacket(_, _, _, _, _)) @@ -506,23 +507,23 @@ TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { stream->encodeData(buffer, false); // Stream become write blocked. EXPECT_TRUE(envoy_quic_session_.HasDataToWrite()); - EXPECT_TRUE(stream->flow_controller()->IsBlocked()); + EXPECT_TRUE(stream->IsFlowControlBlocked()); EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked()); // Connection shouldn't be closed right away as there is a stream write blocked. envoy_quic_session_.close(Network::ConnectionCloseType::FlushWrite); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); // Another write event without updating flow control window shouldn't trigger // connection close, but it should update the timer. envoy_quic_session_.OnCanWrite(); EXPECT_TRUE(envoy_quic_session_.HasDataToWrite()); // Timer shouldn't fire at original deadline. - time_system_.advanceTimeAsync(std::chrono::milliseconds(90)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); EXPECT_CALL(*quic_connection_, @@ -530,8 +531,8 @@ TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _)); // Advance the time to fire connection close timer. - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); EXPECT_FALSE(quic_connection_->connected()); } @@ -599,7 +600,7 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { stream->encodeData(buffer, true); // Stream become write blocked. EXPECT_TRUE(envoy_quic_session_.HasDataToWrite()); - EXPECT_TRUE(stream->flow_controller()->IsBlocked()); + EXPECT_TRUE(stream->IsFlowControlBlocked()); EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked()); // Connection shouldn't be closed right away as there is a stream write blocked. @@ -611,8 +612,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { EXPECT_TRUE(envoy_quic_session_.HasDataToWrite()); // No timeout set, so alarm shouldn't fire. - time_system_.advanceTimeAsync(std::chrono::milliseconds(100)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); // Force close connection. @@ -641,7 +642,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithTimeout) { // Advance the time a bit and try to close again. 
The delay close timer // shouldn't be rescheduled by this call. - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); @@ -650,8 +652,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithTimeout) { EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _)); // Advance the time to fire connection close timer. - time_system_.advanceTimeAsync(std::chrono::milliseconds(90)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); EXPECT_FALSE(quic_connection_->connected()); } @@ -670,12 +672,12 @@ TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithTimeout) { EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); // Unblocking the stream shouldn't close the connection as it should be // delayed. - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); envoy_quic_session_.OnCanWrite(); // delay close alarm should have been rescheduled. - time_system_.advanceTimeAsync(std::chrono::milliseconds(90)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); EXPECT_CALL(*quic_connection_, @@ -683,8 +685,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithTimeout) { EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _)); // Advance the time to fire connection close timer. - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); EXPECT_FALSE(quic_connection_->connected()); } @@ -702,8 +704,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlusWriteTransitToFlushWriteWithDelay) { envoy_quic_session_.close(Network::ConnectionCloseType::FlushWrite); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); // The closing behavior should be changed. envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay); // Unblocking the stream shouldn't close the connection as it should be @@ -711,8 +713,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlusWriteTransitToFlushWriteWithDelay) { envoy_quic_session_.OnCanWrite(); // delay close alarm should have been rescheduled. 
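Note on the recurring change in these hunks: the previous two-step idiom for simulated time (advanceTimeAsync followed by a non-blocking dispatcher run) is folded into a single advanceTimeAndRun call, and the stream's flow_controller()->IsBlocked() check is replaced by the IsFlowControlBlocked() accessor. A minimal sketch of the time pattern, assuming a fixture that owns an Event::SimulatedTimeSystem time_system_ and an Event::DispatcherPtr dispatcher_ as these tests do:

    // Old idiom: advance simulated time, then pump the event loop once so
    // that alarms scheduled inside the advanced window get a chance to fire.
    time_system_.advanceTimeAsync(std::chrono::milliseconds(10));
    dispatcher_->run(Event::Dispatcher::RunType::NonBlock);

    // New idiom: a single call that advances simulated time and runs the
    // dispatcher with the given RunType, so pending timers can fire.
    time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_,
                                   Event::Dispatcher::RunType::NonBlock);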
- time_system_.advanceTimeAsync(std::chrono::milliseconds(90)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); EXPECT_CALL(*quic_connection_, @@ -720,8 +722,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlusWriteTransitToFlushWriteWithDelay) { EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionTermination, _)); // Advance the time to fire connection close timer. - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); EXPECT_FALSE(quic_connection_->connected()); } @@ -735,8 +737,8 @@ TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithNoPendingData) { // Advance the time a bit and try to close again. The delay close timer // shouldn't be rescheduled by this call. - time_system_.advanceTimeAsync(std::chrono::milliseconds(10)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(10), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); envoy_quic_session_.close(Network::ConnectionCloseType::FlushWriteAndDelay); EXPECT_EQ(Network::Connection::State::Open, envoy_quic_session_.state()); @@ -744,16 +746,20 @@ TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithNoPendingData) { SendConnectionClosePacket(quic::QUIC_NO_ERROR, "Closed by application")); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); // Advance the time to fire connection close timer. - time_system_.advanceTimeAsync(std::chrono::milliseconds(90)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(90), *dispatcher_, + Event::Dispatcher::RunType::NonBlock); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); } TEST_P(EnvoyQuicServerSessionTest, ShutdownNotice) { installReadFilter(); - // Not verifying dummy implementation, just to have coverage. - EXPECT_DEATH(envoy_quic_session_.enableHalfClose(true), ""); - EXPECT_EQ(nullptr, envoy_quic_session_.ssl()); + testing::NiceMock debug_visitor; + envoy_quic_session_.set_debug_visitor(&debug_visitor); + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_)); + } else { + // This is a no-op for pre-HTTP3 versions of QUIC. 
+ } http_connection_->shutdownNotice(); } @@ -795,10 +801,8 @@ TEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) { if (!quic_version_[0].UsesTls()) { envoy_quic_session_.SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); } else { - if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - EXPECT_CALL(*quic_connection_, SendControlFrame(_)); - } - envoy_quic_session_.OnOneRttKeysAvailable(); + EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + envoy_quic_session_.OnTlsHandshakeComplete(); } EXPECT_EQ(nullptr, envoy_quic_session_.socketOptions()); EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().isOpen()); @@ -877,7 +881,7 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { Buffer::OwnedImpl buffer(response); EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()); stream1->encodeData(buffer, false); - EXPECT_TRUE(stream1->flow_controller()->IsBlocked()); + EXPECT_TRUE(stream1->IsFlowControlBlocked()); EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked()); // Receive another request and send back response to trigger connection level @@ -943,7 +947,7 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { stream1->encodeData(buffer, true); })); envoy_quic_session_.OnCanWrite(); - EXPECT_TRUE(stream1->flow_controller()->IsBlocked()); + EXPECT_TRUE(stream1->IsFlowControlBlocked()); // Update flow control window for stream2. quic::QuicWindowUpdateFrame window_update2(quic::kInvalidControlFrameId, stream2->id(), @@ -974,7 +978,7 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { })); EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark()); envoy_quic_session_.OnCanWrite(); - EXPECT_TRUE(stream2->flow_controller()->IsBlocked()); + EXPECT_TRUE(stream2->IsFlowControlBlocked()); // Resetting stream3 should lower the buffered bytes, but callbacks will not // be triggered because reset callback has been already triggered. @@ -1082,7 +1086,7 @@ TEST_P(EnvoyQuicServerSessionTest, HeadersContributeToWatermarkGquic) { [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(2); envoy_quic_session_.OnCanWrite(); - EXPECT_TRUE(stream1->flow_controller()->IsBlocked()); + EXPECT_TRUE(stream1->IsFlowControlBlocked()); // Buffer more response because of flow control. The buffered bytes become just below connection // level high watermark. diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 4a4236737bd0..42ba39344f4b 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -1,14 +1,17 @@ #include +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
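The include changes at the top of envoy_quic_server_stream_test.cc (and repeated in the other QUICHE-including test files in this patch) guard the GCC warning-suppression pragmas with #if defined(__GNUC__), so compilers that do not understand GCC pragmas, such as MSVC, still build these files; the suppressed warnings exist because QUICHE allows unused parameters and uses offsetof(). The resulting shape of a guarded QUICHE include block, as a sketch:

    #if defined(__GNUC__)
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wunused-parameter"
    #pragma GCC diagnostic ignored "-Winvalid-offsetof"
    #endif

    #include "quiche/quic/test_tools/quic_connection_peer.h"
    #include "quiche/quic/test_tools/quic_session_peer.h"

    #if defined(__GNUC__)
    #pragma GCC diagnostic pop
    #endif

Clang also defines __GNUC__ and accepts these pragmas, so the guard only omits them for compilers that would otherwise warn about unknown pragmas.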
#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/test_tools/quic_connection_peer.h" #include "quiche/quic/test_tools/quic_session_peer.h" + +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "common/event/libevent_scheduler.h" #include "common/http/headers.h" @@ -42,7 +45,6 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), @@ -200,8 +202,8 @@ TEST_P(EnvoyQuicServerStreamTest, DecodeHeadersBodyAndTrailers) { .WillOnce(Invoke([](const Http::RequestTrailerMapPtr& headers) { Http::LowerCaseString key1("key1"); Http::LowerCaseString key2(":final-offset"); - EXPECT_EQ("value1", headers->get(key1)->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(key2)); + EXPECT_EQ("value1", headers->get(key1)[0]->value().getStringView()); + EXPECT_TRUE(headers->get(key2).empty()); })); quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_); EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); @@ -237,8 +239,8 @@ TEST_P(EnvoyQuicServerStreamTest, OutOfOrderTrailers) { .WillOnce(Invoke([](const Http::RequestTrailerMapPtr& headers) { Http::LowerCaseString key1("key1"); Http::LowerCaseString key2(":final-offset"); - EXPECT_EQ("value1", headers->get(key1)->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(key2)); + EXPECT_EQ("value1", headers->get(key1)[0]->value().getStringView()); + EXPECT_TRUE(headers->get(key2).empty()); })); quic_stream_->OnStreamFrame(frame); } @@ -344,30 +346,30 @@ TEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) { quic_stream_->encodeData(buffer, false); EXPECT_EQ(0u, buffer.length()); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); // Receive a WINDOW_UPDATE frame not large enough to drain half of the send // buffer. quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), 16 * 1024 + 8 * 1024); quic_stream_->OnWindowUpdateFrame(window_update1); - EXPECT_FALSE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_FALSE(quic_stream_->IsFlowControlBlocked()); quic_session_.OnCanWrite(); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); // Receive another WINDOW_UPDATE frame to drain the send buffer till below low // watermark. 
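The trailer assertions above also pick up the reworked header-map lookup API: HeaderMap::get() now returns a result object that can hold every value recorded for a key, so absence is checked with empty() and the first value is reached by indexing, instead of the old pointer-or-nullptr return. A small before/after sketch, assuming a test header map:

    Http::TestRequestHeaderMapImpl headers{{"key1", "value1"}};
    const Http::LowerCaseString key1("key1");
    const Http::LowerCaseString absent(":final-offset");

    // Old API (single pointer, nullptr when the header is absent):
    //   EXPECT_EQ("value1", headers.get(key1)->value().getStringView());
    //   EXPECT_EQ(nullptr, headers.get(absent));

    // New API (indexable result that may hold multiple values):
    const auto values = headers.get(key1);
    ASSERT_FALSE(values.empty());
    EXPECT_EQ("value1", values[0]->value().getStringView());
    EXPECT_TRUE(headers.get(absent).empty());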
quic::QuicWindowUpdateFrame window_update2(quic::kInvalidControlFrameId, quic_stream_->id(), 16 * 1024 + 8 * 1024 + 1024); quic_stream_->OnWindowUpdateFrame(window_update2); - EXPECT_FALSE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_FALSE(quic_stream_->IsFlowControlBlocked()); EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([this]() { std::string rest_response(1, 'a'); Buffer::OwnedImpl buffer(rest_response); quic_stream_->encodeData(buffer, true); })); quic_session_.OnCanWrite(); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); quic::QuicWindowUpdateFrame window_update3(quic::kInvalidControlFrameId, quic_stream_->id(), 32 * 1024 + 1024); @@ -420,7 +422,7 @@ TEST_P(EnvoyQuicServerStreamTest, HeadersContributeToWatermarkIquic) { })); EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()); quic_session_.OnCanWrite(); - EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + EXPECT_TRUE(quic_stream_->IsFlowControlBlocked()); // Update flow control window to write all the buffered data. quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc index 68d606ea54b4..ad5dd5df4870 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc @@ -1,14 +1,16 @@ #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/test_tools/quic_test_utils.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "test/mocks/api/mocks.h" #include "test/test_common/threadsafe_singleton_injector.h" @@ -26,7 +28,7 @@ TEST(EnvoyQuicUtilsTest, ConversionBetweenQuicAddressAndEnvoyAddress) { // Mock out socket() system call to test both V4 and V6 address conversion. 
testing::NiceMock os_sys_calls; TestThreadsafeSingletonInjector os_calls{&os_sys_calls}; - ON_CALL(os_sys_calls, socket(_, _, _)).WillByDefault(Return(Api::SysCallIntResult{1, 0})); + ON_CALL(os_sys_calls, socket(_, _, _)).WillByDefault(Return(Api::SysCallSocketResult{1, 0})); ON_CALL(os_sys_calls, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0})); quic::QuicSocketAddress quic_uninitialized_addr; diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc index cb22532e69bb..544fdf90b387 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc @@ -29,7 +29,7 @@ class EnvoyQuicWriterTest : public ::testing::Test { quic::QuicIpAddress peer_ip; peer_ip.FromString("::1"); peer_address_ = quic::QuicSocketAddress(peer_ip, /*port=*/123); - ON_CALL(os_sys_calls_, socket(_, _, _)).WillByDefault(Return(Api::SysCallIntResult{3, 0})); + ON_CALL(os_sys_calls_, socket(_, _, _)).WillByDefault(Return(Api::SysCallSocketResult{3, 0})); ON_CALL(os_sys_calls_, close(3)).WillByDefault(Return(Api::SysCallIntResult{0, 0})); } diff --git a/test/extensions/quic_listeners/quiche/integration/BUILD b/test/extensions/quic_listeners/quiche/integration/BUILD index a36af5d08dee..d799fce99d1d 100644 --- a/test/extensions/quic_listeners/quiche/integration/BUILD +++ b/test/extensions/quic_listeners/quiche/integration/BUILD @@ -13,10 +13,10 @@ envoy_cc_test( size = "medium", srcs = ["quic_http_integration_test.cc"], data = ["//test/config/integration/certs"], - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows + # TODO(envoyproxy/windows-dev): diagnose msvc-cl build test failure tags = [ + "fails_on_windows", "nofips", - "skip_on_windows", ], deps = [ "//source/extensions/filters/http/dynamo:config", @@ -31,6 +31,7 @@ envoy_cc_test( "//test/extensions/quic_listeners/quiche:quic_test_utils_for_envoy_lib", "//test/extensions/quic_listeners/quiche:test_utils_lib", "//test/integration:http_integration_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index ac41e254cda7..11cd3959edad 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -10,19 +10,22 @@ #include "test/config/utility.h" #include "test/integration/http_integration.h" #include "test/integration/ssl_utility.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/http/quic_client_push_promise_index.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/test_tools/quic_test_utils.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_client_session.h" #include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h" @@ -102,7 +105,6 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers bool use_http3 = GetParam().second == QuicVersionType::Iquic; SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); - SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); }()), conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()), @@ -111,6 +113,8 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers file_updater_1_(injected_resource_filename_1_), file_updater_2_(injected_resource_filename_2_) {} + ~QuicHttpIntegrationTest() override { cleanupUpstreamAndDownstream(); } + Network::ClientConnectionPtr makeClientConnectionWithOptions( uint32_t port, const Network::ConnectionSocket::OptionsSharedPtr& options) override { // Setting socket options is not supported. @@ -234,6 +238,48 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers timeSystem())); } + void testMultipleQuicConnections() { + concurrency_ = 8; + set_reuse_port_ = true; + initialize(); + std::vector codec_clients; + for (size_t i = 1; i <= concurrency_; ++i) { + // The BPF filter and ActiveQuicListener::destination() look at the 1st word of connection id + // in the packet header. And currently all QUIC versions support 8 bytes connection id. So + // create connections with the first 4 bytes of connection id different from each + // other so they should be evenly distributed. 
+ designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32)); + codec_clients.push_back(makeHttpConnection(lookupPort("http"))); + } + constexpr auto timeout_first = std::chrono::seconds(15); + constexpr auto timeout_subsequent = std::chrono::milliseconds(10); + if (GetParam().first == Network::Address::IpVersion::v4) { + test_server_->waitForCounterEq("listener.0.0.0.0_0.downstream_cx_total", 8u, timeout_first); + } else { + test_server_->waitForCounterEq("listener.[__]_0.downstream_cx_total", 8u, timeout_first); + } + for (size_t i = 0; i < concurrency_; ++i) { + if (GetParam().first == Network::Address::IpVersion::v4) { + test_server_->waitForGaugeEq( + fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_active", i), 1u, + timeout_subsequent); + test_server_->waitForCounterEq( + fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_total", i), 1u, + timeout_subsequent); + } else { + test_server_->waitForGaugeEq( + fmt::format("listener.[__]_0.worker_{}.downstream_cx_active", i), 1u, + timeout_subsequent); + test_server_->waitForCounterEq( + fmt::format("listener.[__]_0.worker_{}.downstream_cx_total", i), 1u, + timeout_subsequent); + } + } + for (size_t i = 0; i < concurrency_; ++i) { + codec_clients[i]->close(); + } + } + protected: quic::QuicConfig quic_config_; quic::QuicServerId server_id_{"lyft.com", 443, false}; @@ -345,100 +391,15 @@ TEST_P(QuicHttpIntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) { 1); } -TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersWithBPF) { -#if defined(SO_ATTACH_REUSEPORT_CBPF) && defined(__linux__) - concurrency_ = 8; - set_reuse_port_ = true; - initialize(); - std::vector codec_clients; - for (size_t i = 1; i <= concurrency_; ++i) { - // The BPF filter looks at the 1st word of connection id in the packet - // header. And currently all QUIC versions support 8 bytes connection id. So - // create connections with the first 4 bytes of connection id different from each - // other so they should be evenly distributed. 
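The new testMultipleQuicConnections() helper above leans on the connection-id routing described in its comment: both the kernel BPF filter and ActiveQuicListener::destination() pick a worker from the first word of the destination connection id, so the test hands each client a connection id whose upper four bytes differ and then expects exactly one connection per worker. A condensed sketch of that flow (IPv4 stat names only; fixture member names are taken from the test above, and the codec client element type of IntegrationCodecClientPtr is an assumption):

    concurrency_ = 8;
    set_reuse_port_ = true;
    initialize();

    std::vector<IntegrationCodecClientPtr> codec_clients;
    for (size_t i = 1; i <= concurrency_; ++i) {
      // Vary the high 32 bits so the first word of each 8-byte connection id
      // differs and the connections spread evenly across workers.
      designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32));
      codec_clients.push_back(makeHttpConnection(lookupPort("http")));
    }
    for (size_t i = 0; i < concurrency_; ++i) {
      test_server_->waitForCounterEq(
          fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_total", i), 1u);
    }
    for (auto& client : codec_clients) {
      client->close();
    }

The same helper backs both remaining tests: MultipleQuicConnectionsWithBPF uses the default kernel BPF routing, while MultipleQuicConnectionsNoBPF (below) disables it through the envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing runtime override and exercises the userspace fallback.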
- designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32)); - codec_clients.push_back(makeHttpConnection(lookupPort("http"))); - } - if (GetParam().first == Network::Address::IpVersion::v4) { - test_server_->waitForCounterEq("listener.0.0.0.0_0.downstream_cx_total", 8u); - } else { - test_server_->waitForCounterEq("listener.[__]_0.downstream_cx_total", 8u); - } - for (size_t i = 0; i < concurrency_; ++i) { - if (GetParam().first == Network::Address::IpVersion::v4) { - test_server_->waitForGaugeEq( - fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_active", i), 1u); - test_server_->waitForCounterEq( - fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_total", i), 1u); - } else { - test_server_->waitForGaugeEq(fmt::format("listener.[__]_0.worker_{}.downstream_cx_active", i), - 1u); - test_server_->waitForCounterEq( - fmt::format("listener.[__]_0.worker_{}.downstream_cx_total", i), 1u); - } - } - for (size_t i = 0; i < concurrency_; ++i) { - codec_clients[i]->close(); - } -#endif -} +TEST_P(QuicHttpIntegrationTest, MultipleQuicConnectionsWithBPF) { testMultipleQuicConnections(); } -#ifndef __APPLE__ -TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersNoBPF) { - concurrency_ = 8; - set_reuse_port_ = true; - initialize(); -#ifdef SO_ATTACH_REUSEPORT_CBPF -#define SO_ATTACH_REUSEPORT_CBPF_TMP SO_ATTACH_REUSEPORT_CBPF -#undef SO_ATTACH_REUSEPORT_CBPF -#endif - std::vector codec_clients; - for (size_t i = 1; i <= concurrency_; ++i) { - // The BPF filter looks at the 1st byte of connection id in the packet - // header. And currently all QUIC versions support 8 bytes connection id. So - // create connections with the first 4 bytes of connection id different from each - // other so they should be evenly distributed. - designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32)); - codec_clients.push_back(makeHttpConnection(lookupPort("http"))); - } - if (GetParam().first == Network::Address::IpVersion::v4) { - test_server_->waitForCounterEq("listener.0.0.0.0_0.downstream_cx_total", 8u); - } else { - test_server_->waitForCounterEq("listener.[__]_0.downstream_cx_total", 8u); - } - // Even without BPF support, these connections should more or less distributed - // across different workers. 
- for (size_t i = 0; i < concurrency_; ++i) { - if (GetParam().first == Network::Address::IpVersion::v4) { - EXPECT_LT( - test_server_->gauge(fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_active", i)) - ->value(), - 8u); - EXPECT_LT( - test_server_->counter(fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_total", i)) - ->value(), - 8u); - } else { - EXPECT_LT( - test_server_->gauge(fmt::format("listener.[__]_0.worker_{}.downstream_cx_active", i)) - ->value(), - 8u); - EXPECT_LT( - test_server_->counter(fmt::format("listener.[__]_0.worker_{}.downstream_cx_total", i)) - ->value(), - 8u); - } - } - for (size_t i = 0; i < concurrency_; ++i) { - codec_clients[i]->close(); - } -#ifdef SO_ATTACH_REUSEPORT_CBPF_TMP -#define SO_ATTACH_REUSEPORT_CBPF SO_ATTACH_REUSEPORT_CBPF_TMP -#endif +TEST_P(QuicHttpIntegrationTest, MultipleQuicConnectionsNoBPF) { + config_helper_.addRuntimeOverride( + "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing", "false"); + + testMultipleQuicConnections(); } -#endif -#if defined(SO_ATTACH_REUSEPORT_CBPF) && defined(__linux__) TEST_P(QuicHttpIntegrationTest, ConnectionMigration) { concurrency_ = 2; set_reuse_port_ = true; @@ -476,7 +437,6 @@ TEST_P(QuicHttpIntegrationTest, ConnectionMigration) { EXPECT_EQ(1024u * 2, upstream_request_->bodyLength()); cleanupUpstreamAndDownstream(); } -#endif TEST_P(QuicHttpIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { initialize(); diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index 9ccaf9cdedc2..420e812b85a7 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -37,7 +37,10 @@ envoy_cc_test( "//bazel:linux": ["quic_platform_test.cc"], "//conditions:default": [], }), - copts = ["-Wno-unused-parameter"], + copts = select({ + "//bazel:windows_x86_64": [], + "//conditions:default": ["-Wno-unused-parameter"], + }), data = ["//test/extensions/transport_sockets/tls/test_data:certs"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], diff --git a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc index bbae62b98598..da1d7b1aeae5 100644 --- a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc +++ b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc @@ -58,7 +58,6 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { EXPECT_CALL(os_sys_calls_, sendmsg(fd, _, 0)).WillOnce(Return(Api::SysCallSizeResult{5u, 0})); wrapper_->sendmsg(&slice, 1, 0, /*self_ip=*/nullptr, *addr); - EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0})); wrapper_->domain(); EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)) diff --git a/test/extensions/quic_listeners/quiche/test_proof_source.h b/test/extensions/quic_listeners/quiche/test_proof_source.h index 8b1baf920d69..a249b43144fd 100644 --- a/test/extensions/quic_listeners/quiche/test_proof_source.h +++ b/test/extensions/quic_listeners/quiche/test_proof_source.h @@ -1,15 +1,14 @@ -#ifdef __GNUC__ +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). 
#pragma GCC diagnostic ignored "-Winvalid-offsetof" #pragma GCC diagnostic ignored "-Wtype-limits" +#endif + #include "quiche/quic/test_tools/test_certificates.h" +#if defined(__GNUC__) #pragma GCC diagnostic pop -#else -#include "quiche/quic/test_tools/test_certificates.h" #endif #include diff --git a/test/extensions/quic_listeners/quiche/test_utils.h b/test/extensions/quic_listeners/quiche/test_utils.h index b9cc942af840..102f7608e50b 100644 --- a/test/extensions/quic_listeners/quiche/test_utils.h +++ b/test/extensions/quic_listeners/quiche/test_utils.h @@ -1,10 +1,10 @@ #include "extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h" +#if defined(__GNUC__) #pragma GCC diagnostic push -// QUICHE allows unused parameters. #pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif #include "quiche/quic/core/http/quic_spdy_session.h" #include "quiche/quic/core/http/quic_spdy_client_session.h" @@ -13,7 +13,10 @@ #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/test_tools/crypto_test_utils.h" #include "quiche/quic/test_tools/quic_config_peer.h" + +#if defined(__GNUC__) #pragma GCC diagnostic pop +#endif #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "test/test_common/environment.h" diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index 37494edd1f9d..ced09ffb6a45 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -70,7 +70,7 @@ TEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) { // Do the flush which should have somewhere to write now. sink.flush(snapshot); Buffer::OwnedImpl receive_buffer; - receive_buffer.read(sock.ioHandle(), 32); + sock.ioHandle().read(receive_buffer, 32); EXPECT_EQ("envoy.test_counter:1|c", receive_buffer.toString()); } #endif diff --git a/test/extensions/stats_sinks/hystrix/BUILD b/test/extensions/stats_sinks/hystrix/BUILD index 7137983d1810..667757fafe3e 100644 --- a/test/extensions/stats_sinks/hystrix/BUILD +++ b/test/extensions/stats_sinks/hystrix/BUILD @@ -31,7 +31,10 @@ envoy_extension_cc_test( name = "hystrix_test", srcs = ["hystrix_test.cc"], extension_name = "envoy.stat_sinks.hystrix", + # TODO(envoyproxy/windows-dev): Diagnose msvc-cl fastbuild test failure + tags = ["fails_on_windows"], deps = [ + "//source/common/json:json_loader_lib", "//source/common/stats:stats_lib", "//source/extensions/stat_sinks/hystrix:hystrix_lib", "//test/mocks/server:admin_mocks", diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index c45faec28341..7d480908a3a5 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -2,6 +2,8 @@ #include #include +#include "common/json/json_loader.h" + #include "extensions/stat_sinks/hystrix/hystrix.h" #include "test/mocks/network/mocks.h" diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index 259425222a53..dcdf47945e94 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -26,8 +26,7 @@ class MetricsServiceIntegrationTest : public 
Grpc::VersionedGrpcClientIntegratio void createUpstreams() override { HttpIntegrationTest::createUpstreams(); - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } void initialize() override { diff --git a/test/extensions/stats_sinks/wasm/BUILD b/test/extensions/stats_sinks/wasm/BUILD new file mode 100644 index 000000000000..6135c8cfcf0a --- /dev/null +++ b/test/extensions/stats_sinks/wasm/BUILD @@ -0,0 +1,48 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//bazel:envoy_select.bzl", + "envoy_select_wasm", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/stats_sinks/wasm/test_data:test_context_cpp.wasm", + ]), + extension_name = "envoy.stat_sinks.wasm", + deps = [ + "//source/extensions/stat_sinks/wasm:config", + "//test/extensions/stats_sinks/wasm/test_data:test_context_cpp_plugin", + "//test/mocks/server:server_mocks", + "@envoy_api//envoy/extensions/stat_sinks/wasm/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "wasm_stat_sink_test", + srcs = ["wasm_stat_sink_test.cc"], + data = envoy_select_wasm([ + "//test/extensions/stats_sinks/wasm/test_data:test_context_cpp.wasm", + ]), + extension_name = "envoy.stat_sinks.wasm", + external_deps = ["abseil_optional"], + deps = [ + "//source/common/stats:stats_lib", + "//source/extensions/common/wasm:wasm_lib", + "//test/extensions/stats_sinks/wasm/test_data:test_context_cpp_plugin", + "//test/mocks/stats:stats_mocks", + "//test/test_common:wasm_lib", + ], +) diff --git a/test/extensions/stats_sinks/wasm/config_test.cc b/test/extensions/stats_sinks/wasm/config_test.cc new file mode 100644 index 000000000000..1e115dd2f946 --- /dev/null +++ b/test/extensions/stats_sinks/wasm/config_test.cc @@ -0,0 +1,106 @@ +#include "envoy/extensions/stat_sinks/wasm/v3/wasm.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "common/protobuf/protobuf.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/stat_sinks/wasm/config.h" +#include "extensions/stat_sinks/wasm/wasm_stat_sink_impl.h" +#include "extensions/stat_sinks/well_known_names.h" + +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace StatSinks { +namespace Wasm { + +class WasmStatSinkConfigTest : public testing::TestWithParam { +protected: + WasmStatSinkConfigTest() { + config_.mutable_config()->mutable_vm_config()->set_runtime( + absl::StrCat("envoy.wasm.runtime.", GetParam())); + if (GetParam() != "null") { + config_.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( + TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/stats_sinks/wasm/test_data/test_context_cpp.wasm")); + } else { + config_.mutable_config() + ->mutable_vm_config() + ->mutable_code() + ->mutable_local() + ->set_inline_bytes("CommonWasmTestContextCpp"); + } + config_.mutable_config()->set_name("test"); + } + + void initializeWithConfig(const envoy::extensions::stat_sinks::wasm::v3::Wasm& config) { + auto factory = Registry::FactoryRegistry::getFactory( + 
StatsSinkNames::get().Wasm); + ASSERT_NE(factory, nullptr); + api_ = Api::createApiForTest(stats_store_); + EXPECT_CALL(context_, api()).WillRepeatedly(testing::ReturnRef(*api_)); + EXPECT_CALL(context_, initManager()).WillRepeatedly(testing::ReturnRef(init_manager_)); + EXPECT_CALL(context_, lifecycleNotifier()) + .WillRepeatedly(testing::ReturnRef(lifecycle_notifier_)); + sink_ = factory->createStatsSink(config, context_); + EXPECT_CALL(init_watcher_, ready()); + init_manager_.initialize(init_watcher_); + } + + envoy::extensions::stat_sinks::wasm::v3::Wasm config_; + testing::NiceMock context_; + testing::NiceMock lifecycle_notifier_; + Init::ExpectableWatcherImpl init_watcher_; + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + Init::ManagerImpl init_manager_{"init_manager"}; + Stats::SinkPtr sink_; +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8", +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm", +#endif + "null"); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmStatSinkConfigTest, testing_values); + +TEST_P(WasmStatSinkConfigTest, CreateWasmFromEmpty) { + envoy::extensions::stat_sinks::wasm::v3::Wasm config; + EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config), Extensions::Common::Wasm::WasmException, + "Unable to create Wasm Stat Sink "); +} + +TEST_P(WasmStatSinkConfigTest, CreateWasmFailOpen) { + envoy::extensions::stat_sinks::wasm::v3::Wasm config; + config.mutable_config()->set_fail_open(true); + EXPECT_THROW_WITH_MESSAGE(initializeWithConfig(config), Extensions::Common::Wasm::WasmException, + "Unable to create Wasm Stat Sink "); +} + +TEST_P(WasmStatSinkConfigTest, CreateWasmFromWASM) { + initializeWithConfig(config_); + + EXPECT_NE(sink_, nullptr); + NiceMock snapshot; + sink_->flush(snapshot); + NiceMock histogram; + sink_->onHistogramComplete(histogram, 0); +} + +} // namespace Wasm +} // namespace StatSinks +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/stats_sinks/wasm/test_data/BUILD b/test/extensions/stats_sinks/wasm/test_data/BUILD new file mode 100644 index 000000000000..d3458434aec8 --- /dev/null +++ b/test/extensions/stats_sinks/wasm/test_data/BUILD @@ -0,0 +1,33 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) +load("//bazel/wasm:wasm.bzl", "envoy_wasm_cc_binary") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "test_context_cpp_plugin", + srcs = [ + "test_context_cpp.cc", + "test_context_cpp_null_plugin.cc", + ], + copts = ["-DNULL_PLUGIN=1"], + deps = [ + "//source/extensions/common/wasm:wasm_hdr", + "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/common/wasm:well_known_names", + "//source/extensions/common/wasm/ext:envoy_null_plugin", + ], +) + +envoy_wasm_cc_binary( + name = "test_context_cpp.wasm", + srcs = ["test_context_cpp.cc"], + deps = [ + "//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_lib", + ], +) diff --git a/test/extensions/stats_sinks/wasm/test_data/test_context_cpp.cc b/test/extensions/stats_sinks/wasm/test_data/test_context_cpp.cc new file mode 100644 index 000000000000..1491d1512464 --- /dev/null +++ b/test/extensions/stats_sinks/wasm/test_data/test_context_cpp.cc @@ -0,0 +1,48 @@ +// NOLINT(namespace-envoy) +#include +#include +#include + +#ifndef NULL_PLUGIN +#include "proxy_wasm_intrinsics.h" +#include 
"source/extensions/common/wasm/ext/envoy_proxy_wasm_api.h" +#else +#include "extensions/common/wasm/ext/envoy_null_plugin.h" +#endif + +START_WASM_PLUGIN(CommonWasmTestContextCpp) + +class TestContext : public EnvoyContext { +public: + explicit TestContext(uint32_t id, RootContext* root) : EnvoyContext(id, root) {} +}; + +class TestRootContext : public EnvoyRootContext { +public: + explicit TestRootContext(uint32_t id, std::string_view root_id) : EnvoyRootContext(id, root_id) {} + + void onStatsUpdate(uint32_t result_size) override; + bool onDone() override; +}; + +static RegisterContextFactory register_TestContext(CONTEXT_FACTORY(TestContext), + ROOT_FACTORY(TestRootContext)); + +void TestRootContext::onStatsUpdate(uint32_t result_size) { + logWarn("TestRootContext::onStat"); + auto stats_buffer = getBufferBytes(WasmBufferType::CallData, 0, result_size); + auto stats = parseStatResults(stats_buffer->view()); + for (auto& e : stats.counters) { + logInfo("TestRootContext::onStat " + std::string(e.name) + ":" + std::to_string(e.delta)); + } + for (auto& e : stats.gauges) { + logInfo("TestRootContext::onStat " + std::string(e.name) + ":" + std::to_string(e.value)); + } +} + +bool TestRootContext::onDone() { + logWarn("TestRootContext::onDone " + std::to_string(id())); + return true; +} + +END_WASM_PLUGIN diff --git a/test/extensions/stats_sinks/wasm/test_data/test_context_cpp_null_plugin.cc b/test/extensions/stats_sinks/wasm/test_data/test_context_cpp_null_plugin.cc new file mode 100644 index 000000000000..88e3a18943f0 --- /dev/null +++ b/test/extensions/stats_sinks/wasm/test_data/test_context_cpp_null_plugin.cc @@ -0,0 +1,16 @@ +// NOLINT(namespace-envoy) +#include "include/proxy-wasm/null_plugin.h" + +namespace proxy_wasm { +namespace null_plugin { +namespace CommonWasmTestContextCpp { +NullPluginRegistry* context_registry_; +} // namespace CommonWasmTestContextCpp + +RegisterNullVmPluginFactory + register_common_wasm_test_context_cpp_plugin("CommonWasmTestContextCpp", []() { + return std::make_unique(CommonWasmTestContextCpp::context_registry_); + }); + +} // namespace null_plugin +} // namespace proxy_wasm diff --git a/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc b/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc new file mode 100644 index 000000000000..acd4df85dbde --- /dev/null +++ b/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc @@ -0,0 +1,129 @@ +#include "envoy/server/lifecycle_notifier.h" + +#include "extensions/common/wasm/wasm.h" + +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/wasm_base.h" + +#include "absl/types/optional.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Eq; + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +class TestContext : public ::Envoy::Extensions::Common::Wasm::Context { +public: + using ::Envoy::Extensions::Common::Wasm::Context::Context; + ~TestContext() override = default; + using ::Envoy::Extensions::Common::Wasm::Context::log; + proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override { + std::cerr << std::string(message) << "\n"; + log_(static_cast(level), message); + Extensions::Common::Wasm::Context::log(static_cast(level), message); + return proxy_wasm::WasmResult::Ok; + } + MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)); +}; + +class WasmCommonContextTest + : public Common::Wasm::WasmTestBase> { +public: + WasmCommonContextTest() = default; + + void setup(const std::string& code, 
std::string root_id = "") { + setupBase( + GetParam(), code, + [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { + return new TestContext(wasm, plugin); + }, + root_id); + } + void setupContext() { + context_ = std::make_unique(wasm_->wasm().get(), root_context_->id(), plugin_); + context_->onCreate(); + } + + TestContext& rootContext() { return *static_cast(root_context_); } + TestContext& context() { return *context_; } + + std::unique_ptr context_; +}; + +// NB: this is required by VC++ which can not handle the use of macros in the macro definitions +// used by INSTANTIATE_TEST_SUITE_P. +auto testing_values = testing::Values( +#if defined(ENVOY_WASM_V8) + "v8", +#endif +#if defined(ENVOY_WASM_WAVM) + "wavm", +#endif + "null"); +INSTANTIATE_TEST_SUITE_P(Runtimes, WasmCommonContextTest, testing_values); + +TEST_P(WasmCommonContextTest, OnStat) { + std::string code; + NiceMock snapshot_; + if (GetParam() != "null") { + code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(absl::StrCat( + "{{ test_rundir }}/test/extensions/stats_sinks/wasm/test_data/test_context_cpp.wasm"))); + } else { + // The name of the Null VM plugin. + code = "CommonWasmTestContextCpp"; + } + EXPECT_FALSE(code.empty()); + setup(code); + setupContext(); + + EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq("TestRootContext::onStat"))); + EXPECT_CALL(rootContext(), + log_(spdlog::level::info, Eq("TestRootContext::onStat upstream_rq_2xx:1"))); + + EXPECT_CALL(rootContext(), + log_(spdlog::level::info, Eq("TestRootContext::onStat upstream_rq_5xx:2"))); + + EXPECT_CALL(rootContext(), + log_(spdlog::level::info, Eq("TestRootContext::onStat membership_total:3"))); + + EXPECT_CALL(rootContext(), + log_(spdlog::level::info, Eq("TestRootContext::onStat duration_total:4"))); + + EXPECT_CALL(rootContext(), log_(spdlog::level::warn, Eq("TestRootContext::onDone 1"))); + + NiceMock success_counter; + success_counter.name_ = "upstream_rq_2xx"; + success_counter.latch_ = 1; + success_counter.used_ = true; + + NiceMock error_5xx_counter; + error_5xx_counter.name_ = "upstream_rq_5xx"; + error_5xx_counter.latch_ = 1; + error_5xx_counter.used_ = true; + + snapshot_.counters_.push_back({1, success_counter}); + snapshot_.counters_.push_back({2, error_5xx_counter}); + + NiceMock membership_total; + membership_total.name_ = "membership_total"; + membership_total.value_ = 3; + membership_total.used_ = true; + snapshot_.gauges_.push_back(membership_total); + + NiceMock duration_total; + duration_total.name_ = "duration_total"; + duration_total.value_ = 4; + duration_total.used_ = true; + snapshot_.gauges_.push_back(duration_total); + + rootContext().onStatsUpdate(snapshot_); +} + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc index 70f46537ba61..7fe8691fc25b 100644 --- a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc +++ b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc @@ -175,7 +175,6 @@ TEST_F(DatadogDriverTest, FlushSpansTimer) { Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - msg->body() = std::make_unique(""); callback->onSuccess(request, std::move(msg)); @@ -185,6 +184,47 @@ TEST_F(DatadogDriverTest, FlushSpansTimer) { EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_failed").value()); } 
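The Wasm stat sink test above drives the sink logic with hand-built mock metrics rather than a live stats store: counters carry a latched delta and gauges a current value, and the assembled snapshot is handed to the root context's onStatsUpdate(). A condensed sketch of building such a snapshot (the mock type names Stats::MockMetricSnapshot, Stats::MockCounter, and Stats::MockGauge are assumed from Envoy's stats mocks and mirror the usage above):

    NiceMock<Stats::MockMetricSnapshot> snapshot;

    NiceMock<Stats::MockCounter> success_counter;
    success_counter.name_ = "upstream_rq_2xx";
    success_counter.latch_ = 1;  // delta accumulated since the previous flush
    success_counter.used_ = true;
    snapshot.counters_.push_back({1, success_counter});

    NiceMock<Stats::MockGauge> membership_total;
    membership_total.name_ = "membership_total";
    membership_total.value_ = 3;
    membership_total.used_ = true;
    snapshot.gauges_.push_back(membership_total);

    // The snapshot is then fed to the code under test, e.g.
    // rootContext().onStatsUpdate(snapshot) in the context test above, or
    // sink_->flush(snapshot) in the config test.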
+TEST_F(DatadogDriverTest, NoBody) { + setupValidDriver(); + + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback; + const absl::optional timeout(std::chrono::seconds(1)); + EXPECT_CALL(cm_.async_client_, + send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout))) + .WillOnce( + Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& callbacks, + const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { + callback = &callbacks; + + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/msgpack", message->headers().getContentTypeValue()); + + return &request; + })); + + Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + span->finishSpan(); + + // Timer should be re-enabled. + EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _)); + + timer_->invokeCallback(); + + EXPECT_EQ(1U, stats_.counter("tracing.datadog.timer_flushed").value()); + EXPECT_EQ(1U, stats_.counter("tracing.datadog.traces_sent").value()); + + Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-length", "0"}}})); + callback->onSuccess(request, std::move(msg)); + + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_skipped_no_cluster").value()); + EXPECT_EQ(1U, stats_.counter("tracing.datadog.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_failed").value()); +} + TEST_F(DatadogDriverTest, SkipReportIfCollectorClusterHasBeenRemoved) { Upstream::ClusterUpdateCallbacks* cluster_update_callbacks; EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_)) diff --git a/test/extensions/tracers/lightstep/BUILD b/test/extensions/tracers/lightstep/BUILD index 078d5d43825a..d0d866e61c4f 100644 --- a/test/extensions/tracers/lightstep/BUILD +++ b/test/extensions/tracers/lightstep/BUILD @@ -25,7 +25,6 @@ envoy_extension_cc_test( "//source/common/http:headers_lib", "//source/common/http:message_lib", "//source/common/runtime:runtime_lib", - "//source/common/stats:fake_symbol_table_lib", "//source/extensions/tracers/lightstep:lightstep_tracer_lib", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index 2e2999de544a..84802031580b 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -11,7 +11,7 @@ #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/runtime/runtime_impl.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/lightstep/lightstep_tracer_impl.h" @@ -55,7 +55,7 @@ static Http::ResponseMessagePtr makeSuccessResponse() { std::unique_ptr collector_response = lightstep::Transporter::MakeCollectorResponse(); EXPECT_NE(collector_response, nullptr); - msg->body() = Grpc::Common::serializeToGrpcFrame(*collector_response); + msg->body().add(*Grpc::Common::serializeToGrpcFrame(*collector_response)); return msg; } diff --git a/test/extensions/tracers/opencensus/tracer_test.cc 
b/test/extensions/tracers/opencensus/tracer_test.cc index 88ed7f2f5983..3a5e09fbf58d 100644 --- a/test/extensions/tracers/opencensus/tracer_test.cc +++ b/test/extensions/tracers/opencensus/tracer_test.cc @@ -174,10 +174,10 @@ MATCHER_P2(ContainHeader, header, expected_value, "contains the header " + PrintToString(header) + " with value " + PrintToString(expected_value)) { const auto found_value = arg.get(Http::LowerCaseString(header)); - if (found_value == nullptr) { + if (found_value.empty()) { return false; } - return found_value->value().getStringView() == expected_value; + return found_value[0]->value().getStringView() == expected_value; } // Given incoming headers, test that trace context propagation works and generates all the expected diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 58b6a2bfc32b..526cab8e9511 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -89,7 +89,7 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_aws_key_value)}); Tracer tracer{expected_span_name, expected_origin_name, aws_metadata_, - std::move(broker_), server_.timeSource(), server_.random()}; + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; auto span = tracer.startSpan(expected_operation_name, server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag("http.method", expected_http_method); @@ -104,14 +104,14 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { TEST_F(XRayTracerTest, NonSampledSpansNotSerialized) { Tracer tracer{"" /*span name*/, "" /*origin*/, aws_metadata_, - std::move(broker_), server_.timeSource(), server_.random()}; + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; auto span = tracer.createNonSampledSpan(); span->finishSpan(); } TEST_F(XRayTracerTest, BaggageNotImplemented) { Tracer tracer{"" /*span name*/, "" /*origin*/, aws_metadata_, - std::move(broker_), server_.timeSource(), server_.random()}; + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; auto span = tracer.createNonSampledSpan(); span->setBaggage("baggage_key", "baggage_value"); span->finishSpan(); @@ -125,10 +125,11 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { constexpr auto expected_span_name = "Service 1"; constexpr auto expected_operation_name = "Create"; const auto& broker = *broker_; - Tracer tracer{expected_span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), - server_.random()}; + Tracer tracer{expected_span_name, "", + aws_metadata_, std::move(broker_), + server_.timeSource(), server_.api().randomGenerator()}; // Span id taken from random generator - EXPECT_CALL(server_.random_, random()).WillOnce(Return(999)); + EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(999)); auto parent_span = tracer.startSpan(expected_operation_name, server_.timeSource().systemTime(), absl::nullopt /*headers*/); @@ -147,7 +148,7 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { EXPECT_CALL(broker, send(_)).WillOnce(Invoke(on_send)); // Span id taken from random generator - EXPECT_CALL(server_.random_, random()).WillOnce(Return(262626262626)); + EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(262626262626)); auto child = parent_span->spawnChild(config, expected_operation_name, server_.timeSource().systemTime()); child->finishSpan(); @@ -160,8 +161,12 @@ 
TEST_F(XRayTracerTest, UseExistingHeaderInformation) { constexpr auto span_name = "my span"; constexpr auto operation_name = "my operation"; - Tracer tracer{span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), - server_.random()}; + Tracer tracer{span_name, + "", + aws_metadata_, + std::move(broker_), + server_.timeSource(), + server_.api().randomGenerator()}; auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header); const XRay::Span* xray_span = static_cast(span.get()); @@ -173,37 +178,49 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { constexpr auto span_name = "my span"; constexpr auto operation_name = "my operation"; - Tracer tracer{span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), - server_.random()}; + Tracer tracer{span_name, + "", + aws_metadata_, + std::move(broker_), + server_.timeSource(), + server_.api().randomGenerator()}; auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), absl::nullopt /*headers*/); Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); - auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); - ASSERT_NE(header, nullptr); - ASSERT_NE(header->value().getStringView().find("Root="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("Parent="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("Sampled=1"), absl::string_view::npos); + auto header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); + ASSERT_FALSE(header.empty()); + ASSERT_NE(header[0]->value().getStringView().find("Root="), absl::string_view::npos); + ASSERT_NE(header[0]->value().getStringView().find("Parent="), absl::string_view::npos); + ASSERT_NE(header[0]->value().getStringView().find("Sampled=1"), absl::string_view::npos); } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { constexpr auto span_name = "my span"; - Tracer tracer{span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), - server_.random()}; + Tracer tracer{span_name, + "", + aws_metadata_, + std::move(broker_), + server_.timeSource(), + server_.api().randomGenerator()}; auto span = tracer.createNonSampledSpan(); Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); - auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); - ASSERT_NE(header, nullptr); - ASSERT_NE(header->value().getStringView().find("Root="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("Parent="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("Sampled=0"), absl::string_view::npos); + auto header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); + ASSERT_FALSE(header.empty()); + ASSERT_NE(header[0]->value().getStringView().find("Root="), absl::string_view::npos); + ASSERT_NE(header[0]->value().getStringView().find("Parent="), absl::string_view::npos); + ASSERT_NE(header[0]->value().getStringView().find("Sampled=0"), absl::string_view::npos); } TEST_F(XRayTracerTest, TraceIDFormatTest) { constexpr auto span_name = "my span"; - Tracer tracer{span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), - server_.random()}; + Tracer tracer{span_name, + "", + aws_metadata_, + std::move(broker_), + server_.timeSource(), + server_.api().randomGenerator()}; auto span = tracer.createNonSampledSpan(); // startSpan and createNonSampledSpan use the same // logic to create a trace ID 
XRay::Span* xray_span = span.get(); @@ -227,7 +244,7 @@ TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { const std::string daemon_endpoint = xray_fake_daemon.localAddress()->asString(); Tracer tracer{"my_segment", "origin", aws_metadata, std::make_unique(daemon_endpoint), - server.timeSource(), server.random()}; + server.timeSource(), server.api().randomGenerator()}; auto span = tracer.startSpan("ingress" /*operation name*/, server.timeSource().systemTime(), absl::nullopt /*headers*/); diff --git a/test/extensions/tracers/zipkin/tracer_test.cc b/test/extensions/tracers/zipkin/tracer_test.cc index 549437dc0bb4..0aa869154e4f 100644 --- a/test/extensions/tracers/zipkin/tracer_test.cc +++ b/test/extensions/tracers/zipkin/tracer_test.cc @@ -395,15 +395,27 @@ TEST_F(ZipkinTracerTest, SharedSpanContext) { const SystemTime timestamp = time_system_.systemTime(); NiceMock config; - ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); + ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); // Create parent span SpanPtr parent_span = tracer.startSpan(config, "parent_span", timestamp); SpanContext parent_context(*parent_span); + // An CS annotation must have been added + EXPECT_EQ(1ULL, parent_span->annotations().size()); + Annotation ann = parent_span->annotations()[0]; + EXPECT_EQ(CLIENT_SEND, ann.value()); + + ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); + SpanPtr child_span = tracer.startSpan(config, "child_span", timestamp, parent_context); EXPECT_EQ(parent_span->id(), child_span->id()); + + // An SR annotation must have been added + EXPECT_EQ(1ULL, child_span->annotations().size()); + ann = child_span->annotations()[0]; + EXPECT_EQ(SERVER_RECV, ann.value()); } // This test checks that when configured to NOT use shared span context, a child span @@ -419,15 +431,27 @@ TEST_F(ZipkinTracerTest, NotSharedSpanContext) { const SystemTime timestamp = time_system_.systemTime(); NiceMock config; - ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); + ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); // Create parent span SpanPtr parent_span = tracer.startSpan(config, "parent_span", timestamp); SpanContext parent_context(*parent_span); + // An CS annotation must have been added + EXPECT_EQ(1ULL, parent_span->annotations().size()); + Annotation ann = parent_span->annotations()[0]; + EXPECT_EQ(CLIENT_SEND, ann.value()); + + ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); + SpanPtr child_span = tracer.startSpan(config, "child_span", timestamp, parent_context); EXPECT_EQ(parent_span->id(), child_span->parentId()); + + // An SR annotation must have been added + EXPECT_EQ(1ULL, child_span->annotations().size()); + ann = child_span->annotations()[0]; + EXPECT_EQ(SERVER_RECV, ann.value()); } } // namespace diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index eb308bc76ef6..5e605999b671 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -475,9 +475,9 @@ TEST_F(ZipkinDriverTest, FlushSpansTimer) { TEST_F(ZipkinDriverTest, NoB3ContextSampledTrue) { setupValidDriver("HTTP_JSON_V1"); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); - EXPECT_EQ(nullptr, 
request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED)); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID).empty()); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID).empty()); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED).empty()); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); @@ -489,9 +489,9 @@ TEST_F(ZipkinDriverTest, NoB3ContextSampledTrue) { TEST_F(ZipkinDriverTest, NoB3ContextSampledFalse) { setupValidDriver("HTTP_JSON_V1"); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED)); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID).empty()); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID).empty()); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED).empty()); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, false}); @@ -507,7 +507,7 @@ TEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleTrue) { Hex::uint64ToHex(generateRandom64())); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, Hex::uint64ToHex(generateRandom64())); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED)); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED).empty()); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); @@ -523,7 +523,7 @@ TEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleFalse) { Hex::uint64ToHex(generateRandom64())); request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, Hex::uint64ToHex(generateRandom64())); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED)); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED).empty()); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, false}); @@ -535,8 +535,8 @@ TEST_F(ZipkinDriverTest, PropagateB3NoSampleDecisionSampleFalse) { TEST_F(ZipkinDriverTest, PropagateB3NotSampled) { setupValidDriver("HTTP_JSON_V1"); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID).empty()); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID).empty()); // Only context header set is B3 sampled to indicate trace should not be sampled request_headers_.addReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, NOT_SAMPLED); @@ -550,14 +550,14 @@ TEST_F(ZipkinDriverTest, PropagateB3NotSampled) { auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED); // Check B3 sampled flag is set to not sample - EXPECT_EQ(NOT_SAMPLED, sampled_entry->value().getStringView()); + EXPECT_EQ(NOT_SAMPLED, sampled_entry[0]->value().getStringView()); } TEST_F(ZipkinDriverTest, PropagateB3NotSampledWithFalse) { 
setupValidDriver("HTTP_JSON_V1"); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID).empty()); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID).empty()); // Only context header set is B3 sampled to indicate trace should not be sampled (using legacy // 'false' value) @@ -572,14 +572,14 @@ TEST_F(ZipkinDriverTest, PropagateB3NotSampledWithFalse) { auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED); // Check B3 sampled flag is set to not sample - EXPECT_EQ(NOT_SAMPLED, sampled_entry->value().getStringView()); + EXPECT_EQ(NOT_SAMPLED, sampled_entry[0]->value().getStringView()); } TEST_F(ZipkinDriverTest, PropagateB3SampledWithTrue) { setupValidDriver("HTTP_JSON_V1"); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID)); - EXPECT_EQ(nullptr, request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID)); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_SPAN_ID).empty()); + EXPECT_TRUE(request_headers_.get(ZipkinCoreConstants::get().X_B3_TRACE_ID).empty()); // Only context header set is B3 sampled to indicate trace should be sampled (using legacy // 'true' value) @@ -594,7 +594,7 @@ TEST_F(ZipkinDriverTest, PropagateB3SampledWithTrue) { auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED); // Check B3 sampled flag is set to sample - EXPECT_EQ(SAMPLED, sampled_entry->value().getStringView()); + EXPECT_EQ(SAMPLED, sampled_entry[0]->value().getStringView()); } TEST_F(ZipkinDriverTest, PropagateB3SampleFalse) { @@ -829,7 +829,7 @@ TEST_F(ZipkinDriverTest, ExplicitlySetSampledFalse) { auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED); // Check B3 sampled flag is set to not sample - EXPECT_EQ(NOT_SAMPLED, sampled_entry->value().getStringView()); + EXPECT_EQ(NOT_SAMPLED, sampled_entry[0]->value().getStringView()); } TEST_F(ZipkinDriverTest, ExplicitlySetSampledTrue) { @@ -846,7 +846,7 @@ TEST_F(ZipkinDriverTest, ExplicitlySetSampledTrue) { auto sampled_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_SAMPLED); // Check B3 sampled flag is set to sample - EXPECT_EQ(SAMPLED, sampled_entry->value().getStringView()); + EXPECT_EQ(SAMPLED, sampled_entry[0]->value().getStringView()); } TEST_F(ZipkinDriverTest, DuplicatedHeader) { diff --git a/test/extensions/transport_sockets/proxy_protocol/BUILD b/test/extensions/transport_sockets/proxy_protocol/BUILD index dbbdb719f507..666da2fdb1d7 100644 --- a/test/extensions/transport_sockets/proxy_protocol/BUILD +++ b/test/extensions/transport_sockets/proxy_protocol/BUILD @@ -23,5 +23,19 @@ envoy_extension_cc_test( "//test/mocks/network:io_handle_mocks", "//test/mocks/network:network_mocks", "//test/mocks/network:transport_socket_mocks", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "proxy_protocol_integration_test", + srcs = ["proxy_protocol_integration_test.cc"], + extension_name = "envoy.transport_sockets.upstream_proxy_protocol", + deps = [ + "//source/extensions/filters/network/tcp_proxy:config", + "//source/extensions/transport_sockets/proxy_protocol:upstream_config", + "//test/integration:integration_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg_cc_proto", ], 
) diff --git a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_integration_test.cc b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_integration_test.cc new file mode 100644 index 000000000000..99e04a348746 --- /dev/null +++ b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_integration_test.cc @@ -0,0 +1,204 @@ +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/config/core/v3/proxy_protocol.pb.h" +#include "envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.pb.h" + +#include "test/integration/integration.h" + +namespace Envoy { +namespace { + +class ProxyProtocolIntegrationTest : public testing::TestWithParam, + public BaseIntegrationTest { +public: + ProxyProtocolIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {} + + void TearDown() override { + test_server_.reset(); + fake_upstreams_.clear(); + } + + void setup(envoy::config::core::v3::ProxyProtocolConfig_Version version, bool health_checks, + std::string inner_socket) { + version_ = version; + health_checks_ = health_checks; + inner_socket_ = inner_socket; + } + + void initialize() override { + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* transport_socket = + bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket(); + transport_socket->set_name("envoy.transport_sockets.upstream_proxy_protocol"); + envoy::config::core::v3::TransportSocket inner_socket; + inner_socket.set_name(inner_socket_); + envoy::config::core::v3::ProxyProtocolConfig proxy_proto_config; + proxy_proto_config.set_version(version_); + envoy::extensions::transport_sockets::proxy_protocol::v3::ProxyProtocolUpstreamTransport + proxy_proto_transport; + proxy_proto_transport.mutable_transport_socket()->MergeFrom(inner_socket); + proxy_proto_transport.mutable_config()->MergeFrom(proxy_proto_config); + transport_socket->mutable_typed_config()->PackFrom(proxy_proto_transport); + + if (health_checks_) { + auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + cluster->set_close_connections_on_host_health_failure(false); + cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0); + cluster->add_health_checks()->mutable_timeout()->set_seconds(20); + cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true); + cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_tcp_health_check(); + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text( + "50696E67"); + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text( + "506F6E67"); + } + }); + BaseIntegrationTest::initialize(); + } + + FakeRawConnectionPtr fake_upstream_connection_; + +private: + envoy::config::core::v3::ProxyProtocolConfig_Version version_; + bool health_checks_; + std::string inner_socket_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtocolIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Test sending proxy protocol v1 
+TEST_P(ProxyProtocolIntegrationTest, TestV1ProxyProtocol) { + setup(envoy::config::core::v3::ProxyProtocolConfig::V1, false, + "envoy.transport_sockets.raw_buffer"); + initialize(); + + auto listener_port = lookupPort("listener_0"); + auto tcp_client = makeTcpConnection(listener_port); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_)); + + std::string observed_data; + ASSERT_TRUE(tcp_client->write("data")); + if (GetParam() == Network::Address::IpVersion::v4) { + ASSERT_TRUE(fake_upstream_connection_->waitForData(48, &observed_data)); + EXPECT_THAT(observed_data, testing::StartsWith("PROXY TCP4 127.0.0.1 127.0.0.1 ")); + } else if (GetParam() == Network::Address::IpVersion::v6) { + ASSERT_TRUE(fake_upstream_connection_->waitForData(36, &observed_data)); + EXPECT_THAT(observed_data, testing::StartsWith("PROXY TCP6 ::1 ::1 ")); + } + EXPECT_THAT(observed_data, testing::EndsWith(absl::StrCat(" ", listener_port, "\r\ndata"))); + + auto previous_data = observed_data; + observed_data.clear(); + ASSERT_TRUE(tcp_client->write(" more data")); + ASSERT_TRUE(fake_upstream_connection_->waitForData(previous_data.length() + 10, &observed_data)); + EXPECT_EQ(previous_data + " more data", observed_data); + + tcp_client->close(); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); +} + +// Test header is sent unencrypted using a TLS inner socket +TEST_P(ProxyProtocolIntegrationTest, TestTLSSocket) { + setup(envoy::config::core::v3::ProxyProtocolConfig::V1, false, "envoy.transport_sockets.tls"); + initialize(); + + auto listener_port = lookupPort("listener_0"); + auto tcp_client = makeTcpConnection(listener_port); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_)); + + ASSERT_TRUE(tcp_client->write("data")); + if (GetParam() == Network::Address::IpVersion::v4) { + ASSERT_TRUE(fake_upstream_connection_->waitForData( + fake_upstream_connection_->waitForInexactMatch("PROXY TCP4 127.0.0.1 127.0.0.1 "))); + } else if (GetParam() == Network::Address::IpVersion::v6) { + ASSERT_TRUE(fake_upstream_connection_->waitForData( + fake_upstream_connection_->waitForInexactMatch("PROXY TCP6 ::1 ::1 "))); + } + + tcp_client->close(); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); +} + +// Test sending proxy protocol health check +TEST_P(ProxyProtocolIntegrationTest, TestProxyProtocolHealthCheck) { + setup(envoy::config::core::v3::ProxyProtocolConfig::V1, true, + "envoy.transport_sockets.raw_buffer"); + FakeRawConnectionPtr fake_upstream_health_connection; + on_server_init_function_ = [&](void) -> void { + std::string observed_data; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection)); + if (GetParam() == Network::Address::IpVersion::v4) { + ASSERT_TRUE(fake_upstream_health_connection->waitForData(48, &observed_data)); + EXPECT_THAT(observed_data, testing::StartsWith("PROXY TCP4 127.0.0.1 127.0.0.1 ")); + } else if (GetParam() == Network::Address::IpVersion::v6) { + ASSERT_TRUE(fake_upstream_health_connection->waitForData(36, &observed_data)); + EXPECT_THAT(observed_data, testing::StartsWith("PROXY TCP6 ::1 ::1 ")); + } + ASSERT_TRUE(fake_upstream_health_connection->write("Pong")); + }; + + initialize(); + + ASSERT_TRUE(fake_upstream_health_connection->close()); + ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect()); +} + +// Test sending proxy protocol v2 +TEST_P(ProxyProtocolIntegrationTest, TestV2ProxyProtocol) { + 
setup(envoy::config::core::v3::ProxyProtocolConfig::V2, false, + "envoy.transport_sockets.raw_buffer"); + initialize(); + + auto listener_port = lookupPort("listener_0"); + auto tcp_client = makeTcpConnection(listener_port); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_)); + + std::string observed_data; + ASSERT_TRUE(tcp_client->write("data")); + if (GetParam() == Envoy::Network::Address::IpVersion::v4) { + ASSERT_TRUE(fake_upstream_connection_->waitForData(32, &observed_data)); + // - signature + // - version and command type, address family and protocol, length of addresses + // - src address, dest address + auto header_start = "\x0d\x0a\x0d\x0a\x00\x0d\x0a\x51\x55\x49\x54\x0a\ + \x21\x11\x00\x0c\ + \x7f\x00\x00\x01\x7f\x00\x00\x01"; + EXPECT_THAT(observed_data, testing::StartsWith(header_start)); + EXPECT_EQ(static_cast(observed_data[26]), listener_port >> 8); + EXPECT_EQ(static_cast(observed_data[27]), listener_port & 0xFF); + } else if (GetParam() == Envoy::Network::Address::IpVersion::v6) { + ASSERT_TRUE(fake_upstream_connection_->waitForData(56, &observed_data)); + // - signature + // - version and command type, address family and protocol, length of addresses + // - src address + // - dest address + auto header_start = "\x0d\x0a\x0d\x0a\x00\x0d\x0a\x51\x55\x49\x54\x0a\ + \x21\x21\x00\x24\ + \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\ + \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"; + EXPECT_THAT(observed_data, testing::StartsWith(header_start)); + EXPECT_EQ(static_cast(observed_data[50]), listener_port >> 8); + EXPECT_EQ(static_cast(observed_data[51]), listener_port & 0xFF); + } + EXPECT_THAT(observed_data, testing::EndsWith("data")); + + auto previous_data = observed_data; + observed_data.clear(); + ASSERT_TRUE(tcp_client->write(" more data")); + ASSERT_TRUE(fake_upstream_connection_->waitForData(previous_data.length() + 10, &observed_data)); + EXPECT_EQ(previous_data + " more data", observed_data); + + tcp_client->close(); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); +} + +} // namespace +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc index 2823d218c992..953e5999a5fb 100644 --- a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc @@ -1,3 +1,4 @@ +#include "envoy/config/core/v3/proxy_protocol.pb.h" #include "envoy/network/proxy_protocol.h" #include "common/buffer/buffer_impl.h" @@ -20,8 +21,10 @@ using testing::_; using testing::InSequence; using testing::NiceMock; using testing::Return; +using testing::ReturnNull; using testing::ReturnRef; +using envoy::config::core::v3::ProxyProtocolConfig; using envoy::config::core::v3::ProxyProtocolConfig_Version; namespace Envoy { @@ -30,8 +33,6 @@ namespace TransportSockets { namespace ProxyProtocol { namespace { -constexpr uint64_t MaxSlices = 16; - class ProxyProtocolTest : public testing::Test { public: void initialize(ProxyProtocolConfig_Version version, @@ -42,6 +43,7 @@ class ProxyProtocolTest : public testing::Test { proxy_protocol_socket_ = std::make_unique(std::move(inner_socket), socket_options, version); proxy_protocol_socket_->setTransportSocketCallbacks(transport_callbacks_); + proxy_protocol_socket_->onConnected(); } NiceMock* inner_socket_; @@ -59,14 +61,17 @@ 
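The integration tests above assert on the raw upstream PROXY protocol bytes. Version 1 is a single ASCII line of the form PROXY TCP4 <src> <dst> <srcport> <dstport> terminated by CRLF, while version 2 starts with a fixed 12-byte signature followed by version/command, family/protocol, a 2-byte address-block length, the source and destination addresses, and the two ports in network byte order; that layout is why the IPv4 test reads the destination port out of bytes 26 and 27. The sketch below restates the IPv4 case with hypothetical helpers and no Envoy dependencies; it is not the extension's generateV1Header/generateV2Header code.

// Sketch of the PROXY protocol headers the assertions above decode (IPv4/TCP case).
#include <array>
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// v1: one ASCII line, terminated by CRLF.
std::string v1Header(const std::string& src, const std::string& dst, uint16_t src_port,
                     uint16_t dst_port) {
  return "PROXY TCP4 " + src + " " + dst + " " + std::to_string(src_port) + " " +
         std::to_string(dst_port) + "\r\n";
}

// v2 (IPv4/TCP): 12-byte signature, version/command, family/protocol,
// 2-byte address block length, src/dst addresses, then src/dst ports.
std::vector<uint8_t> v2HeaderIpv4(const std::array<uint8_t, 4>& src,
                                  const std::array<uint8_t, 4>& dst, uint16_t src_port,
                                  uint16_t dst_port) {
  std::vector<uint8_t> out = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d,
                              0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a}; // signature
  out.push_back(0x21); // version 2, PROXY command
  out.push_back(0x11); // AF_INET, STREAM (TCP)
  out.push_back(0x00); // address block length, big endian: 4 + 4 + 2 + 2 = 12
  out.push_back(0x0c);
  out.insert(out.end(), src.begin(), src.end());
  out.insert(out.end(), dst.begin(), dst.end());
  out.push_back(static_cast<uint8_t>(src_port >> 8)); // ports in network byte order
  out.push_back(static_cast<uint8_t>(src_port & 0xFF));
  out.push_back(static_cast<uint8_t>(dst_port >> 8));
  out.push_back(static_cast<uint8_t>(dst_port & 0xFF));
  return out;
}

int main() {
  const uint16_t listener_port = 10000;
  assert(v1Header("127.0.0.1", "127.0.0.1", 43210, listener_port) ==
         "PROXY TCP4 127.0.0.1 127.0.0.1 43210 10000\r\n");

  const auto v2 = v2HeaderIpv4({127, 0, 0, 1}, {127, 0, 0, 1}, 43210, listener_port);
  assert(v2.size() == 28); // 16-byte fixed part + 12-byte IPv4 address block
  // Destination port lives at offsets 26/27, matching the integration test.
  assert(v2[26] == (listener_port >> 8) && v2[27] == (listener_port & 0xFF));
  return 0;
}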
TEST_F(ProxyProtocolTest, InjectesHeaderOnlyOnce) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); auto msg2 = Buffer::OwnedImpl("more data"); + { InSequence s; EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -86,12 +91,14 @@ TEST_F(ProxyProtocolTest, BytesProcessedIncludesProxyProtocolHeader) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); auto msg2 = Buffer::OwnedImpl("more data"); { @@ -117,19 +124,23 @@ TEST_F(ProxyProtocolTest, ReturnsKeepOpenWhenWriteErrorIsAgain) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); auto msg = Buffer::OwnedImpl("some data"); { InSequence s; - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - 0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), - Network::IoSocketError::deleteIoError))))); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance&) -> Api::IoCallUint64Result { + return Api::IoCallUint64Result( + 0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), + Network::IoSocketError::deleteIoError)); + })); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto 
length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)) .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg.length(), false})); } @@ -149,16 +160,17 @@ TEST_F(ProxyProtocolTest, ReturnsCloseWhenWriteErrorIsNotAgain) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); auto msg = Buffer::OwnedImpl("some data"); { InSequence s; - EXPECT_CALL(io_handle_, writev(_, _)) - .WillOnce(Return(testing::ByMove( - Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EADDRNOTAVAIL), - [](Api::IoError* err) { delete err; }))))); + EXPECT_CALL(io_handle_, write(_)) + .WillOnce(Invoke([&](Buffer::Instance&) -> Api::IoCallUint64Result { + return Api::IoCallUint64Result(0, + Api::IoErrorPtr(new Network::IoSocketError(EADDRNOTAVAIL), + Network::IoSocketError::deleteIoError)); + })); } auto resp = proxy_protocol_socket_->doWrite(msg, false); @@ -174,12 +186,14 @@ TEST_F(ProxyProtocolTest, V1IPV4LocalAddressWhenTransportOptionsAreNull) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -195,13 +209,15 @@ TEST_F(ProxyProtocolTest, V1IPV4LocalAddressesWhenHeaderOptionsAreNull) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, std::make_shared()); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), 1)) - .WillOnce(Return(testing::ByMove( - Api::IoCallUint64Result(43, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = 43; + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -217,13 +233,15 @@ TEST_F(ProxyProtocolTest, V1IPV6LocalAddressesWhenHeaderOptionsAreNull) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("a:b:c:d::", 
"e:b:c:f::", 50000, 8080, Network::Address::IpVersion::v6, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, std::make_shared()); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -248,12 +266,14 @@ TEST_F(ProxyProtocolTest, V1IPV4DownstreamAddresses) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("202.168.0.13", "174.2.2.222", 52000, 80, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -278,12 +298,14 @@ TEST_F(ProxyProtocolTest, V1IPV6DownstreamAddresses) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("1::2:3", "a:b:c:d::", 52000, 80, Network::Address::IpVersion::v6, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -298,12 +320,14 @@ TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenTransportOptionsAreNull) { Network::Utility::resolveUrl("tcp://0.1.1.2:513"); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2LocalHeader(expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, nullptr); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - 
expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -318,13 +342,15 @@ TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenHeaderOptionsAreNull) { Network::Utility::resolveUrl("tcp://0.1.1.2:513"); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2LocalHeader(expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, std::make_shared()); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -349,12 +375,14 @@ TEST_F(ProxyProtocolTest, V2IPV4DownstreamAddresses) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2Header("1.2.3.4", "0.1.1.2", 773, 513, Network::Address::IpVersion::v4, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); @@ -379,18 +407,68 @@ TEST_F(ProxyProtocolTest, V2IPV6DownstreamAddresses) { Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2Header("1:2:3::4", "1:100:200:3::", 8, 2, Network::Address::IpVersion::v6, expected_buff); - auto expected_slices = expected_buff.getRawSlices(MaxSlices); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); - EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) - .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( - expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(io_handle_, write(BufferStringEqual(expected_buff.toString()))) + .WillOnce(Invoke([&](Buffer::Instance& buffer) -> Api::IoCallUint64Result { + auto length = buffer.length(); + buffer.drain(length); + return Api::IoCallUint64Result(length, Api::IoErrorPtr(nullptr, [](Api::IoError*) {})); + })); auto msg = 
Buffer::OwnedImpl("some data"); EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); proxy_protocol_socket_->doWrite(msg, false); } +// Test onConnected calls inner onConnected +TEST_F(ProxyProtocolTest, OnConnectedCallsInnerOnConnected) { + auto src_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1:2:3::4", 8)); + auto dst_addr = Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv6Instance("1:100:200:3::", 2)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://[1:100:200:3::]:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080"); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); + + EXPECT_CALL(*inner_socket_, onConnected()).Times(1); + proxy_protocol_socket_->onConnected(); +} + +class ProxyProtocolSocketFactoryTest : public testing::Test { +public: + void initialize() { + auto inner_factory = std::make_unique>(); + inner_factory_ = inner_factory.get(); + factory_ = std::make_unique(std::move(inner_factory), + ProxyProtocolConfig()); + } + + NiceMock* inner_factory_; + std::unique_ptr factory_; +}; + +// Test createTransportSocket returns nullptr if inner call returns nullptr +TEST_F(ProxyProtocolSocketFactoryTest, CreateSocketReturnsNullWhenInnerFactoryReturnsNull) { + initialize(); + EXPECT_CALL(*inner_factory_, createTransportSocket(_)).WillOnce(ReturnNull()); + ASSERT_EQ(nullptr, factory_->createTransportSocket(nullptr)); +} + +// Test implementsSecureTransport calls inner factory +TEST_F(ProxyProtocolSocketFactoryTest, ImplementsSecureTransportCallInnerFactory) { + initialize(); + EXPECT_CALL(*inner_factory_, implementsSecureTransport()).WillOnce(Return(true)); + ASSERT_TRUE(factory_->implementsSecureTransport()); +} + } // namespace } // namespace ProxyProtocol } // namespace TransportSockets diff --git a/test/extensions/transport_sockets/tls/BUILD b/test/extensions/transport_sockets/tls/BUILD index cdd5963d7ee4..48a456162df3 100644 --- a/test/extensions/transport_sockets/tls/BUILD +++ b/test/extensions/transport_sockets/tls/BUILD @@ -21,6 +21,7 @@ envoy_cc_test( # right now we have a bunch of duplication which is confusing. 
"//test/config/integration/certs", "//test/extensions/transport_sockets/tls/test_data:certs", + "//test/extensions/transport_sockets/tls/ocsp:gen_ocsp_data", ], external_deps = ["ssl"], shard_count = 4, @@ -49,6 +50,7 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/init:init_mocks", "//test/mocks/local_info:local_info_mocks", + "//test/mocks/network:io_handle_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", @@ -58,6 +60,7 @@ envoy_cc_test( "//test/test_common:network_utility_lib", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", @@ -72,12 +75,14 @@ envoy_cc_test( ], data = [ "gen_unittest_certs.sh", + "//test/extensions/transport_sockets/tls/ocsp:gen_ocsp_data", "//test/extensions/transport_sockets/tls/test_data:certs", ], # Fails intermittantly on local build tags = ["flaky_on_windows"], deps = [ ":ssl_test_utils", + "//source/common/common:base64_lib", "//source/common/json:json_loader_lib", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", @@ -117,6 +122,7 @@ envoy_cc_test( ], data = [ "gen_unittest_certs.sh", + "//test/extensions/transport_sockets/tls/ocsp:gen_ocsp_data", "//test/extensions/transport_sockets/tls/test_data:certs", ], external_deps = ["ssl"], @@ -170,6 +176,9 @@ envoy_cc_test( "//test/extensions/transport_sockets/tls/test_data:certs", ], external_deps = ["ssl"], + # TODO(sunjayBhatia): Diagnose openssl DLL load issue on Windows + # See: https://github.com/envoyproxy/envoy/pull/13276 + tags = ["flaky_on_windows"], deps = [ ":ssl_socket_test", ":ssl_test_utils", diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 60cec6e1fe17..0307ebb2daef 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -6,6 +6,7 @@ #include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" #include "envoy/type/matcher/v3/string.pb.h" +#include "common/common/base64.h" #include "common/json/json_loader.h" #include "common/secret/sds_api.h" #include "common/stats/isolated_store_impl.h" @@ -571,6 +572,220 @@ TEST_F(SslContextImplTest, MustHaveSubjectOrSAN) { EnvoyException, "has neither subject CN nor SAN names"); } +class SslServerContextImplOcspTest : public SslContextImplTest { +public: + Envoy::Ssl::ServerContextSharedPtr loadConfig(ServerContextConfigImpl& cfg) { + return manager_.createSslServerContext(store_, cfg, std::vector{}); + } + + Envoy::Ssl::ServerContextSharedPtr loadConfigYaml(const std::string& yaml) { + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context); + ServerContextConfigImpl cfg(tls_context, factory_context_); + return loadConfig(cfg); + } +}; + +TEST_F(SslServerContextImplOcspTest, TestFilenameOcspStapleConfigLoads) { + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der" 
+ ocsp_staple_policy: must_staple + )EOF"; + loadConfigYaml(tls_context_yaml); +} + +TEST_F(SslServerContextImplOcspTest, TestInlineBytesOcspStapleConfigLoads) { + auto der_response = TestEnvironment::readFileToStringForTest( + TestEnvironment::substitute("{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der")); + auto base64_response = Base64::encode(der_response.c_str(), der_response.length(), true); + const std::string tls_context_yaml = fmt::format(R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{{{ test_tmpdir }}}}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{{{ test_tmpdir }}}}/ocsp_test_data/good_key.pem" + ocsp_staple: + inline_bytes: "{}" + ocsp_staple_policy: must_staple + )EOF", + base64_response); + + loadConfigYaml(tls_context_yaml); +} + +TEST_F(SslServerContextImplOcspTest, TestInlineStringOcspStapleConfigFails) { + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + inline_string: "abcd" + ocsp_staple_policy: must_staple + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException, + "OCSP staple cannot be provided via inline_string"); +} + +TEST_F(SslServerContextImplOcspTest, TestMismatchedOcspStapleConfigFails) { + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der" + ocsp_staple_policy: must_staple + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException, + "OCSP response does not match its TLS certificate"); +} + +TEST_F(SslServerContextImplOcspTest, TestStaplingRequiredWithoutStapleConfigFails) { + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple_policy: must_staple + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException, + "Required OCSP response is missing from TLS context"); +} + +TEST_F(SslServerContextImplOcspTest, TestUnsuccessfulOcspResponseConfigFails) { + std::vector data = { + // SEQUENCE + 0x30, 3, + // OcspResponseStatus - InternalError + 0xau, 1, 2, + // no response bytes + }; + std::string der_response(data.begin(), data.end()); + auto base64_response = Base64::encode(der_response.c_str(), der_response.length(), true); + const std::string tls_context_yaml = fmt::format(R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{{{ test_tmpdir }}}}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{{{ test_tmpdir }}}}/ocsp_test_data/good_key.pem" + ocsp_staple: + inline_bytes: "{}" + ocsp_staple_policy: must_staple + )EOF", + base64_response); + + EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException, + "OCSP response was unsuccessful"); +} + +TEST_F(SslServerContextImplOcspTest, TestMustStapleCertWithoutStapleConfigFails) { + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir 
}}/ocsp_test_data/revoked_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem" + ocsp_staple_policy: lenient_stapling + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(tls_context_yaml), EnvoyException, + "OCSP response is required for must-staple certificate"); +} + +TEST_F(SslServerContextImplOcspTest, TestMustStapleCertWithoutStapleFeatureFlagOff) { + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem" + ocsp_staple_policy: lenient_stapling + )EOF"; + + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs", "false"}}); + loadConfigYaml(tls_context_yaml); +} + +TEST_F(SslServerContextImplOcspTest, TestGetCertInformationWithOCSP) { + const std::string yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der" +)EOF"; + + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context); + auto context = loadConfigYaml(yaml); + + constexpr absl::string_view this_update = "This Update: "; + constexpr absl::string_view next_update = "Next Update: "; + + auto ocsp_text_details = + absl::StrSplit(TestEnvironment::readFileToStringForTest( + TestEnvironment::substitute( + "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp_details.txt"), + true), + '\n'); + std::string valid_from, expiration; + for (const auto& detail : ocsp_text_details) { + std::string::size_type pos = detail.find(this_update); + if (pos != std::string::npos) { + valid_from = detail.substr(pos + this_update.size()); + continue; + } + + pos = detail.find(next_update); + if (pos != std::string::npos) { + expiration = detail.substr(pos + next_update.size()); + continue; + } + } + + std::string ocsp_json = absl::StrCat(R"EOF({ +"valid_from": ")EOF", + convertTimeCertInfoToCertDetails(valid_from), R"EOF(", +"expiration": ")EOF", + convertTimeCertInfoToCertDetails(expiration), R"EOF(" +} +)EOF"); + + envoy::admin::v3::CertificateDetails::OcspDetails ocsp_details; + TestUtility::loadFromJson(ocsp_json, ocsp_details); + + MessageDifferencer message_differencer; + message_differencer.set_scope(MessageDifferencer::Scope::PARTIAL); + EXPECT_TRUE(message_differencer.Compare(ocsp_details, + context->getCertChainInformation()[0]->ocsp_details())); +} + class SslServerContextImplTicketTest : public SslContextImplTest { public: void loadConfig(ServerContextConfigImpl& cfg) { diff --git a/test/extensions/transport_sockets/tls/handshaker_test.cc b/test/extensions/transport_sockets/tls/handshaker_test.cc index 77623f9e13ae..8f21b2c6bc2d 100644 --- a/test/extensions/transport_sockets/tls/handshaker_test.cc +++ b/test/extensions/transport_sockets/tls/handshaker_test.cc @@ -45,6 +45,7 @@ class MockHandshakeCallbacks : public Ssl::HandshakeCallbacks { MOCK_METHOD(Network::Connection&, connection, (), (const, override)); MOCK_METHOD(void, onSuccess, (SSL*), (override)); MOCK_METHOD(void, onFailure, (), (override)); + MOCK_METHOD(Network::TransportSocketCallbacks*, transportSocketCallbacks, 
(), (override)); }; class HandshakerTest : public SslCertsTest { diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index db9b0afd9ec5..297f328071e7 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -34,7 +34,10 @@ namespace Ssl { void SslIntegrationTestBase::initialize() { config_helper_.addSslConfig(ConfigHelper::ServerSslOptions() .setRsaCert(server_rsa_cert_) + .setRsaCertOcspStaple(server_rsa_cert_ocsp_staple_) .setEcdsaCert(server_ecdsa_cert_) + .setEcdsaCertOcspStaple(server_ecdsa_cert_ocsp_staple_) + .setOcspStapleRequired(ocsp_staple_required_) .setTlsV13(server_tlsv1_3_) .setExpectClientEcdsaCert(client_ecdsa_cert_)); HttpIntegrationTest::initialize(); @@ -347,6 +350,60 @@ TEST_P(SslCertficateIntegrationTest, ServerRsaEcdsaClientEcdsaOnly) { checkStats(); } +// Server has an RSA certificate with an OCSP response works. +TEST_P(SslCertficateIntegrationTest, ServerRsaOnlyOcspResponse) { + server_rsa_cert_ = true; + server_rsa_cert_ocsp_staple_ = true; + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + return makeSslClientConnection(rsaOnlyClientOptions()); + }; + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); + checkStats(); +} + +// Server has an ECDSA certificate with an OCSP response works. +TEST_P(SslCertficateIntegrationTest, ServerEcdsaOnlyOcspResponse) { + server_ecdsa_cert_ = true; + server_ecdsa_cert_ocsp_staple_ = true; + client_ecdsa_cert_ = true; + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + return makeSslClientConnection(ecdsaOnlyClientOptions()); + }; + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); + checkStats(); +} + +// Server has two certificates one with and one without OCSP response works under optional policy. +TEST_P(SslCertficateIntegrationTest, BothEcdsaAndRsaOnlyRsaOcspResponse) { + server_rsa_cert_ = true; + server_rsa_cert_ocsp_staple_ = true; + server_ecdsa_cert_ = true; + client_ecdsa_cert_ = true; + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + return makeSslClientConnection(ecdsaOnlyClientOptions()); + }; + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); + checkStats(); +} + +// Server has ECDSA and RSA certificates with OCSP responses and stapling required policy works. +TEST_P(SslCertficateIntegrationTest, BothEcdsaAndRsaWithOcspResponseStaplingRequired) { + server_rsa_cert_ = true; + server_rsa_cert_ocsp_staple_ = true; + server_ecdsa_cert_ = true; + server_ecdsa_cert_ocsp_staple_ = true; + ocsp_staple_required_ = true; + client_ecdsa_cert_ = true; + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + return makeSslClientConnection(ecdsaOnlyClientOptions()); + }; + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); + checkStats(); +} + +// TODO(zuercher): write an additional OCSP integration test that validates behavior with an +// expired OCSP response. (Requires OCSP client-side support in upstream TLS.) + // TODO(mattklein123): Move this into a dedicated integration test for the tap transport socket as // well as add more tests. 
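TestUnsuccessfulOcspResponseConfigFails above hand-builds the smallest possible DER OCSPResponse: an outer SEQUENCE (tag 0x30) of length 3 whose only element is the ENUMERATED (tag 0x0a) responseStatus, value 2 (internalError in RFC 6960), with the optional responseBytes omitted entirely. The sketch below decodes exactly that five-byte shape to make the tag/length/value reading explicit; the helper is hypothetical and standalone, not Envoy's OCSP parser.

// Decodes the 5-byte DER blob used above: SEQUENCE { ENUMERATED status },
// i.e. an OCSPResponse whose responseStatus is present but responseBytes is
// omitted. Standalone illustration; not the extension's ASN.1 utilities.
#include <cassert>
#include <cstdint>
#include <vector>

// Returns the responseStatus value, or -1 if the blob is not the minimal
// single-byte-length, single-byte-value shape sketched here.
int parseMinimalOcspResponseStatus(const std::vector<uint8_t>& der) {
  // SEQUENCE tag, then a short-form length covering the rest of the blob.
  if (der.size() < 5 || der[0] != 0x30 || static_cast<size_t>(der[1]) != der.size() - 2) {
    return -1;
  }
  // ENUMERATED tag for OCSPResponseStatus, length 1, then the value itself.
  if (der[2] != 0x0a || der[3] != 1) {
    return -1;
  }
  return der[4];
}

int main() {
  // Same bytes as the test data: status 2 = internalError (RFC 6960).
  const std::vector<uint8_t> unsuccessful = {0x30, 3, 0x0a, 1, 2};
  assert(parseMinimalOcspResponseStatus(unsuccessful) == 2);

  // Flipping the status byte to 0 (successful) is what the companion
  // "no response body" test does; the structure is otherwise identical.
  const std::vector<uint8_t> successful_empty = {0x30, 3, 0x0a, 1, 0};
  assert(parseMinimalOcspResponseStatus(successful_empty) == 0);
  return 0;
}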
class SslTapIntegrationTest : public SslIntegrationTest { diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.h b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.h index 133e73bd433e..ba209528805a 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.h +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.h @@ -31,7 +31,10 @@ class SslIntegrationTestBase : public HttpIntegrationTest { protected: bool server_tlsv1_3_{false}; bool server_rsa_cert_{true}; + bool server_rsa_cert_ocsp_staple_{false}; bool server_ecdsa_cert_{false}; + bool server_ecdsa_cert_ocsp_staple_{false}; + bool ocsp_staple_required_{false}; bool client_ecdsa_cert_{false}; // Set this true to debug SSL handshake issues with openssl s_client. The // verbose trace will be in the logs, openssl must be installed separately. diff --git a/test/extensions/transport_sockets/tls/ocsp/BUILD b/test/extensions/transport_sockets/tls/ocsp/BUILD index 9c21e95ff977..c6947269be4d 100644 --- a/test/extensions/transport_sockets/tls/ocsp/BUILD +++ b/test/extensions/transport_sockets/tls/ocsp/BUILD @@ -14,7 +14,7 @@ envoy_cc_test( "ocsp_test.cc", ], data = [ - "gen_unittest_ocsp_data.sh", + ":gen_ocsp_data", ], external_deps = ["ssl"], # TODO: Diagnose intermittent failure on Windows; this script uses the @@ -25,6 +25,7 @@ envoy_cc_test( "//source/common/filesystem:filesystem_lib", "//source/extensions/transport_sockets/tls:utility_lib", "//source/extensions/transport_sockets/tls/ocsp:ocsp_lib", + "//test/extensions/transport_sockets/tls:ssl_socket_test", "//test/extensions/transport_sockets/tls:ssl_test_utils", "//test/test_common:environment_lib", "//test/test_common:logging_lib", @@ -43,3 +44,8 @@ envoy_cc_test( "//test/extensions/transport_sockets/tls:ssl_test_utils", ], ) + +filegroup( + name = "gen_ocsp_data", + srcs = ["gen_unittest_ocsp_data.sh"], +) diff --git a/test/extensions/transport_sockets/tls/ocsp/asn1_utility_test.cc b/test/extensions/transport_sockets/tls/ocsp/asn1_utility_test.cc index e3299c39bd22..85f0024627e3 100644 --- a/test/extensions/transport_sockets/tls/ocsp/asn1_utility_test.cc +++ b/test/extensions/transport_sockets/tls/ocsp/asn1_utility_test.cc @@ -172,27 +172,61 @@ TEST_F(Asn1UtilityTest, GetOptionalMissingValueTest) { TEST_F(Asn1UtilityTest, ParseOptionalTest) { std::vector nothing; std::vector explicit_optional_true = {0, 3, 0x1u, 1, 0xff}; + std::vector missing_val_bool = {0x1u, 1}; - CBS cbs_true, cbs_explicit_optional_true, cbs_empty_seq, cbs_nothing; - CBS_init(&cbs_true, asn1_true.data(), asn1_true.size()); - CBS_init(&cbs_explicit_optional_true, explicit_optional_true.data(), - explicit_optional_true.size()); - CBS_init(&cbs_empty_seq, asn1_empty_seq.data(), asn1_empty_seq.size()); - CBS_init(&cbs_nothing, nothing.data(), nothing.size()); - - auto parseBool = [](CBS& cbs) -> bool { + auto parse_bool = [](CBS& cbs) -> bool { int res; CBS_get_asn1_bool(&cbs, &res); return res; }; - absl::optional expected(true); - EXPECT_EQ(expected, absl::get<0>(Asn1Utility::parseOptional(cbs_explicit_optional_true, - parseBool, 0))); - EXPECT_EQ(absl::nullopt, absl::get<0>(Asn1Utility::parseOptional(cbs_empty_seq, parseBool, - CBS_ASN1_BOOLEAN))); - EXPECT_EQ(absl::nullopt, absl::get<0>(Asn1Utility::parseOptional(cbs_nothing, parseBool, - CBS_ASN1_BOOLEAN))); + auto parse_bool_fail = [](CBS&) -> ParsingResult { + std::cout << "failing" << std::endl; + return absl::string_view{"failed"}; + }; + + { + CBS 
cbs_explicit_optional_true; + CBS_init(&cbs_explicit_optional_true, explicit_optional_true.data(), + explicit_optional_true.size()); + + absl::optional expected(true); + EXPECT_EQ(expected, absl::get<0>(Asn1Utility::parseOptional(cbs_explicit_optional_true, + parse_bool, 0))); + } + + { + CBS cbs_empty_seq; + CBS_init(&cbs_empty_seq, asn1_empty_seq.data(), asn1_empty_seq.size()); + EXPECT_EQ(absl::nullopt, absl::get<0>(Asn1Utility::parseOptional( + cbs_empty_seq, parse_bool, CBS_ASN1_BOOLEAN))); + } + + { + CBS cbs_nothing; + CBS_init(&cbs_nothing, nothing.data(), nothing.size()); + + EXPECT_EQ(absl::nullopt, absl::get<0>(Asn1Utility::parseOptional(cbs_nothing, parse_bool, + CBS_ASN1_BOOLEAN))); + } + + { + CBS cbs_missing_val; + CBS_init(&cbs_missing_val, missing_val_bool.data(), missing_val_bool.size()); + + EXPECT_EQ("Failed to parse ASN.1 element tag", + absl::get<1>( + Asn1Utility::parseOptional(cbs_missing_val, parse_bool, CBS_ASN1_BOOLEAN))); + } + + { + CBS cbs_explicit_optional_true; + CBS_init(&cbs_explicit_optional_true, explicit_optional_true.data(), + explicit_optional_true.size()); + + EXPECT_EQ("failed", absl::get<1>(Asn1Utility::parseOptional(cbs_explicit_optional_true, + parse_bool_fail, 0))); + } } TEST_F(Asn1UtilityTest, ParseOidTest) { @@ -214,6 +248,16 @@ TEST_F(Asn1UtilityTest, ParseOidTest) { EXPECT_EQ(oid, absl::get<0>(Asn1Utility::parseOid(cbs))); } +TEST_F(Asn1UtilityTest, ParseOidInvalidValueTest) { + // 0x80 is not valid within an OID + std::vector invalid_oid = {0x6, 0x6, 0x29, 0x80, 0x1, 0x1, 0x1, 0x1}; + + CBS cbs; + CBS_init(&cbs, invalid_oid.data(), invalid_oid.size()); + + EXPECT_EQ("Failed to parse oid", absl::get<1>(Asn1Utility::parseOid(cbs))); +} + TEST_F(Asn1UtilityTest, ParseGeneralizedTimeWrongFormatErrorTest) { std::string invalid_time = ""; CBS cbs; @@ -261,31 +305,32 @@ void cbbAddAsn1Int64(CBB* cbb, int64_t value) { ASSERT_TRUE(CBB_add_asn1_uint64(cbb, value)); } - union { - int64_t i; - uint8_t bytes[sizeof(int64_t)]; - } u; - u.i = value; - int start = 7; - // Skip leading sign-extension bytes unless they are necessary. - while (start > 0 && (u.bytes[start] == 0xff && (u.bytes[start - 1] & 0x80))) { - start--; + // Skip past bytes that are purely sign extension. 
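The rewritten cbbAddAsn1Int64 above drops the type-punning union in favour of plain shifts, but the rule it applies is unchanged: emit the shortest big-endian two's-complement byte string, skipping leading bytes only while they are pure sign extension of the byte that follows. The standalone restatement below encodes the same integer cases the test table uses; the helper name is hypothetical and BoringSSL's CBB is not involved.

// Emits the shortest big-endian two's-complement encoding of an int64_t, the
// same rule the rewritten helper applies before handing bytes to CBB.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> minimalTwosComplement(int64_t value) {
  // Start from the most significant byte and skip bytes that carry no
  // information: 0x00 runs for non-negative values, 0xff runs for negative
  // ones, as long as the following byte keeps the same sign bit.
  int start = 7;
  for (; start > 0; start--) {
    const uint8_t byte = static_cast<uint8_t>(value >> (start * 8));
    const uint8_t next = static_cast<uint8_t>(value >> ((start - 1) * 8));
    const bool redundant_zero = byte == 0x00 && (next & 0x80) == 0;
    const bool redundant_ff = byte == 0xff && (next & 0x80) != 0;
    if (!redundant_zero && !redundant_ff) {
      break;
    }
  }
  std::vector<uint8_t> out;
  for (int i = start; i >= 0; i--) {
    out.push_back(static_cast<uint8_t>(value >> (i * 8)));
  }
  return out;
}

int main() {
  // Matches the integer cases exercised in ParseIntegerTest.
  assert(minimalTwosComplement(1) == (std::vector<uint8_t>{0x01}));
  assert(minimalTwosComplement(10) == (std::vector<uint8_t>{0x0a}));
  assert(minimalTwosComplement(1000000) == (std::vector<uint8_t>{0x0f, 0x42, 0x40}));
  assert(minimalTwosComplement(-1) == (std::vector<uint8_t>{0xff}));
  assert(minimalTwosComplement(-128) == (std::vector<uint8_t>{0x80}));
  return 0;
}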
+ int start; + for (start = 7; start > 0; start--) { + uint8_t byte = (value >> start * 8) & 0xFF; + if (byte != 0xFF) { + break; + } + + uint8_t next_byte = (value >> (start - 1) * 8) & 0xFF; + if ((next_byte & 0x80) == 0) { + break; + } } CBB child; ASSERT_TRUE(CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER)); for (int i = start; i >= 0; i--) { - ASSERT_TRUE(CBB_add_u8(&child, u.bytes[i])); + uint8_t byte = (value >> i * 8) & 0xFF; + ASSERT_TRUE(CBB_add_u8(&child, byte)); } CBB_flush(cbb); } TEST_F(Asn1UtilityTest, ParseIntegerTest) { std::vector> integers = { - {1, "01"}, - {10, "0a"}, - {1000000, "0f4240"}, - {-1, "-01"}, + {1, "01"}, {10, "0a"}, {1000000, "0f4240"}, {-1, "-01"}, {-128, "-80"}, }; bssl::ScopedCBB cbb; CBS cbs; @@ -340,6 +385,15 @@ TEST_F(Asn1UtilityTest, SkipOptionalMalformedTagTest) { absl::get<1>(Asn1Utility::skipOptional(cbs, CBS_ASN1_SEQUENCE))); } +TEST_F(Asn1UtilityTest, SkipMalformedTagTest) { + std::vector malformed_seq = {0x30}; + CBS cbs; + CBS_init(&cbs, malformed_seq.data(), malformed_seq.size()); + + EXPECT_EQ("Failed to parse ASN.1 element", + absl::get<1>(Asn1Utility::skip(cbs, CBS_ASN1_SEQUENCE))); +} + } // namespace } // namespace Ocsp diff --git a/test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh b/test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh index a2a8f8b3a4e6..dad80edca9a1 100755 --- a/test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh +++ b/test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh @@ -6,18 +6,20 @@ set -e trap cleanup EXIT cleanup() { - rm *_index* - rm *.csr - rm *.cnf - rm *_serial* + rm -f ./*_index* + rm -f ./*.csr + rm -f ./*.cnf + rm -f ./*_serial* } -[[ -z "${TEST_TMPDIR}" ]] && TEST_TMPDIR="$(cd $(dirname $0) && pwd)" +[[ -z "${TEST_TMPDIR}" ]] && TEST_TMPDIR="$(cd "$(dirname "$0")" && pwd)" TEST_OCSP_DIR="${TEST_TMPDIR}/ocsp_test_data" mkdir -p "${TEST_OCSP_DIR}" -cd $TEST_OCSP_DIR +rm -f "${TEST_OCSP_DIR}"/* + +cd "$TEST_OCSP_DIR" || exit 1 ################################################## # Make the configuration file @@ -25,6 +27,10 @@ cd $TEST_OCSP_DIR # $1= $2= generate_config() { +touch "${1}_index.txt" +echo "unique_subject = no" > "${1}_index.txt.attr" +echo 1000 > "${1}_serial" + (cat << EOF [ req ] default_bits = 2048 @@ -78,76 +84,95 @@ subjectKeyIdentifier = hash authorityKeyIdentifier = keyid:always,issuer basicConstraints = critical, CA:true keyUsage = critical, digitalSignature, cRLSign, keyCertSign + +[ must_staple ] +tlsfeature = status_request EOF -) > $1.cnf +) > "${1}.cnf" } # $1= $2=[issuer name] generate_ca() { - if [[ "$2" != "" ]]; then local EXTRA_ARGS="-CA $2_cert.pem -CAkey $2_key.pem -CAcreateserial"; fi - openssl genrsa -out $1_key.pem 2048 - openssl req -new -key $1_key.pem -out $1_cert.csr \ - -config $1.cnf -batch -sha256 + local extra_args=() + if [[ -n "$2" ]]; then + extra_args=(-CA "${2}_cert.pem" -CAkey "${2}_key.pem" -CAcreateserial) + fi + openssl genrsa -out "${1}_key.pem" 2048 + openssl req -new -key "${1}_key.pem" -out "${1}_cert.csr" \ + -config "${1}.cnf" -batch -sha256 openssl x509 -req \ - -in $1_cert.csr -signkey $1_key.pem -out $1_cert.pem \ - -extensions v3_ca -extfile $1.cnf $EXTRA_ARGS + -in "${1}_cert.csr" -signkey "${1}_key.pem" -out "${1}_cert.pem" \ + -extensions v3_ca -extfile "${1}.cnf" "${extra_args[@]}" } -# $1= $2= -generate_x509_cert() { - openssl genrsa -out $1_key.pem 2048 - openssl req -new -key $1_key.pem -out $1_cert.csr -config $1.cnf -batch -sha256 - openssl ca -config $1.cnf -notext 
-batch -in $1_cert.csr -out $1_cert.pem +# $1= $2= $3=[req args] +generate_rsa_cert() { + openssl genrsa -out "${1}_key.pem" 2048 + openssl req -new -key "${1}_key.pem" -out "${1}_cert.csr" -config "${1}.cnf" -batch -sha256 + openssl ca -config "${1}.cnf" -notext -batch -in "${1}_cert.csr" -out "${1}_cert.pem" "${@:3}" +} + +# $1= $2= $3=[req args] +generate_ecdsa_cert() { + openssl ecparam -name secp256r1 -genkey -out "${1}_key.pem" + openssl req -new -key "${1}_key.pem" -out "${1}_cert.csr" -config "${1}.cnf" -batch -sha256 + openssl ca -config "${1}.cnf" -notext -batch -in "${1}_cert.csr" -out "${1}_cert.pem" "${@:3}" } # $1= $2= $3= $4=[extra args] generate_ocsp_response() { # Generate an OCSP request - openssl ocsp -CAfile $2_cert.pem -issuer $2_cert.pem \ - -cert $1_cert.pem -reqout $3_ocsp_req.der + openssl ocsp -CAfile "${2}_cert.pem" -issuer "${2}_cert.pem" \ + -cert "${1}_cert.pem" -reqout "${3}_ocsp_req.der" # Generate the OCSP response - openssl ocsp -CA $2_cert.pem \ - -rkey $2_key.pem -rsigner $2_cert.pem -index $2_index.txt \ - -reqin $3_ocsp_req.der -respout $3_ocsp_resp.der $4 + openssl ocsp -CA "${2}_cert.pem" \ + -rkey "${2}_key.pem" -rsigner "${2}_cert.pem" -index "${2}_index.txt" \ + -reqin "${3}_ocsp_req.der" -respout "${3}_ocsp_resp.der" "${@:4}" } # $1= $2= revoke_certificate() { - openssl ca -revoke $1_cert.pem -keyfile $2_key.pem -cert $2_cert.pem -config $2.cnf + openssl ca -revoke "${1}_cert.pem" -keyfile "${2}_key.pem" -cert "${2}_cert.pem" -config "${2}.cnf" +} + +# $1= $2= +dump_ocsp_details() { + openssl ocsp -respin "${1}_ocsp_resp.der" -issuer "${2}_cert.pem" -resp_text \ + -out "${1}_ocsp_resp_details.txt" } # Set up the CA -touch ca_index.txt -echo "unique_subject = no" > ca_index.txt.attr -echo 1000 > ca_serial generate_config ca ca generate_ca ca # Set up an intermediate CA with a different database -touch intermediate_ca_index.txt -echo "unique_subject = no" > intermediate_ca_index.txt.attr -echo 1000 > intermediate_ca_serial generate_config intermediate_ca intermediate_ca generate_ca intermediate_ca ca # Generate valid cert and OCSP response generate_config good ca -generate_x509_cert good ca -generate_ocsp_response good ca good "-ndays 7" +generate_rsa_cert good ca +generate_ocsp_response good ca good -ndays 7 +dump_ocsp_details good ca # Generate OCSP response with the responder key hash instead of name generate_ocsp_response good ca responder_key_hash -resp_key_id # Generate and revoke a cert and create OCSP response generate_config revoked ca -generate_x509_cert revoked ca +generate_rsa_cert revoked ca -extensions must_staple revoke_certificate revoked ca generate_ocsp_response revoked ca revoked # Create OCSP response for cert unknown to the CA generate_ocsp_response good intermediate_ca unknown +# Generate cert with ECDSA key and OCSP response +generate_config ecdsa ca +generate_ecdsa_cert ecdsa ca +generate_ocsp_response ecdsa ca ecdsa + # Generate an OCSP request/response for multiple certs openssl ocsp -CAfile ca_cert.pem -issuer ca_cert.pem \ -cert good_cert.pem -cert revoked_cert.pem -reqout multiple_cert_ocsp_req.der diff --git a/test/extensions/transport_sockets/tls/ocsp/ocsp_test.cc b/test/extensions/transport_sockets/tls/ocsp/ocsp_test.cc index aebca52c04ef..70f24ccaa15e 100644 --- a/test/extensions/transport_sockets/tls/ocsp/ocsp_test.cc +++ b/test/extensions/transport_sockets/tls/ocsp/ocsp_test.cc @@ -69,6 +69,7 @@ TEST_F(OcspFullResponseParsingTest, GoodCertTest) { // Contains nextUpdate that is in the future 
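The two assertions added just below pair the existing isExpired() check with a new secondsUntilExpiration() accessor. A minimal sketch of the contract those expectations appear to rely on, written as a hypothetical free function rather than the wrapper's real implementation: when nextUpdate is missing or already in the past the remaining lifetime clamps to zero, otherwise it is the whole number of seconds left.

#include <chrono>
#include <cstdint>

#include "absl/types/optional.h"

using SystemTime = std::chrono::system_clock::time_point;

// Hypothetical sketch of why EXPECT_GT(..., 0) holds for a fresh response and
// EXPECT_EQ(..., 0) holds for an expired one; the value never goes negative.
uint64_t secondsUntilExpiration(const absl::optional<SystemTime>& next_update,
                                const SystemTime& now) {
  if (!next_update || *next_update <= now) {
    return 0;
  }
  return std::chrono::duration_cast<std::chrono::seconds>(*next_update - now).count();
}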
EXPECT_FALSE(response_->isExpired()); + EXPECT_GT(response_->secondsUntilExpiration(), 0); } TEST_F(OcspFullResponseParsingTest, RevokedCertTest) { @@ -76,6 +77,7 @@ TEST_F(OcspFullResponseParsingTest, RevokedCertTest) { expectSuccessful(); expectCertificateMatches("revoked_cert.pem"); EXPECT_TRUE(response_->isExpired()); + EXPECT_EQ(response_->secondsUntilExpiration(), 0); } TEST_F(OcspFullResponseParsingTest, UnknownCertTest) { @@ -91,6 +93,7 @@ TEST_F(OcspFullResponseParsingTest, ExpiredResponseTest) { setup("good_ocsp_resp.der"); // nextUpdate is present but in the past EXPECT_TRUE(response_->isExpired()); + EXPECT_EQ(response_->secondsUntilExpiration(), 0); } TEST_F(OcspFullResponseParsingTest, ThisUpdateAfterNowTest) { @@ -113,7 +116,7 @@ TEST_F(OcspFullResponseParsingTest, MultiCertResponseTest) { "OCSP Response must be for one certificate only"); } -TEST_F(OcspFullResponseParsingTest, NoResponseBodyTest) { +TEST_F(OcspFullResponseParsingTest, UnsuccessfulResponseTest) { std::vector data = { // SEQUENCE 0x30, 3, @@ -121,6 +124,18 @@ TEST_F(OcspFullResponseParsingTest, NoResponseBodyTest) { 0xau, 1, 2, // no response bytes }; + EXPECT_THROW_WITH_MESSAGE(OcspResponseWrapper response(data, time_system_), EnvoyException, + "OCSP response was unsuccessful"); +} + +TEST_F(OcspFullResponseParsingTest, NoResponseBodyTest) { + std::vector data = { + // SEQUENCE + 0x30, 3, + // OcspResponseStatus - Success + 0xau, 1, 0, + // no response bytes + }; EXPECT_THROW_WITH_MESSAGE(OcspResponseWrapper response(data, time_system_), EnvoyException, "OCSP response has no body"); } diff --git a/test/extensions/transport_sockets/tls/ssl_certs_test.h b/test/extensions/transport_sockets/tls/ssl_certs_test.h index 843273acfcfa..1fac8ff469c5 100644 --- a/test/extensions/transport_sockets/tls/ssl_certs_test.h +++ b/test/extensions/transport_sockets/tls/ssl_certs_test.h @@ -14,6 +14,8 @@ class SslCertsTest : public testing::Test { static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) TestEnvironment::exec({TestEnvironment::runfilesPath( "test/extensions/transport_sockets/tls/gen_unittest_certs.sh")}); + TestEnvironment::exec({TestEnvironment::runfilesPath( + "test/extensions/transport_sockets/tls/ocsp/gen_unittest_ocsp_data.sh")}); } protected: diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 6dd3342d37b5..b4bdb84e5737 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -37,6 +37,7 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" +#include "test/mocks/network/io_handle.h" #include "test/mocks/network/mocks.h" #include "test/mocks/secret/mocks.h" #include "test/mocks/server/transport_socket_factory_context.h" @@ -45,6 +46,7 @@ #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/registry.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "absl/strings/str_replace.h" @@ -240,6 +242,20 @@ class TestUtilOptions : public TestUtilOptionsBase { Network::ConnectionEvent expectedServerCloseEvent() const { return expected_server_close_event_; } + TestUtilOptions& setExpectedOcspResponse(const std::string& expected_ocsp_response) { + expected_ocsp_response_ = expected_ocsp_response; + return *this; + } + + const std::string& expectedOcspResponse() const { return 
expected_ocsp_response_; } + + TestUtilOptions& enableOcspStapling() { + ocsp_stapling_enabled_ = true; + return *this; + } + + bool ocspStaplingEnabled() const { return ocsp_stapling_enabled_; } + private: const std::string client_ctx_yaml_; const std::string server_ctx_yaml_; @@ -259,6 +275,8 @@ class TestUtilOptions : public TestUtilOptionsBase { std::string expected_peer_cert_chain_; std::string expected_valid_from_peer_cert_; std::string expected_expiration_peer_cert_; + std::string expected_ocsp_response_; + bool ocsp_stapling_enabled_{false}; }; void testUtil(const TestUtilOptions& options) { @@ -330,6 +348,12 @@ void testUtil(const TestUtilOptions& options) { server_connection->addConnectionCallbacks(server_connection_callbacks); })); + if (options.ocspStaplingEnabled()) { + const SslHandshakerImpl* ssl_socket = + dynamic_cast(client_connection->ssl().get()); + SSL_enable_ocsp_stapling(ssl_socket->ssl()); + } + Network::MockConnectionCallbacks client_connection_callbacks; client_connection->addConnectionCallbacks(client_connection_callbacks); client_connection->connect(); @@ -414,6 +438,15 @@ void testUtil(const TestUtilOptions& options) { server_connection->ssl()->urlEncodedPemEncodedPeerCertificateChain()); } + const SslHandshakerImpl* ssl_socket = + dynamic_cast(client_connection->ssl().get()); + SSL* client_ssl_socket = ssl_socket->ssl(); + const uint8_t* response_head; + size_t response_len; + SSL_get0_ocsp_response(client_ssl_socket, &response_head, &response_len); + std::string ocsp_response{reinterpret_cast(response_head), response_len}; + EXPECT_EQ(options.expectedOcspResponse(), ocsp_response); + // By default, the session is not created with session resumption. The // client should see a session ID but the server should not. EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sessionId()); @@ -4503,6 +4536,38 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { EXPECT_EQ("TLS error: Secret is not supplied by SDS", transport_socket->failureReason()); } +TEST_P(SslSocketTest, TestTransportSocketCallback) { + // Make MockTransportSocketCallbacks. + Network::MockIoHandle io_handle; + NiceMock callbacks; + ON_CALL(callbacks, ioHandle()).WillByDefault(ReturnRef(io_handle)); + + // Make SslSocket. + testing::NiceMock factory_context; + Stats::TestUtil::TestStore stats_store; + ON_CALL(factory_context, stats()).WillByDefault(ReturnRef(stats_store)); + NiceMock local_info; + ON_CALL(factory_context, localInfo()).WillByDefault(ReturnRef(local_info)); + + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; + auto client_cfg = std::make_unique(tls_context, factory_context); + + ContextManagerImpl manager(time_system_); + ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, stats_store); + + Network::TransportSocketPtr transport_socket = + client_ssl_socket_factory.createTransportSocket(nullptr); + + SslSocket* ssl_socket = dynamic_cast(transport_socket.get()); + + // If no transport socket callbacks have been set, this method should return nullptr. + EXPECT_EQ(ssl_socket->transportSocketCallbacks(), nullptr); + + // Otherwise, it should return a pointer to the set callbacks object. 
+ ssl_socket->setTransportSocketCallbacks(callbacks); + EXPECT_EQ(ssl_socket->transportSocketCallbacks(), &callbacks); +} + class SslReadBufferLimitTest : public SslSocketTest { protected: void initialize() { @@ -5368,6 +5433,302 @@ TEST_P(SslSocketTest, RsaAndEcdsaPrivateKeyProviderMultiCertFail) { .setExpectedServerStats("ssl.connection_error")); } +TEST_P(SslSocketTest, TestStaplesOcspResponseSuccess) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der" + ocsp_staple_policy: lenient_stapling + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + + std::string ocsp_response_path = "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der"; + std::string expected_response = + TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ocsp_response_path)); + + testUtil(test_options.enableOcspStapling() + .setExpectedOcspResponse(expected_response) + .setExpectedServerStats("ssl.ocsp_staple_responses")); +} + +TEST_P(SslSocketTest, TestNoOcspStapleWhenNotEnabledOnClient) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der" + ocsp_staple_policy: must_staple + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + testUtil(test_options); +} + +TEST_P(SslSocketTest, TestOcspStapleOmittedOnSkipStaplingAndResponseExpired) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/unknown_ocsp_resp.der" + ocsp_staple_policy: lenient_stapling + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_omitted").enableOcspStapling()); +} + +TEST_P(SslSocketTest, TestConnectionFailsOnStapleRequiredAndOcspExpired) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/unknown_ocsp_resp.der" + ocsp_staple_policy: must_staple + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam()); + 
testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_failed").enableOcspStapling()); +} + +TEST_P(SslSocketTest, TestConnectionSucceedsWhenRejectOnExpiredNoOcspResponse) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple_policy: strict_stapling + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_omitted").enableOcspStapling()); +} + +TEST_P(SslSocketTest, TestConnectionFailsWhenRejectOnExpiredAndResponseExpired) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/unknown_ocsp_resp.der" + ocsp_staple_policy: strict_stapling + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam()); + testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_failed").enableOcspStapling()); +} + +TEST_P(SslSocketTest, TestConnectionFailsWhenCertIsMustStapleAndResponseExpired) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der" + ocsp_staple_policy: lenient_stapling + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam()); + testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_failed").enableOcspStapling()); +} + +TEST_P(SslSocketTest, TestConnectionSucceedsForMustStapleCertExpirationValidationOff) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der" + ocsp_staple_policy: must_staple + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.check_ocsp_policy", "false"}}); + + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + std::string ocsp_response_path = "{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der"; + std::string expected_response = + TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ocsp_response_path)); + testUtil(test_options.enableOcspStapling() + 
.setExpectedServerStats("ssl.ocsp_staple_responses") + .setExpectedOcspResponse(expected_response)); +} + +TEST_P(SslSocketTest, TestConnectionSucceedsForMustStapleCertNoValidationNoResponse) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem" + ocsp_staple_policy: lenient_stapling + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs", "false"}, + {"envoy.reloadable_features.check_ocsp_policy", "false"}}); + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_omitted") + .enableOcspStapling() + .setExpectedOcspResponse("")); +} + +TEST_P(SslSocketTest, TestFilterMultipleCertsFilterByOcspPolicyFallbackOnFirst) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der" + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/ecdsa_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/ecdsa_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/ecdsa_ocsp_resp.der" + ocsp_staple_policy: must_staple + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - ECDHE-ECDSA-AES128-GCM-SHA256 + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + + std::string ocsp_response_path = "{{ test_tmpdir }}/ocsp_test_data/good_ocsp_resp.der"; + std::string expected_response = + TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ocsp_response_path)); + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + testUtil(test_options.enableOcspStapling() + .setExpectedServerStats("ssl.ocsp_staple_responses") + .setExpectedOcspResponse(expected_response)); +} + +TEST_P(SslSocketTest, TestConnectionFailsOnMultipleCertificatesNonePassOcspPolicy) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/revoked_ocsp_resp.der" + - certificate_chain: + filename: "{{ test_tmpdir }}/ocsp_test_data/ecdsa_cert.pem" + private_key: + filename: "{{ test_tmpdir }}/ocsp_test_data/ecdsa_key.pem" + ocsp_staple: + filename: "{{ test_tmpdir }}/ocsp_test_data/ecdsa_ocsp_resp.der" + ocsp_staple_policy: must_staple + )EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_params: + cipher_suites: + - ECDHE-ECDSA-AES128-GCM-SHA256 + - TLS_RSA_WITH_AES_128_GCM_SHA256 +)EOF"; + + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam()); + testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_failed").enableOcspStapling()); +} + } // namespace Tls } // namespace 
TransportSockets } // namespace Extensions diff --git a/test/extensions/transport_sockets/tls/test_data/certs.sh b/test/extensions/transport_sockets/tls/test_data/certs.sh index 30e27a8a4acd..d3ad086c98d4 100755 --- a/test/extensions/transport_sockets/tls/test_data/certs.sh +++ b/test/extensions/transport_sockets/tls/test_data/certs.sh @@ -3,64 +3,78 @@ set -e readonly DEFAULT_VALIDITY_DAYS=${DEFAULT_VALIDITY_DAYS:-730} -readonly HERE=$(cd $(dirname $0) && pwd) +readonly HERE=$(cd "$(dirname "$0")" && pwd) -cd $HERE +cd "$HERE" || exit 1 trap cleanup EXIT cleanup() { - rm *csr - rm *srl - rm crl_* - rm intermediate_crl_* + rm ./*csr + rm ./*srl + rm ./crl_* + rm ./intermediate_crl_* } # $1= $2=[issuer name] generate_ca() { - if [[ "$2" != "" ]]; then - local EXTRA_ARGS="-CA $2_cert.pem -CAkey $2_key.pem -CAcreateserial"; - else - local EXTRA_ARGS="-signkey $1_key.pem"; - fi - openssl genrsa -out $1_key.pem 2048 - openssl req -new -key $1_key.pem -out $1_cert.csr -config $1_cert.cfg -batch -sha256 - openssl x509 -req -days ${DEFAULT_VALIDITY_DAYS} -in $1_cert.csr -out $1_cert.pem \ - -extensions v3_ca -extfile $1_cert.cfg $EXTRA_ARGS - generate_info_header $1 + local extra_args=() + if [[ -n "$2" ]]; then + extra_args=(-CA "${2}_cert.pem" -CAkey "${2}_key.pem" -CAcreateserial); + else + extra_args=(-signkey "${1}_key.pem"); + fi + openssl genrsa -out "${1}_key.pem" 2048 + openssl req -new -key "${1}_key.pem" -out "${1}_cert.csr" -config "${1}_cert.cfg" -batch -sha256 + openssl x509 -req -days "${DEFAULT_VALIDITY_DAYS}" -in "${1}_cert.csr" -out "${1}_cert.pem" \ + -extensions v3_ca -extfile "${1}_cert.cfg" "${extra_args[@]}" + generate_info_header "$1" } # $1= $2=[key size] $3=[password] generate_rsa_key() { - if [[ "$2" != "" ]]; then local KEYSIZE=$2; else local KEYSIZE="2048"; fi - if [[ "$3" != "" ]]; then echo -n "$3" > $1_password.txt; local EXTRA_ARGS="-aes128 -passout file:$1_password.txt"; fi - openssl genrsa -out $1_key.pem $EXTRA_ARGS $KEYSIZE + local keysize extra_args=() + keysize="${2:-2048}" + if [[ -n "$3" ]]; then + echo -n "$3" > "${1}_password.txt" + extra_args=(-aes128 -passout "file:${1}_password.txt") + fi + openssl genrsa -out "${1}_key.pem" "${extra_args[@]}" "$keysize" } # $1= $2=[curve] generate_ecdsa_key() { - if [[ "$2" != "" ]]; then local CURVE=$2; else local CURVE="secp256r1"; fi - openssl ecparam -name $CURVE -genkey -out $1_key.pem + local curve + curve="${2:-secp256r1}" + openssl ecparam -name "$curve" -genkey -out "${1}_key.pem" } # $1= generate_info_header() { - echo "// NOLINT(namespace-envoy)" > $1_cert_info.h - echo -e "constexpr char TEST_$(echo $1 | tr a-z A-Z)_CERT_256_HASH[] =\n \"$(openssl x509 -in $1_cert.pem -outform DER | openssl dgst -sha256 | cut -d" " -f2)\";" >> $1_cert_info.h - echo "constexpr char TEST_$(echo $1 | tr a-z A-Z)_CERT_1_HASH[] = \"$(openssl x509 -in $1_cert.pem -outform DER | openssl dgst -sha1 | cut -d" " -f2)\";" >> $1_cert_info.h - echo "constexpr char TEST_$(echo $1 | tr a-z A-Z)_CERT_SPKI[] = \"$(openssl x509 -in $1_cert.pem -noout -pubkey | openssl pkey -pubin -outform DER | openssl dgst -sha256 -binary | openssl enc -base64)\";" >> $1_cert_info.h - echo "constexpr char TEST_$(echo $1 | tr a-z A-Z)_CERT_SERIAL[] = \"$(openssl x509 -in $1_cert.pem -noout -serial | cut -d"=" -f2 | awk '{print tolower($0)}')\";" >> $1_cert_info.h - echo "constexpr char TEST_$(echo $1 | tr a-z A-Z)_CERT_NOT_BEFORE[] = \"$(openssl x509 -in $1_cert.pem -noout -startdate | cut -d"=" -f2)\";" >> $1_cert_info.h - echo "constexpr char TEST_$(echo 
$1 | tr a-z A-Z)_CERT_NOT_AFTER[] = \"$(openssl x509 -in $1_cert.pem -noout -enddate | cut -d"=" -f2)\";" >> $1_cert_info.h + local prefix + prefix="TEST_$(echo "$1" | tr '[:lower:]' '[:upper:]')" + { + echo "// NOLINT(namespace-envoy)" + echo "constexpr char ${prefix}_CERT_256_HASH[] =" + echo " \"$(openssl x509 -in "${1}_cert.pem" -outform DER | openssl dgst -sha256 | cut -d" " -f2)\";" + echo "constexpr char ${prefix}_CERT_1_HASH[] = \"$(openssl x509 -in "${1}_cert.pem" -outform DER | openssl dgst -sha1 | cut -d" " -f2)\";" + echo "constexpr char ${prefix}_CERT_SPKI[] = \"$(openssl x509 -in "${1}_cert.pem" -noout -pubkey | openssl pkey -pubin -outform DER | openssl dgst -sha256 -binary | openssl enc -base64)\";" + echo "constexpr char ${prefix}_CERT_SERIAL[] = \"$(openssl x509 -in "${1}_cert.pem" -noout -serial | cut -d"=" -f2 | awk '{print tolower($0)}')\";" + echo "constexpr char ${prefix}_CERT_NOT_BEFORE[] = \"$(openssl x509 -in "${1}_cert.pem" -noout -startdate | cut -d"=" -f2)\";" + echo "constexpr char ${prefix}_CERT_NOT_AFTER[] = \"$(openssl x509 -in "${1}_cert.pem" -noout -enddate | cut -d"=" -f2)\";" + } > "${1}_cert_info.h" } # $1= $2= $3=[days] generate_x509_cert() { - if [[ "$3" != "" ]]; then local DAYS=$3; else local DAYS="${DEFAULT_VALIDITY_DAYS}"; fi - if [[ -f $1_password.txt ]]; then local EXTRA_ARGS="-passin file:$1_password.txt"; fi - openssl req -new -key $1_key.pem -out $1_cert.csr -config $1_cert.cfg -batch -sha256 $EXTRA_ARGS - openssl x509 -req -days $DAYS -in $1_cert.csr -sha256 -CA $2_cert.pem -CAkey \ - $2_key.pem -CAcreateserial -out $1_cert.pem -extensions v3_ca -extfile $1_cert.cfg $EXTRA_ARGS - generate_info_header $1 + local days extra_args=() + days="${3:-${DEFAULT_VALIDITY_DAYS}}" + if [[ -f "${1}_password.txt" ]]; then + extra_args=(-passin "file:${1}_password.txt") + fi + openssl req -new -key "${1}_key.pem" -out "${1}_cert.csr" -config "${1}_cert.cfg" -batch -sha256 "${extra_args[@]}" + openssl x509 -req -days "$days" -in "${1}_cert.csr" -sha256 -CA "${2}_cert.pem" -CAkey \ + "${2}_key.pem" -CAcreateserial -out "${1}_cert.pem" -extensions v3_ca -extfile "${1}_cert.cfg" "${extra_args[@]}" + generate_info_header "$1" } # $1= $2= $3=[days] @@ -68,18 +82,20 @@ generate_x509_cert() { # Generate a certificate without a subject CN. For this to work, the config # must have an empty [req_distinguished_name] section. 
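For orientation, the rewritten generate_info_header above emits one small C++ header per certificate, built from the six openssl probes it runs. The sketch below only shows the shape of that output for a certificate named "extensions"; every hash, serial, and date value here is a placeholder, not a real artifact of the script.

// NOLINT(namespace-envoy)
constexpr char TEST_EXTENSIONS_CERT_256_HASH[] =
    "0000000000000000000000000000000000000000000000000000000000000000";
constexpr char TEST_EXTENSIONS_CERT_1_HASH[] = "0000000000000000000000000000000000000000";
constexpr char TEST_EXTENSIONS_CERT_SPKI[] = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
constexpr char TEST_EXTENSIONS_CERT_SERIAL[] = "00f0f0f0f0f0f0f0";
constexpr char TEST_EXTENSIONS_CERT_NOT_BEFORE[] = "Jan  1 00:00:00 2020 GMT";
constexpr char TEST_EXTENSIONS_CERT_NOT_AFTER[] = "Jan  1 00:00:00 2022 GMT";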
generate_x509_cert_nosubject() { - if [[ "$3" != "" ]]; then local DAYS=$3; else local DAYS="${DEFAULT_VALIDITY_DAYS}"; fi - openssl req -new -key $1_key.pem -out $1_cert.csr -config $1_cert.cfg -subj / -batch -sha256 - openssl x509 -req -days $DAYS -in $1_cert.csr -sha256 -CA $2_cert.pem -CAkey \ - $2_key.pem -CAcreateserial -out $1_cert.pem -extensions v3_ca -extfile $1_cert.cfg - generate_info_header $1 + local days + days="${3:-${DEFAULT_VALIDITY_DAYS}}" + openssl req -new -key "${1}_key.pem" -out "${1}_cert.csr" -config "${1}_cert.cfg" -subj / -batch -sha256 + openssl x509 -req -days "$days" -in "${1}_cert.csr" -sha256 -CA "${2}_cert.pem" -CAkey \ + "${2}_key.pem" -CAcreateserial -out "${1}_cert.pem" -extensions v3_ca -extfile "${1}_cert.cfg" + generate_info_header "$1" } # $1= $2=[certificate file name] generate_selfsigned_x509_cert() { - if [[ "$2" != "" ]]; then local OUTPUT_PREFIX=$2; else local OUTPUT_PREFIX=$1; fi - openssl req -new -x509 -days ${DEFAULT_VALIDITY_DAYS} -key $1_key.pem -out ${OUTPUT_PREFIX}_cert.pem -config $1_cert.cfg -batch -sha256 - generate_info_header $OUTPUT_PREFIX + local output_prefix + output_prefix="${2:-$1}" + openssl req -new -x509 -days "${DEFAULT_VALIDITY_DAYS}" -key "${1}_key.pem" -out "${output_prefix}_cert.pem" -config "${1}_cert.cfg" -batch -sha256 + generate_info_header "$output_prefix" } # Generate ca_cert.pem. @@ -150,7 +166,7 @@ cat san_ip_cert.pem intermediate_ca_cert.pem > san_ip_chain.pem # Generate certificate with extensions generate_rsa_key extensions -generate_x509_cert extensions ca +generate_x509_cert extensions ca # Generate password_protected_cert.pem. cp -f san_uri_cert.cfg password_protected_cert.cfg diff --git a/test/extensions/watchdog/profile_action/BUILD b/test/extensions/watchdog/profile_action/BUILD index 7af95504deae..fb8c662ea7bc 100644 --- a/test/extensions/watchdog/profile_action/BUILD +++ b/test/extensions/watchdog/profile_action/BUILD @@ -26,6 +26,7 @@ envoy_extension_cc_test( "//source/common/profiler:profiler_lib", "//source/extensions/watchdog/profile_action:config", "//source/extensions/watchdog/profile_action:profile_action_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", @@ -44,6 +45,7 @@ envoy_extension_cc_test( "//include/envoy/server:guarddog_config_interface", "//source/extensions/watchdog/profile_action:config", "//source/extensions/watchdog/profile_action:profile_action_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", diff --git a/test/extensions/watchdog/profile_action/config_test.cc b/test/extensions/watchdog/profile_action/config_test.cc index 2b40dfa68f1f..c5ab318c1d3d 100644 --- a/test/extensions/watchdog/profile_action/config_test.cc +++ b/test/extensions/watchdog/profile_action/config_test.cc @@ -4,6 +4,7 @@ #include "extensions/watchdog/profile_action/config.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" #include "test/test_common/utility.h" @@ -42,9 +43,10 @@ TEST(ProfileActionFactoryTest, CanCreateAction) { )EOF", config); + Stats::TestUtil::TestStore stats; Event::MockDispatcher dispatcher; - Api::ApiPtr api = Api::createApiForTest(); - Server::Configuration::GuardDogActionFactoryContext context{*api, dispatcher}; + Api::ApiPtr api = Api::createApiForTest(stats); + 
Server::Configuration::GuardDogActionFactoryContext context{*api, dispatcher, stats, "test"}; EXPECT_CALL(dispatcher, createTimer_(_)); EXPECT_NE(factory->createGuardDogActionFromProto(config, context), nullptr); diff --git a/test/extensions/watchdog/profile_action/profile_action_test.cc b/test/extensions/watchdog/profile_action/profile_action_test.cc index 01503c07efbb..40f9085eff04 100644 --- a/test/extensions/watchdog/profile_action/profile_action_test.cc +++ b/test/extensions/watchdog/profile_action/profile_action_test.cc @@ -15,6 +15,7 @@ #include "extensions/watchdog/profile_action/config.h" #include "extensions/watchdog/profile_action/profile_action.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" @@ -35,8 +36,9 @@ class ProfileActionTest : public testing::Test { protected: ProfileActionTest() : time_system_(std::make_unique()), - api_(Api::createApiForTest(*time_system_)), dispatcher_(api_->allocateDispatcher("test")), - context_({*api_, *dispatcher_}), test_path_(generateTestPath()) {} + api_(Api::createApiForTest(stats_, *time_system_)), + dispatcher_(api_->allocateDispatcher("test")), + context_({*api_, *dispatcher_, stats_, "test"}), test_path_(generateTestPath()) {} // Generates a unique path for a testcase. static std::string generateTestPath() { @@ -76,6 +78,7 @@ class ProfileActionTest : public testing::Test { outstanding_notifies_ -= 1; } + Stats::TestUtil::TestStore stats_; std::unique_ptr time_system_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; @@ -352,6 +355,72 @@ TEST_F(ProfileActionTest, ShouldSaturatedMaxProfiles) { #endif } +// The attempted counter should be updated on profile attempts that don't +// interfere with an existing profile the action is running. +// The successfully captured profile should be updated only if we captured the profile. +TEST_F(ProfileActionTest, ShouldUpdateCountersCorrectly) { + envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + config.set_profile_path(test_path_); + config.mutable_profile_duration()->set_seconds(1); + + // Create the ProfileAction before we start running the dispatcher + // otherwise the timer created will in ProfileActions ctor will + // not be thread safe. + action_ = std::make_unique(config, context_); + Thread::ThreadPtr thread = api_->threadFactory().createThread( + [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }); + std::vector> tid_ltt_pairs; + + // This will fail since no TIDs are provided. + dispatcher_->post([this, &tid_ltt_pairs]() -> void { + action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, + api_->timeSource().monotonicTime()); + absl::MutexLock lock(&mutex_); + outstanding_notifies_ += 1; + }); + + { + absl::MutexLock lock(&mutex_); + waitForOutstandingNotify(); + time_system_->advanceTimeWait(std::chrono::seconds(2)); + } + + // Check the counters are correct on a fail + EXPECT_EQ(TestUtility::findCounter(stats_, "test.profile_action.attempted")->value(), 1); + EXPECT_EQ(TestUtility::findCounter(stats_, "test.profile_action.successfully_captured")->value(), + 0); + + // Run a profile that will succeed. 
+ const auto now = api_->timeSource().monotonicTime(); + tid_ltt_pairs.emplace_back(Thread::ThreadId(10), now); + + dispatcher_->post([this, &tid_ltt_pairs, &now]() -> void { + action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::MISS, tid_ltt_pairs, now); + absl::MutexLock lock(&mutex_); + outstanding_notifies_ += 1; + }); + + { + absl::MutexLock lock(&mutex_); + waitForOutstandingNotify(); + time_system_->advanceTimeWait(std::chrono::seconds(2)); + } + +#ifdef PROFILER_AVAILABLE + // Check the counters are correct on success + EXPECT_EQ(TestUtility::findCounter(stats_, "test.profile_action.attempted")->value(), 2); + EXPECT_EQ(TestUtility::findCounter(stats_, "test.profile_action.successfully_captured")->value(), + 1); +#else + EXPECT_EQ(TestUtility::findCounter(stats_, "test.profile_action.attempted")->value(), 2); + EXPECT_EQ(TestUtility::findCounter(stats_, "test.profile_action.successfully_captured")->value(), + 0); +#endif + + dispatcher_->exit(); + thread->join(); +} + } // namespace } // namespace ProfileAction } // namespace Watchdog diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index f79aece1041b..332e6ae01fce 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -175,5 +175,22 @@ inline std::unique_ptr fromStreamInfo(const test::fuzz::StreamIn return test_stream_info; } +// Parses http or proto body into chunks. +inline std::vector parseHttpData(const test::fuzz::HttpData& data) { + std::vector data_chunks; + + if (data.has_http_body()) { + data_chunks.reserve(data.http_body().data_size()); + for (const std::string& http_data : data.http_body().data()) { + data_chunks.push_back(http_data); + } + } else if (data.has_proto_body()) { + const std::string serialized = data.proto_body().message().value(); + data_chunks = absl::StrSplit(serialized, absl::ByLength(data.proto_body().chunk_size())); + } + + return data_chunks; +} + } // namespace Fuzz } // namespace Envoy diff --git a/test/integration/BUILD b/test/integration/BUILD index 2433d82c18e6..010d85cc480b 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -312,6 +312,30 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "http2_flood_integration_test", + srcs = [ + "http2_flood_integration_test.cc", + ], + shard_count = 4, + tags = ["flaky_on_windows"], + deps = [ + ":autonomous_upstream_lib", + ":http_integration_lib", + "//test/common/http/http2:http2_frame", + "//test/integration/filters:backpressure_filter_config_lib", + "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", + "//test/integration/filters:test_socket_interface_lib", + "//test/mocks/http:http_mocks", + "//test/test_common:utility_lib", + "@com_google_absl//absl/synchronization", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "http2_integration_test", srcs = [ @@ -326,12 +350,12 @@ envoy_cc_test( "//source/common/http:header_map_lib", "//source/extensions/filters/http/buffer:config", "//source/extensions/filters/http/health_check:config", - "//test/common/http/http2:http2_frame", "//test/integration/filters:metadata_stop_all_filter_config_lib", "//test/integration/filters:request_metadata_filter_config_lib", "//test/integration/filters:response_metadata_filter_config_lib", + "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", + 
"//test/integration/filters:set_response_code_filter_lib", "//test/integration/filters:stop_iteration_and_continue", - "//test/integration/filters:test_socket_interface_lib", "//test/mocks/http:http_mocks", "//test/mocks/upstream:retry_priority_factory_mocks", "//test/mocks/upstream:retry_priority_mocks", @@ -418,6 +442,7 @@ envoy_cc_test( "//source/extensions/filters/http/health_check:config", "//test/integration/filters:continue_headers_only_inject_body", "//test/integration/filters:encoder_decoder_buffer_filter_lib", + "//test/integration/filters:local_reply_during_encoding_filter_lib", "//test/integration/filters:random_pause_filter_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -432,7 +457,7 @@ envoy_cc_test( "http2_upstream_integration_test.cc", "http2_upstream_integration_test.h", ], - tags = ["fails_on_windows"], + tags = ["flaky_on_windows"], deps = [ ":http_integration_lib", "//source/common/http:header_map_lib", @@ -453,7 +478,6 @@ envoy_cc_test( "integration_admin_test.cc", "integration_admin_test.h", ], - tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//include/envoy/http:header_map_interface", @@ -497,17 +521,18 @@ envoy_cc_test_library( ":test_host_predicate_lib", "//include/envoy/event:timer_interface", "//source/common/common:thread_annotations", + "//source/common/network:socket_option_lib", "//source/extensions/filters/http/on_demand:config", "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/transport_sockets/tls:context_lib", + "//test/common/http/http2:http2_frame", "//test/common/upstream:utility_lib", "//test/integration/filters:add_body_filter_config_lib", "//test/integration/filters:add_trailers_filter_config_lib", "//test/integration/filters:call_decodedata_once_filter_config_lib", "//test/integration/filters:decode_headers_return_stop_all_filter_config_lib", "//test/integration/filters:encode_headers_return_stop_all_filter_config_lib", - "//test/integration/filters:headers_only_filter_config_lib", "//test/integration/filters:modify_buffer_filter_config_lib", "//test/integration/filters:passthrough_filter_config_lib", "//test/integration/filters:pause_filter_lib", @@ -806,7 +831,6 @@ envoy_cc_test_library( "//source/common/network:utility_lib", "//source/common/runtime:runtime_lib", "//source/common/stats:isolated_store_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:thread_local_store_lib", "//source/common/thread_local:thread_local_lib", "//source/common/upstream:upstream_includes", @@ -929,7 +953,6 @@ envoy_cc_test( envoy_cc_test( name = "socket_interface_integration_test", srcs = ["socket_interface_integration_test.cc"], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/network:socket_interface_lib", @@ -946,7 +969,6 @@ envoy_cc_test( deps = [ ":integration_lib", "//source/common/memory:stats_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//test/common/stats:stat_test_utility_lib", @@ -1021,7 +1043,7 @@ envoy_cc_test( tags = ["flaky_on_windows"], deps = [ ":http_protocol_integration_lib", - "//source/extensions/resource_monitors/injected_resource:config", + "//test/common/config:dummy_config_proto_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", 
"@envoy_api//envoy/config/overload/v3:pkg_cc_proto", ], @@ -1033,7 +1055,6 @@ envoy_cc_test( "proxy_proto_integration_test.cc", "proxy_proto_integration_test.h", ], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/buffer:buffer_lib", @@ -1243,7 +1264,6 @@ envoy_cc_test( "uds_integration_test.cc", "uds_integration_test.h", ], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/event:dispatcher_includes", @@ -1548,3 +1568,14 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_test( + name = "health_check_integration_test", + srcs = ["health_check_integration_test.cc"], + deps = [ + ":http_integration_lib", + ":integration_lib", + "//test/common/http/http2:http2_frame", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index bf413b9d91d5..01aae9dc9f73 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -115,6 +115,38 @@ TEST_P(AdsIntegrationTest, Failure) { makeSingleRequest(); } +// Validate that xds can support a mix of v2 and v3 type url. +TEST_P(AdsIntegrationTest, MixV2V3TypeUrlInDiscoveryResponse) { + config_helper_.addRuntimeOverride( + "envoy.reloadable_features.enable_type_url_downgrade_and_upgrade", "true"); + initialize(); + + // Send initial configuration. + // Discovery response with v3 type url. + sendDiscoveryResponse( + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3), + {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", false); + // Discovery response with v2 type url. + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); + // Discovery response with v3 type url. + sendDiscoveryResponse( + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3), + {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {}, "1", false); + // Discovery response with v2 type url. + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_0", "cluster_0")}, + {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1"); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + + // Validate that we can process a request. + makeSingleRequest(); +} + // Validate that the request with duplicate listeners is rejected. 
TEST_P(AdsIntegrationTest, DuplicateWarmingListeners) { initialize(); @@ -1100,6 +1132,26 @@ class AdsClusterV3Test : public AdsIntegrationTest { INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterV3Test, DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +TEST_P(AdsClusterV3Test, BasicClusterInitialWarming) { + initialize(); + const auto cds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto eds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); + sendDiscoveryResponse( + cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", false); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); + sendDiscoveryResponse( + eds_type_url, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1", false); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); +} + // Verify CDS is paused during cluster warming. TEST_P(AdsClusterV3Test, CdsPausedDuringWarming) { initialize(); diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index 2fa38ccd54f9..e2185bbbf2a6 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -21,6 +21,8 @@ const char AutonomousStream::RESPONSE_SIZE_BYTES[] = "response_size_bytes"; const char AutonomousStream::RESPONSE_DATA_BLOCKS[] = "response_data_blocks"; const char AutonomousStream::EXPECT_REQUEST_SIZE_BYTES[] = "expect_request_size_bytes"; const char AutonomousStream::RESET_AFTER_REQUEST[] = "reset_after_request"; +const char AutonomousStream::NO_TRAILERS[] = "no_trailers"; +const char AutonomousStream::NO_END_STREAM[] = "no_end_stream"; AutonomousStream::AutonomousStream(FakeHttpConnection& parent, Http::ResponseEncoder& encoder, AutonomousUpstream& upstream, bool allow_incomplete_streams) @@ -63,11 +65,25 @@ void AutonomousStream::sendResponse() { int32_t response_data_blocks = 1; HeaderToInt(RESPONSE_DATA_BLOCKS, response_data_blocks, headers); - encodeHeaders(upstream_.responseHeaders(), false); - for (int32_t i = 0; i < response_data_blocks; ++i) { - encodeData(response_body_length, false); + const bool end_stream = headers.get_(NO_END_STREAM).empty(); + const bool send_trailers = end_stream && headers.get_(NO_TRAILERS).empty(); + const bool headers_only_response = !send_trailers && response_data_blocks == 0 && end_stream; + + pre_response_headers_metadata_ = upstream_.preResponseHeadersMetadata(); + if (pre_response_headers_metadata_) { + encodeMetadata(*pre_response_headers_metadata_); + } + + encodeHeaders(upstream_.responseHeaders(), headers_only_response); + if (!headers_only_response) { + for (int32_t i = 0; i < response_data_blocks; ++i) { + encodeData(response_body_length, + i == (response_data_blocks - 1) && !send_trailers && end_stream); + } + if (send_trailers) { + encodeTrailers(upstream_.responseTrailers()); + } } - encodeTrailers(upstream_.responseTrailers()); } AutonomousHttpConnection::AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream, @@ -130,6 +146,12 @@ void AutonomousUpstream::setResponseHeaders( response_headers_ = std::move(response_headers); } +void AutonomousUpstream::setPreResponseHeadersMetadata( + std::unique_ptr&& metadata) { + Thread::LockGuard 
lock(headers_lock_); + pre_response_headers_metadata_ = std::move(metadata); +} + Http::TestResponseTrailerMapImpl AutonomousUpstream::responseTrailers() { Thread::LockGuard lock(headers_lock_); Http::TestResponseTrailerMapImpl return_trailers = *response_trailers_; @@ -142,4 +164,19 @@ Http::TestResponseHeaderMapImpl AutonomousUpstream::responseHeaders() { return return_headers; } +std::unique_ptr AutonomousUpstream::preResponseHeadersMetadata() { + Thread::LockGuard lock(headers_lock_); + return std::move(pre_response_headers_metadata_); +} + +AssertionResult AutonomousUpstream::closeConnection(uint32_t index, + std::chrono::milliseconds timeout) { + return shared_connections_[index]->executeOnDispatcher( + [](Network::Connection& connection) { + ASSERT(connection.state() == Network::Connection::State::Open); + connection.close(Network::ConnectionCloseType::FlushWrite); + }, + timeout); +} + } // namespace Envoy diff --git a/test/integration/autonomous_upstream.h b/test/integration/autonomous_upstream.h index 69814394be35..6f82fac9a5f6 100644 --- a/test/integration/autonomous_upstream.h +++ b/test/integration/autonomous_upstream.h @@ -21,6 +21,10 @@ class AutonomousStream : public FakeStream { // If set, the stream will reset when the request is complete, rather than // sending a response. static const char RESET_AFTER_REQUEST[]; + // Prevents upstream from sending trailers. + static const char NO_TRAILERS[]; + // Prevents upstream from finishing response. + static const char NO_END_STREAM[]; AutonomousStream(FakeHttpConnection& parent, Http::ResponseEncoder& encoder, AutonomousUpstream& upstream, bool allow_incomplete_streams); @@ -32,6 +36,7 @@ class AutonomousStream : public FakeStream { AutonomousUpstream& upstream_; void sendResponse() EXCLUSIVE_LOCKS_REQUIRED(lock_); const bool allow_incomplete_streams_{false}; + std::unique_ptr pre_response_headers_metadata_; }; // An upstream which creates AutonomousStreams for new incoming streams. 
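The NO_TRAILERS and NO_END_STREAM constants declared in the header hunk above are read straight off the request headers (sendResponse() only checks whether headers.get_(...) is non-empty), so a test opts in by sending any non-empty value under those keys. A hypothetical request from inside an HTTP integration test fixture (where codec_client_ and Http::TestRequestHeaderMapImpl are already available) that asks the autonomous upstream for three data blocks, no trailers, and a stream left open might look like this:

// The header values themselves are not interpreted; "true" is just a marker.
Http::TestRequestHeaderMapImpl request_headers{
    {":method", "GET"},
    {":path", "/test/long/url"},
    {":scheme", "http"},
    {":authority", "host"},
    {AutonomousStream::RESPONSE_DATA_BLOCKS, "3"},
    {AutonomousStream::NO_TRAILERS, "true"},
    {AutonomousStream::NO_END_STREAM, "true"}};
auto response = codec_client_->makeHeaderOnlyRequest(request_headers);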
@@ -78,13 +83,17 @@ class AutonomousUpstream : public FakeUpstream { bool createListenerFilterChain(Network::ListenerFilterManager& listener) override; void createUdpListenerFilterChain(Network::UdpListenerFilterManager& listener, Network::UdpReadFilterCallbacks& callbacks) override; + AssertionResult closeConnection(uint32_t index, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); void setLastRequestHeaders(const Http::HeaderMap& headers); std::unique_ptr lastRequestHeaders(); void setResponseTrailers(std::unique_ptr&& response_trailers); void setResponseHeaders(std::unique_ptr&& response_headers); + void setPreResponseHeadersMetadata(std::unique_ptr&& metadata); Http::TestResponseTrailerMapImpl responseTrailers(); Http::TestResponseHeaderMapImpl responseHeaders(); + std::unique_ptr preResponseHeadersMetadata(); const bool allow_incomplete_streams_{false}; private: @@ -92,6 +101,7 @@ class AutonomousUpstream : public FakeUpstream { std::unique_ptr last_request_headers_; std::unique_ptr response_trailers_; std::unique_ptr response_headers_; + std::unique_ptr pre_response_headers_metadata_; std::vector http_connections_; std::vector shared_connections_; }; diff --git a/test/integration/base_integration_test.cc b/test/integration/base_integration_test.cc index 9ff8fc5cbc5c..4a2623f43ae4 100644 --- a/test/integration/base_integration_test.cc +++ b/test/integration/base_integration_test.cc @@ -77,7 +77,7 @@ BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, : BaseIntegrationTest( [version](int) { return Network::Utility::parseInternetAddress( - Network::Test::getAnyAddressString(version), 0); + Network::Test::getLoopbackAddressString(version), 0); }, version, config) {} @@ -365,8 +365,7 @@ void BaseIntegrationTest::createXdsUpstream() { return; } if (tls_xds_upstream_ == false) { - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } else { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = tls_context.mutable_common_tls_context(); @@ -382,8 +381,7 @@ void BaseIntegrationTest::createXdsUpstream() { upstream_stats_store_ = std::make_unique(); auto context = std::make_unique( std::move(cfg), context_manager_, *upstream_stats_store_, std::vector{}); - fake_upstreams_.emplace_back(new FakeUpstream( - std::move(context), 0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(std::move(context), FakeHttpConnection::Type::HTTP2); } xds_upstream_ = fake_upstreams_[1].get(); } diff --git a/test/integration/base_integration_test.h b/test/integration/base_integration_test.h index 299a976c293e..2a17a1cf07b5 100644 --- a/test/integration/base_integration_test.h +++ b/test/integration/base_integration_test.h @@ -289,6 +289,52 @@ class BaseIntegrationTest : protected Logger::Loggable { *dispatcher_); } + // Helper to create FakeUpstream. + // Creates a fake upstream bound to the specified unix domain socket path. + std::unique_ptr createFakeUpstream(const std::string& uds_path, + FakeHttpConnection::Type type) { + return std::make_unique(uds_path, type, timeSystem()); + } + // Creates a fake upstream bound to the specified |address|. 
+ std::unique_ptr + createFakeUpstream(const Network::Address::InstanceConstSharedPtr& address, + FakeHttpConnection::Type type, bool enable_half_close = false, + bool udp_fake_upstream = false) { + return std::make_unique(address, type, timeSystem(), enable_half_close, + udp_fake_upstream); + } + // Creates a fake upstream bound to INADDR_ANY and there is no specified port. + std::unique_ptr createFakeUpstream(FakeHttpConnection::Type type, + bool enable_half_close = false) { + return std::make_unique(0, type, version_, timeSystem(), enable_half_close); + } + std::unique_ptr + createFakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, + FakeHttpConnection::Type type) { + return std::make_unique(std::move(transport_socket_factory), 0, type, version_, + timeSystem()); + } + // Helper to add FakeUpstream. + // Add a fake upstream bound to the specified unix domain socket path. + void addFakeUpstream(const std::string& uds_path, FakeHttpConnection::Type type) { + fake_upstreams_.emplace_back(createFakeUpstream(uds_path, type)); + } + // Add a fake upstream bound to the specified |address|. + void addFakeUpstream(const Network::Address::InstanceConstSharedPtr& address, + FakeHttpConnection::Type type, bool enable_half_close = false, + bool udp_fake_upstream = false) { + fake_upstreams_.emplace_back( + createFakeUpstream(address, type, enable_half_close, udp_fake_upstream)); + } + // Add a fake upstream bound to INADDR_ANY and there is no specified port. + void addFakeUpstream(FakeHttpConnection::Type type, bool enable_half_close = false) { + fake_upstreams_.emplace_back(createFakeUpstream(type, enable_half_close)); + } + void addFakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, + FakeHttpConnection::Type type) { + fake_upstreams_.emplace_back(createFakeUpstream(std::move(transport_socket_factory), type)); + } + protected: bool initialized() const { return initialized_; } diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index dee3493562e8..2fd76b992274 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -49,7 +49,7 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht void initialize() override { use_lds_ = false; test_skipped_ = false; - // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in + // Controls how many addFakeUpstream() will happen in // BaseIntegrationTest::createUpstreams() (which is part of initialize()). // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap // config that you use! @@ -71,10 +71,8 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht // Create the regular (i.e. not an xDS server) upstreams. We create them manually here after // initialize() because finalize() expects all fake_upstreams_ to correspond to a static // cluster in the bootstrap config - which we don't want since we're testing dynamic CDS! 
- fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, - timeSystem(), enable_half_close_)); - fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, - timeSystem(), enable_half_close_)); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); cluster1_ = ConfigHelper::buildStaticCluster( ClusterName1, fake_upstreams_[UpstreamIndex1]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(ipVersion())); @@ -109,7 +107,7 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht std::string expected_method(sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "/envoy.api.v2.ClusterDiscoveryService/StreamClusters" : "/envoy.api.v2.ClusterDiscoveryService/DeltaClusters"); - EXPECT_EQ(xds_stream_->headers().get(path_string)->value(), expected_method); + EXPECT_EQ(xds_stream_->headers().get(path_string)[0]->value(), expected_method); } void acceptXdsConnection() { diff --git a/test/integration/clusters/custom_static_cluster.h b/test/integration/clusters/custom_static_cluster.h index 23b1d573b262..9691f99234ea 100644 --- a/test/integration/clusters/custom_static_cluster.h +++ b/test/integration/clusters/custom_static_cluster.h @@ -39,6 +39,9 @@ class CustomStaticCluster : public Upstream::ClusterImplBase { Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext*) override { return host_; } + Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { + return nullptr; + } const Upstream::HostSharedPtr host_; }; diff --git a/test/integration/extension_discovery_integration_test.cc b/test/integration/extension_discovery_integration_test.cc index 13ea5c48c0d4..a062ed779999 100644 --- a/test/integration/extension_discovery_integration_test.cc +++ b/test/integration/extension_discovery_integration_test.cc @@ -96,8 +96,7 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara void createUpstreams() override { HttpIntegrationTest::createUpstreams(); // Create the extension config discovery upstream (fake_upstreams_[1]). 
- fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, - timeSystem(), enable_half_close_)); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } void waitXdsStream() { diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 2dddab7160f1..5ed0c5cad62a 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -295,12 +295,12 @@ FakeHttpConnection::FakeHttpConnection( Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); #ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( - shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + shared_connection_.connection(), *this, stats, random_, http2_options, + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #else codec_ = std::make_unique( - shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + shared_connection_.connection(), *this, stats, random_, http2_options, + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #endif ASSERT(type == Type::HTTP2); } @@ -446,7 +446,7 @@ FakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket socket_factory_(std::make_shared(socket_)), api_(Api::createApiForTest(stats_store_)), time_system_(time_system), dispatcher_(api_->allocateDispatcher("fake_upstream")), - handler_(new Server::ConnectionHandlerImpl(*dispatcher_)), + handler_(new Server::ConnectionHandlerImpl(*dispatcher_, 0)), read_disable_on_new_connection_(true), enable_half_close_(enable_half_close), listener_(*this), filter_chain_(Network::Test::createEmptyFilterChain(std::move(transport_socket_factory))) { diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 4d405ec129a0..0607d6926fd7 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -29,10 +29,12 @@ #include "common/network/filter_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/udp_default_writer_config.h" +#include "common/network/udp_listener_impl.h" #include "common/stats/isolated_store_impl.h" #include "server/active_raw_udp_listener_config.h" +#include "test/mocks/common.h" #include "test/test_common/test_time_system.h" #include "test/test_common/utility.h" @@ -456,6 +458,7 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo const Type type_; Http::ServerConnectionPtr codec_; std::list new_streams_ ABSL_GUARDED_BY(lock_); + testing::NiceMock random_; }; using FakeHttpConnectionPtr = std::unique_ptr; @@ -655,9 +658,9 @@ class FakeUpstream : Logger::Loggable, public: FakeListener(FakeUpstream& parent) : parent_(parent), name_("fake_upstream"), - udp_listener_factory_(std::make_unique()), + udp_listener_factory_(std::make_unique(1)), udp_writer_factory_(std::make_unique()), - init_manager_(nullptr) {} + udp_listener_worker_router_(1), init_manager_(nullptr) {} private: // Network::ListenerConfig @@ -680,6 +683,9 @@ class FakeUpstream : Logger::Loggable, Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_)); } + Network::UdpListenerWorkerRouterOptRef udpListenerWorkerRouter() override { + return udp_listener_worker_router_; + } 
Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; @@ -701,6 +707,7 @@ class FakeUpstream : Logger::Loggable, Network::NopConnectionBalancerImpl connection_balancer_; const Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; const Network::UdpPacketWriterFactoryPtr udp_writer_factory_; + Network::UdpListenerWorkerRouterImpl udp_listener_worker_router_; BasicResourceLimitImpl connection_resource_; const std::vector empty_access_logs_; std::unique_ptr init_manager_; diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 1746a3952626..372a4c0c359e 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -24,6 +24,21 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "local_reply_during_encoding_filter_lib", + srcs = [ + "local_reply_during_encoding_filter.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + envoy_cc_test_library( name = "continue_headers_only_inject_body", srcs = [ @@ -142,21 +157,6 @@ envoy_cc_test_library( ], ) -envoy_cc_test_library( - name = "headers_only_filter_config_lib", - srcs = [ - "headers_only_filter.cc", - ], - deps = [ - ":common_lib", - "//include/envoy/http:filter_interface", - "//include/envoy/registry", - "//include/envoy/server:filter_config_interface", - "//source/extensions/filters/http/common:pass_through_filter_lib", - "//test/extensions/filters/http/common:empty_http_filter_config_lib", - ], -) - envoy_cc_test_library( name = "pause_filter_lib", srcs = [ diff --git a/test/integration/filters/backpressure_filter.cc b/test/integration/filters/backpressure_filter.cc index 1d6f8ce92be5..e5eb9ec6ea9a 100644 --- a/test/integration/filters/backpressure_filter.cc +++ b/test/integration/filters/backpressure_filter.cc @@ -14,12 +14,27 @@ namespace Envoy { // the content of the filter buffer. 
class BackpressureFilter : public Http::PassThroughFilter { public: - void onDestroy() override { decoder_callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); } + void onDestroy() override { + if (!below_write_buffer_low_watermark_called_) { + decoder_callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); + } + } Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { decoder_callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); return Http::FilterHeadersStatus::Continue; } + + Http::FilterDataStatus encodeData(Buffer::Instance&, bool end_stream) override { + if (end_stream) { + below_write_buffer_low_watermark_called_ = true; + decoder_callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); + } + return Http::FilterDataStatus::Continue; + } + +private: + bool below_write_buffer_low_watermark_called_{false}; }; class BackpressureConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig { diff --git a/test/integration/filters/call_decodedata_once_filter.cc b/test/integration/filters/call_decodedata_once_filter.cc index 5e742dc8254d..520dbad96b82 100644 --- a/test/integration/filters/call_decodedata_once_filter.cc +++ b/test/integration/filters/call_decodedata_once_filter.cc @@ -16,13 +16,11 @@ class CallDecodeDataOnceFilter : public Http::PassThroughFilter { constexpr static char name[] = "call-decodedata-once-filter"; Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& header_map, bool) override { - const Http::HeaderEntry* entry_content = - header_map.get(Envoy::Http::LowerCaseString("content_size")); - const Http::HeaderEntry* entry_added = - header_map.get(Envoy::Http::LowerCaseString("added_size")); - ASSERT(entry_content != nullptr && entry_added != nullptr); - content_size_ = std::stoul(std::string(entry_content->value().getStringView())); - added_size_ = std::stoul(std::string(entry_added->value().getStringView())); + const auto entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size")); + const auto entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size")); + ASSERT(!entry_content.empty() && !entry_added.empty()); + content_size_ = std::stoul(std::string(entry_content[0]->value().getStringView())); + added_size_ = std::stoul(std::string(entry_added[0]->value().getStringView())); return Http::FilterHeadersStatus::Continue; } diff --git a/test/integration/filters/decode_headers_return_stop_all_filter.cc b/test/integration/filters/decode_headers_return_stop_all_filter.cc index 133604822c8f..91edadb75153 100644 --- a/test/integration/filters/decode_headers_return_stop_all_filter.cc +++ b/test/integration/filters/decode_headers_return_stop_all_filter.cc @@ -27,29 +27,26 @@ class DecodeHeadersReturnStopAllFilter : public Http::PassThroughFilter { // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to // continue iteration after 5s. 
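// [Editorial sketch, not part of this diff] The surrounding hunks repeat one API change:
// HeaderMap::get() now returns a collection-like result instead of a single HeaderEntry*,
// so callers check empty() and index the result. A free-standing example of the idiom,
// assuming "envoy/http/header_map.h" and "absl/strings/numbers.h" are included;
// parseUint64Header is an illustrative name, not an existing utility.
uint64_t parseUint64Header(const Http::RequestHeaderMap& headers, absl::string_view name,
                           uint64_t default_value) {
  const auto entries = headers.get(Http::LowerCaseString(std::string(name)));
  if (entries.empty()) {
    return default_value;
  }
  // Only the first value is consulted; additional entries of a repeated header are ignored.
  uint64_t value = default_value;
  if (!absl::SimpleAtoi(entries[0]->value().getStringView(), &value)) {
    return default_value;
  }
  return value;
}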
Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& header_map, bool) override { - const Http::HeaderEntry* entry_content = - header_map.get(Envoy::Http::LowerCaseString("content_size")); - const Http::HeaderEntry* entry_added = - header_map.get(Envoy::Http::LowerCaseString("added_size")); - ASSERT(entry_content != nullptr && entry_added != nullptr); - content_size_ = std::stoul(std::string(entry_content->value().getStringView())); - added_size_ = std::stoul(std::string(entry_added->value().getStringView())); - const Http::HeaderEntry* entry_is_first_trigger = + const auto entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size")); + const auto entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size")); + ASSERT(!entry_content.empty() && !entry_added.empty()); + content_size_ = std::stoul(std::string(entry_content[0]->value().getStringView())); + added_size_ = std::stoul(std::string(entry_added[0]->value().getStringView())); + const auto entry_is_first_trigger = header_map.get(Envoy::Http::LowerCaseString("is_first_trigger")); - is_first_trigger_ = entry_is_first_trigger != nullptr; + is_first_trigger_ = !entry_is_first_trigger.empty(); // Remove "first_trigger" headers so that if the filter is registered twice in a filter chain, // it would act differently. header_map.remove(Http::LowerCaseString("is_first_trigger")); createTimerForContinue(); - const Http::HeaderEntry* entry_buffer = - header_map.get(Envoy::Http::LowerCaseString("buffer_limit")); - if (entry_buffer == nullptr || !is_first_trigger_) { + const auto entry_buffer = header_map.get(Envoy::Http::LowerCaseString("buffer_limit")); + if (entry_buffer.empty() || !is_first_trigger_) { return Http::FilterHeadersStatus::StopAllIterationAndBuffer; } else { watermark_enabled_ = true; - buffer_limit_ = std::stoul(std::string(entry_buffer->value().getStringView())); + buffer_limit_ = std::stoul(std::string(entry_buffer[0]->value().getStringView())); decoder_callbacks_->setDecoderBufferLimit(buffer_limit_); header_map.remove(Http::LowerCaseString("buffer_limit")); return Http::FilterHeadersStatus::StopAllIterationAndWatermark; diff --git a/test/integration/filters/encode_headers_return_stop_all_filter.cc b/test/integration/filters/encode_headers_return_stop_all_filter.cc index 88a409501cbe..2254204ec100 100644 --- a/test/integration/filters/encode_headers_return_stop_all_filter.cc +++ b/test/integration/filters/encode_headers_return_stop_all_filter.cc @@ -27,13 +27,11 @@ class EncodeHeadersReturnStopAllFilter : public Http::PassThroughFilter { // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to // continue iteration after 5s. 
Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& header_map, bool) override { - const Http::HeaderEntry* entry_content = - header_map.get(Envoy::Http::LowerCaseString("content_size")); - const Http::HeaderEntry* entry_added = - header_map.get(Envoy::Http::LowerCaseString("added_size")); - ASSERT(entry_content != nullptr && entry_added != nullptr); - content_size_ = std::stoul(std::string(entry_content->value().getStringView())); - added_size_ = std::stoul(std::string(entry_added->value().getStringView())); + const auto entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size")); + const auto entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size")); + ASSERT(!entry_content.empty() && !entry_added.empty()); + content_size_ = std::stoul(std::string(entry_content[0]->value().getStringView())); + added_size_ = std::stoul(std::string(entry_added[0]->value().getStringView())); createTimerForContinue(); @@ -41,14 +39,13 @@ class EncodeHeadersReturnStopAllFilter : public Http::PassThroughFilter { Http::MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); encoder_callbacks_->addEncodedMetadata(std::move(metadata_map_ptr)); - const Http::HeaderEntry* entry_buffer = - header_map.get(Envoy::Http::LowerCaseString("buffer_limit")); - if (entry_buffer == nullptr) { + const auto entry_buffer = header_map.get(Envoy::Http::LowerCaseString("buffer_limit")); + if (entry_buffer.empty()) { return Http::FilterHeadersStatus::StopAllIterationAndBuffer; } else { watermark_enabled_ = true; encoder_callbacks_->setEncoderBufferLimit( - std::stoul(std::string(entry_buffer->value().getStringView()))); + std::stoul(std::string(entry_buffer[0]->value().getStringView()))); return Http::FilterHeadersStatus::StopAllIterationAndWatermark; } } diff --git a/test/integration/filters/headers_only_filter.cc b/test/integration/filters/headers_only_filter.cc deleted file mode 100644 index c414285882a7..000000000000 --- a/test/integration/filters/headers_only_filter.cc +++ /dev/null @@ -1,42 +0,0 @@ -#include - -#include "envoy/http/filter.h" -#include "envoy/registry/registry.h" -#include "envoy/server/filter_config.h" - -#include "extensions/filters/http/common/pass_through_filter.h" - -#include "test/extensions/filters/http/common/empty_http_filter_config.h" -#include "test/integration/filters/common.h" - -namespace Envoy { - -class HeaderOnlyDecoderFilter : public Http::PassThroughFilter { -public: - constexpr static char name[] = "decode-headers-only"; - - Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { - return Http::FilterHeadersStatus::ContinueAndEndStream; - } -}; - -constexpr char HeaderOnlyDecoderFilter::name[]; -static Registry::RegisterFactory, - Server::Configuration::NamedHttpFilterConfigFactory> - decoder_register_; - -class HeaderOnlyEncoderFilter : public Http::PassThroughFilter { -public: - constexpr static char name[] = "encode-headers-only"; - - Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override { - return Http::FilterHeadersStatus::ContinueAndEndStream; - } -}; - -constexpr char HeaderOnlyEncoderFilter::name[]; - -static Registry::RegisterFactory, - Server::Configuration::NamedHttpFilterConfigFactory> - encoder_register_; -} // namespace Envoy diff --git a/test/integration/filters/local_reply_during_encoding_filter.cc b/test/integration/filters/local_reply_during_encoding_filter.cc new file mode 100644 index 000000000000..54a6fadce8e9 --- /dev/null +++ 
b/test/integration/filters/local_reply_during_encoding_filter.cc @@ -0,0 +1,30 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/common.h" + +namespace Envoy { + +class LocalReplyDuringEncode : public Http::PassThroughFilter { +public: + constexpr static char name[] = "local-reply-during-encode"; + + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override { + encoder_callbacks_->sendLocalReply(Http::Code::InternalServerError, "", nullptr, absl::nullopt, + ""); + return Http::FilterHeadersStatus::StopIteration; + } +}; + +constexpr char LocalReplyDuringEncode::name[]; +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + register_; + +} // namespace Envoy diff --git a/test/integration/filters/metadata_stop_all_filter.cc b/test/integration/filters/metadata_stop_all_filter.cc index f119c38cc9ab..c7da73957417 100644 --- a/test/integration/filters/metadata_stop_all_filter.cc +++ b/test/integration/filters/metadata_stop_all_filter.cc @@ -22,10 +22,9 @@ class MetadataStopAllFilter : public Http::PassThroughFilter { constexpr static char name[] = "metadata-stop-all-filter"; Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& header_map, bool) override { - const Http::HeaderEntry* entry_content = - header_map.get(Envoy::Http::LowerCaseString("content_size")); - ASSERT(entry_content != nullptr); - content_size_ = std::stoul(std::string(entry_content->value().getStringView())); + const auto entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size")); + ASSERT(!entry_content.empty()); + content_size_ = std::stoul(std::string(entry_content[0]->value().getStringView())); createTimerForContinue(); diff --git a/test/integration/filters/set_response_code_filter.cc b/test/integration/filters/set_response_code_filter.cc index 28653c0ba080..701ef857bbf3 100644 --- a/test/integration/filters/set_response_code_filter.cc +++ b/test/integration/filters/set_response_code_filter.cc @@ -16,12 +16,14 @@ namespace Envoy { // A test filter that responds directly with a code on a prefix match. class SetResponseCodeFilterConfig { public: - SetResponseCodeFilterConfig(const std::string& prefix, uint32_t code, + SetResponseCodeFilterConfig(const std::string& prefix, uint32_t code, const std::string& body, Server::Configuration::FactoryContext& context) - : prefix_(prefix), code_(code), tls_slot_(context.threadLocal().allocateSlot()) {} + : prefix_(prefix), code_(code), body_(body), tls_slot_(context.threadLocal().allocateSlot()) { + } const std::string prefix_; const uint32_t code_; + const std::string body_; // Allocate a slot to validate that it is destroyed on a main thread only. 
ThreadLocal::SlotPtr tls_slot_; }; @@ -32,8 +34,8 @@ class SetResponseCodeFilter : public Http::PassThroughFilter { Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override { if (absl::StartsWith(headers.Path()->value().getStringView(), config_->prefix_)) { - decoder_callbacks_->sendLocalReply(static_cast(config_->code_), "", nullptr, - absl::nullopt, ""); + decoder_callbacks_->sendLocalReply(static_cast(config_->code_), config_->body_, + nullptr, absl::nullopt, ""); return Http::FilterHeadersStatus::StopIteration; } return Http::FilterHeadersStatus::Continue; @@ -53,7 +55,7 @@ class SetResponseCodeFilterFactory : public Extensions::HttpFilters::Common::Fac const test::integration::filters::SetResponseCodeFilterConfig& proto_config, const std::string&, Server::Configuration::FactoryContext& context) override { auto filter_config = std::make_shared( - proto_config.prefix(), proto_config.code(), context); + proto_config.prefix(), proto_config.code(), proto_config.body(), context); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config)); }; diff --git a/test/integration/filters/set_response_code_filter_config.proto b/test/integration/filters/set_response_code_filter_config.proto index f952981ab7a4..09765c970d01 100644 --- a/test/integration/filters/set_response_code_filter_config.proto +++ b/test/integration/filters/set_response_code_filter_config.proto @@ -7,4 +7,5 @@ import "validate/validate.proto"; message SetResponseCodeFilterConfig { string prefix = 1; uint32 code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + string body = 3; } diff --git a/test/integration/filters/test_socket_interface.cc b/test/integration/filters/test_socket_interface.cc index f0907810393c..6e63e6640b3b 100644 --- a/test/integration/filters/test_socket_interface.cc +++ b/test/integration/filters/test_socket_interface.cc @@ -30,11 +30,14 @@ IoHandlePtr TestIoSocketHandle::accept(struct sockaddr* addr, socklen_t* addrlen return nullptr; } - return std::make_unique(writev_override_, result.rc_, socket_v6only_); + return std::make_unique(writev_override_, result.rc_, socket_v6only_, + domain_); } -IoHandlePtr TestSocketInterface::makeSocket(int socket_fd, bool socket_v6only) const { - return std::make_unique(writev_override_proc_, socket_fd, socket_v6only); +IoHandlePtr TestSocketInterface::makeSocket(int socket_fd, bool socket_v6only, + absl::optional domain) const { + return std::make_unique(writev_override_proc_, socket_fd, socket_v6only, + domain); } } // namespace Network diff --git a/test/integration/filters/test_socket_interface.h b/test/integration/filters/test_socket_interface.h index 84c82cfecdee..2ad871a1923d 100644 --- a/test/integration/filters/test_socket_interface.h +++ b/test/integration/filters/test_socket_interface.h @@ -24,8 +24,8 @@ class TestIoSocketHandle : public IoSocketHandleImpl { using WritevOverrideProc = std::function; TestIoSocketHandle(WritevOverrideProc writev_override_proc, os_fd_t fd = INVALID_SOCKET, - bool socket_v6only = false) - : IoSocketHandleImpl(fd, socket_v6only), writev_override_(writev_override_proc) {} + bool socket_v6only = false, absl::optional domain = absl::nullopt) + : IoSocketHandleImpl(fd, socket_v6only, domain), writev_override_(writev_override_proc) {} private: IoHandlePtr accept(struct sockaddr* addr, socklen_t* addrlen) override; @@ -57,7 +57,8 @@ class TestSocketInterface : public SocketInterfaceImpl { private: // SocketInterfaceImpl - 
IoHandlePtr makeSocket(int socket_fd, bool socket_v6only) const override; + IoHandlePtr makeSocket(int socket_fd, bool socket_v6only, + absl::optional domain) const override; const TestIoSocketHandle::WritevOverrideProc writev_override_proc_; }; diff --git a/test/integration/h2_corpus/buffered_body b/test/integration/h2_corpus/buffered_body new file mode 100644 index 000000000000..e3f67d32d3cd --- /dev/null +++ b/test/integration/h2_corpus/buffered_body @@ -0,0 +1,61 @@ +events { + downstream_send_event { + h2_frames { + settings { + } + } + h2_frames { + request { + stream_index: 1 + host: "host" + path: "/p?th/to/longo" + } + } + } +} +events { + downstream_send_event { + h2_frames { + metadata { + flags: END_HEADERS + stream_index: 1 + metadata { + metadata { + key: "" + value: "@" + } + metadata { + key: "(" + value: "" + } + metadata { + key: "Timeout Seconds" + value: "10" + } + metadata { + key: "tincoes" + value: "15" + } + } + } + } + } +} +events { + downstream_send_event { + h2_frames { + settings { + flags: ACK + } + } + } +} +events { + upstream_send_event { + h2_frames { + window_update { + stream_index: 2147483648 + } + } + } +} diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index 74b36ff94b49..9f49ed87f0b2 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -33,8 +33,7 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} void createUpstreams() override { - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); hds_upstream_ = fake_upstreams_.back().get(); HttpIntegrationTest::createUpstreams(); } @@ -60,14 +59,12 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, // Endpoint connections if (tls_hosts_) { host_upstream_ = - std::make_unique(HttpIntegrationTest::createUpstreamTlsContext(), 0, - http_conn_type_, version_, timeSystem()); + createFakeUpstream(HttpIntegrationTest::createUpstreamTlsContext(), http_conn_type_); host2_upstream_ = - std::make_unique(HttpIntegrationTest::createUpstreamTlsContext(), 0, - http_conn_type_, version_, timeSystem()); + createFakeUpstream(HttpIntegrationTest::createUpstreamTlsContext(), http_conn_type_); } else { - host_upstream_ = std::make_unique(0, http_conn_type_, version_, timeSystem()); - host2_upstream_ = std::make_unique(0, http_conn_type_, version_, timeSystem()); + host_upstream_ = createFakeUpstream(http_conn_type_); + host2_upstream_ = createFakeUpstream(http_conn_type_); } } @@ -188,6 +185,31 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, return server_health_check_specifier_; } + envoy::service::health::v3::ClusterHealthCheck createSecondCluster(std::string name) { + // Add endpoint + envoy::service::health::v3::ClusterHealthCheck health_check; + + health_check.set_cluster_name(name); + Network::Utility::addressToProtobufAddress( + *host2_upstream_->localAddress(), + *health_check.add_locality_endpoints()->add_endpoints()->mutable_address()); + health_check.mutable_locality_endpoints(0)->mutable_locality()->set_region("kounopetra"); + health_check.mutable_locality_endpoints(0)->mutable_locality()->set_zone("emplisi"); + health_check.mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone("paris"); + + 
health_check.add_health_checks()->mutable_timeout()->set_seconds(MaxTimeout); + health_check.mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout); + health_check.mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2); + health_check.mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2); + health_check.mutable_health_checks(0)->mutable_grpc_health_check(); + health_check.mutable_health_checks(0) + ->mutable_http_health_check() + ->set_hidden_envoy_deprecated_use_http2(false); + health_check.mutable_health_checks(0)->mutable_http_health_check()->set_path("/healthcheck"); + + return health_check; + } + // Creates a basic HealthCheckSpecifier message containing one endpoint and // one TCP health_check envoy::service::health::v3::HealthCheckSpecifier makeTcpHealthCheckSpecifier() { @@ -697,26 +719,8 @@ TEST_P(HdsIntegrationTest, TwoEndpointsDifferentClusters) { server_health_check_specifier_ = makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); - // Add endpoint - auto* health_check = server_health_check_specifier_.add_cluster_health_checks(); - - health_check->set_cluster_name("cat"); - Network::Utility::addressToProtobufAddress( - *host2_upstream_->localAddress(), - *health_check->add_locality_endpoints()->add_endpoints()->mutable_address()); - health_check->mutable_locality_endpoints(0)->mutable_locality()->set_region("kounopetra"); - health_check->mutable_locality_endpoints(0)->mutable_locality()->set_zone("emplisi"); - health_check->mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone("paris"); - - health_check->add_health_checks()->mutable_timeout()->set_seconds(MaxTimeout); - health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout); - health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2); - health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2); - health_check->mutable_health_checks(0)->mutable_grpc_health_check(); - health_check->mutable_health_checks(0) - ->mutable_http_health_check() - ->set_hidden_envoy_deprecated_use_http2(false); - health_check->mutable_health_checks(0)->mutable_http_health_check()->set_path("/healthcheck"); + // Add Second Cluster + server_health_check_specifier_.add_cluster_health_checks()->MergeFrom(createSecondCluster("cat")); // Server <--> Envoy waitForHdsStream(); @@ -1043,5 +1047,124 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTlsMissingSocketMatch) { cleanupHdsConnection(); } +TEST_P(HdsIntegrationTest, UpdateEndpoints) { + initialize(); + server_health_check_specifier_ = + makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); + + // Add Second Cluster. 
+ server_health_check_specifier_.add_cluster_health_checks()->MergeFrom(createSecondCluster("cat")); + + // Server <--> Envoy + waitForHdsStream(); + ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_)); + + // Server asks for health checking + hds_stream_->startGrpcStream(); + hds_stream_->sendGrpcMessage(server_health_check_specifier_); + test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); + + // Envoy sends health check messages to two endpoints + healthcheckEndpoints("cat"); + + // Endpoint responds to the health check + host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "404"}}, false); + host_stream_->encodeData(1024, true); + host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + host2_stream_->encodeData(1024, true); + + // Receive updates until the one we expect arrives + ASSERT_TRUE(waitForClusterHealthResponse(envoy::config::core::v3::HEALTHY, + host2_upstream_->localAddress(), 1, 0, 0)); + + ASSERT_EQ(response_.endpoint_health_response().cluster_endpoints_health_size(), 2); + + // store cluster response info for easier reference. + const auto& cluster_resp0 = response_.endpoint_health_response().cluster_endpoints_health(0); + const auto& cluster_resp1 = response_.endpoint_health_response().cluster_endpoints_health(1); + + // check cluster info and sizes. + EXPECT_EQ(cluster_resp0.cluster_name(), "anna"); + ASSERT_EQ(cluster_resp0.locality_endpoints_health_size(), 1); + EXPECT_EQ(cluster_resp1.cluster_name(), "cat"); + ASSERT_EQ(cluster_resp1.locality_endpoints_health_size(), 1); + + // store locality response info for easier reference. + const auto& locality_resp0 = cluster_resp0.locality_endpoints_health(0); + const auto& locality_resp1 = cluster_resp1.locality_endpoints_health(0); + + // check locality info and sizes. + EXPECT_EQ(locality_resp0.locality().sub_zone(), "hobbiton"); + ASSERT_EQ(locality_resp0.endpoints_health_size(), 1); + EXPECT_EQ(locality_resp1.locality().sub_zone(), "paris"); + ASSERT_EQ(locality_resp1.endpoints_health_size(), 1); + + // Check endpoints. + EXPECT_TRUE(checkEndpointHealthResponse(locality_resp0.endpoints_health(0), + envoy::config::core::v3::UNHEALTHY, + host_upstream_->localAddress())); + + checkCounters(1, 2, 0, 1); + EXPECT_EQ(1, test_server_->counter("cluster.cat.health_check.success")->value()); + EXPECT_EQ(0, test_server_->counter("cluster.cat.health_check.failure")->value()); + + // Create new specifier that removes the second cluster, and adds an endpoint to the first. + server_health_check_specifier_ = + makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); + Network::Utility::addressToProtobufAddress( + *host2_upstream_->localAddress(), + *server_health_check_specifier_.mutable_cluster_health_checks(0) + ->mutable_locality_endpoints(0) + ->add_endpoints() + ->mutable_address()); + + // Reset second endpoint for usage in our cluster. + ASSERT_TRUE(host2_fake_connection_->close()); + ASSERT_TRUE(host2_fake_connection_->waitForDisconnect()); + + // Send new specifier. + hds_stream_->sendGrpcMessage(server_health_check_specifier_); + // TODO: add stats reporting and verification for Clusters added/removed/reused and Endpoints + // added/removed/reused. + test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); + + // Set up second endpoint again. 
+ ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_)); + ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_)); + ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_)); + EXPECT_EQ(host2_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host2_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(host2_stream_->headers().getHostValue(), "anna"); + + // Endpoints respond to the health check + host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + host2_stream_->encodeData(1024, true); + + // Receive updates until the one we expect arrives + ASSERT_TRUE(waitForClusterHealthResponse(envoy::config::core::v3::HEALTHY, + host2_upstream_->localAddress(), 0, 0, 1)); + + // Ensure we have at least one cluster before trying to read it. + ASSERT_EQ(response_.endpoint_health_response().cluster_endpoints_health_size(), 1); + + // store cluster response info for easier reference. + const auto& cluster_response = response_.endpoint_health_response().cluster_endpoints_health(0); + + // Check cluster has correct name and number of localities (1) + EXPECT_EQ(cluster_response.cluster_name(), "anna"); + ASSERT_EQ(cluster_response.locality_endpoints_health_size(), 1); + + // check the only locality and its endpoints. + const auto& locality_response = cluster_response.locality_endpoints_health(0); + EXPECT_EQ(locality_response.locality().sub_zone(), "hobbiton"); + ASSERT_EQ(locality_response.endpoints_health_size(), 2); + EXPECT_TRUE(checkEndpointHealthResponse(locality_response.endpoints_health(0), + envoy::config::core::v3::UNHEALTHY, + host_upstream_->localAddress())); + + cleanupHostConnections(); + cleanupHdsConnection(); +} + } // namespace } // namespace Envoy diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index 0c98e11288a6..a7a837fe7e5e 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -359,8 +359,7 @@ class HeaderIntegrationTest HttpIntegrationTest::createUpstreams(); if (use_eds_) { - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } } diff --git a/test/integration/header_prefix_integration_test.cc b/test/integration/header_prefix_integration_test.cc index e2e47831a27c..52ca839c7607 100644 --- a/test/integration/header_prefix_integration_test.cc +++ b/test/integration/header_prefix_integration_test.cc @@ -35,13 +35,15 @@ TEST_P(HeaderPrefixIntegrationTest, CustomHeaderPrefix) { auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0); - EXPECT_TRUE(response->headers().get( - Envoy::Http::LowerCaseString{"x-custom-upstream-service-time"}) != nullptr); + EXPECT_FALSE(response->headers() + .get(Envoy::Http::LowerCaseString{"x-custom-upstream-service-time"}) + .empty()); EXPECT_EQ("x-custom-upstream-service-time", response->headers().EnvoyUpstreamServiceTime()->key().getStringView()); - EXPECT_TRUE(upstream_request_->headers().get( - Envoy::Http::LowerCaseString{"x-custom-expected-rq-timeout-ms"}) != nullptr); + EXPECT_FALSE(upstream_request_->headers() + .get(Envoy::Http::LowerCaseString{"x-custom-expected-rq-timeout-ms"}) + .empty()); EXPECT_EQ("x-custom-expected-rq-timeout-ms", upstream_request_->headers().EnvoyExpectedRequestTimeoutMs()->key().getStringView()); } diff --git 
a/test/integration/health_check_integration_test.cc b/test/integration/health_check_integration_test.cc new file mode 100644 index 000000000000..d9ca186bc849 --- /dev/null +++ b/test/integration/health_check_integration_test.cc @@ -0,0 +1,338 @@ +#include + +#include "envoy/config/core/v3/health_check.pb.h" + +#include "test/common/http/http2/http2_frame.h" +#include "test/common/upstream/utility.h" +#include "test/integration/http_integration.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +// Integration tests for active health checking. +// The tests fetch the cluster configuration using CDS in order to actively start health +// checking after Envoy and the hosts are initialized. +class HealthCheckIntegrationTestBase : public Event::TestUsingSimulatedTime, + public HttpIntegrationTest { +public: + HealthCheckIntegrationTestBase( + Network::Address::IpVersion ip_version, + FakeHttpConnection::Type upstream_protocol = FakeHttpConnection::Type::HTTP2) + : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ip_version, + ConfigHelper::discoveredClustersBootstrap("GRPC")), + ip_version_(ip_version), upstream_protocol_(upstream_protocol) {} + + // Per-cluster information including the fake connection and stream. + struct ClusterData { + const std::string name_; + envoy::config::cluster::v3::Cluster cluster_; + FakeUpstreamPtr host_upstream_; + FakeStreamPtr host_stream_; + FakeHttpConnectionPtr host_fake_connection_; + FakeRawConnectionPtr host_fake_raw_connection_; + + ClusterData(const std::string name) : name_(name) {} + }; + + void initialize() override { + // The endpoints and their configuration is received as part of a CDS response, and not + // statically defined clusters with active health-checking because in an integration test the + // hosts will be able to reply to the health-check requests only after the tests framework + // initialization has finished. This follows the same initialization procedure that is executed + // in the CDS integration tests. + + use_lds_ = false; + // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in + // BaseIntegrationTest::createUpstreams() (which is part of initialize()). + // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap + // config that you use! + setUpstreamCount(1); // the CDS cluster + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2. + + // HttpIntegrationTest::initialize() does many things: + // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount(). + // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening + // on (since you're supposed to leave them as 0). + // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual + // Envoy used in the tests. + // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in + // the bootstrap config have come up, and registering them in a port map (see lookupPort()). + // However, this test needs to defer all of that to later. + defer_listener_finalization_ = true; + HttpIntegrationTest::initialize(); + + // Let Envoy establish its connection to the CDS server. + acceptXdsConnection(); + + // Expect 1 for the statically specified CDS server. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 1); + + registerTestServerPorts({"http"}); + + // Create the regular (i.e. not an xDS server) upstreams. 
We create them manually here after + // initialize() because finalize() expects all fake_upstreams_ to correspond to a static + // cluster in the bootstrap config - which we don't want since we're using dynamic CDS. + for (auto& cluster : clusters_) { + cluster.host_upstream_ = std::make_unique(0, upstream_protocol_, version_, + timeSystem(), enable_half_close_); + cluster.cluster_ = ConfigHelper::buildStaticCluster( + cluster.name_, cluster.host_upstream_->localAddress()->ip()->port(), + Network::Test::getLoopbackAddressString(ip_version_)); + } + } + + void acceptXdsConnection() { + AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection. + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_); + RELEASE_ASSERT(result, result.message()); + result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + RELEASE_ASSERT(result, result.message()); + xds_stream_->startGrpcStream(); + } + + // Closes the connections to the fake hosts. + void cleanupHostConnections() { + for (auto& cluster : clusters_) { + auto& host_fake_connection = cluster.host_fake_connection_; + if (host_fake_connection != nullptr) { + AssertionResult result = host_fake_connection->close(); + RELEASE_ASSERT(result, result.message()); + result = host_fake_connection->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + } + } + } + + // Adds an active health check specifier to the given cluster. + envoy::config::core::v3::HealthCheck* + addHealthCheck(envoy::config::cluster::v3::Cluster& cluster) { + // Add general health check specifier to the cluster. + auto* health_check = cluster.add_health_checks(); + health_check->mutable_timeout()->set_seconds(30); + health_check->mutable_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_no_traffic_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_unhealthy_threshold()->set_value(1); + health_check->mutable_healthy_threshold()->set_value(1); + return health_check; + } + + // The number of clusters and their names must match the clusters in the CDS integration test + // configuration. + static constexpr size_t clusters_num_ = 2; + std::array clusters_{{{"cluster_1"}, {"cluster_2"}}}; + Network::Address::IpVersion ip_version_; + FakeHttpConnection::Type upstream_protocol_; +}; + +struct HttpHealthCheckIntegrationTestParams { + Network::Address::IpVersion ip_version; + FakeHttpConnection::Type upstream_protocol; +}; + +class HttpHealthCheckIntegrationTest + : public testing::TestWithParam, + public HealthCheckIntegrationTestBase { +public: + HttpHealthCheckIntegrationTest() + : HealthCheckIntegrationTestBase(GetParam().ip_version, GetParam().upstream_protocol) {} + + // Returns the 4 combinations for testing: + // [HTTP1, HTTP2] x [IPv4, IPv6] + static std::vector + getHttpHealthCheckIntegrationTestParams() { + std::vector ret; + + for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { + for (auto upstream_protocol : + {FakeHttpConnection::Type::HTTP1, FakeHttpConnection::Type::HTTP2}) { + ret.push_back(HttpHealthCheckIntegrationTestParams{ip_version, upstream_protocol}); + } + } + return ret; + } + + static std::string httpHealthCheckTestParamsToString( + const ::testing::TestParamInfo& params) { + return absl::StrCat( + (params.param.ip_version == Network::Address::IpVersion::v4 ? "IPv4_" : "IPv6_"), + (params.param.upstream_protocol == FakeHttpConnection::Type::HTTP2 ? 
"Http2Upstream" + : "HttpUpstream")); + } + + void TearDown() override { + cleanupHostConnections(); + cleanUpXdsConnection(); + } + + // Adds a HTTP active health check specifier to the given cluster, and waits for the first health + // check probe to be received. + void initHttpHealthCheck(uint32_t cluster_idx) { + const envoy::type::v3::CodecClientType codec_client_type = + (FakeHttpConnection::Type::HTTP1 == upstream_protocol_) + ? envoy::type::v3::CodecClientType::HTTP1 + : envoy::type::v3::CodecClientType::HTTP2; + + auto& cluster_data = clusters_[cluster_idx]; + auto* health_check = addHealthCheck(cluster_data.cluster_); + health_check->mutable_http_health_check()->set_path("/healthcheck"); + health_check->mutable_http_health_check()->set_codec_client_type(codec_client_type); + + // Introduce the cluster using compareDiscoveryRequest / sendDiscoveryResponse. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {cluster_data.cluster_}, {cluster_data.cluster_}, {}, "55"); + + // Wait for upstream to receive health check request. + ASSERT_TRUE(cluster_data.host_upstream_->waitForHttpConnection( + *dispatcher_, cluster_data.host_fake_connection_)); + ASSERT_TRUE(cluster_data.host_fake_connection_->waitForNewStream(*dispatcher_, + cluster_data.host_stream_)); + ASSERT_TRUE(cluster_data.host_stream_->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(cluster_data.host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(cluster_data.host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(cluster_data.host_stream_->headers().getHostValue(), cluster_data.name_); + } +}; + +INSTANTIATE_TEST_SUITE_P( + IpHttpVersions, HttpHealthCheckIntegrationTest, + testing::ValuesIn(HttpHealthCheckIntegrationTest::getHttpHealthCheckIntegrationTestParams()), + HttpHealthCheckIntegrationTest::httpHealthCheckTestParamsToString); + +// Tests that a healthy endpoint returns a valid HTTP health check response. +TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointHealthyHttp) { + const uint32_t cluster_idx = 0; + initialize(); + initHttpHealthCheck(cluster_idx); + + // Endpoint responds with healthy status to the health check. + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(1024, true); + + // Verify that Envoy detected the health check response. + test_server_->waitForCounterGe("cluster.cluster_1.health_check.success", 1); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); +} + +// Tests that an unhealthy endpoint returns a valid HTTP health check response. +TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointUnhealthyHttp) { + const uint32_t cluster_idx = 0; + initialize(); + initHttpHealthCheck(cluster_idx); + + // Endpoint responds to the health check with unhealthy status. 
+ clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(1024, true); + + test_server_->waitForCounterGe("cluster.cluster_1.health_check.failure", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); +} + +// Tests that no HTTP health check response results in timeout and unhealthy endpoint. +TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointTimeoutHttp) { + const uint32_t cluster_idx = 0; + initialize(); + initHttpHealthCheck(cluster_idx); + + // Increase time until timeout (30s). + timeSystem().advanceTimeWait(std::chrono::seconds(30)); + + // Endpoint doesn't reply, and a healthcheck failure occurs (due to timeout). + test_server_->waitForCounterGe("cluster.cluster_1.health_check.failure", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); +} + +class TcpHealthCheckIntegrationTest : public testing::TestWithParam, + public HealthCheckIntegrationTestBase { +public: + TcpHealthCheckIntegrationTest() : HealthCheckIntegrationTestBase(GetParam()) {} + + void TearDown() override { + cleanupHostConnections(); + cleanUpXdsConnection(); + } + + // Adds a TCP active health check specifier to the given cluster, and waits for the first health + // check probe to be received. + void initTcpHealthCheck(uint32_t cluster_idx) { + auto& cluster_data = clusters_[cluster_idx]; + auto health_check = addHealthCheck(cluster_data.cluster_); + health_check->mutable_tcp_health_check()->mutable_send()->set_text("50696E67"); // "Ping" + health_check->mutable_tcp_health_check()->add_receive()->set_text("506F6E67"); // "Pong" + + // Introduce the cluster using compareDiscoveryRequest / sendDiscoveryResponse. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {cluster_data.cluster_}, {cluster_data.cluster_}, {}, "55"); + + // Wait for upstream to receive TCP HC request. + ASSERT_TRUE( + cluster_data.host_upstream_->waitForRawConnection(cluster_data.host_fake_raw_connection_)); + ASSERT_TRUE(cluster_data.host_fake_raw_connection_->waitForData( + FakeRawConnection::waitForInexactMatch("Ping"))); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, TcpHealthCheckIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Tests that a healthy endpoint returns a valid TCP health check response. +TEST_P(TcpHealthCheckIntegrationTest, SingleEndpointHealthyTcp) { + const uint32_t cluster_idx = 0; + initialize(); + initTcpHealthCheck(cluster_idx); + + AssertionResult result = clusters_[cluster_idx].host_fake_raw_connection_->write("Pong"); + RELEASE_ASSERT(result, result.message()); + + test_server_->waitForCounterGe("cluster.cluster_1.health_check.success", 1); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); +} + +// Tests that an invalid response fails the health check. 
+TEST_P(TcpHealthCheckIntegrationTest, SingleEndpointWrongResponseTcp) { + const uint32_t cluster_idx = 0; + initialize(); + initTcpHealthCheck(cluster_idx); + + // Send the wrong reply ("Pong" is expected). + AssertionResult result = clusters_[cluster_idx].host_fake_raw_connection_->write("Poong"); + RELEASE_ASSERT(result, result.message()); + + // Envoy will wait until timeout occurs because no correct reply was received. + // Increase time until timeout (30s). + timeSystem().advanceTimeWait(std::chrono::seconds(30)); + + test_server_->waitForCounterGe("cluster.cluster_1.health_check.failure", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); +} + +// Tests that no TCP health check response results in timeout and unhealthy endpoint. +TEST_P(TcpHealthCheckIntegrationTest, SingleEndpointTimeoutTcp) { + const uint32_t cluster_idx = 0; + initialize(); + initTcpHealthCheck(cluster_idx); + + // Increase time until timeout (30s). + timeSystem().advanceTimeWait(std::chrono::seconds(30)); + + test_server_->waitForCounterGe("cluster.cluster_1.health_check.failure", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index a09aee64e5cb..d16a5a0bc59e 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -5,6 +5,7 @@ # source/exe/main.cc and ./hotrestart_main.cc have not diverged except for # adding the new gauge. export ENVOY_BIN="${TEST_SRCDIR}"/envoy/test/integration/hotrestart_main +# shellcheck source=test/integration/test_utility.sh source "$TEST_SRCDIR/envoy/test/integration/test_utility.sh" # TODO(htuch): In this test script, we are duplicating work done in test_environment.cc via sed. @@ -20,9 +21,8 @@ mkdir -p "${TEST_TMPDIR}"/test/common/runtime/test_data/current/envoy_override if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "all" ]] \ || [[ "${ENVOY_IP_TEST_VERSIONS}" == "v4only" ]]; then HOT_RESTART_JSON_V4="${TEST_TMPDIR}"/hot_restart_v4.yaml - echo building ${HOT_RESTART_JSON_V4} ... - cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | - sed -e "s#{{ upstream_. }}#0#g" | \ + echo "building ${HOT_RESTART_JSON_V4} ..." + sed -e "s#{{ upstream_. }}#0#g" "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | \ sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \ sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \ sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ @@ -36,8 +36,7 @@ fi if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "all" ]] \ || [[ "${ENVOY_IP_TEST_VERSIONS}" == "v6only" ]]; then HOT_RESTART_JSON_V6="${TEST_TMPDIR}"/hot_restart_v6.yaml - cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | - sed -e "s#{{ upstream_. }}#0#g" | \ + sed -e "s#{{ upstream_. }}#0#g" "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | \ sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \ sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \ sed -e "s#{{ ip_loopback_address }}#::1#" | \ @@ -52,8 +51,7 @@ fi # upstreams to avoid too much wild sedding. 
HOT_RESTART_JSON_UDS="${TEST_TMPDIR}"/hot_restart_uds.yaml SOCKET_DIR="$(mktemp -d /tmp/envoy_test_hotrestart.XXXXXX)" -cat "${TEST_SRCDIR}/envoy"/test/config/integration/server_unix_listener.yaml | - sed -e "s#{{ socket_dir }}#${SOCKET_DIR}#" | \ +sed -e "s#{{ socket_dir }}#${SOCKET_DIR}#" "${TEST_SRCDIR}/envoy"/test/config/integration/server_unix_listener.yaml | \ sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_UDS}" @@ -61,9 +59,8 @@ JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_UDS}") # Test reuse port listener. HOT_RESTART_JSON_REUSE_PORT="${TEST_TMPDIR}"/hot_restart_v4.yaml -echo building ${HOT_RESTART_JSON_V4} ... -cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | - sed -e "s#{{ upstream_. }}#0#g" | \ +echo "building ${HOT_RESTART_JSON_V4} ..." +sed -e "s#{{ upstream_. }}#0#g" "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | \ sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \ sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \ sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ @@ -81,27 +78,34 @@ echo "Hot restart test using dynamic base id" TEST_INDEX=0 function run_testsuite() { - local HOT_RESTART_JSON="$1" - local FAKE_SYMBOL_TABLE="$2" + local BASE_ID BASE_ID_PATH HOT_RESTART_JSON="$1" FAKE_SYMBOL_TABLE="$2" + local SOCKET_PATH=@envoy_domain_socket + local SOCKET_MODE=0 + if [ -n "$3" ] && [ -n "$4" ] + then + SOCKET_PATH="$3" + SOCKET_MODE="$4" + fi start_test validation check "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" --mode validate --service-cluster cluster \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --service-node node --disable-hot-restart - local BASE_ID_PATH=$(mktemp 'envoy_test_base_id.XXXXXX') + BASE_ID_PATH=$(mktemp 'envoy_test_base_id.XXXXXX') echo "Selected dynamic base id path ${BASE_ID_PATH}" # Now start the real server, hot restart it twice, and shut it all down as a # basic hot restart sanity test. We expect SERVER_0 to exit quickly when # SERVER_2 starts, and are not relying on timeouts. - start_test Starting epoch 0 + start_test "Starting epoch 0" ADMIN_ADDRESS_PATH_0="${TEST_TMPDIR}"/admin.0."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" \ --restart-epoch 0 --use-dynamic-base-id --base-id-path "${BASE_ID_PATH}" \ --service-cluster cluster --service-node node --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" \ - --admin-address-path "${ADMIN_ADDRESS_PATH_0}" + --admin-address-path "${ADMIN_ADDRESS_PATH_0}" \ + --socket-path "${SOCKET_PATH}" --socket-mode "${SOCKET_MODE}" - local BASE_ID=$(cat "${BASE_ID_PATH}") + BASE_ID=$(cat "${BASE_ID_PATH}") while [ -z "${BASE_ID}" ]; do echo "Waiting for base id" sleep 0.5 @@ -112,7 +116,7 @@ function run_testsuite() { SERVER_0_PID=$BACKGROUND_PID - start_test Updating original config listener addresses + start_test "Updating original config listener addresses" sleep 3 UPDATED_HOT_RESTART_JSON="${TEST_TMPDIR}"/hot_restart_updated."${TEST_INDEX}".yaml @@ -122,37 +126,39 @@ function run_testsuite() { # Send SIGUSR1 signal to the first server, this should not kill it. Also send SIGHUP which should # get eaten. echo "Sending SIGUSR1/SIGHUP to first server" - kill -SIGUSR1 ${SERVER_0_PID} - kill -SIGHUP ${SERVER_0_PID} + kill -SIGUSR1 "${SERVER_0_PID}" + kill -SIGHUP "${SERVER_0_PID}" sleep 3 disableHeapCheck # To ensure that we don't accidentally change the /hot_restart_version # string, compare it against a hard-coded string. 
- start_test Checking for consistency of /hot_restart_version + start_test "Checking for consistency of /hot_restart_version" CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" 2>&1) EXPECTED_CLI_HOT_RESTART_VERSION="11.${SHARED_MEMORY_SIZE}" echo "The Envoy's hot restart version is ${CLI_HOT_RESTART_VERSION}" echo "Now checking that the above version is what we expected." check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] - start_test Checking for consistency of /hot_restart_version with --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" + start_test "Checking for consistency of /hot_restart_version with --use-fake-symbol-table ${FAKE_SYMBOL_TABLE}" CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" 2>&1) + CLI_HOT_RESTART_VERSION=$(strip_fake_symbol_table_warning "$CLI_HOT_RESTART_VERSION" "$FAKE_SYMBOL_TABLE") EXPECTED_CLI_HOT_RESTART_VERSION="11.${SHARED_MEMORY_SIZE}" check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] - start_test Checking for match of --hot-restart-version and admin /hot_restart_version + start_test "Checking for match of --hot-restart-version and admin /hot_restart_version" ADMIN_ADDRESS_0=$(cat "${ADMIN_ADDRESS_PATH_0}") - echo fetching hot restart version from http://${ADMIN_ADDRESS_0}/hot_restart_version ... - ADMIN_HOT_RESTART_VERSION=$(curl -sg http://${ADMIN_ADDRESS_0}/hot_restart_version) + echo "fetching hot restart version from http://${ADMIN_ADDRESS_0}/hot_restart_version ..." + ADMIN_HOT_RESTART_VERSION=$(curl -sg "http://${ADMIN_ADDRESS_0}/hot_restart_version") echo "Fetched ADMIN_HOT_RESTART_VERSION is ${ADMIN_HOT_RESTART_VERSION}" CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" 2>&1) + CLI_HOT_RESTART_VERSION=$(strip_fake_symbol_table_warning "$CLI_HOT_RESTART_VERSION" "$FAKE_SYMBOL_TABLE") check [ "${ADMIN_HOT_RESTART_VERSION}" = "${CLI_HOT_RESTART_VERSION}" ] - start_test Checking server.hot_restart_generation 1 + start_test "Checking server.hot_restart_generation 1" GENERATION_0=$(scrape_stat "${ADMIN_ADDRESS_0}" "server.hot_restart_generation") check [ "$GENERATION_0" = "1" ]; @@ -169,7 +175,8 @@ function run_testsuite() { ADMIN_ADDRESS_PATH_1="${TEST_TMPDIR}"/admin.1."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 1 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_1}" + --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_1}" \ + --socket-path "${SOCKET_PATH}" --socket-mode "${SOCKET_MODE}" SERVER_1_PID=$BACKGROUND_PID @@ -183,12 +190,12 @@ function run_testsuite() { # Check to see that the SERVER_1 accumulates the test_gauge value from # SERVER_0, This will be erased once SERVER_0 terminates. if [ "$TEST_GAUGE_0" != 0 ]; then - start_test Checking that the hotrestart_test_gauge incorporates SERVER_0 and SERVER_1. + start_test "Checking that the hotrestart_test_gauge incorporates SERVER_0 and SERVER_1." 
TEST_GAUGE_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "hotrestart_test_gauge") - check [ $TEST_GAUGE_1 = "2" ] + check [ "$TEST_GAUGE_1" = "2" ] fi - start_test Checking that listener addresses have not changed + start_test "Checking that listener addresses have not changed" HOT_RESTART_JSON_1="${TEST_TMPDIR}"/hot_restart.1."${TEST_INDEX}".yaml "${TEST_SRCDIR}/envoy"/tools/socket_passing "-o" "${UPDATED_HOT_RESTART_JSON}" "-a" "${ADMIN_ADDRESS_PATH_1}" \ "-u" "${HOT_RESTART_JSON_1}" @@ -197,36 +204,35 @@ function run_testsuite() { # Send SIGUSR1 signal to the second server, this should not kill it, and # we prove that by checking its stats after having sent it a signal. - start_test Sending SIGUSR1 to SERVER_1. - kill -SIGUSR1 ${SERVER_1_PID} + start_test "Sending SIGUSR1 to SERVER_1." + kill -SIGUSR1 "${SERVER_1_PID}" sleep 3 - start_test Checking server.hot_restart_generation 2 + start_test "Checking server.hot_restart_generation 2" GENERATION_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "server.hot_restart_generation") check [ "$GENERATION_1" = "2" ]; ADMIN_ADDRESS_PATH_2="${TEST_TMPDIR}"/admin.2."${TEST_INDEX}".address - start_test Starting epoch 2 + start_test "Starting epoch 2" run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 2 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_2}" \ - --parent-shutdown-time-s 3 + --parent-shutdown-time-s 3 \ + --socket-path "${SOCKET_PATH}" --socket-mode "${SOCKET_MODE}" SERVER_2_PID=$BACKGROUND_PID # Now wait for the SERVER_0 to exit. It should occur immediately when SERVER_2 starts, as # SERVER_1 will terminate SERVER_0 when it becomes the parent. - start_test Waiting for epoch 0 to finish. - echo time wait ${SERVER_0_PID} - time wait ${SERVER_0_PID} - [[ $? == 0 ]] + start_test "Waiting for epoch 0 to finish." + echo "time wait ${SERVER_0_PID}" + time wait "${SERVER_0_PID}" # Then wait for the SERVER_1 to exit, which should happen within a few seconds # due to '--parent-shutdown-time-s 3' on SERVER_2. - start_test Waiting for epoch 1 to finish. - echo time wait ${SERVER_1_PID} - time wait ${SERVER_1_PID} - [[ $? == 0 ]] + start_test "Waiting for epoch 1 to finish." + echo "time wait ${SERVER_1_PID}" + time wait "${SERVER_1_PID}" # This tests that we are retaining the generation count. For most Gauges, # we erase the parent contribution when the parent exits, but @@ -234,21 +240,21 @@ function run_testsuite() { # stat_merger_->retainParentGaugeValue(hot_restart_generation_stat_name_) # in source/server/hot_restarting_child.cc results in this test failing, # with the generation being decremented back to 1. - start_test Checking server.hot_restart_generation 2 + start_test "Checking server.hot_restart_generation 2" ADMIN_ADDRESS_2=$(cat "${ADMIN_ADDRESS_PATH_2}") GENERATION_2=$(scrape_stat "${ADMIN_ADDRESS_2}" "server.hot_restart_generation") check [ "$GENERATION_2" = "3" ]; # Check to see that the SERVER_2's test_gauge value reverts bac to 1, since # its parents have now exited and we have erased their gauge contributions. 
-  start_test Check that the hotrestart_test_gauge reported in SERVER_2 excludes parent contribution
+  start_test "Check that the hotrestart_test_gauge reported in SERVER_2 excludes parent contribution"
   wait_status=$(wait_for_stat "$ADMIN_ADDRESS_2" "hotrestart_test_gauge" -eq 1 5)
-  echo $wait_status
+  echo "$wait_status"
   if [[ "$wait_status" != success* ]]; then
     handle_failure timeout
   fi

-  start_test Checking that listener addresses have not changed
+  start_test "Checking that listener addresses have not changed"
   HOT_RESTART_JSON_2="${TEST_TMPDIR}"/hot_restart.2."${TEST_INDEX}".yaml
   "${TEST_SRCDIR}/envoy"/tools/socket_passing "-o" "${UPDATED_HOT_RESTART_JSON}" "-a" "${ADMIN_ADDRESS_PATH_2}" \
       "-u" "${HOT_RESTART_JSON_2}"
@@ -256,12 +262,24 @@ function run_testsuite() {
   [[ -z "${CONFIG_DIFF}" ]]

   # Now term the last server, and the other one should exit also.
-  start_test Killing and waiting for epoch 2
-  kill ${SERVER_2_PID}
-  wait ${SERVER_2_PID}
-  [[ $? == 0 ]]
+  start_test "Killing and waiting for epoch 2"
+  kill "${SERVER_2_PID}"
+  wait "${SERVER_2_PID}"
+}
+
+# TODO(#13399): remove this helper function and the references to it, as well as
+# the references to $FAKE_SYMBOL_TABLE.
+function strip_fake_symbol_table_warning() {
+  local INPUT="$1"
+  local FAKE_SYMBOL_TABLE="$2"
+  if [ "$FAKE_SYMBOL_TABLE" = "1" ]; then
+    echo "$INPUT" | grep -v "Fake symbol tables have been removed"
+  else
+    echo "$INPUT"
+  fi
 }

+# Hot restart in the abstract namespace.
 for HOT_RESTART_JSON in "${JSON_TEST_ARRAY[@]}"
 do
   # Run one of the tests with real symbol tables. No need to do all of them.
@@ -272,8 +290,30 @@ do
   run_testsuite "$HOT_RESTART_JSON" "1" || exit 1
 done

-start_test disabling hot_restart by command line.
+# Hot restart over an explicitly specified UDS path.
+# Real symbol tables are the default, so run just one of these with fake symbol tables.
+# (Switch the "0" and "1" in the second arg of the two run_testsuite calls below.)
+if [ "$TEST_INDEX" = "0" ]; then
+  run_testsuite "${HOT_RESTART_JSON_V4}" "0" "${SOCKET_DIR}/envoy_domain_socket" "600" || exit 1
+fi
+
+run_testsuite "${HOT_RESTART_JSON_V4}" "1" "${SOCKET_DIR}/envoy_domain_socket" "600" || exit 1
+
+start_test "disabling hot_restart by command line."
CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --disable-hot-restart 2>&1) check [ "disabled" = "${CLI_HOT_RESTART_VERSION}" ] +# Validating socket-path permission +start_test socket-mode for socket path +run_in_background_saving_pid "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" \ + --restart-epoch 0 --base-id 0 --base-id-path "${BASE_ID_PATH}" \ + --socket-path "${SOCKET_DIR}"/envoy_domain_socket --socket-mode 644 \ + --service-cluster cluster --service-node node --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" \ + --admin-address-path "${ADMIN_ADDRESS_PATH_0}" +sleep 3 +EXPECTED_SOCKET_MODE=$(stat -c '%a' "${SOCKET_DIR}"/envoy_domain_socket_parent_0) +check [ "644" = "${EXPECTED_SOCKET_MODE}" ] +kill "${BACKGROUND_PID}" +wait "${BACKGROUND_PID}" + echo "PASS" diff --git a/test/integration/http2_flood_integration_test.cc b/test/integration/http2_flood_integration_test.cc new file mode 100644 index 000000000000..6b47b7de186c --- /dev/null +++ b/test/integration/http2_flood_integration_test.cc @@ -0,0 +1,1059 @@ +#include +#include + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/random_generator.h" +#include "common/http/header_map_impl.h" +#include "common/network/socket_option_impl.h" + +#include "test/integration/autonomous_upstream.h" +#include "test/integration/filters/test_socket_interface.h" +#include "test/integration/http_integration.h" +#include "test/integration/utility.h" +#include "test/mocks/http/mocks.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using ::testing::HasSubstr; + +namespace Envoy { + +namespace { +const uint32_t ControlFrameFloodLimit = 100; +const uint32_t AllFrameFloodLimit = 1000; +} // namespace + +class SocketInterfaceSwap { +public: + // Object of this class hold the state determining the IoHandle which + // should return EAGAIN from the `writev` call. 
+  struct IoHandleMatcher {
+    bool shouldReturnEgain(uint32_t port) const {
+      absl::ReaderMutexLock lock(&mutex_);
+      return port == port_ && writev_returns_egain_;
+    }
+
+    void setSourcePort(uint32_t port) {
+      absl::WriterMutexLock lock(&mutex_);
+      port_ = port;
+    }
+
+    void setWritevReturnsEgain() {
+      absl::WriterMutexLock lock(&mutex_);
+      writev_returns_egain_ = true;
+    }
+
+  private:
+    mutable absl::Mutex mutex_;
+    uint32_t port_ ABSL_GUARDED_BY(mutex_) = 0;
+    bool writev_returns_egain_ ABSL_GUARDED_BY(mutex_) = false;
+  };
+
+  SocketInterfaceSwap() {
+    Envoy::Network::SocketInterfaceSingleton::clear();
+    test_socket_interface_loader_ = std::make_unique<Envoy::Network::SocketInterfaceLoader>(
+        std::make_unique<Envoy::Network::TestSocketInterface>(
+            [writev_matcher = writev_matcher_](
+                Envoy::Network::TestIoSocketHandle* io_handle, const Buffer::RawSlice*,
+                uint64_t) -> absl::optional<Api::IoCallUint64Result> {
+              if (writev_matcher->shouldReturnEgain(io_handle->localAddress()->ip()->port())) {
+                return Api::IoCallUint64Result(
+                    0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),
+                                       Network::IoSocketError::deleteIoError));
+              }
+              return absl::nullopt;
+            }));
+  }
+
+  ~SocketInterfaceSwap() {
+    test_socket_interface_loader_.reset();
+    Envoy::Network::SocketInterfaceSingleton::initialize(previous_socket_interface_);
+  }
+
+protected:
+  Envoy::Network::SocketInterface* const previous_socket_interface_{
+      Envoy::Network::SocketInterfaceSingleton::getExisting()};
+  std::shared_ptr<IoHandleMatcher> writev_matcher_{std::make_shared<IoHandleMatcher>()};
+  std::unique_ptr<Envoy::Network::SocketInterfaceLoader> test_socket_interface_loader_;
+};
+
+// It is important that the new socket interface is installed before any I/O activity starts and
+// the previous one is restored after all I/O activity stops. Since the HttpIntegrationTest
+// destructor stops Envoy, the SocketInterfaceSwap destructor needs to run after it. This order of
+// multiple inheritance ensures that the SocketInterfaceSwap destructor runs after the
+// Http2FrameIntegrationTest destructor completes.
+class Http2FloodMitigationTest : public SocketInterfaceSwap,
+                                 public testing::TestWithParam<Network::Address::IpVersion>,
+                                 public Http2RawFrameIntegrationTest {
+public:
+  Http2FloodMitigationTest() : Http2RawFrameIntegrationTest(GetParam()) {
+    config_helper_.addConfigModifier(
+        [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+               hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); });
+  }
+
+protected:
+  void floodServer(const Http2Frame& frame, const std::string& flood_stat, uint32_t num_frames);
+  void floodServer(absl::string_view host, absl::string_view path,
+                   Http2Frame::ResponseStatus expected_http_status, const std::string& flood_stat,
+                   uint32_t num_frames);
+
+  void setNetworkConnectionBufferSize();
+  void beginSession() override;
+  void prefillOutboundDownstreamQueue(uint32_t data_frame_count, uint32_t data_frame_size = 10);
+  void triggerListenerDrain();
+};
+
+INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FloodMitigationTest,
+                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),
+                         TestUtility::ipTestParamsToString);
+
+void Http2FloodMitigationTest::setNetworkConnectionBufferSize() {
+  // The nghttp2 library has its own internal mitigation for outbound control frames (see
+  // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is
+  // modified to 10K in ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered
+  // when there are more than 10000 PING or SETTINGS frames with the ACK flag in the nghttp2
+  // internal outbound queue.
It is possible to trigger this mitigation in nghttp2 before triggering Envoy's + // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or + // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the + // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames). + // Set it to the arbitrarily chosen value of 32K. Note that this buffer has 16K lower bound. + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, ""); + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + + listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024); + }); +} + +void Http2FloodMitigationTest::beginSession() { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + // set lower outbound frame limits to make tests run faster + config_helper_.setOutboundFramesLimits(AllFrameFloodLimit, ControlFrameFloodLimit); + initialize(); + // Set up a raw connection to easily send requests without reading responses. Also, set a small + // TCP receive buffer to speed up connection backup. + auto options = std::make_shared(); + options->emplace_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + writev_matcher_->setSourcePort(lookupPort("http")); + tcp_client_ = makeTcpConnection(lookupPort("http"), options); + startHttp2Session(); +} + +// Verify that the server detects the flood of the given frame. +void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::string& flood_stat, + uint32_t num_frames) { + // make sure all frames can fit into 16k buffer + ASSERT_LE(num_frames, (16u * 1024u) / frame.size()); + std::vector buf(num_frames * frame.size()); + for (auto pos = buf.begin(); pos != buf.end();) { + pos = std::copy(frame.begin(), frame.end(), pos); + } + + ASSERT_TRUE(tcp_client_->write({buf.begin(), buf.end()}, false, false)); + + // Envoy's flood mitigation should kill the connection + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); + test_server_->waitForCounterGe("http.config_test.downstream_cx_delayed_close_timeout", 1); +} + +// Verify that the server detects the flood using specified request parameters. 
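As a quick cross-check of the sizing argument in setNetworkConnectionBufferSize() above, the arithmetic works out; the following is a standalone illustrative sketch (the constant names are invented here, the 9-byte frame header is the fixed HTTP/2 framing overhead, and the remaining numbers are taken from the comment).

#include <cstdint>

constexpr uint64_t kFrameHeaderSize = 9;                 // fixed HTTP/2 frame header
constexpr uint64_t kSettingsAckSize = kFrameHeaderSize;  // SETTINGS+ACK carries no payload
constexpr uint64_t kNghttp2ObqLimit = 10000;             // mitigation threshold cited above

// ~90KB of SETTINGS ACK frames is enough to trip nghttp2's own mitigation in one dispatch.
static_assert(kNghttp2ObqLimit * kSettingsAckSize == 90000, "10K SETTINGS ACKs ~= 90KB");

// The 32KB per-connection receive buffer chosen by the test stays below that,
// while respecting the 16KB lower bound mentioned in the comment.
constexpr uint64_t kChosenBufferLimit = 32 * 1024;
static_assert(kChosenBufferLimit < 90000 && kChosenBufferLimit >= 16 * 1024, "buffer in range");

int main() { return 0; }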
+void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_view path, + Http2Frame::ResponseStatus expected_http_status, + const std::string& flood_stat, uint32_t num_frames) { + uint32_t request_idx = 0; + auto request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), host, path); + sendFrame(request); + auto frame = readFrame(); + EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); + EXPECT_EQ(expected_http_status, frame.responseStatus()); + writev_matcher_->setWritevReturnsEgain(); + for (uint32_t frame = 0; frame < num_frames; ++frame) { + request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(++request_idx), host, path); + sendFrame(request); + } + tcp_client_->waitForDisconnect(); + if (!flood_stat.empty()) { + EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); + } + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); +} + +void Http2FloodMitigationTest::prefillOutboundDownstreamQueue(uint32_t data_frame_count, + uint32_t data_frame_size) { + // Set large buffer limits so the test is not affected by the flow control. + config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + beginSession(); + + // Do not read from the socket and send request that causes autonomous upstream to respond + // with the specified number of DATA frames. This pre-fills downstream outbound frame queue + // such the the next response triggers flood protection. + // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames + // start to accumulate in the transport socket buffer. + writev_matcher_->setWritevReturnsEgain(); + + const auto request = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(0), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", absl::StrCat(data_frame_count)), + Http2Frame::Header("response_size_bytes", absl::StrCat(data_frame_size)), + Http2Frame::Header("no_trailers", "0")}); + sendFrame(request); + + // Wait for some data to arrive and then wait for the upstream_rq_active to flip to 0 to indicate + // that the first request has completed. + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_rx_bytes_total", 10000); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_active", 0); + // Verify that pre-fill did not trigger flood protection + EXPECT_EQ(0, test_server_->counter("http2.outbound_flood")->value()); +} + +void Http2FloodMitigationTest::triggerListenerDrain() { + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); +} + +TEST_P(Http2FloodMitigationTest, Ping) { + setNetworkConnectionBufferSize(); + beginSession(); + writev_matcher_->setWritevReturnsEgain(); + floodServer(Http2Frame::makePingFrame(), "http2.outbound_control_flood", + ControlFrameFloodLimit + 1); +} + +TEST_P(Http2FloodMitigationTest, Settings) { + setNetworkConnectionBufferSize(); + beginSession(); + writev_matcher_->setWritevReturnsEgain(); + floodServer(Http2Frame::makeEmptySettingsFrame(), "http2.outbound_control_flood", + ControlFrameFloodLimit + 1); +} + +// Verify that the server can detect flood of internally generated 404 responses. 
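The "pre-fill N away from overflow" comments in the tests that follow all rely on the same bookkeeping: prefillOutboundDownstreamQueue(n) leaves n DATA frames plus one HEADERS frame in the downstream outbound queue. A minimal sketch of that arithmetic, assuming (as the Data test's comment suggests) that the flood check fires once the queue exceeds AllFrameFloodLimit; the helper name below is illustrative only.

#include <cstdint>

constexpr uint32_t kAllFrameFloodLimit = 1000; // mirrors AllFrameFloodLimit above

// prefillOutboundDownstreamQueue(n) queues n DATA frames plus one HEADERS frame.
constexpr uint32_t queuedAfterPrefill(uint32_t data_frames) { return data_frames + 1; }

// "pre-fill one away from overflow": the queue sits exactly at the limit, so any single
// additional outbound frame (HEADERS, GOAWAY, PING, ...) exceeds it.
static_assert(queuedAfterPrefill(kAllFrameFloodLimit - 1) == kAllFrameFloodLimit, "one away");

// "pre-fill 2 away from overflow": two more frames are needed, e.g. the HEADERS + DATA
// pair produced by sendLocalReply() with a body.
static_assert(queuedAfterPrefill(kAllFrameFloodLimit - 2) + 2 > kAllFrameFloodLimit, "two away");

int main() { return 0; }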
+TEST_P(Http2FloodMitigationTest, 404) { + // Change the default route to be restrictive, and send a request to a non existent route. + config_helper_.setDefaultHostAndRoute("foo.com", "/found"); + beginSession(); + + // Send requests to a non existent path to generate 404s + floodServer("host", "/notfound", Http2Frame::ResponseStatus::NotFound, "http2.outbound_flood", + AllFrameFloodLimit + 1); +} + +// Verify that the server can detect flood of response DATA frames +TEST_P(Http2FloodMitigationTest, Data) { + // Set large buffer limits so the test is not affected by the flow control. + config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + beginSession(); + + // Do not read from the socket and send request that causes autonomous upstream + // to respond with 1000 DATA frames. The Http2FloodMitigationTest::beginSession() + // sets 1000 flood limit for all frame types. Including 1 HEADERS response frame + // 1000 DATA frames should trigger flood protection. + // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start + // to accumulate in the transport socket buffer. + writev_matcher_->setWritevReturnsEgain(); + + const auto request = Http2Frame::makeRequest( + 1, "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "1000"), Http2Frame::Header("no_trailers", "0")}); + sendFrame(request); + + // Wait for connection to be flooded with outbound DATA frames and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flood triggered by a DATA frame from a decoder filter call +// to sendLocalReply(). +// This test also verifies that RELEASE_ASSERT in the ConnectionImpl::StreamImpl::encodeDataHelper() +// is not fired when it is called by the sendLocalReply() in the dispatching context. +TEST_P(Http2FloodMitigationTest, DataOverflowFromDecoderFilterSendLocalReply) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + const std::string yaml_string = R"EOF( +name: send_local_reply_filter +typed_config: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + prefix: "/call_send_local_reply" + code: 404 + body: "something" + )EOF"; + TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters()); + // keep router the last + auto size = hcm.http_filters_size(); + hcm.mutable_http_filters()->SwapElements(size - 2, size - 1); + }); + + // pre-fill 2 away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2); + + // At this point the outbound downstream frame queue should be 2 away from overflowing. + // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply with body. + // HEADERS + DATA frames should overflow the queue. + // Verify that connection was disconnected and appropriate counters were set. 
+ auto request2 = + Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply"); + sendFrame(request2); + + // Wait for connection to be flooded with outbound DATA frame and disconnected. + tcp_client_->waitForDisconnect(); + + // Verify that the upstream connection is still alive. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flood of response HEADERS frames +TEST_P(Http2FloodMitigationTest, Headers) { + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + // Send second request which should trigger headers only response. + // Verify that connection was disconnected and appropriate counters were set. + auto request2 = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(1), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0")}); + sendFrame(request2); + + // Wait for connection to be flooded with outbound HEADERS frame and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect overflow by 100 continue response sent by Envoy itself +TEST_P(Http2FloodMitigationTest, Envoy100ContinueHeaders) { + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + // Send second request which should trigger Envoy to respond with 100 continue. + // Verify that connection was disconnected and appropriate counters were set. + auto request2 = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(1), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0"), + Http2Frame::Header("expect", "100-continue")}); + sendFrame(request2); + + // Wait for connection to be flooded with outbound HEADERS frame and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // The second upstream request should be reset since it is disconnected when sending 100 continue + // response + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_rq_tx_reset")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flood triggered by a HEADERS frame from a decoder filter call +// to sendLocalReply(). 
+// This test also verifies that RELEASE_ASSERT in the +// ConnectionImpl::StreamImpl::encodeHeadersBase() is not fired when it is called by the +// sendLocalReply() in the dispatching context. +TEST_P(Http2FloodMitigationTest, HeadersOverflowFromDecoderFilterSendLocalReply) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + const std::string yaml_string = R"EOF( +name: send_local_reply_filter +typed_config: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + prefix: "/call_send_local_reply" + code: 404 + )EOF"; + TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters()); + // keep router the last + auto size = hcm.http_filters_size(); + hcm.mutable_http_filters()->SwapElements(size - 2, size - 1); + }); + + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + // At this point the outbound downstream frame queue should be 1 away from overflowing. + // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply without body. + // Verify that connection was disconnected and appropriate counters were set. + auto request2 = + Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply"); + sendFrame(request2); + + // Wait for connection to be flooded with outbound HEADERS frame and disconnected. + tcp_client_->waitForDisconnect(); + + // Verify that the upstream connection is still alive. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// TODO(yanavlasov): add the same tests as above for the encoder filters. +// This is currently blocked by the https://github.com/envoyproxy/envoy/pull/13256 + +// Verify that the server can detect flood of response METADATA frames +TEST_P(Http2FloodMitigationTest, Metadata) { + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() >= 1, ""); + auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + cluster->mutable_http2_protocol_options()->set_allow_metadata(true); + }); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); }); + + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + // Send second request which should trigger response with METADATA frame. + auto metadata_map_vector_ptr = std::make_unique(); + Http::MetadataMap metadata_map = { + {"header_key1", "header_value1"}, + {"header_key2", "header_value2"}, + }; + auto metadata_map_ptr = std::make_unique(metadata_map); + metadata_map_vector_ptr->push_back(std::move(metadata_map_ptr)); + static_cast(fake_upstreams_.front().get()) + ->setPreResponseHeadersMetadata(std::move(metadata_map_vector_ptr)); + + // Verify that connection was disconnected and appropriate counters were set. 
+ auto request2 = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(1), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0")}); + sendFrame(request2); + + // Wait for connection to be flooded with outbound METADATA frame and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flood of response trailers. +TEST_P(Http2FloodMitigationTest, Trailers) { + // Set large buffer limits so the test is not affected by the flow control. + config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + beginSession(); + + // Do not read from the socket and send request that causes autonomous upstream + // to respond with 999 DATA frames and trailers. The Http2FloodMitigationTest::beginSession() + // sets 1000 flood limit for all frame types. Including 1 HEADERS response frame + // 999 DATA frames and trailers should trigger flood protection. + // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start + // to accumulate in the transport socket buffer. + writev_matcher_->setWritevReturnsEgain(); + + static_cast(fake_upstreams_.front().get()) + ->setResponseTrailers(std::make_unique( + Http::TestResponseTrailerMapImpl({{"foo", "bar"}}))); + + const auto request = + Http2Frame::makeRequest(Http2Frame::makeClientStreamId(0), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "999")}); + sendFrame(request); + + // Wait for connection to be flooded with outbound trailers and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify flood detection by the WINDOW_UPDATE frame when a decoder filter is resuming reading from +// the downstream via DecoderFilterBelowWriteBufferLowWatermark. +TEST_P(Http2FloodMitigationTest, WindowUpdateOnLowWatermarkFlood) { + config_helper_.addFilter(R"EOF( + name: backpressure-filter + )EOF"); + config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); + // Set low window sizes in the server codec as nghttp2 sends WINDOW_UPDATE only after it consumes + // more than 25% of the window. 
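A small sanity check of the WINDOW_UPDATE reasoning above (sketch only; the 70000-byte window and the 16384-byte DATA payloads are the values this test configures and sends just below, and the "more than 25% consumed" rule is the nghttp2 behavior the comment describes).

#include <cstdint>

constexpr uint32_t kStreamWindow = 70000; // initial stream/connection window set by the test
constexpr uint32_t kDataPayload = 16384;  // payload of each DATA frame the client sends
constexpr uint32_t kDataFrames = 3;       // two plain frames plus one with END_STREAM

// nghttp2 is described above as emitting WINDOW_UPDATE only after more than a quarter of
// the window has been consumed; three DATA frames comfortably cross that threshold.
static_assert(kDataFrames * kDataPayload > kStreamWindow / 4, "crosses the 25% threshold");

int main() { return 0; }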
+ config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + auto* h2_options = hcm.mutable_http2_protocol_options(); + h2_options->mutable_initial_stream_window_size()->set_value(70000); + h2_options->mutable_initial_connection_window_size()->set_value(70000); + }); + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + beginSession(); + + writev_matcher_->setWritevReturnsEgain(); + + // pre-fill two away from overflow + const auto request = Http2Frame::makePostRequest( + Http2Frame::makeClientStreamId(0), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "998"), Http2Frame::Header("no_trailers", "0")}); + sendFrame(request); + + // The backpressure-filter disables reading when it sees request headers, and it should prevent + // WINDOW_UPDATE to be sent on the following DATA frames. Send enough DATA to consume more than + // 25% of the 70K window so that nghttp2 will send WINDOW_UPDATE on read resumption. + auto data_frame = + Http2Frame::makeDataFrame(Http2Frame::makeClientStreamId(0), std::string(16384, '0')); + sendFrame(data_frame); + sendFrame(data_frame); + data_frame = Http2Frame::makeDataFrame(Http2Frame::makeClientStreamId(0), std::string(16384, '1'), + Http2Frame::DataFlags::EndStream); + sendFrame(data_frame); + + // Upstream will respond with 998 DATA frames and the backpressure-filter filter will re-enable + // reading on the last DATA frame. This will cause nghttp2 to send two WINDOW_UPDATE frames for + // stream and connection windows. Together with response DATA frames it should overflow outbound + // frame queue. Wait for connection to be flooded with outbound WINDOW_UPDATE frame and + // disconnected. + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_flow_control_paused_reading_total") + ->value()); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// TODO(yanavlasov): add tests for WINDOW_UPDATE overflow from the router filter. These tests need +// missing support for write resumption from test sockets that were forced to return EAGAIN by the +// test. + +// Verify that the server can detect flood of RST_STREAM frames. +TEST_P(Http2FloodMitigationTest, RST_STREAM) { + // Use invalid HTTP headers to trigger sending RST_STREAM frames. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); + }); + beginSession(); + + uint32_t stream_index = 0; + auto request = + Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index)); + sendFrame(request); + auto response = readFrame(); + // Make sure we've got RST_STREAM from the server + EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); + + // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start + // to accumulate in the transport socket buffer. + writev_matcher_->setWritevReturnsEgain(); + + for (++stream_index; stream_index < ControlFrameFloodLimit + 2; ++stream_index) { + request = + Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index)); + sendFrame(request); + } + tcp_client_->waitForDisconnect(); + EXPECT_EQ(1, test_server_->counter("http2.outbound_control_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); +} + +// Verify detection of flood by the RST_STREAM frame sent on pending flush timeout +TEST_P(Http2FloodMitigationTest, RstStreamOverflowOnPendingFlushTimeout) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + hcm.mutable_stream_idle_timeout()->set_seconds(0); + constexpr uint64_t IdleTimeoutMs = 400; + hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); + }); + + // Pending flush timer is started when upstream response has completed but there is no window to + // send DATA downstream. The test downstream client does not update WINDOW and as such Envoy will + // use the default 65535 bytes. First, pre-fill outbound queue with 65 byte frames, which should + // consume 65 * 997 = 64805 bytes of downstream connection window. + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 3, 65); + + // At this point the outbound downstream frame queue should be 3 away from overflowing with 730 + // byte window. Make response to be 1 DATA frame with 1024 payload. This should overflow the + // available downstream window and start pending flush timer. Envoy proxies 2 frames downstream, + // HEADERS and partial DATA frame, which makes the frame queue 1 away from overflow. + const auto request2 = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(1), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "1"), + Http2Frame::Header("response_size_bytes", "1024"), Http2Frame::Header("no_trailers", "0")}); + sendFrame(request2); + + // Pending flush timer sends RST_STREAM frame which should overflow outbound frame queue and + // disconnect the connection. 
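The window bookkeeping in the comment above checks out; here is an illustrative sketch where every number comes from the comment (the 65535-byte default HTTP/2 window, 997 prefilled 65-byte DATA frames, and the final 1024-byte response), with constant names invented for readability.

#include <cstdint>

constexpr uint32_t kDefaultWindow = 65535;    // default HTTP/2 window; the test client never updates it
constexpr uint32_t kPrefillFrames = 997;      // AllFrameFloodLimit - 3
constexpr uint32_t kPrefillFrameSize = 65;    // data_frame_size passed to the prefill helper
constexpr uint32_t kFinalResponseSize = 1024; // response_size_bytes of the last request

static_assert(kPrefillFrames * kPrefillFrameSize == 64805, "prefill consumes 64805 bytes");
static_assert(kDefaultWindow - 64805 == 730, "730 bytes of window remain");
// The 1024-byte response cannot be flushed within the remaining window, so the pending
// flush timer starts and its RST_STREAM tips the outbound queue over the limit.
static_assert(kFinalResponseSize > 730, "response does not fit");

int main() { return 0; }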
+ tcp_client_->waitForDisconnect(); + + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); + // Verify that pending flush timeout was hit + EXPECT_EQ(1, test_server_->counter("http2.tx_flush_timeout")->value()); +} + +// Verify detection of frame flood when sending second GOAWAY frame on drain timeout +TEST_P(Http2FloodMitigationTest, GoAwayOverflowOnDrainTimeout) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* drain_time_out = hcm.mutable_drain_timeout(); + std::chrono::milliseconds timeout(1000); + auto seconds = std::chrono::duration_cast(timeout); + drain_time_out->set_seconds(seconds.count()); + + auto* http_protocol_options = hcm.mutable_common_http_protocol_options(); + auto* idle_time_out = http_protocol_options->mutable_idle_timeout(); + idle_time_out->set_seconds(seconds.count()); + }); + // pre-fill two away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2); + + // connection idle timeout will send first GOAWAY frame and start drain timer + // drain timeout will send second GOAWAY frame which should trigger flood protection + // Wait for connection to be flooded with outbound GOAWAY frame and disconnected. + tcp_client_->waitForDisconnect(); + + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify detection of overflowing outbound frame queue with the GOAWAY frames sent after the +// downstream idle connection timeout disconnects the connection. +// The test verifies protocol constraint violation handling in the +// Http2::ConnectionImpl::shutdownNotice() method. +TEST_P(Http2FloodMitigationTest, DownstreamIdleTimeoutTriggersFloodProtection) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* http_protocol_options = hcm.mutable_common_http_protocol_options(); + auto* idle_time_out = http_protocol_options->mutable_idle_timeout(); + std::chrono::milliseconds timeout(1000); + auto seconds = std::chrono::duration_cast(timeout); + idle_time_out->set_seconds(seconds.count()); + }); + + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_idle_timeout")->value()); +} + +// Verify detection of overflowing outbound frame queue with the GOAWAY frames sent after the +// downstream connection duration timeout disconnects the connection. +// The test verifies protocol constraint violation handling in the +// Http2::ConnectionImpl::shutdownNotice() method. 
+TEST_P(Http2FloodMitigationTest, DownstreamConnectionDurationTimeoutTriggersFloodProtection) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* http_protocol_options = hcm.mutable_common_http_protocol_options(); + auto* max_connection_duration = http_protocol_options->mutable_max_connection_duration(); + std::chrono::milliseconds timeout(1000); + auto seconds = std::chrono::duration_cast(timeout); + max_connection_duration->set_seconds(seconds.count()); + }); + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_max_duration_reached")->value()); +} + +// Verify detection of frame flood when sending GOAWAY frame during processing of response headers +// on a draining listener. +TEST_P(Http2FloodMitigationTest, GoawayOverflowDuringResponseWhenDraining) { + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + triggerListenerDrain(); + + // Send second request which should trigger Envoy to send GOAWAY (since it is in the draining + // state) when processing response headers. Verify that connection was disconnected and + // appropriate counters were set. + auto request2 = + Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/test/long/url"); + sendFrame(request2); + + // Wait for connection to be flooded with outbound GOAWAY frame and disconnected. + tcp_client_->waitForDisconnect(); + + // Verify that the upstream connection is still alive. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_drain_close")->value()); +} + +// Verify detection of frame flood when sending GOAWAY frame during call to sendLocalReply() +// from decoder filter on a draining listener. +TEST_P(Http2FloodMitigationTest, GoawayOverflowFromDecoderFilterSendLocalReplyWhenDraining) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + const std::string yaml_string = R"EOF( +name: send_local_reply_filter +typed_config: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + prefix: "/call_send_local_reply" + code: 404 + )EOF"; + TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters()); + // keep router the last + auto size = hcm.http_filters_size(); + hcm.mutable_http_filters()->SwapElements(size - 2, size - 1); + }); + + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + triggerListenerDrain(); + + // At this point the outbound downstream frame queue should be 1 away from overflowing. + // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply without body which + // should trigger Envoy to send GOAWAY (since it is in the draining state) when processing + // sendLocalReply() headers. Verify that connection was disconnected and appropriate counters were + // set. 
+ auto request2 = + Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply"); + sendFrame(request2); + + // Wait for connection to be flooded with outbound GOAWAY frame and disconnected. + tcp_client_->waitForDisconnect(); + + // Verify that the upstream connection is still alive. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_drain_close")->value()); +} + +// Verify that the server can detect flooding by the RST_STREAM on when upstream disconnects +// before sending response headers. +TEST_P(Http2FloodMitigationTest, RstStreamOnUpstreamRemoteCloseBeforeResponseHeaders) { + // pre-fill 3 away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 3); + + // Start second request. + auto request2 = + Http2Frame::makePostRequest(Http2Frame::makeClientStreamId(1), "host", "/test/long/url"); + sendFrame(request2); + + // Wait for it to be proxied + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_total", 2); + + // Disconnect upstream connection. Since there no response headers were sent yet the router + // filter will send 503 with body and then RST_STREAM. With these 3 frames the downstream outbound + // frame queue should overflow. + ASSERT_TRUE(static_cast(fake_upstreams_.front().get())->closeConnection(0)); + + // Wait for connection to be flooded with outbound RST_STREAM frame and disconnected. + tcp_client_->waitForDisconnect(); + + ASSERT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flooding by the RST_STREAM on stream idle timeout +// after sending response headers. +TEST_P(Http2FloodMitigationTest, RstStreamOnStreamIdleTimeoutAfterResponseHeaders) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* stream_idle_timeout = hcm.mutable_stream_idle_timeout(); + std::chrono::milliseconds timeout(1000); + auto seconds = std::chrono::duration_cast(timeout); + stream_idle_timeout->set_seconds(seconds.count()); + }); + // pre-fill 2 away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2); + + // Start second request, which should result in response headers to be sent but the stream kept + // open. + auto request2 = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(1), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_end_stream", "0")}); + sendFrame(request2); + + // Wait for stream idle timeout to send RST_STREAM. With the response headers frame from the + // second response the downstream outbound frame queue should overflow. + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_idle_timeout")->value()); +} + +// Verify detection of overflowing outbound frame queue with the PING frames sent by the keep alive +// timer. The test verifies protocol constraint violation handling in the +// Http2::ConnectionImpl::sendKeepalive() method. 
+TEST_P(Http2FloodMitigationTest, KeepAliveTimeeTriggersFloodProtection) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* keep_alive = hcm.mutable_http2_protocol_options()->mutable_connection_keepalive(); + keep_alive->mutable_interval()->set_nanos(500 * 1000 * 1000); + keep_alive->mutable_timeout()->set_seconds(1); + }); + + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server stop reading downstream connection on protocol error. +TEST_P(Http2FloodMitigationTest, TooManyStreams) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_http2_protocol_options()->mutable_max_concurrent_streams()->set_value(2); + }); + autonomous_upstream_ = true; + beginSession(); + // To prevent Envoy from closing client streams the upstream connection needs to push back on + // writing by the upstream server. In this case Envoy will not see upstream responses and will + // keep client streams open, eventually maxing them out and causing client connection to be + // closed. + writev_matcher_->setSourcePort(fake_upstreams_[0]->localAddress()->ip()->port()); + + // Exceed the number of streams allowed by the server. The server should stop reading from the + // client. + floodServer("host", "/test/long/url", Http2Frame::ResponseStatus::Ok, "", 3); +} + +TEST_P(Http2FloodMitigationTest, EmptyHeaders) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_http2_protocol_options() + ->mutable_max_consecutive_inbound_frames_with_empty_payload() + ->set_value(0); + }); + beginSession(); + + const auto request = Http2Frame::makeEmptyHeadersFrame(Http2Frame::makeClientStreamId(0)); + sendFrame(request); + + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); +} + +TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + beginSession(); + + const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); + auto request = Http2Frame::makeEmptyHeadersFrame(request_stream_id); + sendFrame(request); + + for (int i = 0; i < 2; i++) { + request = Http2Frame::makeEmptyContinuationFrame(request_stream_id); + sendFrame(request); + } + + tcp_client_->waitForDisconnect(); + + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); + EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); +} + +TEST_P(Http2FloodMitigationTest, EmptyData) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + beginSession(); + + const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); + auto request = Http2Frame::makePostRequest(request_stream_id, "host", "/"); + sendFrame(request); + + for (int i = 0; i < 2; i++) { + request = Http2Frame::makeEmptyDataFrame(request_stream_id); + sendFrame(request); + } + + tcp_client_->waitForDisconnect(); + + 
EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); + EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); +} + +TEST_P(Http2FloodMitigationTest, PriorityIdleStream) { + beginSession(); + + floodServer(Http2Frame::makePriorityFrame(Http2Frame::makeClientStreamId(0), + Http2Frame::makeClientStreamId(1)), + "http2.inbound_priority_frames_flood", + Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM + 1); +} + +TEST_P(Http2FloodMitigationTest, PriorityOpenStream) { + beginSession(); + + // Open stream. + const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); + const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/"); + sendFrame(request); + + floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)), + "http2.inbound_priority_frames_flood", + Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 + + 1); +} + +TEST_P(Http2FloodMitigationTest, PriorityClosedStream) { + autonomous_upstream_ = true; + beginSession(); + + // Open stream. + const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); + const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/"); + sendFrame(request); + // Reading response marks this stream as closed in nghttp2. + auto frame = readFrame(); + EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); + + floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)), + "http2.inbound_priority_frames_flood", + Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 + + 1); +} + +TEST_P(Http2FloodMitigationTest, WindowUpdate) { + beginSession(); + + // Open stream. + const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); + const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/"); + sendFrame(request); + + // Since we do not send any DATA frames, only 4 sequential WINDOW_UPDATE frames should + // trigger flood protection. + floodServer(Http2Frame::makeWindowUpdateFrame(request_stream_id, 1), + "http2.inbound_window_update_frames_flood", 4); +} + +// Verify that the HTTP/2 connection is terminated upon receiving invalid HEADERS frame. +TEST_P(Http2FloodMitigationTest, ZerolenHeader) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + beginSession(); + + // Send invalid request. + const auto request = Http2Frame::makeMalformedRequestWithZerolenHeader( + Http2Frame::makeClientStreamId(0), "host", "/"); + sendFrame(request); + + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); + // expect a downstream protocol error. + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); +} + +// Verify that only the offending stream is terminated upon receiving invalid HEADERS frame. 
+TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); + }); + autonomous_upstream_ = true; + beginSession(); + + // Send invalid request. + uint32_t request_idx = 0; + auto request = Http2Frame::makeMalformedRequestWithZerolenHeader( + Http2Frame::makeClientStreamId(request_idx), "host", "/"); + sendFrame(request); + // Make sure we've got RST_STREAM from the server. + auto response = readFrame(); + EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); + + // Send valid request using the same connection. + request_idx++; + request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), "host", "/"); + sendFrame(request); + response = readFrame(); + EXPECT_EQ(Http2Frame::Type::Headers, response.type()); + EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus()); + + tcp_client_->close(); + + EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); + EXPECT_EQ(0, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); + // expect Downstream Protocol Error + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); +} + +} // namespace Envoy diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 6e2a4d82143b..1ad689a4849e 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -10,7 +10,6 @@ #include "common/buffer/buffer_impl.h" #include "common/common/random_generator.h" #include "common/http/header_map_impl.h" -#include "common/network/socket_option_impl.h" #include "test/integration/utility.h" #include "test/mocks/http/mocks.h" @@ -123,6 +122,34 @@ TEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) { response->waitForReset(); } +TEST_P(Http2IntegrationTest, Http2DownstreamKeepalive) { + constexpr uint64_t interval_ms = 1; + constexpr uint64_t timeout_ms = 250; + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_http2_protocol_options() + ->mutable_connection_keepalive() + ->mutable_interval() + ->set_nanos(interval_ms * 1000 * 1000); + hcm.mutable_http2_protocol_options() + ->mutable_connection_keepalive() + ->mutable_timeout() + ->set_nanos(timeout_ms * 1000 * 1000); + }); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + + // This call is NOT running the event loop of the client, so downstream PINGs will + // not receive a response. 
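For reference, the keepalive knobs in the Http2DownstreamKeepalive test above are sub-second values, so the config modifier expresses them through the Duration nanos field by multiplying milliseconds by one million. A tiny sketch of that conversion, with illustrative constant names:

#include <cstdint>

constexpr uint64_t kIntervalMs = 1;   // keepalive interval used by the test
constexpr uint64_t kTimeoutMs = 250;  // keepalive timeout used by the test

static_assert(kIntervalMs * 1000 * 1000 == 1000000, "1 ms expressed in nanoseconds");
static_assert(kTimeoutMs * 1000 * 1000 == 250000000, "250 ms expressed in nanoseconds");
// Both stay below one full second, so the seconds field of the Duration is left at zero.
static_assert(kTimeoutMs * 1000 * 1000 < 1000000000, "sub-second value fits in nanos");

int main() { return 0; }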
+ test_server_->waitForCounterEq("http2.keepalive_timeout", 1, + std::chrono::milliseconds(timeout_ms * 2)); + + response->waitForReset(); +} + static std::string response_metadata_filter = R"EOF( name: response-metadata-filter typed_config: @@ -539,6 +566,25 @@ TEST_P(Http2MetadataIntegrationTest, RequestMetadataReachSizeLimit) { ASSERT_FALSE(response->complete()); } +TEST_P(Http2MetadataIntegrationTest, RequestMetadataThenTrailers) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + Http::MetadataMap metadata_map = {{"key", "value"}}; + codec_client_->sendMetadata(*request_encoder_, metadata_map); + Http::TestRequestTrailerMapImpl request_trailers{{"trailer", "trailer"}}; + codec_client_->sendTrailers(*request_encoder_, request_trailers); + + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); +} + static std::string request_metadata_filter = R"EOF( name: request-metadata-filter typed_config: @@ -649,12 +695,6 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 6); } -static std::string decode_headers_only = R"EOF( -name: decode-headers-only -typed_config: - "@type": type.googleapis.com/google.protobuf.Empty -)EOF"; - void Http2MetadataIntegrationTest::runHeaderOnlyTest(bool send_request_body, size_t body_size) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -698,28 +738,6 @@ void Http2MetadataIntegrationTest::verifyHeadersOnlyTest() { EXPECT_EQ(true, upstream_request_->complete()); } -TEST_P(Http2MetadataIntegrationTest, DecodingHeadersOnlyRequestWithRequestMetadataEmptyData) { - addFilters({request_metadata_filter, decode_headers_only}); - - // Send a request with body, and body size is 0. - runHeaderOnlyTest(true, 0); - verifyHeadersOnlyTest(); -} - -TEST_P(Http2MetadataIntegrationTest, DecodingHeadersOnlyRequestWithRequestMetadataNoneEmptyData) { - addFilters({request_metadata_filter, decode_headers_only}); - // Send a request with body, and body size is 128. - runHeaderOnlyTest(true, 128); - verifyHeadersOnlyTest(); -} - -TEST_P(Http2MetadataIntegrationTest, DecodingHeadersOnlyRequestWithRequestMetadataDiffFilterOrder) { - addFilters({decode_headers_only, request_metadata_filter}); - // Send a request with body, and body size is 128. - runHeaderOnlyTest(true, 128); - verifyHeadersOnlyTest(); -} - TEST_P(Http2MetadataIntegrationTest, HeadersOnlyRequestWithRequestMetadata) { addFilters({request_metadata_filter}); // Send a headers only request. @@ -900,13 +918,14 @@ TEST_P(Http2IntegrationTest, GrpcRequestTimeout) { auto* route_config = hcm.mutable_route_config(); auto* virtual_host = route_config->mutable_virtual_hosts(0); auto* route = virtual_host->mutable_routes(0); - route->mutable_route()->mutable_max_grpc_timeout()->set_seconds(60 * 60); + route->mutable_route() + ->mutable_max_stream_duration() + ->mutable_grpc_timeout_header_max() + ->set_seconds(60 * 60); }); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - // With upstream request timeout Envoy should send a gRPC-Status "DEADLINE EXCEEDED". 
- // TODO: Properly map request timeout to "DEADLINE EXCEEDED" instead of "SERVICE UNAVAILABLE". auto response = codec_client_->makeHeaderOnlyRequest( Http::TestRequestHeaderMapImpl{{":method", "POST"}, {":path", "/test/long/url"}, @@ -919,8 +938,9 @@ TEST_P(Http2IntegrationTest, GrpcRequestTimeout) { EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_NE(response->headers().GrpcStatus(), nullptr); - EXPECT_EQ("14", response->headers().getGrpcStatusValue()); // Service Unavailable - EXPECT_LT(0, test_server_->counter("cluster.cluster_0.upstream_rq_timeout")->value()); + EXPECT_EQ("4", response->headers().getGrpcStatusValue()); // Deadline exceeded. + EXPECT_LT(0, + test_server_->counter("http.config_test.downstream_rq_max_duration_reached")->value()); } // Interleave two requests and responses and make sure that idle timeout is handled correctly. @@ -1295,8 +1315,7 @@ Http2RingHashIntegrationTest::~Http2RingHashIntegrationTest() { void Http2RingHashIntegrationTest::createUpstreams() { for (int i = 0; i < num_upstreams_; i++) { - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP1, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP1); } } @@ -1380,9 +1399,11 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieNoTtl) { {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { EXPECT_EQ("200", response.headers().getStatusValue()); - EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); - served_by.insert(std::string( - response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); + EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie).empty()); + served_by.insert(std::string(response.headers() + .get(Http::LowerCaseString("x-served-by"))[0] + ->value() + .getStringView())); }); EXPECT_EQ(served_by.size(), num_upstreams_); } @@ -1411,7 +1432,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithNonzeroTtlSet) { [&](IntegrationStreamDecoder& response) { EXPECT_EQ("200", response.headers().getStatusValue()); std::string value( - response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); + response.headers().get(Http::Headers::get().SetCookie)[0]->value().getStringView()); set_cookies.insert(value); EXPECT_THAT(value, MatchesRegex("foo=.*; Max-Age=15; HttpOnly")); }); @@ -1442,7 +1463,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithZeroTtlSet) { [&](IntegrationStreamDecoder& response) { EXPECT_EQ("200", response.headers().getStatusValue()); std::string value( - response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); + response.headers().get(Http::Headers::get().SetCookie)[0]->value().getStringView()); set_cookies.insert(value); EXPECT_THAT(value, MatchesRegex("^foo=.*$")); }); @@ -1472,9 +1493,11 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieNoTtl) { {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { EXPECT_EQ("200", response.headers().getStatusValue()); - EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); - served_by.insert(std::string( - response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); + EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie).empty()); + served_by.insert(std::string(response.headers() + .get(Http::LowerCaseString("x-served-by"))[0] + ->value() + .getStringView())); }); EXPECT_EQ(served_by.size(), 1); } @@ 
-1503,65 +1526,20 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) { {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { EXPECT_EQ("200", response.headers().getStatusValue()); - EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); - served_by.insert(std::string( - response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); + EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie).empty()); + served_by.insert(std::string(response.headers() + .get(Http::LowerCaseString("x-served-by"))[0] + ->value() + .getStringView())); }); EXPECT_EQ(served_by.size(), 1); } -void Http2FrameIntegrationTest::startHttp2Session() { - ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false)); - - // Send empty initial SETTINGS frame. - auto settings = Http2Frame::makeEmptySettingsFrame(); - ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); - - // Read initial SETTINGS frame from the server. - readFrame(); - - // Send an SETTINGS ACK. - settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack); - ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); - - // read pending SETTINGS and WINDOW_UPDATE frames - readFrame(); - readFrame(); -} - -void Http2FrameIntegrationTest::beginSession() { - setDownstreamProtocol(Http::CodecClient::Type::HTTP2); - setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); - // set lower outbound frame limits to make tests run faster - config_helper_.setOutboundFramesLimits(1000, 100); - initialize(); - // Set up a raw connection to easily send requests without reading responses. - auto options = std::make_shared<Network::Socket::Options>(); - options->emplace_back(std::make_shared<Network::SocketOptionImpl>( - envoy::config::core::v3::SocketOption::STATE_PREBIND, - ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); - tcp_client_ = makeTcpConnection(lookupPort("http"), options); - startHttp2Session(); -} - -Http2Frame Http2FrameIntegrationTest::readFrame() { - Http2Frame frame; - EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize)); - frame.setHeader(tcp_client_->data()); - tcp_client_->clearData(frame.HeaderSize); - auto len = frame.payloadSize(); - if (len) { - EXPECT_TRUE(tcp_client_->waitForData(len)); - frame.setPayload(tcp_client_->data()); - tcp_client_->clearData(len); - } - return frame; -} - -void Http2FrameIntegrationTest::sendFrame(const Http2Frame& frame) { - ASSERT_TRUE(tcp_client_->connected()); - ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false)); -} +class Http2FrameIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>, + public Http2RawFrameIntegrationTest { +public: + Http2FrameIntegrationTest() : Http2RawFrameIntegrationTest(GetParam()) {} +}; // Regression test.
TEST_P(Http2FrameIntegrationTest, SetDetailsTwice) { @@ -1587,410 +1565,4 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FrameIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -namespace { -const uint32_t ControlFrameFloodLimit = 100; -const uint32_t AllFrameFloodLimit = 1000; -} // namespace - -Http2FloodMitigationTest::Http2FloodMitigationTest() { - Envoy::Network::SocketInterfaceSingleton::clear(); - test_socket_interface_loader_ = std::make_unique( - std::make_unique( - [writev_matcher = writev_matcher_](Envoy::Network::TestIoSocketHandle* io_handle, - const Buffer::RawSlice*, - uint64_t) -> absl::optional { - if (writev_matcher->shouldReturnEgain(io_handle->localAddress()->ip()->port())) { - return Api::IoCallUint64Result( - 0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), - Network::IoSocketError::deleteIoError)); - } - return absl::nullopt; - })); - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); -} - -Http2FloodMitigationTest::~Http2FloodMitigationTest() { - test_socket_interface_loader_.reset(); - Envoy::Network::SocketInterfaceSingleton::initialize(previous_socket_interface_); -} - -void Http2FloodMitigationTest::setNetworkConnectionBufferSize() { - // nghttp2 library has its own internal mitigation for outbound control frames (see - // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified - // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when - // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal - // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's - // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or - // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the - // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames). - // Set it to the arbitrarily chosen value of 32K. Note that this buffer has 16K lower bound. - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, ""); - auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - - listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024); - }); -} - -void Http2FloodMitigationTest::beginSession() { - setDownstreamProtocol(Http::CodecClient::Type::HTTP2); - setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); - // set lower outbound frame limits to make tests run faster - config_helper_.setOutboundFramesLimits(AllFrameFloodLimit, ControlFrameFloodLimit); - initialize(); - // Set up a raw connection to easily send requests without reading responses. Also, set a small - // TCP receive buffer to speed up connection backup. - auto options = std::make_shared(); - options->emplace_back(std::make_shared( - envoy::config::core::v3::SocketOption::STATE_PREBIND, - ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); - writev_matcher_->setSourcePort(lookupPort("http")); - tcp_client_ = makeTcpConnection(lookupPort("http"), options); - startHttp2Session(); -} - -// Verify that the server detects the flood of the given frame. 
-void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::string& flood_stat, - uint32_t num_frames) { - // make sure all frames can fit into 16k buffer - ASSERT_LE(num_frames, (16u * 1024u) / frame.size()); - std::vector buf(num_frames * frame.size()); - for (auto pos = buf.begin(); pos != buf.end();) { - pos = std::copy(frame.begin(), frame.end(), pos); - } - - ASSERT_TRUE(tcp_client_->write({buf.begin(), buf.end()}, false, false)); - - // Envoy's flood mitigation should kill the connection - tcp_client_->waitForDisconnect(); - - EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); - test_server_->waitForCounterGe("http.config_test.downstream_cx_delayed_close_timeout", 1); -} - -// Verify that the server detects the flood using specified request parameters. -void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_view path, - Http2Frame::ResponseStatus expected_http_status, - const std::string& flood_stat, uint32_t num_frames) { - uint32_t request_idx = 0; - auto request = Http2Frame::makeRequest(request_idx, host, path); - sendFrame(request); - auto frame = readFrame(); - EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); - EXPECT_EQ(expected_http_status, frame.responseStatus()); - writev_matcher_->setWritevReturnsEgain(); - for (uint32_t frame = 0; frame < num_frames; ++frame) { - request = Http2Frame::makeRequest(++request_idx, host, path); - sendFrame(request); - } - tcp_client_->waitForDisconnect(); - if (!flood_stat.empty()) { - EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); - } - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FloodMitigationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - -TEST_P(Http2FloodMitigationTest, Ping) { - setNetworkConnectionBufferSize(); - beginSession(); - writev_matcher_->setWritevReturnsEgain(); - floodServer(Http2Frame::makePingFrame(), "http2.outbound_control_flood", - ControlFrameFloodLimit + 1); -} - -TEST_P(Http2FloodMitigationTest, Settings) { - setNetworkConnectionBufferSize(); - beginSession(); - writev_matcher_->setWritevReturnsEgain(); - floodServer(Http2Frame::makeEmptySettingsFrame(), "http2.outbound_control_flood", - ControlFrameFloodLimit + 1); -} - -// Verify that the server can detect flood of internally generated 404 responses. -TEST_P(Http2FloodMitigationTest, 404) { - // Change the default route to be restrictive, and send a request to a non existent route. - config_helper_.setDefaultHostAndRoute("foo.com", "/found"); - beginSession(); - - // Send requests to a non existent path to generate 404s - floodServer("host", "/notfound", Http2Frame::ResponseStatus::NotFound, "http2.outbound_flood", - AllFrameFloodLimit + 1); -} - -// Verify that the server can detect flood of response DATA frames -TEST_P(Http2FloodMitigationTest, Data) { - // Set large buffer limits so the test is not affected by the flow control. - config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); - autonomous_upstream_ = true; - autonomous_allow_incomplete_streams_ = true; - beginSession(); - - // Do not read from the socket and send request that causes autonomous upstream - // to respond with 1000 DATA frames. The Http2FloodMitigationTest::beginSession() - // sets 1000 flood limit for all frame types. Including 1 HEADERS response frame - // 1000 DATA frames should trigger flood protection. 
- // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start - // to accumulate in the transport socket buffer. - writev_matcher_->setWritevReturnsEgain(); - - auto request = Http2Frame::makeRequest(0, "host", "/test/long/url", - {Http2Frame::Header("response_data_blocks", "1000")}); - sendFrame(request); - - // Wait for 19077 bytes to arrive from upstream (1K DATA frames of size 10 + HEADERS frame) - test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_rx_bytes_total", 19077); - - // If the server codec incorrectly thrown an exception on flood detection it would cause - // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed - // connections. - ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); - ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); - - // Server codec should be flooded. However it is not disconnected until client sends it - // some bytes. Verify that it is still connected and send 1 byte. Flood detection - // happens before data is parsed so it does not matter what is being sent. - ASSERT_TRUE(tcp_client_->connected()); - ASSERT_TRUE(tcp_client_->write(std::string("a"), false, false)); - // Now wait for client to be disconnected and verify it is due to flood checks. - tcp_client_->waitForDisconnect(); - EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -// Verify that the server can detect flood of RST_STREAM frames. -TEST_P(Http2FloodMitigationTest, RST_STREAM) { - // Use invalid HTTP headers to trigger sending RST_STREAM frames. - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.mutable_http2_protocol_options() - ->mutable_override_stream_error_on_invalid_http_message() - ->set_value(true); - }); - beginSession(); - - uint32_t stream_index = 0; - auto request = Http::Http2::Http2Frame::makeMalformedRequest(stream_index); - sendFrame(request); - auto response = readFrame(); - // Make sure we've got RST_STREAM from the server - EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); - - // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start - // to accumulate in the transport socket buffer. - writev_matcher_->setWritevReturnsEgain(); - - for (++stream_index; stream_index < ControlFrameFloodLimit + 2; ++stream_index) { - request = Http::Http2::Http2Frame::makeMalformedRequest(stream_index); - sendFrame(request); - } - tcp_client_->waitForDisconnect(); - EXPECT_EQ(1, test_server_->counter("http2.outbound_control_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -// Verify that the server stop reading downstream connection on protocol error. -TEST_P(Http2FloodMitigationTest, TooManyStreams) { - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.mutable_http2_protocol_options()->mutable_max_concurrent_streams()->set_value(2); - }); - autonomous_upstream_ = true; - beginSession(); - // To prevent Envoy from closing client streams the upstream connection needs to push back on - // writing by the upstream server. 
In this case Envoy will not see upstream responses and will - // keep client streams open, eventually maxing them out and causing client connection to be - // closed. - writev_matcher_->setSourcePort(fake_upstreams_[0]->localAddress()->ip()->port()); - - // Exceed the number of streams allowed by the server. The server should stop reading from the - // client. - floodServer("host", "/test/long/url", Http2Frame::ResponseStatus::Ok, "", 3); -} - -TEST_P(Http2FloodMitigationTest, EmptyHeaders) { - config_helper_.addConfigModifier( - [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.mutable_http2_protocol_options() - ->mutable_max_consecutive_inbound_frames_with_empty_payload() - ->set_value(0); - }); - beginSession(); - - uint32_t request_idx = 0; - auto request = Http2Frame::makeEmptyHeadersFrame(request_idx); - sendFrame(request); - - tcp_client_->waitForDisconnect(); - - EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { - useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); - beginSession(); - - uint32_t request_idx = 0; - auto request = Http2Frame::makeEmptyHeadersFrame(request_idx); - sendFrame(request); - - for (int i = 0; i < 2; i++) { - request = Http2Frame::makeEmptyContinuationFrame(request_idx); - sendFrame(request); - } - - tcp_client_->waitForDisconnect(); - - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); - EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -TEST_P(Http2FloodMitigationTest, EmptyData) { - useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); - beginSession(); - - uint32_t request_idx = 0; - auto request = Http2Frame::makePostRequest(request_idx, "host", "/"); - sendFrame(request); - - for (int i = 0; i < 2; i++) { - request = Http2Frame::makeEmptyDataFrame(request_idx); - sendFrame(request); - } - - tcp_client_->waitForDisconnect(); - - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); - EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -TEST_P(Http2FloodMitigationTest, PriorityIdleStream) { - beginSession(); - - floodServer(Http2Frame::makePriorityFrame(0, 1), "http2.inbound_priority_frames_flood", - Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM + 1); -} - -TEST_P(Http2FloodMitigationTest, PriorityOpenStream) { - beginSession(); - - // Open stream. - uint32_t request_idx = 0; - auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFrame(request); - - floodServer(Http2Frame::makePriorityFrame(request_idx, request_idx + 1), - "http2.inbound_priority_frames_flood", - Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 + - 1); -} - -TEST_P(Http2FloodMitigationTest, PriorityClosedStream) { - autonomous_upstream_ = true; - beginSession(); - - // Open stream. - uint32_t request_idx = 0; - auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFrame(request); - // Reading response marks this stream as closed in nghttp2. 
- auto frame = readFrame(); - EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); - - floodServer(Http2Frame::makePriorityFrame(request_idx, request_idx + 1), - "http2.inbound_priority_frames_flood", - Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 + - 1); -} - -TEST_P(Http2FloodMitigationTest, WindowUpdate) { - beginSession(); - - // Open stream. - uint32_t request_idx = 0; - auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFrame(request); - - // Since we do not send any DATA frames, only 4 sequential WINDOW_UPDATE frames should - // trigger flood protection. - floodServer(Http2Frame::makeWindowUpdateFrame(request_idx, 1), - "http2.inbound_window_update_frames_flood", 4); -} - -// Verify that the HTTP/2 connection is terminated upon receiving invalid HEADERS frame. -TEST_P(Http2FloodMitigationTest, ZerolenHeader) { - useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); - beginSession(); - - // Send invalid request. - uint32_t request_idx = 0; - auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/"); - sendFrame(request); - - tcp_client_->waitForDisconnect(); - - EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); - // expect a downstream protocol error. - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); -} - -// Verify that only the offending stream is terminated upon receiving invalid HEADERS frame. -TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { - useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.mutable_http2_protocol_options() - ->mutable_override_stream_error_on_invalid_http_message() - ->set_value(true); - }); - autonomous_upstream_ = true; - beginSession(); - - // Send invalid request. - uint32_t request_idx = 0; - auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/"); - sendFrame(request); - // Make sure we've got RST_STREAM from the server. - auto response = readFrame(); - EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); - - // Send valid request using the same connection. 
- request_idx++; - request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFrame(request); - response = readFrame(); - EXPECT_EQ(Http2Frame::Type::Headers, response.type()); - EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus()); - - tcp_client_->close(); - - EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); - EXPECT_EQ(0, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); - // expect Downstream Protocol Error - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); -} - } // namespace Envoy diff --git a/test/integration/http2_integration_test.h b/test/integration/http2_integration_test.h index 4db1e5c2065c..45dcc445d5f4 100644 --- a/test/integration/http2_integration_test.h +++ b/test/integration/http2_integration_test.h @@ -5,15 +5,11 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" -#include "test/common/http/http2/http2_frame.h" -#include "test/integration/filters/test_socket_interface.h" #include "test/integration/http_integration.h" #include "absl/synchronization/mutex.h" #include "gtest/gtest.h" -using Envoy::Http::Http2::Http2Frame; - namespace Envoy { class Http2IntegrationTest : public testing::TestWithParam, public HttpIntegrationTest { @@ -71,61 +67,4 @@ class Http2MetadataIntegrationTest : public Http2IntegrationTest { void runHeaderOnlyTest(bool send_request_body, size_t body_size); }; -class Http2FrameIntegrationTest : public testing::TestWithParam, - public HttpIntegrationTest { -public: - Http2FrameIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {} - -protected: - void startHttp2Session(); - Http2Frame readFrame(); - void sendFrame(const Http2Frame& frame); - virtual void beginSession(); - - IntegrationTcpClientPtr tcp_client_; -}; - -class Http2FloodMitigationTest : public Http2FrameIntegrationTest { -public: - Http2FloodMitigationTest(); - ~Http2FloodMitigationTest() override; - -protected: - // Object of this class hold the state determining the IoHandle which - // should return EAGAIN from the `writev` call. 
- struct IoHandleMatcher { - bool shouldReturnEgain(uint32_t port) const { - absl::ReaderMutexLock lock(&mutex_); - return port == port_ && writev_returns_egain_; - } - - void setSourcePort(uint32_t port) { - absl::WriterMutexLock lock(&mutex_); - port_ = port; - } - - void setWritevReturnsEgain() { - absl::WriterMutexLock lock(&mutex_); - writev_returns_egain_ = true; - } - - private: - mutable absl::Mutex mutex_; - uint32_t port_ ABSL_GUARDED_BY(mutex_) = 0; - bool writev_returns_egain_ ABSL_GUARDED_BY(mutex_) = false; - }; - - void floodServer(const Http2Frame& frame, const std::string& flood_stat, uint32_t num_frames); - void floodServer(absl::string_view host, absl::string_view path, - Http2Frame::ResponseStatus expected_http_status, const std::string& flood_stat, - uint32_t num_frames); - - void setNetworkConnectionBufferSize(); - void beginSession() override; - - Envoy::Network::SocketInterface* const previous_socket_interface_{ - Envoy::Network::SocketInterfaceSingleton::getExisting()}; - std::shared_ptr writev_matcher_{std::make_shared()}; - std::unique_ptr test_socket_interface_loader_; -}; } // namespace Envoy diff --git a/test/integration/http2_upstream_integration_test.cc b/test/integration/http2_upstream_integration_test.cc index 3e7fca7ad457..30a193b83bc3 100644 --- a/test/integration/http2_upstream_integration_test.cc +++ b/test/integration/http2_upstream_integration_test.cc @@ -330,18 +330,19 @@ TEST_P(Http2UpstreamIntegrationTest, HittingEncoderFilterLimitForGrpc) { const std::string access_log_name = TestEnvironment::temporaryPath(TestUtility::uniqueFilename()); // Configure just enough of an upstream access log to reference the upstream headers. - const std::string yaml_string = R"EOF( + const std::string yaml_string = fmt::format(R"EOF( name: router typed_config: "@type": type.googleapis.com/envoy.config.filter.http.router.v2.Router upstream_log: name: accesslog filter: - not_health_check_filter: {} + not_health_check_filter: {{}} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: /dev/null - )EOF"; + path: {} + )EOF", + Platform::null_device_path); TestUtility::loadFromYaml(yaml_string, *hcm.mutable_http_filters(1)); }); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 8036703a355e..eb2a70e3f121 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -20,6 +20,7 @@ #include "common/common/fmt.h" #include "common/common/thread_annotations.h" #include "common/http/headers.h" +#include "common/network/socket_option_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" #include "common/runtime/runtime_impl.h" @@ -66,10 +67,11 @@ typeToCodecType(Http::CodecClient::Type type) { } // namespace IntegrationCodecClient::IntegrationCodecClient( - Event::Dispatcher& dispatcher, Network::ClientConnectionPtr&& conn, - Upstream::HostDescriptionConstSharedPtr host_description, CodecClient::Type type) - : CodecClientProd(type, std::move(conn), host_description, dispatcher), dispatcher_(dispatcher), - callbacks_(*this), codec_callbacks_(*this) { + Event::Dispatcher& dispatcher, Random::RandomGenerator& random, + Network::ClientConnectionPtr&& conn, Upstream::HostDescriptionConstSharedPtr host_description, + CodecClient::Type type) + : CodecClientProd(type, std::move(conn), host_description, dispatcher, random), + dispatcher_(dispatcher), callbacks_(*this), codec_callbacks_(*this) { connection_->addConnectionCallbacks(callbacks_); 
setCodecConnectionCallbacks(codec_callbacks_); dispatcher.run(Event::Dispatcher::RunType::Block); @@ -226,8 +228,8 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( cluster->http1_settings_.enable_trailers_ = true; Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription( cluster, fmt::format("tcp://{}:80", Network::Test::getLoopbackAddressUrlString(version_)))}; - return std::make_unique(*dispatcher_, std::move(conn), host_description, - downstream_protocol_); + return std::make_unique(*dispatcher_, random_, std::move(conn), + host_description, downstream_protocol_); } Network::TransportSocketFactoryPtr HttpIntegrationTest::createUpstreamTlsContext() { @@ -372,10 +374,10 @@ void HttpIntegrationTest::verifyResponse(IntegrationStreamDecoderPtr response, EXPECT_EQ(response_code, response->headers().getStatusValue()); expected_headers.iterate([response_headers = &response->headers()]( const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { - const Http::HeaderEntry* entry = + const auto entry = response_headers->get(Http::LowerCaseString{std::string(header.key().getStringView())}); - EXPECT_NE(entry, nullptr); - EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); + EXPECT_FALSE(entry.empty()); + EXPECT_EQ(header.value().getStringView(), entry[0]->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }); @@ -848,12 +850,13 @@ void HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_ codec_client_->sendData(*request_encoder_, 10, true); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // Verify the Expect header is stripped. - EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::Headers::get().Expect)); + EXPECT_TRUE(upstream_request_->headers().get(Http::Headers::get().Expect).empty()); if (via.empty()) { - EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::Headers::get().Via)); + EXPECT_TRUE(upstream_request_->headers().get(Http::Headers::get().Via).empty()); } else { - EXPECT_EQ(via, - upstream_request_->headers().get(Http::Headers::get().Via)->value().getStringView()); + EXPECT_EQ( + via, + upstream_request_->headers().get(Http::Headers::get().Via)[0]->value().getStringView()); } if (additional_continue_from_upstream) { @@ -1185,7 +1188,7 @@ void HttpIntegrationTest::testDownstreamResetBeforeResponseComplete() { codec_client_->sendData(*request_encoder_, 0, true); waitForNextUpstreamRequest(); - EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Cookie)->value(), "a=b; c=d"); + EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Cookie)[0]->value(), "a=b; c=d"); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, false); @@ -1386,4 +1389,58 @@ std::string HttpIntegrationTest::listenerStatPrefix(const std::string& stat_name } return "listener.[__1]_0." + stat_name; } + +void Http2RawFrameIntegrationTest::startHttp2Session() { + ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false)); + + // Send empty initial SETTINGS frame. + auto settings = Http2Frame::makeEmptySettingsFrame(); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); + + // Read initial SETTINGS frame from the server. + readFrame(); + + // Send an SETTINGS ACK. 
+ settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); + + // read pending SETTINGS and WINDOW_UPDATE frames + readFrame(); + readFrame(); +} + +void Http2RawFrameIntegrationTest::beginSession() { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + // set lower outbound frame limits to make tests run faster + config_helper_.setOutboundFramesLimits(1000, 100); + initialize(); + // Set up a raw connection to easily send requests without reading responses. + auto options = std::make_shared<Network::Socket::Options>(); + options->emplace_back(std::make_shared<Network::SocketOptionImpl>( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + tcp_client_ = makeTcpConnection(lookupPort("http"), options); + startHttp2Session(); +} + +Http2Frame Http2RawFrameIntegrationTest::readFrame() { + Http2Frame frame; + EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize)); + frame.setHeader(tcp_client_->data()); + tcp_client_->clearData(frame.HeaderSize); + auto len = frame.payloadSize(); + if (len) { + EXPECT_TRUE(tcp_client_->waitForData(len)); + frame.setPayload(tcp_client_->data()); + tcp_client_->clearData(len); + } + return frame; +} + +void Http2RawFrameIntegrationTest::sendFrame(const Http2Frame& frame) { + ASSERT_TRUE(tcp_client_->connected()); + ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false)); +} + } // namespace Envoy diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index cb197cadb5e4..ae7652d59107 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -7,18 +7,22 @@ #include "common/http/codec_client.h" #include "common/network/filter_impl.h" +#include "test/common/http/http2/http2_frame.h" #include "test/integration/integration.h" #include "test/integration/utility.h" #include "test/test_common/printers.h" namespace Envoy { +using ::Envoy::Http::Http2::Http2Frame; + /** * HTTP codec client used during integration testing.
*/ class IntegrationCodecClient : public Http::CodecClientProd { public: - IntegrationCodecClient(Event::Dispatcher& dispatcher, Network::ClientConnectionPtr&& conn, + IntegrationCodecClient(Event::Dispatcher& dispatcher, Random::RandomGenerator& random, + Network::ClientConnectionPtr&& conn, Upstream::HostDescriptionConstSharedPtr host_description, Http::CodecClient::Type type); @@ -250,5 +254,22 @@ class HttpIntegrationTest : public BaseIntegrationTest { uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; std::string access_log_name_; + testing::NiceMock<Random::MockRandomGenerator> random_; +}; + +// Helper class for integration tests using raw HTTP/2 frames +class Http2RawFrameIntegrationTest : public HttpIntegrationTest { +public: + Http2RawFrameIntegrationTest(Network::Address::IpVersion version) + : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, version) {} + +protected: + void startHttp2Session(); + Http2Frame readFrame(); + void sendFrame(const Http2Frame& frame); + virtual void beginSession(); + + IntegrationTcpClientPtr tcp_client_; }; + } // namespace Envoy diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index cd275c20a7c3..3094db9cf42e 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -159,14 +159,14 @@ class HttpSubsetLbIntegrationTest // Expect a response from a host in the correct subset. EXPECT_EQ(response->headers() - .get(Envoy::Http::LowerCaseString{host_type_header_}) + .get(Envoy::Http::LowerCaseString{host_type_header_})[0] ->value() .getStringView(), expected_host_type); // Record the upstream address. hosts.emplace(response->headers() - .get(Envoy::Http::LowerCaseString{host_header_}) + .get(Envoy::Http::LowerCaseString{host_header_})[0] ->value() .getStringView()); diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index 963b04660f5a..d347849950ee 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -26,6 +26,10 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { auto* route = virtual_host->mutable_routes(0)->mutable_route(); route->mutable_idle_timeout()->set_seconds(0); route->mutable_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); + + auto* header = virtual_host->mutable_response_headers_to_add()->Add()->mutable_header(); + header->set_key("foo"); + header->set_value("bar"); } if (enable_request_timeout_) { hcm.mutable_request_timeout()->set_seconds(0); @@ -178,6 +182,9 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("408", response->headers().getStatusValue()); + auto foo = Http::LowerCaseString("foo"); + ASSERT_FALSE(response->headers().get(foo).empty()); + EXPECT_EQ("bar", response->headers().get(foo)[0]->value().getStringView()); EXPECT_EQ("stream timeout", response->body()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("stream_idle_timeout")); diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index b5640bfcbac5..c01716d33c24 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -118,8 +118,6 @@ std::string ContentType(const
BufferingStreamDecoderPtr& response) { } // namespace TEST_P(IntegrationAdminTest, Admin) { - Stats::TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer; - symbol_table_creator_test_peer.setUseFakeSymbolTables(false); initialize(); BufferingStreamDecoderPtr response; diff --git a/test/integration/integration_tcp_client.cc b/test/integration/integration_tcp_client.cc index 203a0c85a8ba..590b5c92fcf4 100644 --- a/test/integration/integration_tcp_client.cc +++ b/test/integration/integration_tcp_client.cc @@ -54,7 +54,7 @@ IntegrationTcpClient::IntegrationTcpClient( Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), options); ON_CALL(*client_write_buffer_, drain(_)) - .WillByDefault(testing::Invoke(client_write_buffer_, &MockWatermarkBuffer::baseDrain)); + .WillByDefault(Invoke(client_write_buffer_, &MockWatermarkBuffer::trackDrains)); EXPECT_CALL(*client_write_buffer_, drain(_)).Times(AnyNumber()); connection_->enableHalfClose(enable_half_close); @@ -116,26 +116,26 @@ AssertionResult IntegrationTcpClient::write(const std::string& data, bool end_st if (verify) { EXPECT_CALL(*client_write_buffer_, move(_)); if (!data.empty()) { - EXPECT_CALL(*client_write_buffer_, write(_)).Times(AtLeast(1)); + EXPECT_CALL(*client_write_buffer_, drain(_)).Times(AtLeast(1)); } } - int bytes_expected = client_write_buffer_->bytes_written() + data.size(); + uint64_t bytes_expected = client_write_buffer_->bytesDrained() + data.size(); connection_->write(buffer, end_stream); do { connection_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); - if (client_write_buffer_->bytes_written() == bytes_expected || disconnected_) { + if (client_write_buffer_->bytesDrained() == bytes_expected || disconnected_) { break; } } while (bound.withinBound()); if (!bound.withinBound()) { return AssertionFailure() << "Timed out completing write"; - } else if (verify && (disconnected_ || client_write_buffer_->bytes_written() != bytes_expected)) { + } else if (verify && (disconnected_ || client_write_buffer_->bytesDrained() != bytes_expected)) { return AssertionFailure() << "Failed to complete write or unexpected disconnect. disconnected_: " << disconnected_ - << " bytes_written: " << client_write_buffer_->bytes_written() + << " bytes_drained: " << client_write_buffer_->bytesDrained() << " bytes_expected: " << bytes_expected; } diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 594f5ac656c5..3f91991cee94 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -179,7 +179,7 @@ TEST_P(IntegrationTest, RouterDirectResponseWithBody) { ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("example-value", response->headers() - .get(Envoy::Http::LowerCaseString("x-additional-header")) + .get(Envoy::Http::LowerCaseString("x-additional-header"))[0] ->value() .getStringView()); EXPECT_EQ("text/html", response->headers().getContentTypeValue()); @@ -223,7 +223,7 @@ TEST_P(IntegrationTest, RouterDirectResponseEmptyBody) { ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("example-value", response->headers() - .get(Envoy::Http::LowerCaseString("x-additional-header")) + .get(Envoy::Http::LowerCaseString("x-additional-header"))[0] ->value() .getStringView()); // Content-type header is removed. 
@@ -709,12 +709,12 @@ TEST_P(IntegrationTest, TestInlineHeaders) { ASSERT_TRUE(upstream_headers != nullptr); EXPECT_EQ(upstream_headers->Host()->value(), "foo.com"); EXPECT_EQ(upstream_headers->get_("User-Agent"), "public,123"); - ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString("foo")) != nullptr); + ASSERT_FALSE(upstream_headers->get(Envoy::Http::LowerCaseString("foo")).empty()); EXPECT_EQ("bar", - upstream_headers->get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); - ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString("eep")) != nullptr); + upstream_headers->get(Envoy::Http::LowerCaseString("foo"))[0]->value().getStringView()); + ASSERT_FALSE(upstream_headers->get(Envoy::Http::LowerCaseString("eep")).empty()); EXPECT_EQ("baz", - upstream_headers->get(Envoy::Http::LowerCaseString("eep"))->value().getStringView()); + upstream_headers->get(Envoy::Http::LowerCaseString("eep"))[0]->value().getStringView()); } // Verify for HTTP/1.0 a keep-alive header results in no connection: close. diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index 275895c066ca..183956c12eae 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -90,11 +90,9 @@ class ListenerIntegrationTest : public HttpIntegrationTest, void createUpstreams() override { HttpIntegrationTest::createUpstreams(); // Create the LDS upstream (fake_upstreams_[1]). - fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, - timeSystem(), enable_half_close_)); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); // Create the RDS upstream (fake_upstreams_[2]). - fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, - timeSystem(), enable_half_close_)); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } void resetFakeUpstreamInfo(FakeUpstreamInfo* upstream_info) { diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index 3567488a9e4e..e66daee1d07b 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -99,8 +99,7 @@ class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara } void createUpstreams() override { - fake_upstreams_.emplace_back( - new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); load_report_upstream_ = fake_upstreams_.back().get(); HttpIntegrationTest::createUpstreams(); } diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc index dacd7fcad033..d89669f0cb76 100644 --- a/test/integration/local_reply_integration_test.cc +++ b/test/integration/local_reply_integration_test.cc @@ -79,7 +79,8 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) { EXPECT_EQ("application/json", response->headers().ContentType()->value().getStringView()); EXPECT_EQ("150", response->headers().ContentLength()->value().getStringView()); EXPECT_EQ("550", response->headers().Status()->value().getStringView()); - EXPECT_EQ("bar", response->headers().get(Http::LowerCaseString("foo"))->value().getStringView()); + EXPECT_EQ("bar", + response->headers().get(Http::LowerCaseString("foo"))[0]->value().getStringView()); // Check if returned json is same as expected EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), 
expected_body)); } @@ -210,7 +211,8 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFi EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); EXPECT_EQ("24", response->headers().ContentLength()->value().getStringView()); EXPECT_EQ("551", response->headers().Status()->value().getStringView()); - EXPECT_EQ("bar", response->headers().get(Http::LowerCaseString("foo"))->value().getStringView()); + EXPECT_EQ("bar", + response->headers().get(Http::LowerCaseString("foo"))[0]->value().getStringView()); // Check if returned json is same as expected EXPECT_EQ(response->body(), expected_body); } diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index 13a2eef76b58..0e7b9c7b3c7d 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -1,58 +1,123 @@ +#include + #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/overload/v3/overload.pb.h" +#include "envoy/server/resource_monitor.h" +#include "envoy/server/resource_monitor_config.h" +#include "test/common/config/dummy_config.pb.h" #include "test/integration/http_protocol_integration.h" +#include "test/test_common/registry.h" #include "absl/strings/str_cat.h" namespace Envoy { +class FakeResourceMonitorFactory; + +class FakeResourceMonitor : public Server::ResourceMonitor { +public: + FakeResourceMonitor(Event::Dispatcher& dispatcher, FakeResourceMonitorFactory& factory) + : dispatcher_(dispatcher), factory_(factory), pressure_(0.0) {} + ~FakeResourceMonitor() override; + void updateResourceUsage(Callbacks& callbacks) override; + + void setResourcePressure(double pressure) { + dispatcher_.post([this, pressure] { pressure_ = pressure; }); + } + +private: + Event::Dispatcher& dispatcher_; + FakeResourceMonitorFactory& factory_; + double pressure_; +}; + +class FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitorFactory { +public: + FakeResourceMonitor* monitor() const { return monitor_; } + Server::ResourceMonitorPtr + createResourceMonitor(const Protobuf::Message& config, + Server::Configuration::ResourceMonitorFactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { + return "envoy.resource_monitors.testonly.fake_resource_monitor"; + } + + void onMonitorDestroyed(FakeResourceMonitor* monitor); + +private: + FakeResourceMonitor* monitor_{nullptr}; +}; + +FakeResourceMonitor::~FakeResourceMonitor() { factory_.onMonitorDestroyed(this); } + +void FakeResourceMonitor::updateResourceUsage(Callbacks& callbacks) { + Server::ResourceUsage usage; + usage.resource_pressure_ = pressure_; + callbacks.onSuccess(usage); +} + +void FakeResourceMonitorFactory::onMonitorDestroyed(FakeResourceMonitor* monitor) { + ASSERT(monitor_ == monitor); + monitor_ = nullptr; +} + +Server::ResourceMonitorPtr FakeResourceMonitorFactory::createResourceMonitor( + const Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) { + auto monitor = std::make_unique(context.dispatcher(), *this); + monitor_ = monitor.get(); + return monitor; +} + class OverloadIntegrationTest : public HttpProtocolIntegrationTest { protected: - OverloadIntegrationTest() - : injected_resource_filename_(TestEnvironment::temporaryPath("injected_resource")), - file_updater_(injected_resource_filename_) {} - void initialize() override { - 
config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - const std::string overload_config = fmt::format(R"EOF( + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + const std::string overload_config = R"EOF( refresh_interval: seconds: 0 nanos: 1000000 resource_monitors: - - name: "envoy.resource_monitors.injected_resource" + - name: "envoy.resource_monitors.testonly.fake_resource_monitor" typed_config: - "@type": type.googleapis.com/envoy.config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig - filename: "{}" + "@type": type.googleapis.com/google.protobuf.Empty actions: - name: "envoy.overload_actions.stop_accepting_requests" triggers: - - name: "envoy.resource_monitors.injected_resource" + - name: "envoy.resource_monitors.testonly.fake_resource_monitor" threshold: value: 0.9 - name: "envoy.overload_actions.disable_http_keepalive" triggers: - - name: "envoy.resource_monitors.injected_resource" + - name: "envoy.resource_monitors.testonly.fake_resource_monitor" threshold: value: 0.8 - name: "envoy.overload_actions.stop_accepting_connections" triggers: - - name: "envoy.resource_monitors.injected_resource" + - name: "envoy.resource_monitors.testonly.fake_resource_monitor" threshold: value: 0.95 - )EOF", - injected_resource_filename_); + )EOF"; *bootstrap.mutable_overload_manager() = TestUtility::parseYaml(overload_config); }); - updateResource(0); HttpIntegrationTest::initialize(); + updateResource(0); } - void updateResource(double pressure) { file_updater_.update(absl::StrCat(pressure)); } + void updateResource(double pressure) { + auto* monitor = fake_resource_monitor_factory_.monitor(); + ASSERT(monitor != nullptr); + monitor->setResourcePressure(pressure); + } - const std::string injected_resource_filename_; - AtomicFileUpdater file_updater_; + FakeResourceMonitorFactory fake_resource_monitor_factory_; + Registry::InjectFactory inject_factory_{ + fake_resource_monitor_factory_}; }; INSTANTIATE_TEST_SUITE_P(Protocols, OverloadIntegrationTest, diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 719e34954f2d..39e5b2738e87 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -154,7 +154,7 @@ TEST_P(ProtocolIntegrationTest, RouterRedirect) { ASSERT_TRUE(response->complete()); EXPECT_EQ("301", response->headers().getStatusValue()); EXPECT_EQ("https://www.redirect.com/foo", - response->headers().get(Http::Headers::get().Location)->value().getStringView()); + response->headers().get(Http::Headers::get().Location)[0]->value().getStringView()); } TEST_P(ProtocolIntegrationTest, UnknownResponsecode) { @@ -303,7 +303,7 @@ name: add-trailers-filter if (upstreamProtocol() == FakeHttpConnection::Type::HTTP2) { EXPECT_EQ("decode", upstream_request_->trailers() - ->get(Http::LowerCaseString("grpc-message")) + ->get(Http::LowerCaseString("grpc-message"))[0] ->value() .getStringView()); } @@ -330,7 +330,7 @@ TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("host", - response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); + response->headers().get(Http::LowerCaseString("host"))[0]->value().getStringView()); } // Regression test for https://github.com/envoyproxy/envoy/issues/10270 @@ -351,7 +351,7 @@ TEST_P(ProtocolIntegrationTest, 
LongHeaderValueWithSpaces) { {"longrequestvalue", long_header_value_with_inner_lws}}); waitForNextUpstreamRequest(); EXPECT_EQ(long_header_value_with_inner_lws, upstream_request_->headers() - .get(Http::LowerCaseString("longrequestvalue")) + .get(Http::LowerCaseString("longrequestvalue"))[0] ->value() .getStringView()); upstream_request_->encodeHeaders( @@ -363,10 +363,12 @@ TEST_P(ProtocolIntegrationTest, LongHeaderValueWithSpaces) { EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("host", - response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); - EXPECT_EQ( - long_header_value_with_inner_lws, - response->headers().get(Http::LowerCaseString("longresponsevalue"))->value().getStringView()); + response->headers().get(Http::LowerCaseString("host"))[0]->value().getStringView()); + EXPECT_EQ(long_header_value_with_inner_lws, + response->headers() + .get(Http::LowerCaseString("longresponsevalue"))[0] + ->value() + .getStringView()); } TEST_P(ProtocolIntegrationTest, Retry) { @@ -854,6 +856,16 @@ TEST_P(DownstreamProtocolIntegrationTest, HittingDecoderFilterLimit) { // Test hitting the encoder buffer filter with too many response bytes to buffer. Given the request // headers are sent on early, the stream/connection will be reset. TEST_P(ProtocolIntegrationTest, HittingEncoderFilterLimit) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + auto* route_config = hcm.mutable_route_config(); + auto* virtual_host = route_config->mutable_virtual_hosts(0); + auto* header = virtual_host->mutable_response_headers_to_add()->Add()->mutable_header(); + header->set_key("foo"); + header->set_value("bar"); + }); + useAccessLog(); config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " "type.googleapis.com/google.protobuf.Empty } }"); @@ -886,6 +898,11 @@ TEST_P(ProtocolIntegrationTest, HittingEncoderFilterLimit) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); EXPECT_EQ("500", response->headers().getStatusValue()); + // Regression test all sendLocalReply paths add route-requested headers. + auto foo = Http::LowerCaseString("foo"); + ASSERT_FALSE(response->headers().get(foo).empty()); + EXPECT_EQ("bar", response->headers().get(foo)[0]->value().getStringView()); + // Regression test https://github.com/envoyproxy/envoy/issues/9881 by making // sure this path does standard HCM header transformations. 
EXPECT_TRUE(response->headers().Date() != nullptr); @@ -1246,157 +1263,27 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { } } -TEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterEncoding) { +TEST_P(DownstreamProtocolIntegrationTest, LocalReplyDuringEncoding) { config_helper_.addFilter(R"EOF( -name: encode-headers-only +name: local-reply-during-encode )EOF"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = - codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{":method", "GET"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}, - 128); - waitForNextUpstreamRequest(); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - response->waitForEndStream(); - EXPECT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); - if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - } else { - ASSERT_TRUE(upstream_request_->waitForReset()); - ASSERT_TRUE(fake_upstream_connection_->close()); - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - } - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); - EXPECT_EQ(0, response->body().size()); -} -TEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterDecoding) { - config_helper_.addFilter(R"EOF( -name: decode-headers-only -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = - codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}, - 128); - waitForNextUpstreamRequest(); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - upstream_request_->encodeData(128, true); - response->waitForEndStream(); - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); - EXPECT_EQ(128, response->body().size()); -} - -TEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterEncodingIntermediateFilters) { - config_helper_.addFilter(R"EOF( -name: passthrough-filter -)EOF"); - config_helper_.addFilter(R"EOF( -name: encode-headers-only -)EOF"); - config_helper_.addFilter(R"EOF( -name: passthrough-filter -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = - codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{":method", "GET"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}, - 128); - waitForNextUpstreamRequest(); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - response->waitForEndStream(); - if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - } else { - ASSERT_TRUE(upstream_request_->waitForReset()); - ASSERT_TRUE(fake_upstream_connection_->close()); - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - } - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); - EXPECT_EQ(0, response->body().size()); -} - -TEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterDecodingIntermediateFilters) { - config_helper_.addFilter(R"EOF( -name: passthrough-filter -)EOF"); - config_helper_.addFilter(R"EOF( -name: decode-headers-only -)EOF"); - config_helper_.addFilter(R"EOF( -name: 
passthrough-filter -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = - codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}, - 128); - waitForNextUpstreamRequest(); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - upstream_request_->encodeData(128, true); - response->waitForEndStream(); - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); - EXPECT_EQ(128, response->body().size()); -} - -// Verifies behavior when request data is encoded after the request has been -// turned into a headers-only request and the response has already begun. -TEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterInterleaved) { - config_helper_.addFilter(R"EOF( -name: decode-headers-only -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - // First send the request headers. The filter should turn this into a header-only - // request. - auto encoder_decoder = - codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "GET"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}); - request_encoder_ = &encoder_decoder.first; - auto response = std::move(encoder_decoder.second); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); // Wait for the upstream request and begin sending a response with end_stream = false. waitForNextUpstreamRequest(); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - - // Simulate additional data after the request has been turned into a headers only request. - Buffer::OwnedImpl data(std::string(128, 'a')); - request_encoder_->encodeData(data, false); - - // End the response. - upstream_request_->encodeData(128, true); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); + EXPECT_EQ("500", response->headers().getStatusValue()); EXPECT_EQ(0, upstream_request_->body().length()); } @@ -1837,15 +1724,15 @@ TEST_P(ProtocolIntegrationTest, MultipleSetCookies) { ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - std::vector out; - Http::HeaderUtility::getAllOfHeader(response->headers(), "set-cookie", out); + const auto out = response->headers().get(Http::LowerCaseString("set-cookie")); ASSERT_EQ(out.size(), 2); - ASSERT_EQ(out[0], "foo"); - ASSERT_EQ(out[1], "bar"); + ASSERT_EQ(out[0]->value().getStringView(), "foo"); + ASSERT_EQ(out[1]->value().getStringView(), "bar"); } // Resets the downstream stream immediately and verifies that we clean up everything. TEST_P(ProtocolIntegrationTest, TestDownstreamResetIdleTimeout) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); config_helper_.setDownstreamHttpIdleTimeout(std::chrono::milliseconds(100)); initialize(); @@ -1874,6 +1761,7 @@ TEST_P(ProtocolIntegrationTest, TestDownstreamResetIdleTimeout) { } ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_THAT(waitForAccessLog(access_log_name_), Not(HasSubstr("DPE"))); } // Test connection is closed after single request processed. 
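The assertions above follow this patch's new header-lookup idiom: instead of dereferencing a single entry pointer, the tests index into the result of get() and check it with empty()/size(). A condensed sketch of that pattern, reusing only calls that already appear in these hunks (illustration only, not part of the patch; `response` is the same test-scope object used above):

// Look up a possibly multi-valued header and walk every entry.
const auto values = response->headers().get(Http::LowerCaseString("set-cookie"));
ASSERT_FALSE(values.empty());
for (size_t i = 0; i < values.size(); ++i) {
  // Each entry exposes its value as a string view, just as the ASSERT_EQ calls above do.
  EXPECT_FALSE(values[i]->value().getStringView().empty());
}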
@@ -1945,7 +1833,7 @@ TEST_P(ProtocolIntegrationTest, ConnDurationTimeoutNoHttpRequest) { TEST_P(DownstreamProtocolIntegrationTest, TestPrefetch) { config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); - cluster->mutable_prefetch_policy()->mutable_prefetch_ratio()->set_value(1.5); + cluster->mutable_prefetch_policy()->mutable_per_upstream_prefetch_ratio()->set_value(1.5); }); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index 92c4bc90b39d..064d884899ec 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -133,11 +133,11 @@ TEST_P(ProxyProtoIntegrationTest, AccessLog) { testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); const std::string log_line = waitForAccessLog(access_log_name_); - const std::vector tokens = StringUtil::splitToken(log_line, " "); + const std::vector tokens = StringUtil::splitToken(log_line, " ", false, true); ASSERT_EQ(2, tokens.size()); EXPECT_EQ(tokens[0], Network::Test::getLoopbackAddressString(GetParam())); - EXPECT_EQ(tokens[1], "1.2.3.4:12345\n"); + EXPECT_EQ(tokens[1], "1.2.3.4:12345"); } TEST_P(ProxyProtoIntegrationTest, DEPRECATED_FEATURE_TEST(OriginalDst)) { diff --git a/test/integration/run_envoy_test.sh b/test/integration/run_envoy_test.sh index 4c9c21d73d34..1b24f51fc1b9 100755 --- a/test/integration/run_envoy_test.sh +++ b/test/integration/run_envoy_test.sh @@ -1,6 +1,8 @@ #!/bin/bash export ENVOY_BIN="${TEST_SRCDIR}/envoy/test/integration/hotrestart_main" + +# shellcheck source=test/integration/test_utility.sh source "${TEST_SRCDIR}/envoy/test/integration/test_utility.sh" function expect_fail_with_error() { @@ -8,7 +10,7 @@ function expect_fail_with_error() { rm -f "$log" expected_error="$1" shift - echo ${ENVOY_BIN} --use-dynamic-base-id "$@" ">&" "$log" + echo "${ENVOY_BIN} --use-dynamic-base-id $*" ">&" "$log" ${ENVOY_BIN} --use-dynamic-base-id "$@" >& "$log" EXIT_CODE=$? cat "$log" @@ -17,24 +19,24 @@ function expect_fail_with_error() { } -start_test Launching envoy with a bogus command line flag. +start_test "Launching envoy with a bogus command line flag." expect_fail_with_error "PARSE ERROR: Argument: --bogus-flag" --bogus-flag -start_test Launching envoy without --config-path or --config-yaml fails. +start_test "Launching envoy without --config-path or --config-yaml fails." expect_fail_with_error \ "At least one of --config-path or --config-yaml or Options::configProto() should be non-empty" -start_test Launching envoy with unknown IP address. +start_test "Launching envoy with unknown IP address." expect_fail_with_error "error: unknown IP address version" --local-address-ip-version foo -start_test Launching envoy with unknown mode. +start_test "Launching envoy with unknown mode." expect_fail_with_error "error: unknown mode" --mode foo -start_test Launching envoy with bogus component log level. +start_test "Launching envoy with bogus component log level." expect_fail_with_error "error: component log level not correctly specified" --component-log-level upstream:foo:bar -start_test Launching envoy with invalid log level. +start_test "Launching envoy with invalid log level." expect_fail_with_error "error: invalid log level specified" --component-log-level upstream:foo -start_test Launching envoy with invalid component. 
+start_test "Launching envoy with invalid component." expect_fail_with_error "error: invalid component specified" --component-log-level foo:debug diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index d80ab1b87e53..30b071415ad3 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -122,11 +122,9 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, void createUpstreams() override { HttpIntegrationTest::createUpstreams(); // Create the SRDS upstream. - fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, - timeSystem(), enable_half_close_)); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); // Create the RDS upstream. - fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, - timeSystem(), enable_half_close_)); + addFakeUpstream(FakeHttpConnection::Type::HTTP2); } void resetFakeUpstreamInfo(FakeUpstreamInfo* upstream_info) { diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index f218ddd09fc2..4e5bc1ddebf5 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -316,8 +316,7 @@ class SdsDynamicDownstreamCertValidationContextTest : public SdsDynamicDownstrea void createUpstreams() override { // Fake upstream with SSL/TLS for the first cluster. - fake_upstreams_.emplace_back(new FakeUpstream( - createUpstreamSslContext(), 0, FakeHttpConnection::Type::HTTP1, version_, timeSystem())); + addFakeUpstream(createUpstreamSslContext(), FakeHttpConnection::Type::HTTP1); create_xds_upstream_ = true; } @@ -474,9 +473,8 @@ class SdsDynamicUpstreamIntegrationTest : public SdsDynamicIntegrationBaseTest { void createUpstreams() override { // This is for backend with ssl - fake_upstreams_.emplace_back(new FakeUpstream(createUpstreamSslContext(context_manager_, *api_), - 0, FakeHttpConnection::Type::HTTP1, version_, - timeSystem())); + addFakeUpstream(createUpstreamSslContext(context_manager_, *api_), + FakeHttpConnection::Type::HTTP1); create_xds_upstream_ = true; } }; diff --git a/test/integration/sds_generic_secret_integration_test.cc b/test/integration/sds_generic_secret_integration_test.cc index 719bc0cc4f39..ac466c6a40ec 100644 --- a/test/integration/sds_generic_secret_integration_test.cc +++ b/test/integration/sds_generic_secret_integration_test.cc @@ -147,9 +147,10 @@ TEST_P(SdsGenericSecretIntegrationTest, FilterFetchSuccess) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); - EXPECT_EQ( - "DUMMY_AES_128_KEY", - upstream_request_->headers().get(Http::LowerCaseString("secret"))->value().getStringView()); + EXPECT_EQ("DUMMY_AES_128_KEY", upstream_request_->headers() + .get(Http::LowerCaseString("secret"))[0] + ->value() + .getStringView()); } } // namespace Envoy diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index da9e77c6e23f..172e7b0ca2f6 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -145,9 +145,8 @@ class SdsStaticUpstreamIntegrationTest : public testing::TestWithParam(*symbol_table_) - : std::make_unique(*symbol_table_)); + (use_real_stats ? 
std::make_unique(symbol_table_) + : std::make_unique(symbol_table_)); } void IntegrationTestServerImpl::createAndRunEnvoyServer( diff --git a/test/integration/server.h b/test/integration/server.h index 65cad7f5ea88..48995e8cb0b7 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -574,7 +574,7 @@ class IntegrationTestServerImpl : public IntegrationTestServer { Stats::Store* stat_store_{}; Network::Address::InstanceConstSharedPtr admin_address_; absl::Notification server_gone_; - Stats::SymbolTablePtr symbol_table_; + Stats::SymbolTableImpl symbol_table_; std::unique_ptr stats_allocator_; }; diff --git a/test/integration/socket_interface_integration_test.cc b/test/integration/socket_interface_integration_test.cc index 8969d41a55a1..93a5db5aff84 100644 --- a/test/integration/socket_interface_integration_test.cc +++ b/test/integration/socket_interface_integration_test.cc @@ -1,3 +1,5 @@ +#include "common/buffer/buffer_impl.h" +#include "common/network/address_impl.h" #include "common/network/socket_interface.h" #include "test/integration/integration.h" @@ -87,5 +89,59 @@ TEST_P(SocketInterfaceIntegrationTest, AddressWithSocketInterface) { client_->close(Network::ConnectionCloseType::FlushWrite); } +// Test that connecting to internal address will crash. +// TODO(lambdai): Add internal connection implementation to enable the connection creation. +TEST_P(SocketInterfaceIntegrationTest, InternalAddressWithSocketInterface) { + BaseIntegrationTest::initialize(); + + ConnectionStatusCallbacks connect_callbacks_; + Network::ClientConnectionPtr client_; + const Network::SocketInterface* sock_interface = Network::socketInterface( + "envoy.extensions.network.socket_interface.default_socket_interface"); + Network::Address::InstanceConstSharedPtr address = + std::make_shared("listener_0", sock_interface); + + ASSERT_DEATH(client_ = dispatcher_->createClientConnection( + address, Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr), + "panic: not implemented"); +} + +// Test that recv from internal address will crash. +// TODO(lambdai): Add internal socket implementation to enable the io path. +TEST_P(SocketInterfaceIntegrationTest, UdpRecvFromInternalAddressWithSocketInterface) { + BaseIntegrationTest::initialize(); + + const Network::SocketInterface* sock_interface = Network::socketInterface( + "envoy.extensions.network.socket_interface.default_socket_interface"); + Network::Address::InstanceConstSharedPtr address = + std::make_shared("listener_0", sock_interface); + + ASSERT_DEATH(std::make_unique(Network::Socket::Type::Datagram, address), ""); +} + +// Test that send to internal address will return io error. 
+TEST_P(SocketInterfaceIntegrationTest, UdpSendToInternalAddressWithSocketInterface) { + BaseIntegrationTest::initialize(); + + const Network::SocketInterface* sock_interface = Network::socketInterface( + "envoy.extensions.network.socket_interface.default_socket_interface"); + Network::Address::InstanceConstSharedPtr peer_internal_address = + std::make_shared("listener_0", sock_interface); + Network::Address::InstanceConstSharedPtr local_valid_address = + Network::Test::getCanonicalLoopbackAddress(version_); + + auto socket = + std::make_unique(Network::Socket::Type::Datagram, local_valid_address); + + Buffer::OwnedImpl buffer; + Buffer::RawSlice iovec; + buffer.reserve(100, &iovec, 1); + + auto result = + socket->ioHandle().sendmsg(&iovec, 1, 0, local_valid_address->ip(), *peer_internal_address); + ASSERT_FALSE(result.ok()); + ASSERT_EQ(result.err_->getErrorCode(), Api::IoError::IoErrorCode::NoSupport); +} } // namespace } // namespace Envoy \ No newline at end of file diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 2ffc1ade58c9..376e416e44fd 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -7,7 +7,6 @@ #include "common/config/well_known_names.h" #include "common/memory/stats.h" -#include "common/stats/symbol_table_creator.h" #include "test/common/stats/stat_test_utility.h" #include "test/config/utility.h" @@ -206,8 +205,6 @@ class ClusterMemoryTestRunner : public testing::TestWithParam::GetParam()) {} - Stats::TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; - Network::Address::IpVersion ip_version_; }; @@ -215,106 +212,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ClusterMemoryTestRunner, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { - symbol_table_creator_test_peer_.setUseFakeSymbolTables(true); - - // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with - // differing configuration. This is necessary for measuring the memory consumption - // between the different instances within the same test. - const size_t m100 = ClusterMemoryTestHelper::computeMemoryDelta(1, 0, 101, 0, true); - const size_t m_per_cluster = (m100) / 100; - - // Note: if you are increasing this golden value because you are adding a - // stat, please confirm that this will be generally useful to most Envoy - // users. Otherwise you are adding to the per-cluster memory overhead, which - // will be significant for Envoy installations that are massively - // multi-tenant. - // - // History of golden values: - // - // Date PR Bytes Per Cluster Notes - // exact upper-bound - // ---------- ----- ----------------- ----- - // 2019/03/20 6329 59015 Initial version - // 2019/04/12 6477 59576 Implementing Endpoint lease... - // 2019/04/23 6659 59512 Reintroduce dispatcher stats... 
- // 2019/04/24 6161 49415 Pack tags and tag extracted names - // 2019/05/07 6794 49957 Stats for excluded hosts in cluster - // 2019/04/27 6733 50213 Use SymbolTable API for HTTP codes - // 2019/05/31 6866 50157 libstdc++ upgrade in CI - // 2019/06/03 7199 49393 absl update - // 2019/06/06 7208 49650 make memory targets approximate - // 2019/06/17 7243 49412 49700 macros for exact/upper-bound memory checks - // 2019/06/29 7364 45685 46000 combine 2 levels of stat ref-counting into 1 - // 2019/06/30 7428 42742 43000 remove stats multiple inheritance, inline HeapStatData - // 2019/07/06 7477 42742 43000 fork gauge representation to drop pending_increment_ - // 2019/07/15 7555 42806 43000 static link libstdc++ in tests - // 2019/07/24 7503 43030 44000 add upstream filters to clusters - // 2019/08/13 7877 42838 44000 skip EdfScheduler creation if all host weights equal - // 2019/09/02 8118 42830 43000 Share symbol-tables in cluster/host stats. - // 2019/09/16 8100 42894 43000 Add transport socket matcher in cluster. - // 2019/09/25 8226 43022 44000 dns: enable dns failure refresh rate configuration - // 2019/09/30 8354 43310 44000 Implement transport socket match. - // 2019/10/17 8537 43308 44000 add new enum value HTTP3 - // 2019/10/17 8484 43340 44000 stats: add unit support to histogram - // 2019/11/01 8859 43563 44000 build: switch to libc++ by default - // 2019/11/15 9040 43371 44000 build: update protobuf to 3.10.1 - // 2019/11/15 9031 43403 44000 upstream: track whether cluster is local - // 2019/12/10 8779 42919 43500 use var-length coding for name length - // 2020/01/07 9069 43413 44000 upstream: Implement retry concurrency budgets - // 2020/01/07 9564 43445 44000 use RefcountPtr for CentralCache. - // 2020/01/09 8889 43509 44000 api: add UpstreamHttpProtocolOptions message - // 2020/01/09 9227 43637 44000 router: per-cluster histograms w/ timeout budget - // 2020/01/12 9633 43797 44104 config: support recovery of original message when - // upgrading. - // 2020/02/13 10042 43797 44136 Metadata: Metadata are shared across different - // clusters and hosts. - // 2020/03/16 9964 44085 44600 http2: support custom SETTINGS parameters. - // 2020/03/24 10501 44261 44600 upstream: upstream_rq_retry_limit_exceeded. - // 2020/04/02 10624 43356 44000 Use 100 clusters rather than 1000 to avoid timeouts - // 2020/04/07 10661 43349 44000 fix clang tidy on master - // 2020/04/23 10531 44169 44600 http: max stream duration upstream support. - // 2020/04/23 10661 44425 46000 per-listener connection limits - // 2020/05/05 10908 44233 44600 router: add InternalRedirectPolicy and predicate - // 2020/05/13 10531 44425 44600 Refactor resource manager - // 2020/05/20 11223 44491 44600 Add primary clusters tracking to cluster manager. - // 2020/06/10 11561 44491 44811 Make upstreams pluggable - // 2020/06/29 11751 44715 46000 Improve time complexity of removing callback handle - // in callback manager. - // 2020/07/07 11252 44971 46000 Introduce Least Request LB active request bias config - // 2020/07/15 11748 45003 46000 Stream error on invalid messaging - // 2020/07/20 11559 44747 46000 stats: add histograms for request/response headers - // and body sizes. - // 2020/07/21 12034 44811 46000 Add configurable histogram buckets. - // 2020/07/31 12035 45002 46000 Init manager store unready targets in hash map. - // 2020/08/10 12275 44949 46000 Re-organize tls histogram maps to improve continuity. 
- // 2020/08/11 12202 44949 46500 router: add new retry back-off strategy - - // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI - // 'release' builds, where we control the platform and tool-chain. So you - // will need to find the correct value only after failing CI and looking - // at the logs. - // - // On a local clang8/libstdc++/linux flow, the memory usage was observed in - // June 2019 to be 64 bytes higher than it is in CI/release. Your mileage may - // vary. - // - // If you encounter a failure here, please see - // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests - // for details on how to fix. - // - // We only run the exact test for ipv6 because ipv4 in some cases may allocate a - // different number of bytes. We still run the approximate test. - if (ip_version_ != Network::Address::IpVersion::v6) { - // https://github.com/envoyproxy/envoy/issues/12209 - // EXPECT_MEMORY_EQ(m_per_cluster, 44949); - } - EXPECT_MEMORY_LE(m_per_cluster, 46500); // Round up to allow platform variations. -} - -TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { - symbol_table_creator_test_peer_.setUseFakeSymbolTables(false); - +TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with // differing configuration. This is necessary for measuring the memory consumption // between the different instances within the same test. @@ -368,6 +266,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/07/31 12035 37114 38000 Init manager store unready targets in hash map. // 2020/08/10 12275 37061 38000 Re-organize tls histogram maps to improve continuity. // 2020/08/11 12202 37061 38500 router: add new retry back-off strategy + // 2020/09/11 12973 38993 upstream: predictive prefetch + // 2020/10/02 13251 39326 switch to google tcmalloc // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -388,12 +288,10 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // https://github.com/envoyproxy/envoy/issues/12209 // EXPECT_MEMORY_EQ(m_per_cluster, 37061); } - EXPECT_MEMORY_LE(m_per_cluster, 38500); // Round up to allow platform variations. + EXPECT_MEMORY_LE(m_per_cluster, 40000); // Round up to allow platform variations. } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { - symbol_table_creator_test_peer_.setUseFakeSymbolTables(false); - // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with // differing configuration. This is necessary for measuring the memory consumption // between the different instances within the same test. @@ -436,7 +334,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // https://github.com/envoyproxy/envoy/issues/12209 // EXPECT_MEMORY_EQ(m_per_host, 1380); } - EXPECT_MEMORY_LE(m_per_host, 1800); // Round up to allow platform variations. + EXPECT_MEMORY_LE(m_per_host, 2000); // Round up to allow platform variations. 
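For anyone adjusting the golden values above, the pattern both memory tests follow condenses to the sketch below, reusing the helper and macros already present in this file (sketch only; the bound shown is the per-cluster value from the hunk above, not a new measurement):

// Measure once with 1 cluster and once with 101, and derive the marginal
// per-cluster cost from the difference.
const size_t m100 = ClusterMemoryTestHelper::computeMemoryDelta(1, 0, 101, 0, true);
const size_t m_per_cluster = m100 / 100;
// EXPECT_MEMORY_EQ is exact and only active in CI release builds; the LE check
// is a rounded-up upper bound that tolerates platform variation.
EXPECT_MEMORY_LE(m_per_cluster, 40000);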
} } // namespace diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index a1a4abd3ed26..37ff51d8b976 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -435,6 +435,63 @@ TEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); } +TEST_P(TcpProxyIntegrationTest, TestMaxDownstreamConnectionDurationWithNoData) { + autonomous_upstream_ = true; + + enable_half_close_ = false; + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + auto* filter_chain = listener->mutable_filter_chains(0); + auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config(); + + ASSERT_TRUE( + config_blob->Is()); + auto tcp_proxy_config = + MessageUtil::anyConvert( + *config_blob); + tcp_proxy_config.mutable_max_downstream_connection_duration()->set_nanos( + std::chrono::duration_cast(std::chrono::milliseconds(100)) + .count()); + config_blob->PackFrom(tcp_proxy_config); + }); + + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->waitForDisconnect(); +} + +TEST_P(TcpProxyIntegrationTest, TestMaxDownstreamConnectionDurationWithLargeOutstandingData) { + config_helper_.setBufferLimits(1024, 1024); + enable_half_close_ = false; + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + auto* filter_chain = listener->mutable_filter_chains(0); + auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config(); + + ASSERT_TRUE( + config_blob->Is()); + auto tcp_proxy_config = + MessageUtil::anyConvert( + *config_blob); + tcp_proxy_config.mutable_max_downstream_connection_duration()->set_nanos( + std::chrono::duration_cast(std::chrono::milliseconds(500)) + .count()); + config_blob->PackFrom(tcp_proxy_config); + }); + + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + + std::string data(1024 * 16, 'a'); + ASSERT_TRUE(tcp_client->write(data)); + ASSERT_TRUE(fake_upstream_connection->write(data)); + + tcp_client->waitForDisconnect(); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); +} + TEST_P(TcpProxyIntegrationTest, TestNoCloseOnHealthFailure) { concurrency_ = 2; @@ -1045,7 +1102,7 @@ void TcpProxySslIntegrationTest::sendAndReceiveTlsData(const std::string& data_t // Ship some data upstream. 
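The TcpProxy max_downstream_connection_duration tests above place the whole timeout in the nanos field, which is valid only because 100ms and 500ms are under one second. A more general conversion would split the value across seconds and nanos; a sketch, where the 1500ms figure is illustrative and tcp_proxy_config is the unpacked config from those tests (illustration only, not part of the patch):

const std::chrono::milliseconds timeout{1500};
auto* duration = tcp_proxy_config.mutable_max_downstream_connection_duration();
duration->set_seconds(timeout.count() / 1000);
duration->set_nanos(static_cast<int32_t>((timeout.count() % 1000) * 1000000));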
Buffer::OwnedImpl buffer(data_to_send_upstream); ssl_client_->write(buffer, false); - while (client_write_buffer_->bytes_drained() != data_to_send_upstream.size()) { + while (client_write_buffer_->bytesDrained() != data_to_send_upstream.size()) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } @@ -1112,7 +1169,7 @@ TEST_P(TcpProxySslIntegrationTest, UpstreamHalfClose) { const std::string& val("data"); Buffer::OwnedImpl buffer(val); ssl_client_->write(buffer, false); - while (client_write_buffer_->bytes_drained() != val.size()) { + while (client_write_buffer_->bytesDrained() != val.size()) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } ASSERT_TRUE(fake_upstream_connection_->waitForData(val.size())); diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 11a14a182416..5c4fbdf022e4 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -218,11 +218,11 @@ TEST_P(ProxyingConnectIntegrationTest, ProxyConnect) { result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); RELEASE_ASSERT(result, result.message()); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Method)->value(), "CONNECT"); + EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Method)[0]->value(), "CONNECT"); if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { - EXPECT_TRUE(upstream_request_->headers().get(Http::Headers::get().Protocol) == nullptr); + EXPECT_TRUE(upstream_request_->headers().get(Http::Headers::get().Protocol).empty()); } else { - EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Protocol)->value(), + EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Protocol)[0]->value(), "bytestream"); } diff --git a/test/integration/test_utility.sh b/test/integration/test_utility.sh index 33b3bfa6838a..ece50f5f3d31 100644 --- a/test/integration/test_utility.sh +++ b/test/integration/test_utility.sh @@ -1,3 +1,5 @@ +#!/bin/bash + # Helper script for bash integration tests, intended to be source'd from the # _test.sh. # @@ -8,20 +10,22 @@ CURRENT_TEST="NONE" function start_test() { - CURRENT_TEST="$@" + CURRENT_TEST="$1" echo "TEST: $CURRENT_TEST" } check() { echo " check" "$@" ... + # see https://github.com/koalaman/shellcheck/issues/1679 + # shellcheck disable=SC2119 "$@" || handle_failure } -BACKGROUND_PID="?" +export BACKGROUND_PID="?" run_in_background_saving_pid() { echo " backgrounding:" "$@" ... "$@" & - BACKGROUND_PID="$!" + export BACKGROUND_PID="$!" } # By default, print a message like: @@ -35,6 +39,7 @@ run_in_background_saving_pid() { # Assumes it's being called from a failure-reporting function and that the # actual failure the user is interested in is our caller's caller. If it # weren't for this, fail and handle_failure could be the same. 
+# shellcheck disable=SC2120 handle_failure() { if [ $# -eq 1 ]; then echo FAILed Input: "$1" @@ -44,7 +49,7 @@ handle_failure() { # to avoid printing 'handle_failure' we start with 1 to skip get_stack caller local i local stack_size=${#FUNCNAME[@]} - for (( i=1; i<$stack_size ; i++ )); do + for (( i=1; i void { auto* admin_addr = bootstrap.mutable_admin()->mutable_address(); @@ -68,6 +75,7 @@ void UdsListenerIntegrationTest::initialize() { auto* listener = listeners->Add(); listener->set_name("listener_0"); listener->mutable_address()->mutable_pipe()->set_path(getListenerSocketName()); + listener->mutable_address()->mutable_pipe()->set_mode(getMode()); *(listener->mutable_filter_chains()) = filter_chains; }); HttpIntegrationTest::initialize(); @@ -84,6 +92,28 @@ HttpIntegrationTest::ConnectionCreationFunction UdsListenerIntegrationTest::crea }; } +// Excluding Windows; chmod(2) against Windows AF_UNIX socket files succeeds, +// but stat(2) against those returns ENOENT. +#ifndef WIN32 +TEST_P(UdsListenerIntegrationTest, TestSocketMode) { + if (abstract_namespace_) { + // stat(2) against sockets in abstract namespace is not possible + GTEST_SKIP(); + } + + initialize(); + + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + struct stat listener_stat; + EXPECT_EQ(os_sys_calls.stat(getListenerSocketName().c_str(), &listener_stat).rc_, 0); + if (mode_ == 0) { + EXPECT_NE(listener_stat.st_mode & 0777, 0); + } else { + EXPECT_EQ(listener_stat.st_mode & mode_, mode_); + } +} +#endif + TEST_P(UdsListenerIntegrationTest, TestPeerCredentials) { fake_upstreams_count_ = 1; initialize(); diff --git a/test/integration/uds_integration_test.h b/test/integration/uds_integration_test.h index b47463d6aea8..43fdeabd5b56 100644 --- a/test/integration/uds_integration_test.h +++ b/test/integration/uds_integration_test.h @@ -25,9 +25,8 @@ class UdsUpstreamIntegrationTest abstract_namespace_(std::get<1>(GetParam())) {} void createUpstreams() override { - fake_upstreams_.emplace_back(new FakeUpstream( - TestEnvironment::unixDomainSocketPath("udstest.1.sock", abstract_namespace_), - FakeHttpConnection::Type::HTTP1, timeSystem())); + addFakeUpstream(TestEnvironment::unixDomainSocketPath("udstest.1.sock", abstract_namespace_), + FakeHttpConnection::Type::HTTP1); config_helper_.addConfigModifier( [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { @@ -55,12 +54,12 @@ class UdsUpstreamIntegrationTest }; class UdsListenerIntegrationTest - : public testing::TestWithParam>, + : public testing::TestWithParam>, public HttpIntegrationTest { public: UdsListenerIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, std::get<0>(GetParam())), - abstract_namespace_(std::get<1>(GetParam())) {} + abstract_namespace_(std::get<1>(GetParam())), mode_(std::get<2>(GetParam())) {} void initialize() override; @@ -72,10 +71,13 @@ class UdsListenerIntegrationTest return TestEnvironment::unixDomainSocketPath("listener_0.sock", abstract_namespace_); } + mode_t getMode() { return mode_; } + protected: HttpIntegrationTest::ConnectionCreationFunction createConnectionFn(); const bool abstract_namespace_; + const mode_t mode_; }; } // namespace Envoy diff --git a/test/integration/utility.cc b/test/integration/utility.cc index a0e93000cb65..05ed83a8d656 100644 --- a/test/integration/utility.cc +++ b/test/integration/utility.cc @@ -18,6 +18,7 @@ #include "common/upstream/upstream_impl.h" #include "test/common/upstream/utility.h" +#include "test/mocks/common.h" #include "test/mocks/stats/mocks.h" #include 
"test/mocks/upstream/cluster_info.h" #include "test/test_common/network_utility.h" @@ -65,9 +66,11 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt const std::string& host, const std::string& content_type) { NiceMock mock_stats_store; + NiceMock random; Event::GlobalTimeSystem time_system; + NiceMock random_generator; Api::Impl api(Thread::threadFactoryForTest(), mock_stats_store, time_system, - Filesystem::fileSystemForTest()); + Filesystem::fileSystemForTest(), random_generator); Event::DispatcherPtr dispatcher(api.allocateDispatcher("test_thread")); std::shared_ptr cluster{new NiceMock()}; Upstream::HostDescriptionConstSharedPtr host_description{ @@ -76,7 +79,7 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt type, dispatcher->createClientConnection(addr, Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr), - host_description, *dispatcher); + host_description, *dispatcher, random); BufferingStreamDecoderPtr response(new BufferingStreamDecoder([&]() -> void { client.close(); dispatcher->exit(); diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index e1d74ee9ad94..9e9847804e58 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -147,7 +147,7 @@ class VhdsInitializationTest : public HttpIntegrationTest, // Overridden to insert this stuff into the initialize() at the very beginning of // HttpIntegrationTest::testRouterRequestAndResponseWithBody(). void initialize() override { - // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in + // Controls how many addFakeUpstream() will happen in // BaseIntegrationTest::createUpstreams() (which is part of initialize()). // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap // config that you use! @@ -269,7 +269,7 @@ class VhdsIntegrationTest : public HttpIntegrationTest, // Overridden to insert this stuff into the initialize() at the very beginning of // HttpIntegrationTest::testRouterRequestAndResponseWithBody(). void initialize() override { - // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in + // Controls how many addFakeUpstream() will happen in // BaseIntegrationTest::createUpstreams() (which is part of initialize()). // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap // config that you use! diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index a7e92f4bca6f..cf6682610975 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -208,7 +208,7 @@ TEST_P(WebsocketIntegrationTest, WebSocketConnectionUpstreamDisconnect) { // Verify both the data and the disconnect went through. 
response_->waitForBodyData(5); EXPECT_EQ("world", response_->body()); - waitForClientDisconnectOrReset(); + waitForClientDisconnectOrReset(Http::StreamResetReason::ConnectError); } TEST_P(WebsocketIntegrationTest, EarlyData) { diff --git a/test/integration/websocket_integration_test.h b/test/integration/websocket_integration_test.h index c060f043c732..5f3aae0edf99 100644 --- a/test/integration/websocket_integration_test.h +++ b/test/integration/websocket_integration_test.h @@ -1,5 +1,7 @@ #pragma once +#include "envoy/http/codec.h" + #include "test/integration/http_protocol_integration.h" #include "gtest/gtest.h" @@ -35,9 +37,11 @@ class WebsocketIntegrationTest : public HttpProtocolIntegrationTest { } } - void waitForClientDisconnectOrReset() { + void waitForClientDisconnectOrReset( + Http::StreamResetReason reason = Http::StreamResetReason::RemoteReset) { if (downstreamProtocol() != Http::CodecClient::Type::HTTP1) { response_->waitForReset(); + ASSERT_EQ(reason, response_->resetReason()); } else { ASSERT_TRUE(codec_client_->waitForDisconnect()); } diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index d3f62abf43fa..4e0dcaf73c07 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -76,6 +76,31 @@ TEST_P(XdsIntegrationTestTypedStruct, RouterRequestAndResponseWithBodyNoBuffer) testRouterRequestAndResponseWithBody(1024, 512, false); } +class UdpaXdsIntegrationTestListCollection : public XdsIntegrationTest { +public: + UdpaXdsIntegrationTestListCollection() = default; + + void createEnvoy() override { + // TODO(htuch): Convert CDS/EDS/RDS to UDPA list collections when support is implemented in + // Envoy. + createEnvoyServer({ + "test/config/integration/server_xds.bootstrap.udpa.yaml", + "test/config/integration/server_xds.cds.yaml", + "test/config/integration/server_xds.eds.yaml", + "test/config/integration/server_xds.lds.udpa.list_collection.yaml", + "test/config/integration/server_xds.rds.yaml", + }); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, UdpaXdsIntegrationTestListCollection, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(UdpaXdsIntegrationTestListCollection, RouterRequestAndResponseWithBodyNoBuffer) { + testRouterRequestAndResponseWithBody(1024, 512, false); +} + class LdsInplaceUpdateTcpProxyIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { @@ -257,7 +282,7 @@ class LdsInplaceUpdateHttpIntegrationTest LdsInplaceUpdateHttpIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} - void initialize() override { + void inplaceInitialize(bool add_default_filter_chain = false) { autonomous_upstream_ = true; setUpstreamCount(2); @@ -265,7 +290,14 @@ class LdsInplaceUpdateHttpIntegrationTest std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); config_helper_.addListenerFilter(tls_inspector_config); config_helper_.addSslConfig(); - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + config_helper_.addConfigModifier([this, add_default_filter_chain]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + if (!use_default_balancer_) { + bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_connection_balance_config() + ->mutable_exact_balance(); + } auto* filter_chain_0 = bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); 
*filter_chain_0->mutable_filter_chain_match()->mutable_application_protocols()->Add() = @@ -296,6 +328,13 @@ class LdsInplaceUpdateHttpIntegrationTest bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( *bootstrap.mutable_static_resources()->mutable_clusters(0)); bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + + if (add_default_filter_chain) { + auto default_filter_chain = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_default_filter_chain(); + default_filter_chain->MergeFrom(*filter_chain_0); + } }); BaseIntegrationTest::initialize(); @@ -331,22 +370,33 @@ class LdsInplaceUpdateHttpIntegrationTest } } + void expectConnenctionServed(std::string alpn = "alpn0") { + auto codec_client_after_config_update = createHttpCodec(alpn); + expectResponseHeaderConnectionClose(*codec_client_after_config_update, false); + codec_client_after_config_update->close(); + } + std::unique_ptr context_manager_; Network::TransportSocketFactoryPtr context_; testing::NiceMock secret_manager_; Network::Address::InstanceConstSharedPtr address_; + bool use_default_balancer_{false}; }; -// Verify that http response on filter chain 0 has "Connection: close" header when filter chain 0 -// is deleted during the listener update. +// Verify that http response on filter chain 1 and default filter chain have "Connection: close" +// header when these 2 filter chains are deleted during the listener update. TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { - initialize(); + inplaceInitialize(/*add_default_filter_chain=*/true); auto codec_client_1 = createHttpCodec("alpn1"); auto codec_client_0 = createHttpCodec("alpn0"); - Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get()]() { + auto codec_client_default = createHttpCodec("alpndefault"); + + Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get(), + c_default = codec_client_default.get()]() { c1->close(); c0->close(); + c_default->close(); }); ConfigHelper new_config_helper(version_, *api_, MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); @@ -354,6 +404,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); listener->mutable_filter_chains()->RemoveLast(); + listener->clear_default_filter_chain(); }); new_config_helper.setLds("1"); @@ -361,38 +412,103 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); expectResponseHeaderConnectionClose(*codec_client_1, true); + expectResponseHeaderConnectionClose(*codec_client_default, true); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 0); expectResponseHeaderConnectionClose(*codec_client_0, false); + expectConnenctionServed(); } // Verify that http clients of filter chain 0 survives if new listener config adds new filter -// chain 2. +// chain 2 and default filter chain. 
TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { - initialize(); + inplaceInitialize(); test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); auto codec_client_0 = createHttpCodec("alpn0"); Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); }); ConfigHelper new_config_helper(version_, *api_, MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); - new_config_helper.addConfigModifier( - [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); - *listener->mutable_filter_chains(2) - ->mutable_filter_chain_match() - ->mutable_application_protocols(0) = "alpn2"; - }); + new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) + -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + *listener->mutable_filter_chains(2) + ->mutable_filter_chain_match() + ->mutable_application_protocols(0) = "alpn2"; + auto default_filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); + default_filter_chain->MergeFrom(*listener->mutable_filter_chains(1)); + }); new_config_helper.setLds("1"); test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); auto codec_client_2 = createHttpCodec("alpn2"); - Cleanup cleanup2([c2 = codec_client_2.get()]() { c2->close(); }); + auto codec_client_default = createHttpCodec("alpndefault"); + + Cleanup cleanup2([c2 = codec_client_2.get(), c_default = codec_client_default.get()]() { + c2->close(); + c_default->close(); + }); expectResponseHeaderConnectionClose(*codec_client_2, false); + expectResponseHeaderConnectionClose(*codec_client_default, false); + expectResponseHeaderConnectionClose(*codec_client_0, false); + expectConnenctionServed(); +} + +// Verify that http clients of default filter chain is drained and recreated if the default filter +// chain updates. 
+TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigUpdatingDefaultFilterChain) { + inplaceInitialize(true); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + + auto codec_client_default = createHttpCodec("alpndefault"); + Cleanup cleanup0([c_default = codec_client_default.get()]() { c_default->close(); }); + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) + -> void { + auto default_filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); + default_filter_chain->set_name("default_filter_chain_v2"); + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); + + auto codec_client_default_v2 = createHttpCodec("alpndefaultv2"); + + Cleanup cleanup2([c_default_v2 = codec_client_default_v2.get()]() { c_default_v2->close(); }); + expectResponseHeaderConnectionClose(*codec_client_default, true); + expectResponseHeaderConnectionClose(*codec_client_default_v2, false); + expectConnenctionServed(); +} + +// Verify that balancer is inherited. Test only default balancer because ExactConnectionBalancer +// is verified in filter chain add and delete test case. +TEST_P(LdsInplaceUpdateHttpIntegrationTest, OverlappingFilterChainServesNewConnection) { + use_default_balancer_ = true; + inplaceInitialize(); + + auto codec_client_0 = createHttpCodec("alpn0"); + Cleanup cleanup([c0 = codec_client_0.get()]() { c0->close(); }); + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); expectResponseHeaderConnectionClose(*codec_client_0, false); + expectConnenctionServed(); } +// Verify default filter chain update is filter chain only update. 
+TEST_P(LdsInplaceUpdateHttpIntegrationTest, DefaultFilterChainUpdate) {} INSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateHttpIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); diff --git a/test/integration/xfcc_integration_test.cc b/test/integration/xfcc_integration_test.cc index e14ee0ef7e30..3b299de13c51 100644 --- a/test/integration/xfcc_integration_test.cc +++ b/test/integration/xfcc_integration_test.cc @@ -118,8 +118,7 @@ Network::ClientConnectionPtr XfccIntegrationTest::makeMtlsClientConnection() { } void XfccIntegrationTest::createUpstreams() { - fake_upstreams_.emplace_back(new FakeUpstream( - createUpstreamSslContext(), 0, FakeHttpConnection::Type::HTTP1, version_, timeSystem())); + addFakeUpstream(createUpstreamSslContext(), FakeHttpConnection::Type::HTTP1); } void XfccIntegrationTest::initialize() { @@ -745,6 +744,8 @@ TEST_P(XfccIntegrationTest, TagExtractedNameGenerationTest) { {"server.parent_connections", "server.parent_connections"}, {"server.total_connections", "server.total_connections"}, {"server.days_until_first_cert_expiring", "server.days_until_first_cert_expiring"}, + {"server.seconds_until_first_ocsp_response_expiring", + "server.seconds_until_first_ocsp_response_expiring"}, {"server.version", "server.version"}}; auto test_name_against_mapping = diff --git a/test/mocks/BUILD b/test/mocks/BUILD index f12ced49f118..ca36c72a401a 100644 --- a/test/mocks/BUILD +++ b/test/mocks/BUILD @@ -16,7 +16,6 @@ envoy_cc_test_library( "//include/envoy/common:conn_pool_interface", "//include/envoy/common:random_generator_interface", "//include/envoy/common:time_interface", - "//include/envoy/common:token_bucket_interface", "//source/common/common:minimal_logger_lib", "//test/test_common:test_time_lib", ], diff --git a/test/mocks/api/mocks.cc b/test/mocks/api/mocks.cc index e1bd53bbeca6..6678bf4b15ba 100644 --- a/test/mocks/api/mocks.cc +++ b/test/mocks/api/mocks.cc @@ -15,6 +15,7 @@ namespace Api { MockApi::MockApi() { ON_CALL(*this, fileSystem()).WillByDefault(ReturnRef(file_system_)); ON_CALL(*this, rootScope()).WillByDefault(ReturnRef(stats_store_)); + ON_CALL(*this, randomGenerator()).WillByDefault(ReturnRef(random_)); } MockApi::~MockApi() = default; @@ -30,8 +31,20 @@ Event::DispatcherPtr MockApi::allocateDispatcher(const std::string& name, MockOsSysCalls::MockOsSysCalls() { ON_CALL(*this, close(_)).WillByDefault(Invoke([](os_fd_t fd) { +#ifdef WIN32 + int rc = ::closesocket(fd); + int last_error = ::GetLastError(); + // It might be the case that the fd is not actually a socket. In that case Winsock api is + // failing with error `WSAENOTSOCK`. In that case we fall back to a regular close. 
+ if (last_error == WSAENOTSOCK) { + rc = ::close(fd); + last_error = ::GetLastError(); + } + return SysCallIntResult{rc, last_error}; +#else const int rc = ::close(fd); return SysCallIntResult{rc, errno}; +#endif })); } @@ -52,7 +65,6 @@ SysCallIntResult MockOsSysCalls::setsockopt(os_fd_t sockfd, int level, int optna SysCallIntResult MockOsSysCalls::getsockopt(os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) { - ASSERT(*optlen == sizeof(int) || *optlen == sizeof(sockaddr_storage)); int val = 0; const auto& it = boolsockopts_.find(SockOptKey(sockfd, level, optname)); if (it != boolsockopts_.end()) { diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index da145a98a54b..afefc9600348 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -14,6 +14,7 @@ #include "common/api/os_sys_calls_impl_linux.h" #endif +#include "test/mocks/common.h" #include "test/mocks/filesystem/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/test_time.h" @@ -41,11 +42,13 @@ class MockApi : public Api { MOCK_METHOD(Filesystem::Instance&, fileSystem, ()); MOCK_METHOD(Thread::ThreadFactory&, threadFactory, ()); MOCK_METHOD(const Stats::Scope&, rootScope, ()); + MOCK_METHOD(Random::RandomGenerator&, randomGenerator, ()); MOCK_METHOD(ProcessContextOptRef, processContext, ()); testing::NiceMock file_system_; Event::GlobalTimeSystem time_system_; testing::NiceMock stats_store_; + testing::NiceMock random_; }; class MockOsSysCalls : public OsSysCallsImpl { @@ -59,6 +62,7 @@ class MockOsSysCalls : public OsSysCallsImpl { SysCallIntResult getsockopt(os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) override; + MOCK_METHOD(SysCallSocketResult, accept, (os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen)); MOCK_METHOD(SysCallIntResult, bind, (os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen)); MOCK_METHOD(SysCallIntResult, ioctl, (os_fd_t sockfd, unsigned long int request, void* argp)); MOCK_METHOD(SysCallIntResult, close, (os_fd_t)); diff --git a/test/mocks/buffer/mocks.h b/test/mocks/buffer/mocks.h index 6918729c7b39..87d9c11f2942 100644 --- a/test/mocks/buffer/mocks.h +++ b/test/mocks/buffer/mocks.h @@ -20,7 +20,6 @@ template class MockBufferBase : public BaseClass { MockBufferBase(std::function below_low, std::function above_high, std::function above_overflow); - MOCK_METHOD(Api::IoCallUint64Result, write, (Network::IoHandle & io_handle)); MOCK_METHOD(void, move, (Buffer::Instance & rhs)); MOCK_METHOD(void, move, (Buffer::Instance & rhs, uint64_t length)); MOCK_METHOD(void, drain, (uint64_t size)); @@ -28,31 +27,14 @@ template class MockBufferBase : public BaseClass { void baseMove(Buffer::Instance& rhs) { BaseClass::move(rhs); } void baseDrain(uint64_t size) { BaseClass::drain(size); } - Api::IoCallUint64Result trackWrites(Network::IoHandle& io_handle) { - Api::IoCallUint64Result result = BaseClass::write(io_handle); - if (result.ok() && result.rc_ > 0) { - bytes_written_ += result.rc_; - } - return result; - } - void trackDrains(uint64_t size) { bytes_drained_ += size; BaseClass::drain(size); } - // A convenience function to invoke on write() which fails the write with EAGAIN. 
- Api::IoCallUint64Result failWrite(Network::IoHandle&) { - return Api::IoCallUint64Result( - /*rc=*/0, - Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), [](Api::IoError*) {})); - } - - int bytes_written() const { return bytes_written_; } - uint64_t bytes_drained() const { return bytes_drained_; } + uint64_t bytesDrained() const { return bytes_drained_; } private: - int bytes_written_{0}; uint64_t bytes_drained_{0}; }; @@ -71,8 +53,6 @@ template <> MockBufferBase::MockBufferBase(); class MockBuffer : public MockBufferBase { public: MockBuffer() { - ON_CALL(*this, write(testing::_)) - .WillByDefault(testing::Invoke(this, &MockBuffer::trackWrites)); ON_CALL(*this, move(testing::_)).WillByDefault(testing::Invoke(this, &MockBuffer::baseMove)); } }; @@ -84,8 +64,6 @@ class MockWatermarkBuffer : public MockBufferBase { MockWatermarkBuffer(std::function below_low, std::function above_high, std::function above_overflow) : BaseClass(below_low, above_high, above_overflow) { - ON_CALL(*this, write(testing::_)) - .WillByDefault(testing::Invoke(this, &MockWatermarkBuffer::trackWrites)); ON_CALL(*this, move(testing::_)) .WillByDefault(testing::Invoke(this, &MockWatermarkBuffer::baseMove)); } diff --git a/test/mocks/common.h b/test/mocks/common.h index ee29efa4c333..e4a4b37f6756 100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -6,8 +6,6 @@ #include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" -#include "envoy/common/token_bucket.h" -#include "envoy/event/timer.h" #include "common/common/logger.h" diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 34efe4c30bc9..943630ef4f2f 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -77,6 +77,11 @@ class MockSubscriptionFactory : public SubscriptionFactory { (const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, Stats::Scope& scope, SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder)); + MOCK_METHOD(SubscriptionPtr, collectionSubscriptionFromUrl, + (const udpa::core::v1::ResourceLocator& collection_locator, + const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, + Stats::Scope& scope, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder)); MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); MockSubscription* subscription_{}; diff --git a/test/mocks/event/BUILD b/test/mocks/event/BUILD index 8a27b1804d07..a337290e5c01 100644 --- a/test/mocks/event/BUILD +++ b/test/mocks/event/BUILD @@ -1,6 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", + "envoy_cc_test_library", "envoy_package", ) @@ -27,3 +28,11 @@ envoy_cc_mock( "//test/test_common:test_time_lib", ], ) + +envoy_cc_test_library( + name = "wrapped_dispatcher", + hdrs = ["wrapped_dispatcher.h"], + deps = [ + "//include/envoy/event:dispatcher_interface", + ], +) diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index d263c3ad68bc..215e7cafbdb4 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -49,7 +49,11 @@ MockTimer::MockTimer(MockDispatcher* dispatcher) : MockTimer() { .RetiresOnSaturation(); } -MockTimer::~MockTimer() = default; +MockTimer::~MockTimer() { + if (timer_destroyed_) { + *timer_destroyed_ = true; + } +} MockSchedulableCallback::~MockSchedulableCallback() = default; diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 605005fe471a..35475b694b97 100644 --- 
a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -69,9 +69,9 @@ class MockDispatcher : public Dispatcher { return Network::ListenerPtr{createListener_(std::move(socket), cb, bind_to_port, backlog_size)}; } - Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr&& socket, + Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb) override { - return Network::UdpListenerPtr{createUdpListener_(std::move(socket), cb)}; + return Network::UdpListenerPtr{createUdpListener_(socket, cb)}; } Event::TimerPtr createTimer(Event::TimerCb cb) override { @@ -118,7 +118,7 @@ class MockDispatcher : public Dispatcher { (Network::SocketSharedPtr && socket, Network::TcpListenerCallbacks& cb, bool bind_to_port, uint32_t backlog_size)); MOCK_METHOD(Network::UdpListener*, createUdpListener_, - (Network::SocketSharedPtr && socket, Network::UdpListenerCallbacks& cb)); + (Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb)); MOCK_METHOD(Timer*, createTimer_, (Event::TimerCb cb)); MOCK_METHOD(SchedulableCallback*, createSchedulableCallback_, (std::function cb)); MOCK_METHOD(void, deferredDelete_, (DeferredDeletable * to_delete)); @@ -161,10 +161,8 @@ class MockTimer : public Timer { // Timer MOCK_METHOD(void, disableTimer, ()); - MOCK_METHOD(void, enableTimer, - (const std::chrono::milliseconds&, const ScopeTrackedObject* scope)); - MOCK_METHOD(void, enableHRTimer, - (const std::chrono::microseconds&, const ScopeTrackedObject* scope)); + MOCK_METHOD(void, enableTimer, (std::chrono::milliseconds, const ScopeTrackedObject* scope)); + MOCK_METHOD(void, enableHRTimer, (std::chrono::microseconds, const ScopeTrackedObject* scope)); MOCK_METHOD(bool, enabled, ()); MockDispatcher* dispatcher_{}; @@ -172,6 +170,9 @@ class MockTimer : public Timer { bool enabled_{}; Event::TimerCb callback_; + + // If not nullptr, will be set on dtor. This can help to verify that the timer was destroyed. + bool* timer_destroyed_{}; }; class MockSchedulableCallback : public SchedulableCallback { diff --git a/test/mocks/event/wrapped_dispatcher.h b/test/mocks/event/wrapped_dispatcher.h new file mode 100644 index 000000000000..172066506bf0 --- /dev/null +++ b/test/mocks/event/wrapped_dispatcher.h @@ -0,0 +1,111 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/event/dispatcher.h" + +namespace Envoy { +namespace Event { + +// Dispatcher implementation that forwards all methods to another implementation +// class. Subclassing this provides a convenient way to forward most methods and +// override the behavior of a few. 
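The comment above describes how the WrappedDispatcher class that follows is meant to be used; a short usage sketch may help (illustration only, not part of the patch; the subclass name and counter are hypothetical):

class TimerCountingDispatcher : public WrappedDispatcher {
public:
  using WrappedDispatcher::WrappedDispatcher;

  // Count timer creations, then forward to the wrapped dispatcher.
  TimerPtr createTimer(TimerCb cb) override {
    ++timers_created_;
    return impl_.createTimer(std::move(cb));
  }

  uint64_t timers_created_{0};
};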
+class WrappedDispatcher : public Dispatcher { +public: + WrappedDispatcher(Dispatcher& impl) : impl_(impl) {} + + // Event::Dispatcher + const std::string& name() override { return impl_.name(); } + + TimeSource& timeSource() override { return impl_.timeSource(); } + + void initializeStats(Stats::Scope& scope, const absl::optional& prefix) override { + impl_.initializeStats(scope, prefix); + } + + void clearDeferredDeleteList() override { impl_.clearDeferredDeleteList(); } + + Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, + Network::TransportSocketPtr&& transport_socket, + StreamInfo::StreamInfo& stream_info) override { + return impl_.createServerConnection(std::move(socket), std::move(transport_socket), + stream_info); + } + + Network::ClientConnectionPtr + createClientConnection(Network::Address::InstanceConstSharedPtr address, + Network::Address::InstanceConstSharedPtr source_address, + Network::TransportSocketPtr&& transport_socket, + const Network::ConnectionSocket::OptionsSharedPtr& options) override { + return impl_.createClientConnection(std::move(address), std::move(source_address), + std::move(transport_socket), options); + } + + Network::DnsResolverSharedPtr + createDnsResolver(const std::vector& resolvers, + const bool use_tcp_for_dns_lookups) override { + return impl_.createDnsResolver(resolvers, use_tcp_for_dns_lookups); + } + + FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, + uint32_t events) override { + return impl_.createFileEvent(fd, cb, trigger, events); + } + + Filesystem::WatcherPtr createFilesystemWatcher() override { + return impl_.createFilesystemWatcher(); + } + + Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket, + Network::TcpListenerCallbacks& cb, bool bind_to_port, + uint32_t backlog_size) override { + return impl_.createListener(std::move(socket), cb, bind_to_port, backlog_size); + } + + Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket, + Network::UdpListenerCallbacks& cb) override { + return impl_.createUdpListener(std::move(socket), cb); + } + + TimerPtr createTimer(TimerCb cb) override { return impl_.createTimer(std::move(cb)); } + + Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) override { + return impl_.createSchedulableCallback(std::move(cb)); + } + + void deferredDelete(DeferredDeletablePtr&& to_delete) override { + impl_.deferredDelete(std::move(to_delete)); + } + + void exit() override { impl_.exit(); } + + SignalEventPtr listenForSignal(int signal_num, SignalCb cb) override { + return impl_.listenForSignal(signal_num, std::move(cb)); + } + + void post(std::function callback) override { impl_.post(std::move(callback)); } + + void run(RunType type) override { impl_.run(type); } + + Buffer::WatermarkFactory& getWatermarkFactory() override { return impl_.getWatermarkFactory(); } + const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) override { + return impl_.setTrackedObject(object); + } + + MonotonicTime approximateMonotonicTime() const override { + return impl_.approximateMonotonicTime(); + } + + void updateApproximateMonotonicTime() override { impl_.updateApproximateMonotonicTime(); } + + bool isThreadSafe() const override { return impl_.isThreadSafe(); } + +protected: + Dispatcher& impl_; +}; + +} // namespace Event +} // namespace Envoy \ No newline at end of file diff --git a/test/mocks/grpc/mocks.cc b/test/mocks/grpc/mocks.cc index bd7a07416f35..f5f62eb18112 100644 --- 
a/test/mocks/grpc/mocks.cc +++ b/test/mocks/grpc/mocks.cc @@ -2,12 +2,15 @@ #include "test/mocks/http/mocks.h" +using testing::Return; + namespace Envoy { namespace Grpc { MockAsyncClient::MockAsyncClient() { async_request_ = std::make_unique>(); ON_CALL(*this, sendRaw(_, _, _, _, _, _)).WillByDefault(Return(async_request_.get())); + ON_CALL(*this, dispatcher()).WillByDefault(Return(&dispatcher_)); } MockAsyncClient::~MockAsyncClient() = default; diff --git a/test/mocks/grpc/mocks.h b/test/mocks/grpc/mocks.h index 476ba677f945..c260c254e596 100644 --- a/test/mocks/grpc/mocks.h +++ b/test/mocks/grpc/mocks.h @@ -11,6 +11,7 @@ #include "common/grpc/typed_async_client.h" +#include "test/mocks/event/mocks.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -88,8 +89,10 @@ class MockAsyncClient : public RawAsyncClient { (absl::string_view service_full_name, absl::string_view method_name, RawAsyncStreamCallbacks& callbacks, const Http::AsyncClient::StreamOptions& options)); + MOCK_METHOD(Event::Dispatcher*, dispatcher, ()); std::unique_ptr> async_request_; + Event::MockDispatcher dispatcher_; }; class MockAsyncClientFactory : public AsyncClientFactory { diff --git a/test/mocks/http/BUILD b/test/mocks/http/BUILD index d169464f6c26..ca7705d8881f 100644 --- a/test/mocks/http/BUILD +++ b/test/mocks/http/BUILD @@ -50,6 +50,7 @@ envoy_cc_mock( "//include/envoy/http:filter_interface", "//include/envoy/ssl:connection_interface", "//include/envoy/tracing:http_tracer_interface", + "//source/common/http:filter_manager_lib", "//source/common/http:header_map_lib", "//test/mocks/event:event_mocks", "//test/mocks/router:router_mocks", diff --git a/test/mocks/http/conn_pool.h b/test/mocks/http/conn_pool.h index 4fd32853cfa9..fcfb5e090c51 100644 --- a/test/mocks/http/conn_pool.h +++ b/test/mocks/http/conn_pool.h @@ -22,6 +22,7 @@ class MockInstance : public Instance { MOCK_METHOD(void, drainConnections, ()); MOCK_METHOD(bool, hasActiveConnections, (), (const)); MOCK_METHOD(Cancellable*, newStream, (ResponseDecoder & response_decoder, Callbacks& callbacks)); + MOCK_METHOD(bool, maybePrefetch, (float)); MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); std::shared_ptr> host_; diff --git a/test/mocks/http/mocks.cc b/test/mocks/http/mocks.cc index 8ab44f59099e..639295ae16d3 100644 --- a/test/mocks/http/mocks.cc +++ b/test/mocks/http/mocks.cc @@ -2,6 +2,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/event/dispatcher.h" +#include "envoy/http/header_map.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -20,6 +21,16 @@ MockConnectionCallbacks::~MockConnectionCallbacks() = default; MockServerConnectionCallbacks::MockServerConnectionCallbacks() = default; MockServerConnectionCallbacks::~MockServerConnectionCallbacks() = default; +MockFilterManagerCallbacks::MockFilterManagerCallbacks() { + ON_CALL(*this, responseHeaders()).WillByDefault(Invoke([this]() -> ResponseHeaderMapOptRef { + if (response_headers_) { + return absl::make_optional(std::ref(*response_headers_)); + } + return absl::nullopt; + })); +} +MockFilterManagerCallbacks::~MockFilterManagerCallbacks() = default; + MockStreamCallbacks::MockStreamCallbacks() = default; MockStreamCallbacks::~MockStreamCallbacks() = default; @@ -78,16 +89,15 @@ void MockStreamDecoderFilterCallbacks::sendLocalReply_( Code code, absl::string_view body, std::function modify_headers, const absl::optional grpc_status, absl::string_view details) { - details_ = std::string(details); Utility::sendLocalReply( stream_destroyed_, 
Utility::EncodeFunctions{ nullptr, nullptr, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + [this, modify_headers, details](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { if (modify_headers != nullptr) { modify_headers(*headers); } - encodeHeaders(std::move(headers), end_stream); + encodeHeaders(std::move(headers), end_stream, details); }, [this](Buffer::Instance& data, bool end_stream) -> void { encodeData(data, end_stream); diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 94550d19ae12..ce9eb19fb646 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -14,6 +14,7 @@ #include "envoy/http/filter.h" #include "envoy/ssl/connection.h" +#include "common/http/filter_manager.h" #include "common/http/header_map_impl.h" #include "common/http/utility.h" @@ -47,6 +48,55 @@ class MockConnectionCallbacks : public virtual ConnectionCallbacks { MOCK_METHOD(void, onGoAway, (GoAwayErrorCode error_code)); }; +class MockFilterManagerCallbacks : public FilterManagerCallbacks { +public: + MockFilterManagerCallbacks(); + ~MockFilterManagerCallbacks() override; + + MOCK_METHOD(void, encodeHeaders, (ResponseHeaderMap&, bool)); + MOCK_METHOD(void, encode100ContinueHeaders, (ResponseHeaderMap&)); + MOCK_METHOD(void, encodeData, (Buffer::Instance&, bool)); + MOCK_METHOD(void, encodeTrailers, (ResponseTrailerMap&)); + MOCK_METHOD(void, encodeMetadata, (MetadataMapVector&)); + MOCK_METHOD(void, setRequestTrailers, (RequestTrailerMapPtr &&)); + MOCK_METHOD(void, setContinueHeaders, (ResponseHeaderMapPtr &&)); + MOCK_METHOD(void, setResponseHeaders_, (ResponseHeaderMap&)); + void setResponseHeaders(ResponseHeaderMapPtr&& response_headers) override { + // TODO(snowp): Repeat this pattern for all setters. 
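// (Illustrative sketch, not part of this patch: with this setter stashing the map and the
// default action in mocks.cc above returning it from responseHeaders(), a test can do
// roughly the following, assuming Http::ResponseHeaderMapImpl::create() for construction:
//   callbacks.setResponseHeaders(Http::ResponseHeaderMapImpl::create());
//   ASSERT_TRUE(callbacks.responseHeaders().has_value());
// Names here are for illustration only.)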
+ response_headers_ = std::move(response_headers); + setResponseHeaders_(*response_headers_); + } + MOCK_METHOD(void, setResponseTrailers, (ResponseTrailerMapPtr &&)); + MOCK_METHOD(RequestHeaderMapOptRef, requestHeaders, ()); + MOCK_METHOD(RequestTrailerMapOptRef, requestTrailers, ()); + MOCK_METHOD(ResponseHeaderMapOptRef, continueHeaders, ()); + MOCK_METHOD(ResponseHeaderMapOptRef, responseHeaders, ()); + MOCK_METHOD(ResponseTrailerMapOptRef, responseTrailers, ()); + MOCK_METHOD(void, endStream, ()); + MOCK_METHOD(void, onDecoderFilterBelowWriteBufferLowWatermark, ()); + MOCK_METHOD(void, onDecoderFilterAboveWriteBufferHighWatermark, ()); + MOCK_METHOD(void, upgradeFilterChainCreated, ()); + MOCK_METHOD(void, disarmRequestTimeout, ()); + MOCK_METHOD(void, resetIdleTimer, ()); + MOCK_METHOD(void, recreateStream, (StreamInfo::FilterStateSharedPtr filter_state)); + MOCK_METHOD(void, resetStream, ()); + MOCK_METHOD(const Router::RouteEntry::UpgradeMap*, upgradeMap, ()); + MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, clusterInfo, ()); + MOCK_METHOD(Router::RouteConstSharedPtr, route, (const Router::RouteCallback& cb)); + MOCK_METHOD(void, clearRouteCache, ()); + MOCK_METHOD(absl::optional, routeConfig, ()); + MOCK_METHOD(void, requestRouteConfigUpdate, (Http::RouteConfigUpdatedCallbackSharedPtr)); + MOCK_METHOD(Tracing::Span&, activeSpan, ()); + MOCK_METHOD(void, onResponseDataTooLarge, ()); + MOCK_METHOD(void, onRequestDataTooLarge, ()); + MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); + MOCK_METHOD(void, onLocalReply, (Code code)); + MOCK_METHOD(Tracing::Config&, tracingConfig, ()); + MOCK_METHOD(const ScopeTrackedObject&, scope, ()); + + ResponseHeaderMapPtr response_headers_; +}; + class MockServerConnectionCallbacks : public ServerConnectionCallbacks, public MockConnectionCallbacks { public: @@ -164,13 +214,21 @@ class MockStreamDecoderFilterCallbacks : public StreamDecoderFilterCallbacks, void encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override { encode100ContinueHeaders_(*headers); } - void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override { + void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, + absl::string_view details) override { + stream_info_.setResponseCodeDetails(details); encodeHeaders_(*headers, end_stream); } void encodeTrailers(ResponseTrailerMapPtr&& trailers) override { encodeTrailers_(*trailers); } void encodeMetadata(MetadataMapPtr&& metadata_map) override { encodeMetadata_(std::move(metadata_map)); } + absl::string_view details() { + if (stream_info_.responseCodeDetails()) { + return stream_info_.responseCodeDetails().value(); + } + return ""; + } MOCK_METHOD(void, continueDecoding, ()); MOCK_METHOD(void, addDecodedData, (Buffer::Instance & data, bool streaming)); @@ -195,7 +253,6 @@ class MockStreamDecoderFilterCallbacks : public StreamDecoderFilterCallbacks, testing::NiceMock active_span_; testing::NiceMock tracing_config_; testing::NiceMock scope_; - std::string details_; bool is_grpc_request_{}; bool is_head_request_{false}; bool stream_destroyed_{}; @@ -235,6 +292,11 @@ class MockStreamEncoderFilterCallbacks : public StreamEncoderFilterCallbacks, MOCK_METHOD(void, continueEncoding, ()); MOCK_METHOD(const Buffer::Instance*, encodingBuffer, ()); MOCK_METHOD(void, modifyEncodingBuffer, (std::function)); + MOCK_METHOD(void, sendLocalReply, + (Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, + absl::string_view details)); 
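(Illustrative aside, not part of this patch: because encodeHeaders() now forwards the details string into stream_info_, tests that used the removed details_ member can assert the same information through the new details() accessor. A hedged sketch, assuming the public sendLocalReply_ helper from mocks.cc above and the usual testing:: using-declarations:)

    NiceMock<Http::MockStreamDecoderFilterCallbacks> callbacks;
    EXPECT_CALL(callbacks, encodeHeaders_(_, /*end_stream=*/true));
    callbacks.sendLocalReply_(Http::Code::BadRequest, /*body=*/"", /*modify_headers=*/nullptr,
                              /*grpc_status=*/absl::nullopt, "filter_rejected_request");
    EXPECT_EQ("filter_rejected_request", callbacks.details());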
MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); Buffer::InstancePtr buffer_; @@ -249,6 +311,7 @@ class MockStreamDecoderFilter : public StreamDecoderFilter { ~MockStreamDecoderFilter() override; // Http::StreamFilterBase + MOCK_METHOD(void, onStreamComplete, ()); MOCK_METHOD(void, onDestroy, ()); // Http::StreamDecoderFilter @@ -273,6 +336,7 @@ class MockStreamEncoderFilter : public StreamEncoderFilter { ~MockStreamEncoderFilter() override; // Http::StreamFilterBase + MOCK_METHOD(void, onStreamComplete, ()); MOCK_METHOD(void, onDestroy, ()); // Http::MockStreamEncoderFilter @@ -293,6 +357,7 @@ class MockStreamFilter : public StreamFilter { ~MockStreamFilter() override; // Http::StreamFilterBase + MOCK_METHOD(void, onStreamComplete, ()); MOCK_METHOD(void, onDestroy, ()); // Http::StreamDecoderFilter diff --git a/test/mocks/local_reply/BUILD b/test/mocks/local_reply/BUILD new file mode 100644 index 000000000000..7467186cb044 --- /dev/null +++ b/test/mocks/local_reply/BUILD @@ -0,0 +1,16 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_mock( + name = "local_reply_mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = ["//source/common/local_reply:local_reply_lib"], +) diff --git a/test/mocks/local_reply/mocks.cc b/test/mocks/local_reply/mocks.cc new file mode 100644 index 000000000000..f6219934def1 --- /dev/null +++ b/test/mocks/local_reply/mocks.cc @@ -0,0 +1,8 @@ +#include "test/mocks/local_reply/mocks.h" + +namespace Envoy { +namespace LocalReply { +MockLocalReply::MockLocalReply() = default; +MockLocalReply::~MockLocalReply() = default; +} // namespace LocalReply +} // namespace Envoy \ No newline at end of file diff --git a/test/mocks/local_reply/mocks.h b/test/mocks/local_reply/mocks.h new file mode 100644 index 000000000000..3d0a7ddeab88 --- /dev/null +++ b/test/mocks/local_reply/mocks.h @@ -0,0 +1,19 @@ +#include "common/local_reply/local_reply.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace LocalReply { +class MockLocalReply : public LocalReply { +public: + MockLocalReply(); + ~MockLocalReply() override; + + MOCK_METHOD(void, rewrite, + (const Http::RequestHeaderMap* request_headers, + Http::ResponseHeaderMap& response_headers, StreamInfo::StreamInfoImpl& stream_info, + Http::Code& code, std::string& body, absl::string_view& content_type), + (const)); +}; +} // namespace LocalReply +} // namespace Envoy \ No newline at end of file diff --git a/test/mocks/network/connection.h b/test/mocks/network/connection.h index 3c26f85b4bf5..6a7856887fe0 100644 --- a/test/mocks/network/connection.h +++ b/test/mocks/network/connection.h @@ -86,6 +86,7 @@ class MockConnection : public Connection, public MockConnectionBase { MOCK_METHOD(const StreamInfo::StreamInfo&, streamInfo, (), (const)); MOCK_METHOD(void, setDelayedCloseTimeout, (std::chrono::milliseconds)); MOCK_METHOD(absl::string_view, transportFailureReason, (), (const)); + MOCK_METHOD(absl::optional, lastRoundTripTime, (), (const)); }; /** @@ -133,6 +134,7 @@ class MockClientConnection : public ClientConnection, public MockConnectionBase MOCK_METHOD(const StreamInfo::StreamInfo&, streamInfo, (), (const)); MOCK_METHOD(void, setDelayedCloseTimeout, (std::chrono::milliseconds)); MOCK_METHOD(absl::string_view, transportFailureReason, (), (const)); + MOCK_METHOD(absl::optional, lastRoundTripTime, (), (const)); // Network::ClientConnection MOCK_METHOD(void, connect, ()); @@ -183,6 +185,7 @@ 
class MockFilterManagerConnection : public FilterManagerConnection, public MockC MOCK_METHOD(const StreamInfo::StreamInfo&, streamInfo, (), (const)); MOCK_METHOD(void, setDelayedCloseTimeout, (std::chrono::milliseconds)); MOCK_METHOD(absl::string_view, transportFailureReason, (), (const)); + MOCK_METHOD(absl::optional, lastRoundTripTime, (), (const)); // Network::FilterManagerConnection MOCK_METHOD(StreamBuffer, getReadBuffer, ()); diff --git a/test/mocks/network/io_handle.h b/test/mocks/network/io_handle.h index 59011ab2366f..df94028210eb 100644 --- a/test/mocks/network/io_handle.h +++ b/test/mocks/network/io_handle.h @@ -24,8 +24,10 @@ class MockIoHandle : public IoHandle { MOCK_METHOD(bool, isOpen, (), (const)); MOCK_METHOD(Api::IoCallUint64Result, readv, (uint64_t max_length, Buffer::RawSlice* slices, uint64_t num_slice)); + MOCK_METHOD(Api::IoCallUint64Result, read, (Buffer::Instance & buffer, uint64_t max_length)); MOCK_METHOD(Api::IoCallUint64Result, writev, (const Buffer::RawSlice* slices, uint64_t num_slice)); + MOCK_METHOD(Api::IoCallUint64Result, write, (Buffer::Instance & buffer)); MOCK_METHOD(Api::IoCallUint64Result, sendmsg, (const Buffer::RawSlice* slices, uint64_t num_slice, int flags, const Address::Ip* self_ip, const Address::Instance& peer_address)); @@ -53,6 +55,7 @@ class MockIoHandle : public IoHandle { (Event::Dispatcher & dispatcher, Event::FileReadyCb cb, Event::FileTriggerType trigger, uint32_t events)); MOCK_METHOD(Api::SysCallIntResult, shutdown, (int how)); + MOCK_METHOD(absl::optional, lastRoundTripTime, ()); }; } // namespace Network diff --git a/test/mocks/network/mocks.cc b/test/mocks/network/mocks.cc index 3f203fe7e343..db6e933ec19c 100644 --- a/test/mocks/network/mocks.cc +++ b/test/mocks/network/mocks.cc @@ -7,6 +7,7 @@ #include "common/network/address_impl.h" #include "common/network/io_socket_handle_impl.h" +#include "common/network/udp_listener_impl.h" #include "common/network/utility.h" #include "test/test_common/printers.h" @@ -25,7 +26,8 @@ namespace Envoy { namespace Network { MockListenerConfig::MockListenerConfig() - : socket_(std::make_shared>()) { + : socket_(std::make_shared>()), + udp_listener_worker_router_(std::make_unique(1)) { ON_CALL(*this, filterChainFactory()).WillByDefault(ReturnRef(filter_chain_factory_)); ON_CALL(*this, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); ON_CALL(socket_factory_, localAddress()).WillByDefault(ReturnRef(socket_->localAddress())); @@ -34,6 +36,9 @@ MockListenerConfig::MockListenerConfig() .WillByDefault(Return(std::reference_wrapper(*socket_))); ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); + ON_CALL(*this, udpListenerWorkerRouter()).WillByDefault(Invoke([this]() { + return UdpListenerWorkerRouterOptRef(*udp_listener_worker_router_); + })); } MockListenerConfig::~MockListenerConfig() = default; @@ -168,6 +173,8 @@ MockConnectionHandler::~MockConnectionHandler() = default; MockIp::MockIp() = default; MockIp::~MockIp() = default; +MockResolvedAddress::MockResolvedAddress(const std::string& logical, const std::string& physical) + : logical_(logical), physical_(physical) {} MockResolvedAddress::~MockResolvedAddress() = default; MockTransportSocketCallbacks::MockTransportSocketCallbacks() { @@ -175,6 +182,9 @@ MockTransportSocketCallbacks::MockTransportSocketCallbacks() { } MockTransportSocketCallbacks::~MockTransportSocketCallbacks() = default; +MockUdpPacketWriter::MockUdpPacketWriter() = default; 
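(Illustrative aside, not part of this patch: the new Buffer-based write() mock on MockIoHandle above usually needs a default action in tests that push data through real codec or transport paths. A hedged sketch of one way to make it behave like a fully successful syscall; the error-free IoCallUint64Result construction mirrors the pattern used elsewhere in this diff:)

    NiceMock<Network::MockIoHandle> io_handle;
    ON_CALL(io_handle, write(_)).WillByDefault(Invoke([](Buffer::Instance& buffer) {
      // Pretend the kernel accepted everything: drain the buffer and report the
      // drained byte count with a no-op error deleter.
      const uint64_t len = buffer.length();
      buffer.drain(len);
      return Api::IoCallUint64Result(len, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}));
    }));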
+MockUdpPacketWriter::~MockUdpPacketWriter() = default; + MockUdpListener::MockUdpListener() { ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); } diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index e061c118e6dc..de7b843a72d0 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -131,7 +131,7 @@ class MockTcpListenerCallbacks : public TcpListenerCallbacks { void onAccept(ConnectionSocketPtr&& socket) override { onAccept_(socket); } MOCK_METHOD(void, onAccept_, (ConnectionSocketPtr & socket)); - MOCK_METHOD(void, onReject, ()); + MOCK_METHOD(void, onReject, (RejectCause), (override)); }; class MockUdpListenerCallbacks : public UdpListenerCallbacks { @@ -139,11 +139,14 @@ class MockUdpListenerCallbacks : public UdpListenerCallbacks { MockUdpListenerCallbacks(); ~MockUdpListenerCallbacks() override; - MOCK_METHOD(void, onData, (UdpRecvData & data)); + MOCK_METHOD(void, onData, (UdpRecvData && data)); MOCK_METHOD(void, onReadReady, ()); MOCK_METHOD(void, onWriteReady, (const Socket& socket)); MOCK_METHOD(void, onReceiveError, (Api::IoError::IoErrorCode err)); MOCK_METHOD(Network::UdpPacketWriter&, udpPacketWriter, ()); + MOCK_METHOD(uint32_t, workerIndex, (), (const)); + MOCK_METHOD(void, onDataWorker, (Network::UdpRecvData && data)); + MOCK_METHOD(void, post, (Network::UdpRecvData && data)); }; class MockDrainDecision : public DrainDecision { @@ -298,6 +301,7 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t)); MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*), (const)); MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); + MOCK_METHOD(absl::optional, lastRoundTripTime, ()); IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; @@ -356,6 +360,7 @@ class MockListenerConfig : public ListenerConfig { MOCK_METHOD(const std::string&, name, (), (const)); MOCK_METHOD(Network::ActiveUdpListenerFactory*, udpListenerFactory, ()); MOCK_METHOD(Network::UdpPacketWriterFactoryOptRef, udpPacketWriterFactory, ()); + MOCK_METHOD(Network::UdpListenerWorkerRouterOptRef, udpListenerWorkerRouter, ()); MOCK_METHOD(ConnectionBalancer&, connectionBalancer, ()); MOCK_METHOD(ResourceLimit&, openConnections, ()); MOCK_METHOD(uint32_t, tcpBacklogSize, (), (const)); @@ -372,6 +377,7 @@ class MockListenerConfig : public ListenerConfig { testing::NiceMock filter_chain_factory_; MockListenSocketFactory socket_factory_; SocketSharedPtr socket_; + UdpListenerWorkerRouterPtr udp_listener_worker_router_; Stats::IsolatedStoreImpl scope_; std::string name_; const std::vector empty_access_logs_; @@ -385,6 +391,7 @@ class MockListener : public Listener { MOCK_METHOD(void, onDestroy, ()); MOCK_METHOD(void, enable, ()); MOCK_METHOD(void, disable, ()); + MOCK_METHOD(void, setRejectFraction, (float)); }; class MockConnectionHandler : public ConnectionHandler { @@ -398,6 +405,7 @@ class MockConnectionHandler : public ConnectionHandler { MOCK_METHOD(void, addListener, (absl::optional overridden_listener, ListenerConfig& config)); MOCK_METHOD(void, removeListeners, (uint64_t listener_tag)); + MOCK_METHOD(UdpListenerCallbacksOptRef, getUdpListenerCallbacks, (uint64_t listener_tag)); MOCK_METHOD(void, removeFilterChains, (uint64_t listener_tag, const std::list& filter_chains, std::function completion)); @@ -405,6 +413,7 @@ class MockConnectionHandler : public ConnectionHandler { MOCK_METHOD(void, stopListeners, ()); 
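(Illustrative aside, not part of this patch: onData() now takes UdpRecvData by rvalue reference, so expectations take ownership of the datagram instead of observing it in place. A small hypothetical sketch:)

    NiceMock<Network::MockUdpListenerCallbacks> callbacks;
    ON_CALL(callbacks, workerIndex()).WillByDefault(Return(0));
    EXPECT_CALL(callbacks, onData(_)).WillOnce(Invoke([](Network::UdpRecvData&& data) {
      // The callback owns the datagram now; inspect it and let it drop.
      EXPECT_EQ("hello", data.buffer_->toString());
    }));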
MOCK_METHOD(void, disableListeners, ()); MOCK_METHOD(void, enableListeners, ()); + MOCK_METHOD(void, setListenerRejectFraction, (float), (override)); MOCK_METHOD(const std::string&, statPrefix, (), (const)); }; @@ -425,8 +434,7 @@ class MockIp : public Address::Ip { class MockResolvedAddress : public Address::Instance { public: - MockResolvedAddress(const std::string& logical, const std::string& physical) - : logical_(logical), physical_(physical) {} + MockResolvedAddress(const std::string& logical, const std::string& physical); ~MockResolvedAddress() override; bool operator==(const Address::Instance& other) const override { @@ -437,6 +445,7 @@ class MockResolvedAddress : public Address::Instance { MOCK_METHOD(Api::SysCallIntResult, connect, (os_fd_t), (const)); MOCK_METHOD(const Address::Ip*, ip, (), (const)); MOCK_METHOD(const Address::Pipe*, pipe, (), (const)); + MOCK_METHOD(Address::EnvoyInternalAddress*, envoyInternalAddress, (), (const)); MOCK_METHOD(IoHandlePtr, socket, (Socket::Type), (const)); MOCK_METHOD(Address::Type, type, (), (const)); MOCK_METHOD(const sockaddr*, sockAddr, (), (const)); @@ -471,7 +480,8 @@ class MockTransportSocketCallbacks : public TransportSocketCallbacks { class MockUdpPacketWriter : public UdpPacketWriter { public: - MockUdpPacketWriter() = default; + MockUdpPacketWriter(); + ~MockUdpPacketWriter() override; MOCK_METHOD(Api::IoCallUint64Result, writePacket, (const Buffer::Instance& buffer, const Address::Ip* local_ip, @@ -493,10 +503,12 @@ class MockUdpListener : public UdpListener { MOCK_METHOD(void, onDestroy, ()); MOCK_METHOD(void, enable, ()); MOCK_METHOD(void, disable, ()); + MOCK_METHOD(void, setRejectFraction, (float), (override)); MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(Address::InstanceConstSharedPtr&, localAddress, (), (const)); MOCK_METHOD(Api::IoCallUint64Result, send, (const UdpSendData&)); MOCK_METHOD(Api::IoCallUint64Result, flush, ()); + MOCK_METHOD(void, activateRead, ()); Event::MockDispatcher dispatcher_; }; diff --git a/test/mocks/router/BUILD b/test/mocks/router/BUILD index 7044a84c1edb..4bb335ea67a8 100644 --- a/test/mocks/router/BUILD +++ b/test/mocks/router/BUILD @@ -26,7 +26,6 @@ envoy_cc_mock( "//include/envoy/stream_info:stream_info_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", - "//source/common/stats:fake_symbol_table_lib", "//test/mocks:common_lib", "//test/mocks/stats:stats_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 9fea76803bb2..b015647a791d 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -29,7 +29,7 @@ #include "envoy/type/v3/percent.pb.h" #include "envoy/upstream/cluster_manager.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/global.h" @@ -365,6 +365,9 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(const std::vector&, shadowPolicies, (), (const)); MOCK_METHOD(std::chrono::milliseconds, timeout, (), (const)); MOCK_METHOD(absl::optional, idleTimeout, (), (const)); + MOCK_METHOD(absl::optional, maxStreamDuration, (), (const)); + MOCK_METHOD(absl::optional, grpcTimeoutHeaderMax, (), (const)); + MOCK_METHOD(absl::optional, grpcTimeoutHeaderOffset, (), (const)); MOCK_METHOD(absl::optional, maxGrpcTimeout, (), (const)); MOCK_METHOD(absl::optional, grpcTimeoutOffset, (), (const)); MOCK_METHOD(const 
VirtualCluster*, virtualCluster, (const Http::HeaderMap& headers), (const)); diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 53bea8ce81ad..15c35439aa7e 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -32,7 +32,6 @@ class MockSnapshot : public Snapshot { } } - MOCK_METHOD(void, countDeprecatedFeatureUse, (), (const)); MOCK_METHOD(bool, deprecatedFeatureEnabled, (absl::string_view key, bool default_enabled), (const)); MOCK_METHOD(bool, runtimeFeatureEnabled, (absl::string_view key), (const)); @@ -68,6 +67,7 @@ class MockLoader : public Loader { MOCK_METHOD(void, mergeValues, ((const absl::node_hash_map&))); MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback)); MOCK_METHOD(Stats::Scope&, getRootScope, ()); + MOCK_METHOD(void, countDeprecatedFeatureUse, (), (const)); testing::NiceMock snapshot_; testing::NiceMock store_; diff --git a/test/mocks/server/factory_context.cc b/test/mocks/server/factory_context.cc index 3dcca0e28cf7..b02076911f4f 100644 --- a/test/mocks/server/factory_context.cc +++ b/test/mocks/server/factory_context.cc @@ -26,7 +26,6 @@ MockFactoryContext::MockFactoryContext() ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h index ecc3423f072e..d8be7913563a 100644 --- a/test/mocks/server/factory_context.h +++ b/test/mocks/server/factory_context.h @@ -29,7 +29,6 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(bool, healthCheckFailed, ()); MOCK_METHOD(Init::Manager&, initManager, ()); MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); - MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); @@ -58,7 +57,6 @@ class MockFactoryContext : public virtual FactoryContext { testing::NiceMock init_manager_; testing::NiceMock lifecycle_notifier_; testing::NiceMock local_info_; - testing::NiceMock random_; testing::NiceMock runtime_loader_; testing::NiceMock scope_; testing::NiceMock thread_local_; diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc index 91c102c45267..f89bb4e31d19 100644 --- a/test/mocks/server/instance.cc +++ b/test/mocks/server/instance.cc @@ -33,7 +33,6 @@ MockInstance::MockInstance() ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, hotRestart()).WillByDefault(ReturnRef(hot_restart_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); @@ -60,7 +59,6 @@ MockServerFactoryContext::MockServerFactoryContext() ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); ON_CALL(*this, 
localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index 9b83b0bbf42c..cf9f8bf3e885 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -4,7 +4,7 @@ #include "common/grpc/context_impl.h" #include "common/http/context_impl.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "extensions/transport_sockets/tls/context_manager_impl.h" @@ -65,7 +65,6 @@ class MockInstance : public Instance { MOCK_METHOD(Envoy::MutexTracer*, mutexTracer, ()); MOCK_METHOD(const Options&, options, ()); MOCK_METHOD(OverloadManager&, overloadManager, ()); - MOCK_METHOD(Random::RandomGenerator&, random, ()); MOCK_METHOD(Runtime::Loader&, runtime, ()); MOCK_METHOD(void, shutdown, ()); MOCK_METHOD(bool, isShutdown, ()); @@ -108,7 +107,6 @@ class MockInstance : public Instance { testing::NiceMock access_log_manager_; testing::NiceMock hot_restart_; testing::NiceMock options_; - testing::NiceMock random_; testing::NiceMock lifecycle_notifier_; testing::NiceMock local_info_; testing::NiceMock init_manager_; @@ -134,7 +132,6 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); @@ -155,7 +152,6 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { testing::NiceMock dispatcher_; testing::NiceMock drain_manager_; testing::NiceMock local_info_; - testing::NiceMock random_; testing::NiceMock runtime_loader_; testing::NiceMock scope_; testing::NiceMock thread_local_; diff --git a/test/mocks/server/listener_component_factory.h b/test/mocks/server/listener_component_factory.h index 84d73dd4bfd5..23953b1cec44 100644 --- a/test/mocks/server/listener_component_factory.h +++ b/test/mocks/server/listener_component_factory.h @@ -19,11 +19,14 @@ class MockListenerComponentFactory : public ListenerComponentFactory { createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override { return DrainManagerPtr{createDrainManager_(drain_type)}; } - LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { - return LdsApiPtr{createLdsApi_(lds_config)}; + LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator* lds_resources_locator) override { + return LdsApiPtr{createLdsApi_(lds_config, lds_resources_locator)}; } - MOCK_METHOD(LdsApi*, createLdsApi_, (const envoy::config::core::v3::ConfigSource& lds_config)); + MOCK_METHOD(LdsApi*, createLdsApi_, + (const envoy::config::core::v3::ConfigSource&, + const udpa::core::v1::ResourceLocator*)); MOCK_METHOD(std::vector, createNetworkFilterFactoryList, (const Protobuf::RepeatedPtrField& filters, Configuration::FilterChainFactoryContext& filter_chain_factory_context)); diff --git a/test/mocks/server/listener_manager.h 
b/test/mocks/server/listener_manager.h index a08a60cf8f8e..2f2baf88f71d 100644 --- a/test/mocks/server/listener_manager.h +++ b/test/mocks/server/listener_manager.h @@ -14,7 +14,9 @@ class MockListenerManager : public ListenerManager { MOCK_METHOD(bool, addOrUpdateListener, (const envoy::config::listener::v3::Listener& config, const std::string& version_info, bool modifiable)); - MOCK_METHOD(void, createLdsApi, (const envoy::config::core::v3::ConfigSource& lds_config)); + MOCK_METHOD(void, createLdsApi, + (const envoy::config::core::v3::ConfigSource& lds_config, + const udpa::core::v1::ResourceLocator*)); MOCK_METHOD(std::vector>, listeners, (ListenerState state)); MOCK_METHOD(uint64_t, numConnections, (), (const)); diff --git a/test/mocks/server/main.h b/test/mocks/server/main.h index 6fa9825856c6..59f6259250d7 100644 --- a/test/mocks/server/main.h +++ b/test/mocks/server/main.h @@ -21,7 +21,8 @@ class MockMain : public Main { MOCK_METHOD(Upstream::ClusterManager*, clusterManager, ()); MOCK_METHOD(std::list&, statsSinks, ()); MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); - MOCK_METHOD(const Watchdog&, watchdogConfig, (), (const)); + MOCK_METHOD(const Watchdog&, mainThreadWatchdogConfig, (), (const)); + MOCK_METHOD(const Watchdog&, workerWatchdogConfig, (), (const)); }; } // namespace Configuration } // namespace Server diff --git a/test/mocks/server/options.cc b/test/mocks/server/options.cc index c407ab6ef907..354628ee65c7 100644 --- a/test/mocks/server/options.cc +++ b/test/mocks/server/options.cc @@ -43,6 +43,8 @@ MockOptions::MockOptions(const std::string& config_path) : config_path_(config_p ON_CALL(*this, toCommandLineOptions()).WillByDefault(Invoke([] { return std::make_unique(); })); + ON_CALL(*this, socketPath()).WillByDefault(ReturnRef(socket_path_)); + ON_CALL(*this, socketMode()).WillByDefault(ReturnPointee(&socket_mode_)); } MockOptions::~MockOptions() = default; diff --git a/test/mocks/server/options.h b/test/mocks/server/options.h index b4591ccbe828..51eeadf3d3b2 100644 --- a/test/mocks/server/options.h +++ b/test/mocks/server/options.h @@ -51,6 +51,8 @@ class MockOptions : public Options { MOCK_METHOD(bool, cpusetThreadsEnabled, (), (const)); MOCK_METHOD(const std::vector&, disabledExtensions, (), (const)); MOCK_METHOD(Server::CommandLineOptionsPtr, toCommandLineOptions, (), (const)); + MOCK_METHOD(const std::string&, socketPath, (), (const)); + MOCK_METHOD(mode_t, socketMode, (), (const)); std::string config_path_; envoy::config::bootstrap::v3::Bootstrap config_proto_; @@ -72,6 +74,8 @@ class MockOptions : public Options { bool mutex_tracing_enabled_{}; bool cpuset_threads_enabled_{}; std::vector disabled_extensions_; + std::string socket_path_; + mode_t socket_mode_; }; } // namespace Server } // namespace Envoy diff --git a/test/mocks/server/worker_factory.h b/test/mocks/server/worker_factory.h index 3c05ed76566c..ca3ee983a3d4 100644 --- a/test/mocks/server/worker_factory.h +++ b/test/mocks/server/worker_factory.h @@ -13,7 +13,7 @@ class MockWorkerFactory : public WorkerFactory { ~MockWorkerFactory() override; // Server::WorkerFactory - WorkerPtr createWorker(OverloadManager&, const std::string&) override { + WorkerPtr createWorker(uint32_t, OverloadManager&, const std::string&) override { return WorkerPtr{createWorker_()}; } diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index ea2a0edd6000..6a5cbe8df649 100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h @@ -29,6 +29,7 @@ class MockContextManager : public 
ContextManager { (Stats::Scope & stats, const ServerContextConfig& config, const std::vector& server_names)); MOCK_METHOD(size_t, daysUntilFirstCertExpires, (), (const)); + MOCK_METHOD(absl::optional, secondsUntilFirstOcspResponseExpires, (), (const)); MOCK_METHOD(void, iterateContexts, (std::function callback)); MOCK_METHOD(Ssl::PrivateKeyMethodManager&, privateKeyMethodManager, ()); }; @@ -66,6 +67,7 @@ class MockClientContext : public ClientContext { ~MockClientContext() override; MOCK_METHOD(size_t, daysUntilFirstCertExpires, (), (const)); + MOCK_METHOD(absl::optional, secondsUntilFirstOcspResponseExpires, (), (const)); MOCK_METHOD(CertificateDetailsPtr, getCaCertInformation, (), (const)); MOCK_METHOD(std::vector, getCertChainInformation, (), (const)); }; @@ -116,6 +118,7 @@ class MockServerContextConfig : public ServerContextConfig { MOCK_METHOD(Ssl::HandshakerCapabilities, capabilities, (), (const, override)); MOCK_METHOD(bool, requireClientCertificate, (), (const)); + MOCK_METHOD(OcspStaplePolicy, ocspStaplePolicy, (), (const)); MOCK_METHOD(const std::vector&, sessionTicketKeys, (), (const)); MOCK_METHOD(bool, disableStatelessSessionResumption, (), (const)); }; @@ -129,6 +132,8 @@ class MockTlsCertificateConfig : public TlsCertificateConfig { MOCK_METHOD(const std::string&, certificateChainPath, (), (const)); MOCK_METHOD(const std::string&, privateKey, (), (const)); MOCK_METHOD(const std::string&, privateKeyPath, (), (const)); + MOCK_METHOD(const std::vector&, ocspStaple, (), (const)); + MOCK_METHOD(const std::string&, ocspStaplePath, (), (const)); MOCK_METHOD(const std::string&, password, (), (const)); MOCK_METHOD(const std::string&, passwordPath, (), (const)); MOCK_METHOD(Envoy::Ssl::PrivateKeyMethodProviderSharedPtr, privateKeyMethod, (), (const)); diff --git a/test/mocks/stats/BUILD b/test/mocks/stats/BUILD index 6d4ddd19a050..55b3c0a19474 100644 --- a/test/mocks/stats/BUILD +++ b/test/mocks/stats/BUILD @@ -17,12 +17,10 @@ envoy_cc_mock( "//include/envoy/stats:timespan_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", - "//source/common/stats:fake_symbol_table_lib", "//source/common/stats:histogram_lib", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", "//source/common/stats:store_impl_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:timespan_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks:common_lib", diff --git a/test/mocks/stats/mocks.cc b/test/mocks/stats/mocks.cc index adeed55eeb5e..cd5e01fb5b0c 100644 --- a/test/mocks/stats/mocks.cc +++ b/test/mocks/stats/mocks.cc @@ -2,7 +2,7 @@ #include -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 272603041d98..cc43bd084e10 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -14,11 +14,10 @@ #include "envoy/thread_local/thread_local.h" #include "envoy/upstream/cluster_manager.h" -#include "common/stats/fake_symbol_table_impl.h" #include "common/stats/histogram_impl.h" #include "common/stats/isolated_store_impl.h" #include "common/stats/store_impl.h" -#include "common/stats/symbol_table_creator.h" +#include "common/stats/symbol_table_impl.h" #include "common/stats/timespan_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -31,12 +30,11 @@ namespace Stats { class TestSymbolTableHelper { public: 
- TestSymbolTableHelper() : symbol_table_(SymbolTableCreator::makeSymbolTable()) {} - SymbolTable& symbolTable() { return *symbol_table_; } - const SymbolTable& constSymbolTable() const { return *symbol_table_; } + SymbolTable& symbolTable() { return symbol_table_; } + const SymbolTable& constSymbolTable() const { return symbol_table_; } private: - SymbolTablePtr symbol_table_; + SymbolTableImpl symbol_table_; }; class TestSymbolTable { @@ -264,9 +262,9 @@ class MockTextReadout : public MockMetric { MockTextReadout(); ~MockTextReadout() override; - MOCK_METHOD1(set, void(absl::string_view value)); - MOCK_CONST_METHOD0(used, bool()); - MOCK_CONST_METHOD0(value, std::string()); + MOCK_METHOD(void, set, (absl::string_view value), (override)); + MOCK_METHOD(bool, used, (), (const, override)); + MOCK_METHOD(std::string, value, (), (const, override)); bool used_; std::string value_; diff --git a/test/mocks/stream_info/mocks.cc b/test/mocks/stream_info/mocks.cc index 79cb4f41763f..8373bdeb3603 100644 --- a/test/mocks/stream_info/mocks.cc +++ b/test/mocks/stream_info/mocks.cc @@ -27,6 +27,9 @@ MockStreamInfo::MockStreamInfo() ON_CALL(*this, setResponseCodeDetails(_)).WillByDefault(Invoke([this](absl::string_view details) { response_code_details_ = std::string(details); })); + ON_CALL(*this, setConnectionTerminationDetails(_)) + .WillByDefault( + Invoke([this](absl::string_view details) { connection_termination_details_ = details; })); ON_CALL(*this, startTime()).WillByDefault(ReturnPointee(&start_time_)); ON_CALL(*this, startTimeMonotonic()).WillByDefault(ReturnPointee(&start_time_monotonic_)); ON_CALL(*this, lastDownstreamRxByteReceived()) @@ -89,6 +92,8 @@ MockStreamInfo::MockStreamInfo() ON_CALL(*this, protocol()).WillByDefault(ReturnPointee(&protocol_)); ON_CALL(*this, responseCode()).WillByDefault(ReturnPointee(&response_code_)); ON_CALL(*this, responseCodeDetails()).WillByDefault(ReturnPointee(&response_code_details_)); + ON_CALL(*this, connectionTerminationDetails()) + .WillByDefault(ReturnPointee(&connection_termination_details_)); ON_CALL(*this, addBytesReceived(_)).WillByDefault(Invoke([this](uint64_t bytes_received) { bytes_received_ += bytes_received; })); @@ -129,6 +134,10 @@ MockStreamInfo::MockStreamInfo() ON_CALL(*this, getRouteName()).WillByDefault(ReturnRef(route_name_)); ON_CALL(*this, upstreamTransportFailureReason()) .WillByDefault(ReturnRef(upstream_transport_failure_reason_)); + ON_CALL(*this, connectionID()).WillByDefault(Return(connection_id_)); + ON_CALL(*this, setConnectionID(_)).WillByDefault(Invoke([this](uint64_t id) { + connection_id_ = id; + })); } MockStreamInfo::~MockStreamInfo() = default; diff --git a/test/mocks/stream_info/mocks.h b/test/mocks/stream_info/mocks.h index 2c5b09562e96..b02b849c2310 100644 --- a/test/mocks/stream_info/mocks.h +++ b/test/mocks/stream_info/mocks.h @@ -22,6 +22,7 @@ class MockStreamInfo : public StreamInfo { // StreamInfo::StreamInfo MOCK_METHOD(void, setResponseFlag, (ResponseFlag response_flag)); MOCK_METHOD(void, setResponseCodeDetails, (absl::string_view)); + MOCK_METHOD(void, setConnectionTerminationDetails, (absl::string_view)); MOCK_METHOD(bool, intersectResponseFlags, (uint64_t), (const)); MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); MOCK_METHOD(SystemTime, startTime, (), (const)); @@ -51,6 +52,7 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(void, protocol, (Http::Protocol protocol)); MOCK_METHOD(absl::optional, responseCode, (), (const)); MOCK_METHOD(const 
absl::optional&, responseCodeDetails, (), (const)); + MOCK_METHOD(const absl::optional&, connectionTerminationDetails, (), (const)); MOCK_METHOD(void, addBytesSent, (uint64_t)); MOCK_METHOD(uint64_t, bytesSent, (), (const)); MOCK_METHOD(bool, hasResponseFlag, (ResponseFlag), (const)); @@ -95,6 +97,8 @@ class MockStreamInfo : public StreamInfo { (const)); MOCK_METHOD(Http::RequestIDExtensionSharedPtr, getRequestIDExtension, (), (const)); MOCK_METHOD(void, setRequestIDExtension, (Http::RequestIDExtensionSharedPtr)); + MOCK_METHOD(absl::optional, connectionID, (), (const)); + MOCK_METHOD(void, setConnectionID, (uint64_t)); std::shared_ptr> host_{ new testing::NiceMock()}; @@ -105,6 +109,7 @@ class MockStreamInfo : public StreamInfo { absl::optional first_upstream_tx_byte_sent_; absl::optional last_upstream_tx_byte_sent_; absl::optional first_upstream_rx_byte_received_; + absl::optional connection_id_; absl::optional last_upstream_rx_byte_received_; absl::optional first_downstream_tx_byte_sent_; absl::optional last_downstream_tx_byte_sent_; @@ -112,6 +117,7 @@ class MockStreamInfo : public StreamInfo { absl::optional protocol_; absl::optional response_code_; absl::optional response_code_details_; + absl::optional connection_termination_details_; uint64_t response_flags_{}; envoy::config::core::v3::Metadata metadata_; FilterStateSharedPtr upstream_filter_state_; diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index 9e4182423ed6..c03cb1368192 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -55,6 +55,7 @@ class MockInstance : public Instance { MOCK_METHOD(void, drainConnections, ()); MOCK_METHOD(void, closeConnections, ()); MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks)); + MOCK_METHOD(bool, maybePrefetch, (float), ()); MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); Envoy::ConnectionPool::MockCancellable* newConnectionImpl(Callbacks& cb); diff --git a/test/mocks/thread_local/mocks.h b/test/mocks/thread_local/mocks.h index 9bbd26a64465..dc6518c5068a 100644 --- a/test/mocks/thread_local/mocks.h +++ b/test/mocks/thread_local/mocks.h @@ -60,10 +60,6 @@ class MockInstance : public Instance { // ThreadLocal::Slot ThreadLocalObjectSharedPtr get() override { return parent_.data_[index_]; } bool currentThreadRegistered() override { return parent_.registered_; } - void runOnAllThreads(Event::PostCb cb) override { parent_.runOnAllThreads(cb); } - void runOnAllThreads(Event::PostCb cb, Event::PostCb main_callback) override { - parent_.runOnAllThreads(cb, main_callback); - } void runOnAllThreads(const UpdateCb& cb) override { parent_.runOnAllThreads([cb, this]() { parent_.data_[index_] = cb(parent_.data_[index_]); }); } diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index 63d31ee92665..87afe77c3014 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -53,7 +53,7 @@ MockClusterInfo::MockClusterInfo() circuit_breakers_stats_, absl::nullopt, absl::nullopt)) { ON_CALL(*this, connectTimeout()).WillByDefault(Return(std::chrono::milliseconds(1))); ON_CALL(*this, idleTimeout()).WillByDefault(Return(absl::optional())); - ON_CALL(*this, prefetchRatio()).WillByDefault(Return(1.0)); + ON_CALL(*this, perUpstreamPrefetchRatio()).WillByDefault(Return(1.0)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); ON_CALL(*this, edsServiceName()).WillByDefault(ReturnPointee(&eds_service_name_)); ON_CALL(*this, 
http1Settings()).WillByDefault(ReturnRef(http1_settings_)); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 80f17582914d..7e1427b2de03 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -89,7 +89,12 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(bool, addedViaApi, (), (const)); MOCK_METHOD(std::chrono::milliseconds, connectTimeout, (), (const)); MOCK_METHOD(const absl::optional, idleTimeout, (), (const)); - MOCK_METHOD(float, prefetchRatio, (), (const)); + MOCK_METHOD(const absl::optional, maxStreamDuration, (), (const)); + MOCK_METHOD(const absl::optional, grpcTimeoutHeaderMax, (), (const)); + MOCK_METHOD(const absl::optional, grpcTimeoutHeaderOffset, (), + (const)); + MOCK_METHOD(float, perUpstreamPrefetchRatio, (), (const)); + MOCK_METHOD(float, peekaheadRatio, (), (const)); MOCK_METHOD(uint32_t, perConnectionBufferLimitBytes, (), (const)); MOCK_METHOD(uint64_t, features, (), (const)); MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index 3c927b0208aa..95183622dbb7 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -9,7 +9,7 @@ #include "envoy/data/cluster/v2alpha/outlier_detection_event.pb.h" #include "envoy/upstream/upstream.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "test/mocks/network/transport_socket.h" #include "test/mocks/upstream/cluster_info.h" diff --git a/test/mocks/upstream/load_balancer.h b/test/mocks/upstream/load_balancer.h index 364b6a7eb1d3..356782e914bb 100644 --- a/test/mocks/upstream/load_balancer.h +++ b/test/mocks/upstream/load_balancer.h @@ -16,6 +16,7 @@ class MockLoadBalancer : public LoadBalancer { // Upstream::LoadBalancer MOCK_METHOD(HostConstSharedPtr, chooseHost, (LoadBalancerContext * context)); + MOCK_METHOD(HostConstSharedPtr, peekAnotherHost, (LoadBalancerContext * context)); std::shared_ptr host_{new MockHost()}; }; diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 140ab7714a32..2ba764721609 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -3,7 +3,7 @@ # directory:coverage_percent # for existing directories with low coverage. 
declare -a KNOWN_LOW_COVERAGE=( -"source/common/network:94.0" +"source/common/network:95.6" "source/common/http/http3:50.0" "source/common/tracing:94.9" "source/common/protobuf:94.3" @@ -14,12 +14,14 @@ declare -a KNOWN_LOW_COVERAGE=( "source/common/init:96.2" "source/common/json:90.6" "source/common/filesystem:96.1" -"source/common/filesystem/posix:93.7" +"source/common/filesystem/posix:94.5" +"source/common/thread:0.0" # Death tests don't report LCOV "source/common/thread_local:95.7" "source/common/crypto:0.0" "source/common/common:96.1" "source/common/common/posix:94.1" -"source/common/signal:85.1" +"source/common/signal:90.4" +"source/common/watchdog:42.9" # Death tests don't report LCOV "source/exe:93.7" "source/extensions:96.3" "source/extensions/common:94.4" @@ -35,9 +37,9 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/network/mongo_proxy:94.0" "source/extensions/filters/network/common:96.1" "source/extensions/filters/network/common/redis:96.2" -"source/extensions/filters/network/http_connection_manager:95.4" -"source/extensions/filters/http/cache:80.7" -"source/extensions/filters/http/cache/simple_http_cache:84.5" +"source/extensions/filters/network/http_connection_manager:95.2" +"source/extensions/filters/http/cache:92.4" +"source/extensions/filters/http/cache/simple_http_cache:95.2" "source/extensions/filters/http/dynamic_forward_proxy:94.9" "source/extensions/filters/http/ip_tagging:91.2" "source/extensions/filters/http/grpc_json_transcoder:93.3" @@ -46,12 +48,11 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/listener/tls_inspector:92.4" "source/extensions/filters/listener/http_inspector:93.3" "source/extensions/filters/udp:91.1" -"source/extensions/filters/udp/dns_filter:89.2" +"source/extensions/filters/udp/dns_filter:96.9" "source/extensions/filters/common:94.7" "source/extensions/filters/common/expr:92.2" "source/extensions/filters/common/rbac:87.1" "source/extensions/filters/common/fault:94.3" -"source/extensions/filters/common/lua:95.9" "source/extensions/grpc_credentials:92.0" "source/extensions/health_checkers:95.9" "source/extensions/health_checkers/redis:95.9" @@ -61,16 +62,16 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/tracers:96.0" "source/extensions/tracers/opencensus:91.2" "source/extensions/tracers/xray:94.0" -"source/extensions/transport_sockets:94.9" +"source/extensions/transport_sockets:95.3" "source/extensions/transport_sockets/tap:95.6" "source/extensions/transport_sockets/tls:94.2" "source/extensions/transport_sockets/tls/ocsp:95.3" "source/extensions/transport_sockets/tls/private_key:76.9" -"source/extensions/watchdog:84.9" +"source/extensions/watchdog:69.6" # Death tests within extensions "source/extensions/watchdog/profile_action:84.9" -"source/server:94.7" -"source/server/config_validation:76.8" -"source/server/admin:95.5" +"source/server:94.6" +"source/server/config_validation:76.6" +"source/server/admin:95.3" ) [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" @@ -85,10 +86,10 @@ DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD # Set their low bar as their current coverage level. 
get_coverage_target() { DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD - for FILE_PERCENT in ${KNOWN_LOW_COVERAGE[@]} + for FILE_PERCENT in "${KNOWN_LOW_COVERAGE[@]}" do - if [[ $FILE_PERCENT =~ "$1:" ]]; then - DIRECTORY_THRESHOLD=$(echo $FILE_PERCENT | sed 's/.*://') + if [[ $FILE_PERCENT =~ $1: ]]; then + DIRECTORY_THRESHOLD="${FILE_PERCENT//*:/}" return fi done @@ -96,10 +97,11 @@ get_coverage_target() { # Make sure that for each directory with code, coverage doesn't dip # below the default coverage threshold. -for DIRECTORY in $(find source/* -type d) +SOURCES=$(find source/* -type d) +while read -r DIRECTORY do - get_coverage_target $DIRECTORY - COVERAGE_VALUE=$(lcov -e $COVERAGE_DATA "$DIRECTORY/*" -o /dev/null | grep line | cut -d ' ' -f 4) + get_coverage_target "$DIRECTORY" + COVERAGE_VALUE=$(lcov -e "$COVERAGE_DATA" "${DIRECTORY}/*" -o /dev/null | grep line | cut -d ' ' -f 4) COVERAGE_VALUE=${COVERAGE_VALUE%?} # If the coverage number is 'n' (no data found) there is 0% coverage. This is # probably a directory without source code, so we skip checks. @@ -112,10 +114,10 @@ do continue; fi; COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${DIRECTORY_THRESHOLD}" | bc) - if test ${COVERAGE_FAILED} -eq 1; then - echo Code coverage for ${DIRECTORY} is lower than limit of ${DIRECTORY_THRESHOLD} \(${COVERAGE_VALUE}\) + if [[ "${COVERAGE_FAILED}" -eq 1 ]]; then + echo "Code coverage for ${DIRECTORY} is lower than limit of ${DIRECTORY_THRESHOLD} (${COVERAGE_VALUE})" FAILED=1 fi -done +done <<< "$SOURCES" exit $FAILED diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index dbcfd46aedd2..48d8b47a5734 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -5,6 +5,9 @@ set -e [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" [[ -z "${VALIDATE_COVERAGE}" ]] && VALIDATE_COVERAGE=true [[ -z "${FUZZ_COVERAGE}" ]] && FUZZ_COVERAGE=false +[[ -z "${COVERAGE_THRESHOLD}" ]] && COVERAGE_THRESHOLD=96.5 +COVERAGE_TARGET="${COVERAGE_TARGET:-}" +read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTIONS:-}" echo "Starting run_envoy_bazel_coverage.sh..." echo " PWD=$(pwd)" @@ -15,22 +18,29 @@ echo " VALIDATE_COVERAGE=${VALIDATE_COVERAGE}" # projects that want to run coverage on a different/combined target. # Command-line arguments take precedence over ${COVERAGE_TARGET}. if [[ $# -gt 0 ]]; then - COVERAGE_TARGETS=$* + COVERAGE_TARGETS=("$@") elif [[ -n "${COVERAGE_TARGET}" ]]; then - COVERAGE_TARGETS=${COVERAGE_TARGET} + COVERAGE_TARGETS=("${COVERAGE_TARGET}") else - COVERAGE_TARGETS=//test/... + COVERAGE_TARGETS=(//test/...) fi if [[ "${FUZZ_COVERAGE}" == "true" ]]; then # Filter targets to just fuzz tests. 
- COVERAGE_TARGETS=$(bazel query "attr("tags", "fuzz_target", ${COVERAGE_TARGETS})") - BAZEL_BUILD_OPTIONS+=" --config=fuzz-coverage --test_tag_filters=-nocoverage" + _targets=$(bazel query "attr('tags', 'fuzz_target', ${COVERAGE_TARGETS[*]})") + COVERAGE_TARGETS=() + while read -r line; do COVERAGE_TARGETS+=("$line"); done \ + <<< "$_targets" + BAZEL_BUILD_OPTIONS+=( + "--config=fuzz-coverage" + "--test_tag_filters=-nocoverage") else - BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage,-fuzz_target" + BAZEL_BUILD_OPTIONS+=( + "--config=test-coverage" + "--test_tag_filters=-nocoverage,-fuzz_target") fi -bazel coverage ${BAZEL_BUILD_OPTIONS} ${COVERAGE_TARGETS} +bazel coverage "${BAZEL_BUILD_OPTIONS[@]}" "${COVERAGE_TARGETS[@]}" # Collecting profile and testlogs [[ -z "${ENVOY_BUILD_PROFILE}" ]] || cp -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/coverage.profile.gz" || true @@ -44,28 +54,26 @@ mkdir -p "${COVERAGE_DIR}" COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" -COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4) +COVERAGE_VALUE="$(genhtml --prefix "${PWD}" --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4)" COVERAGE_VALUE=${COVERAGE_VALUE%?} if [ "${FUZZ_COVERAGE}" == "true" ] then - [[ -z "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./fuzz_coverage/' . + [[ -z "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" -C "${COVERAGE_DIR}" --transform 's/^\./fuzz_coverage/' . else - [[ -z "${ENVOY_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./coverage/' . + [[ -z "${ENVOY_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_COVERAGE_ARTIFACT}" -C "${COVERAGE_DIR}" --transform 's/^\./coverage/' . fi if [[ "$VALIDATE_COVERAGE" == "true" ]]; then if [[ "${FUZZ_COVERAGE}" == "true" ]]; then COVERAGE_THRESHOLD=27.0 - else - COVERAGE_THRESHOLD=96.5 fi COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) - if test ${COVERAGE_FAILED} -eq 1; then - echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD} + if [[ "${COVERAGE_FAILED}" -eq 1 ]]; then + echo "Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD}" exit 1 else - echo Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD} + echo "Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD}" fi fi @@ -77,7 +85,7 @@ if [[ "$VALIDATE_COVERAGE" == "true" ]] && [[ "${FUZZ_COVERAGE}" == "false" ]]; if [ $? -eq 1 ]; then echo Per-extension coverage failed: - echo $output + echo "$output" exit 1 fi echo Per-extension coverage passed. 
diff --git a/test/server/BUILD b/test/server/BUILD index fbb057d1fa71..929a3ddd2f3d 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -271,7 +271,8 @@ envoy_cc_test( srcs = ["listener_manager_impl_quic_only_test.cc"], tags = [ "nofips", - # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows + # Skipping as quiche quic_gso_batch_writer.h does not exist on Windows + # required by quic_stream_send_buffer.cc "skip_on_windows", ], deps = [ @@ -357,6 +358,8 @@ envoy_cc_test( ":server_test_data", ":static_validation_test_data", ], + # TODO(envoyproxy/windows-dev): diagnose clang-cl build test timeout + tags = ["fails_on_windows"], deps = [ "//source/common/version:version_lib", "//source/extensions/access_loggers/file:config", @@ -379,6 +382,7 @@ envoy_cc_test( "//test/mocks/server:options_mocks", "//test/mocks/server:overload_manager_mocks", "//test/mocks/stats:stats_mocks", + "//test/test_common:logging_lib", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:test_time_lib", diff --git a/test/server/admin/BUILD b/test/server/admin/BUILD index e395ccb176c8..1952d5f6007f 100644 --- a/test/server/admin/BUILD +++ b/test/server/admin/BUILD @@ -33,7 +33,6 @@ envoy_cc_test( "//source/common/json:json_loader_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", - "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:thread_local_store_lib", "//source/server/admin:admin_lib", "//test/mocks/runtime:runtime_mocks", @@ -107,6 +106,7 @@ envoy_cc_test( ":admin_instance_lib", "//source/extensions/transport_sockets/tls:context_config_lib", "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", ], ) diff --git a/test/server/admin/clusters_handler_test.cc b/test/server/admin/clusters_handler_test.cc index 3cfa123e86fe..f0ba3f5ae7e5 100644 --- a/test/server/admin/clusters_handler_test.cc +++ b/test/server/admin/clusters_handler_test.cc @@ -113,6 +113,23 @@ TEST_P(AdminInstanceTest, ClustersJson) { "value": 9 }, "added_via_api": true, + "circuit_breakers": { + "thresholds": [ + { + "max_connections": 1, + "max_pending_requests": 1024, + "max_requests": 1024, + "max_retries": 1 + }, + { + "priority": "HIGH", + "max_connections": 1, + "max_pending_requests": 1024, + "max_requests": 1024, + "max_retries": 1 + } + ] + }, "host_statuses": [ { "address": { @@ -147,7 +164,7 @@ TEST_P(AdminInstanceTest, ClustersJson) { "name": "test_gauge", "value": "11", "type": "GAUGE" - }, + } ], "health_status": { "eds_health_status": "DEGRADED", diff --git a/test/server/admin/server_info_handler_test.cc b/test/server/admin/server_info_handler_test.cc index d9ef53339f95..4a5cd67adb70 100644 --- a/test/server/admin/server_info_handler_test.cc +++ b/test/server/admin/server_info_handler_test.cc @@ -4,6 +4,7 @@ #include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" +#include "test/test_common/test_runtime.h" using testing::Ge; using testing::HasSubstr; @@ -96,11 +97,13 @@ TEST_P(AdminInstanceTest, GetReadyRequest) { } TEST_P(AdminInstanceTest, GetRequest) { - EXPECT_CALL(server_.options_, toCommandLineOptions()).WillRepeatedly(Invoke([] { + NiceMock local_info; + EXPECT_CALL(server_, localInfo()).WillRepeatedly(ReturnRef(local_info)); + EXPECT_CALL(server_.options_, toCommandLineOptions()).WillRepeatedly(Invoke([&local_info] { Server::CommandLineOptionsPtr command_line_options = std::make_unique(); 
command_line_options->set_restart_epoch(2); - command_line_options->set_service_cluster("cluster"); + command_line_options->set_service_cluster(local_info.clusterName()); return command_line_options; })); NiceMock initManager; @@ -122,7 +125,13 @@ TEST_P(AdminInstanceTest, GetRequest) { EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::LIVE); EXPECT_EQ(server_info_proto.hot_restart_version(), "foo_version"); EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), local_info.clusterName()); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), + server_info_proto.node().cluster()); + EXPECT_EQ(server_info_proto.command_line_options().service_node(), ""); + EXPECT_EQ(server_info_proto.command_line_options().service_zone(), ""); + EXPECT_EQ(server_info_proto.node().id(), local_info.nodeName()); + EXPECT_EQ(server_info_proto.node().locality().zone(), local_info.zoneName()); } { @@ -139,7 +148,13 @@ TEST_P(AdminInstanceTest, GetRequest) { TestUtility::loadFromJson(body, server_info_proto); EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::PRE_INITIALIZING); EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), local_info.clusterName()); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), + server_info_proto.node().cluster()); + EXPECT_EQ(server_info_proto.command_line_options().service_node(), ""); + EXPECT_EQ(server_info_proto.command_line_options().service_zone(), ""); + EXPECT_EQ(server_info_proto.node().id(), local_info.nodeName()); + EXPECT_EQ(server_info_proto.node().locality().zone(), local_info.zoneName()); } Http::TestResponseHeaderMapImpl response_headers; @@ -155,10 +170,18 @@ TEST_P(AdminInstanceTest, GetRequest) { TestUtility::loadFromJson(body, server_info_proto); EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::INITIALIZING); EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), local_info.clusterName()); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), + server_info_proto.node().cluster()); + EXPECT_EQ(server_info_proto.command_line_options().service_node(), ""); + EXPECT_EQ(server_info_proto.command_line_options().service_zone(), ""); + EXPECT_EQ(server_info_proto.node().id(), local_info.nodeName()); + EXPECT_EQ(server_info_proto.node().locality().zone(), local_info.zoneName()); } TEST_P(AdminInstanceTest, PostRequest) { + // Load TestScopedRuntime to suppress warnings related to runtime features. 
+ TestScopedRuntime scoped_runtime; Http::TestResponseHeaderMapImpl response_headers; std::string body; EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK, diff --git a/test/server/admin/stats_handler_test.cc b/test/server/admin/stats_handler_test.cc index 623438013b97..1059d642edbd 100644 --- a/test/server/admin/stats_handler_test.cc +++ b/test/server/admin/stats_handler_test.cc @@ -19,8 +19,7 @@ namespace Server { class AdminStatsTest : public testing::TestWithParam { public: - AdminStatsTest() - : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_) { + AdminStatsTest() : alloc_(symbol_table_) { store_ = std::make_unique(alloc_); store_->addSink(sink_); } @@ -34,7 +33,7 @@ class AdminStatsTest : public testing::TestWithParam main_thread_dispatcher_; NiceMock tls_; Stats::AllocatorImpl alloc_; diff --git a/test/server/config_validation/cluster_manager_test.cc b/test/server/config_validation/cluster_manager_test.cc index 36f3a0443ebb..1318d192f14f 100644 --- a/test/server/config_validation/cluster_manager_test.cc +++ b/test/server/config_validation/cluster_manager_test.cc @@ -48,8 +48,8 @@ TEST(ValidationClusterManagerTest, MockedMethods) { Singleton::ManagerImpl singleton_manager{Thread::threadFactoryForTest()}; ValidationClusterManagerFactory factory( - admin, runtime, stats_store, tls, random, dns_resolver, ssl_context_manager, dispatcher, - local_info, secret_manager, validation_context, *api, http_context, grpc_context, log_manager, + admin, runtime, stats_store, tls, dns_resolver, ssl_context_manager, dispatcher, local_info, + secret_manager, validation_context, *api, http_context, grpc_context, log_manager, singleton_manager, time_system); const envoy::config::bootstrap::v3::Bootstrap bootstrap; diff --git a/test/server/config_validation/config_fuzz_test.cc b/test/server/config_validation/config_fuzz_test.cc index 107bb2eeb415..a94dbb01aeb3 100644 --- a/test/server/config_validation/config_fuzz_test.cc +++ b/test/server/config_validation/config_fuzz_test.cc @@ -19,13 +19,24 @@ namespace { // Derived from //test/server:server_fuzz_test.cc, but starts the server in configuration validation // mode (quits upon validation of the given config) DEFINE_PROTO_FUZZER(const envoy::config::bootstrap::v3::Bootstrap& input) { + envoy::config::bootstrap::v3::Bootstrap sanitizedInput(input); + // TODO(asraa): QUIC is not enabled in production code yet, so remove references for HTTP3. + // Tracked at https://github.com/envoyproxy/envoy/issues/9513. 
+ for (auto& cluster : *sanitizedInput.mutable_static_resources()->mutable_clusters()) { + for (auto& health_check : *cluster.mutable_health_checks()) { + if (health_check.http_health_check().codec_client_type() == + envoy::type::v3::CodecClientType::HTTP3) { + health_check.mutable_http_health_check()->clear_codec_client_type(); + } + } + } testing::NiceMock options; TestComponentFactory component_factory; Fuzz::PerTestEnvironment test_env; const std::string bootstrap_path = test_env.temporaryPath("bootstrap.pb_text"); std::ofstream bootstrap_file(bootstrap_path); - bootstrap_file << input.DebugString(); + bootstrap_file << sanitizedInput.DebugString(); options.config_path_ = bootstrap_path; options.log_level_ = Fuzz::Runner::logLevel(); diff --git a/test/server/config_validation/dispatcher_test.cc b/test/server/config_validation/dispatcher_test.cc index f0367a149ee1..a4eb71df8d8a 100644 --- a/test/server/config_validation/dispatcher_test.cc +++ b/test/server/config_validation/dispatcher_test.cc @@ -9,6 +9,7 @@ #include "server/config_validation/api.h" +#include "test/mocks/common.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/test_time.h" @@ -22,15 +23,16 @@ namespace Envoy { class ConfigValidation : public testing::TestWithParam { public: ConfigValidation() { - validation_ = std::make_unique(Thread::threadFactoryForTest(), - stats_store_, test_time_.timeSystem(), - Filesystem::fileSystemForTest()); + validation_ = std::make_unique( + Thread::threadFactoryForTest(), stats_store_, test_time_.timeSystem(), + Filesystem::fileSystemForTest(), random_generator_); dispatcher_ = validation_->allocateDispatcher("test_thread"); } DangerousDeprecatedTestTime test_time_; Event::DispatcherPtr dispatcher_; Stats::IsolatedStoreImpl stats_store_; + testing::NiceMock random_generator_; private: // Using config validation API. diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index c1e6de23ec48..c833d71f61a6 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -134,7 +134,7 @@ TEST_P(ValidationServerTest, NoopLifecycleNotifier) { // as-is. (Note, /dev/stdout as an access log file is invalid on Windows, no equivalent /dev/ // exists.) 
-auto testing_values = ::testing::Values("front-proxy_front-envoy.yaml", "google_com_proxy.v2.yaml", +auto testing_values = ::testing::Values("front-proxy_front-envoy.yaml", "google_com_proxy.yaml", #ifndef WIN32 "grpc-bridge_server_envoy-proxy.yaml", #endif diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index 12f2315052a5..ea6da93626b9 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -60,10 +60,10 @@ class ConfigurationImplTest : public testing::Test { : api_(Api::createApiForTest()), cluster_manager_factory_( server_.admin(), server_.runtime(), server_.stats(), server_.threadLocal(), - server_.random(), server_.dnsResolver(), server_.sslContextManager(), - server_.dispatcher(), server_.localInfo(), server_.secretManager(), - server_.messageValidationContext(), *api_, server_.httpContext(), server_.grpcContext(), - server_.accessLogManager(), server_.singletonManager()) {} + server_.dnsResolver(), server_.sslContextManager(), server_.dispatcher(), + server_.localInfo(), server_.secretManager(), server_.messageValidationContext(), *api_, + server_.httpContext(), server_.grpcContext(), server_.accessLogManager(), + server_.singletonManager()) {} void addStatsdFakeClusterConfig(envoy::config::metrics::v3::StatsSink& sink) { envoy::config::metrics::v3::StatsdSink statsd_sink; @@ -754,7 +754,8 @@ TEST_F(ConfigurationImplTest, KillTimeoutWithoutSkew) { MainImpl config; config.initialize(bootstrap, server_, cluster_manager_factory_); - EXPECT_EQ(std::chrono::milliseconds(1000), config.watchdogConfig().killTimeout()); + EXPECT_EQ(config.workerWatchdogConfig().killTimeout(), std::chrono::milliseconds(1000)); + EXPECT_EQ(config.mainThreadWatchdogConfig().killTimeout(), std::chrono::milliseconds(1000)); } TEST_F(ConfigurationImplTest, CanSkewsKillTimeout) { @@ -772,8 +773,10 @@ TEST_F(ConfigurationImplTest, CanSkewsKillTimeout) { MainImpl config; config.initialize(bootstrap, server_, cluster_manager_factory_); - EXPECT_LT(std::chrono::milliseconds(1000), config.watchdogConfig().killTimeout()); - EXPECT_GE(std::chrono::milliseconds(1500), config.watchdogConfig().killTimeout()); + EXPECT_LT(std::chrono::milliseconds(1000), config.mainThreadWatchdogConfig().killTimeout()); + EXPECT_LT(std::chrono::milliseconds(1000), config.workerWatchdogConfig().killTimeout()); + EXPECT_GE(std::chrono::milliseconds(1500), config.mainThreadWatchdogConfig().killTimeout()); + EXPECT_GE(std::chrono::milliseconds(1500), config.workerWatchdogConfig().killTimeout()); } TEST_F(ConfigurationImplTest, DoesNotSkewIfKillTimeoutDisabled) { @@ -790,9 +793,41 @@ TEST_F(ConfigurationImplTest, DoesNotSkewIfKillTimeoutDisabled) { MainImpl config; config.initialize(bootstrap, server_, cluster_manager_factory_); - EXPECT_EQ(std::chrono::milliseconds(0), config.watchdogConfig().killTimeout()); + EXPECT_EQ(config.mainThreadWatchdogConfig().killTimeout(), std::chrono::milliseconds(0)); + EXPECT_EQ(config.workerWatchdogConfig().killTimeout(), std::chrono::milliseconds(0)); } +TEST_F(ConfigurationImplTest, ShouldErrorIfBothWatchdogsAndWatchdogSet) { + const std::string json = R"EOF( { "watchdogs": {}, "watchdog": {}})EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + + EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_), + EnvoyException, "Only one of watchdog or watchdogs should be set!"); +} + +TEST_F(ConfigurationImplTest, 
CanSetMultiWatchdogConfigs) { + const std::string json = R"EOF( { "watchdogs": { + "main_thread_watchdog" : { + miss_timeout : "2s" + }, + "worker_watchdog" : { + miss_timeout : "0.5s" + } + }})EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + config.initialize(bootstrap, server_, cluster_manager_factory_); + + EXPECT_EQ(config.mainThreadWatchdogConfig().missTimeout(), std::chrono::milliseconds(2000)); + EXPECT_EQ(config.workerWatchdogConfig().missTimeout(), std::chrono::milliseconds(500)); +} } // namespace } // namespace Configuration } // namespace Server diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 56357156bb8b..dba36b1216dc 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -12,6 +12,7 @@ #include "common/network/io_socket_handle_impl.h" #include "common/network/raw_buffer_socket.h" #include "common/network/udp_default_writer_config.h" +#include "common/network/udp_listener_impl.h" #include "common/network/utility.h" #include "server/connection_handler_impl.h" @@ -43,7 +44,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable()), - handler_(new ConnectionHandlerImpl(dispatcher_)), + handler_(new ConnectionHandlerImpl(dispatcher_, 0)), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), listener_filter_matcher_(std::make_shared>()), access_log_(std::make_shared()) { @@ -60,14 +61,17 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable access_log, std::shared_ptr> filter_chain_manager = nullptr, - uint32_t tcp_backlog_size = ENVOY_TCP_BACKLOG_SIZE) + uint32_t tcp_backlog_size = ENVOY_TCP_BACKLOG_SIZE, + Network::ConnectionBalancerSharedPtr connection_balancer = nullptr) : parent_(parent), socket_(std::make_shared>()), socket_factory_(std::move(socket_factory)), tag_(tag), bind_to_port_(bind_to_port), tcp_backlog_size_(tcp_backlog_size), hand_off_restored_destination_connections_(hand_off_restored_destination_connections), name_(name), listener_filters_timeout_(listener_filters_timeout), continue_on_listener_filters_timeout_(continue_on_listener_filters_timeout), - connection_balancer_(std::make_unique()), + connection_balancer_(connection_balancer == nullptr + ? 
std::make_shared() + : connection_balancer), access_logs_({access_log}), inline_filter_chain_manager_(filter_chain_manager), init_manager_(nullptr) { envoy::config::listener::v3::UdpListenerConfig dummy; @@ -107,6 +111,9 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable udp_listener_factory_; std::unique_ptr udp_writer_factory_; - Network::ConnectionBalancerPtr connection_balancer_; + Network::UdpListenerWorkerRouterPtr udp_listener_worker_router_; + Network::ConnectionBalancerSharedPtr connection_balancer_; BasicResourceLimitImpl open_connections_; const std::vector access_logs_; std::shared_ptr> inline_filter_chain_manager_; @@ -168,10 +176,12 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable connection_balancer = nullptr, Network::BalancedConnectionHandler** balanced_connection_handler = nullptr, Network::Socket::Type socket_type = Network::Socket::Type::Stream, std::chrono::milliseconds listener_filters_timeout = std::chrono::milliseconds(15000), @@ -193,8 +203,9 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable( *this, tag, bind_to_port, hand_off_restored_destination_connections, name, socket_type, listener_filters_timeout, continue_on_listener_filters_timeout, socket_factory_, - access_log_, overridden_filter_chain_manager, tcp_backlog_size)); + access_log_, overridden_filter_chain_manager, tcp_backlog_size, connection_balancer)); EXPECT_CALL(*socket_factory_, socketType()).WillOnce(Return(socket_type)); + if (listener == nullptr) { // Expecting listener config in place update. // If so, dispatcher would not create new network listener. @@ -217,14 +228,15 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable Network::UdpListener* { return dynamic_cast(listener); })); + listeners_.back()->udp_listener_worker_router_ = + std::make_unique(1); } - if (connection_balancer != nullptr) { - listeners_.back()->connection_balancer_.reset(connection_balancer); - ASSERT(balanced_connection_handler != nullptr); + if (balanced_connection_handler != nullptr) { EXPECT_CALL(*connection_balancer, registerHandler(_)) .WillOnce(SaveArgAddress(balanced_connection_handler)); } + return listeners_.back().get(); } @@ -257,7 +269,7 @@ TEST_F(ConnectionHandlerTest, RemoveListenerDuringRebalance) { Network::TcpListenerCallbacks* listener_callbacks; auto listener = new NiceMock(); - Network::MockConnectionBalancer* connection_balancer = new Network::MockConnectionBalancer(); + auto connection_balancer = std::make_shared(); Network::BalancedConnectionHandler* current_handler; TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, @@ -440,6 +452,37 @@ TEST_F(ConnectionHandlerTest, AddDisabledListener) { handler_->addListener(absl::nullopt, *test_listener); } +TEST_F(ConnectionHandlerTest, SetListenerRejectFraction) { + InSequence s; + + Network::TcpListenerCallbacks* listener_callbacks; + auto listener = new NiceMock(); + TestListener* test_listener = + addListener(1, false, false, "test_listener", listener, &listener_callbacks); + EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + handler_->addListener(absl::nullopt, *test_listener); + + EXPECT_CALL(*listener, setRejectFraction(0.1234f)); + EXPECT_CALL(*listener, onDestroy()); + + handler_->setListenerRejectFraction(0.1234f); +} + +TEST_F(ConnectionHandlerTest, AddListenerSetRejectFraction) { + InSequence s; + + Network::TcpListenerCallbacks* 
listener_callbacks; + auto listener = new NiceMock(); + TestListener* test_listener = + addListener(1, false, false, "test_listener", listener, &listener_callbacks); + EXPECT_CALL(*listener, setRejectFraction(0.12345f)); + EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(*listener, onDestroy()); + + handler_->setListenerRejectFraction(0.12345f); + handler_->addListener(absl::nullopt, *test_listener); +} + TEST_F(ConnectionHandlerTest, DestroyCloseConnections) { InSequence s; @@ -561,7 +604,12 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) { EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, "test.downstream_cx_total")->value()); EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, "test.downstream_cx_active")->value()); - EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1); + EXPECT_CALL(*access_log_, log(_, _, _, _)) + .WillOnce( + Invoke([&](const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*, + const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& stream_info) { + EXPECT_EQ(alt_address, stream_info.downstreamLocalAddress()); + })); connection->close(Network::ConnectionCloseType::NoFlush); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); @@ -988,9 +1036,14 @@ TEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) { uint64_t old_listener_tag = 1; uint64_t new_listener_tag = 2; Network::TcpListenerCallbacks* old_listener_callbacks; + Network::BalancedConnectionHandler* current_handler; + auto old_listener = new NiceMock(); - TestListener* old_test_listener = addListener(old_listener_tag, true, false, "test_listener", - old_listener, &old_listener_callbacks); + auto mock_connection_balancer = std::make_shared(); + + TestListener* old_test_listener = + addListener(old_listener_tag, true, false, "test_listener", old_listener, + &old_listener_callbacks, mock_connection_balancer, ¤t_handler); EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *old_test_listener); ASSERT_NE(old_test_listener, nullptr); @@ -999,19 +1052,25 @@ TEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) { auto overridden_filter_chain_manager = std::make_shared>(); - TestListener* new_test_listener = - addListener(new_listener_tag, true, false, "test_listener", /* Network::Listener */ nullptr, - &new_listener_callbacks, nullptr, nullptr, Network::Socket::Type::Stream, - std::chrono::milliseconds(15000), false, overridden_filter_chain_manager); + TestListener* new_test_listener = addListener( + new_listener_tag, true, false, "test_listener", /* Network::Listener */ nullptr, + &new_listener_callbacks, mock_connection_balancer, nullptr, Network::Socket::Type::Stream, + std::chrono::milliseconds(15000), false, overridden_filter_chain_manager); handler_->addListener(old_listener_tag, *new_test_listener); ASSERT_EQ(new_listener_callbacks, nullptr) << "new listener should be inplace added and callback should not change"; Network::MockConnectionSocket* connection = new NiceMock(); + current_handler->incNumConnections(); + + EXPECT_CALL(*mock_connection_balancer, pickTargetHandler(_)) + .WillOnce(ReturnRef(*current_handler)); EXPECT_CALL(manager_, findFilterChain(_)).Times(0); EXPECT_CALL(*overridden_filter_chain_manager, findFilterChain(_)).WillOnce(Return(nullptr)); EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1); + EXPECT_CALL(*mock_connection_balancer, unregisterHandler(_)); 
old_listener_callbacks->onAccept(Network::ConnectionSocketPtr{connection}); + EXPECT_EQ(0UL, handler_->numConnections()); EXPECT_CALL(*old_listener, onDestroy()); } @@ -1058,6 +1117,36 @@ TEST_F(ConnectionHandlerTest, TcpListenerRemoveFilterChain) { handler_.reset(); } +TEST_F(ConnectionHandlerTest, TcpListenerGlobalCxLimitReject) { + Network::TcpListenerCallbacks* listener_callbacks; + auto listener = new NiceMock(); + TestListener* test_listener = + addListener(1, true, false, "test_listener", listener, &listener_callbacks); + EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + handler_->addListener(absl::nullopt, *test_listener); + + listener_callbacks->onReject(Network::TcpListenerCallbacks::RejectCause::GlobalCxLimit); + + EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, "downstream_global_cx_overflow")->value()); + EXPECT_EQ(0UL, TestUtility::findCounter(stats_store_, "downstream_cx_overload_reject")->value()); + EXPECT_CALL(*listener, onDestroy()); +} + +TEST_F(ConnectionHandlerTest, TcpListenerOverloadActionReject) { + Network::TcpListenerCallbacks* listener_callbacks; + auto listener = new NiceMock(); + TestListener* test_listener = + addListener(1, true, false, "test_listener", listener, &listener_callbacks); + EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + handler_->addListener(absl::nullopt, *test_listener); + + listener_callbacks->onReject(Network::TcpListenerCallbacks::RejectCause::OverloadAction); + + EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, "downstream_cx_overload_reject")->value()); + EXPECT_EQ(0UL, TestUtility::findCounter(stats_store_, "downstream_global_cx_overflow")->value()); + EXPECT_CALL(*listener, onDestroy()); +} + // Listener Filter matchers works. TEST_F(ConnectionHandlerTest, ListenerFilterWorks) { Network::TcpListenerCallbacks* listener_callbacks; diff --git a/test/server/drain_manager_impl_test.cc b/test/server/drain_manager_impl_test.cc index 160080b34b1b..f5a5687af366 100644 --- a/test/server/drain_manager_impl_test.cc +++ b/test/server/drain_manager_impl_test.cc @@ -80,13 +80,13 @@ TEST_P(DrainManagerImplTest, DrainDeadline) { // Ensure drainClose() behaviour is determined by the deadline. 
drain_manager.startDrainSequence([] {}); EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false)); - ON_CALL(server_.random_, random()).WillByDefault(Return(DrainTimeSeconds * 2 - 1)); + ON_CALL(server_.api_.random_, random()).WillByDefault(Return(DrainTimeSeconds * 2 - 1)); ON_CALL(server_.options_, drainTime()) .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds))); if (drain_gradually) { // random() should be called when elapsed time < drain timeout - EXPECT_CALL(server_.random_, random()).Times(2); + EXPECT_CALL(server_.api_.random_, random()).Times(2); EXPECT_FALSE(drain_manager.drainClose()); simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); EXPECT_FALSE(drain_manager.drainClose()); @@ -99,7 +99,7 @@ TEST_P(DrainManagerImplTest, DrainDeadline) { simTime().advanceTimeWait(std::chrono::seconds(500)); EXPECT_TRUE(drain_manager.drainClose()); } else { - EXPECT_CALL(server_.random_, random()).Times(0); + EXPECT_CALL(server_.api_.random_, random()).Times(0); EXPECT_TRUE(drain_manager.drainClose()); simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); EXPECT_TRUE(drain_manager.drainClose()); @@ -117,7 +117,7 @@ TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { ON_CALL(server_.options_, drainStrategy()) .WillByDefault(Return(drain_gradually ? Server::DrainStrategy::Gradual : Server::DrainStrategy::Immediate)); - ON_CALL(server_.random_, random()).WillByDefault(Return(4)); + ON_CALL(server_.api_.random_, random()).WillByDefault(Return(4)); ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(3))); DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); @@ -133,7 +133,7 @@ TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { if (drain_gradually) { // random() should be called when elapsed time < drain timeout - EXPECT_CALL(server_.random_, random()).Times(2); + EXPECT_CALL(server_.api_.random_, random()).Times(2); // Current elapsed time is 0 // drainClose() will return true when elapsed time > (4 % 3 == 1). 
EXPECT_FALSE(drain_manager.drainClose()); @@ -142,7 +142,7 @@ TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { simTime().advanceTimeWait(std::chrono::seconds(1)); EXPECT_TRUE(drain_manager.drainClose()); } else { - EXPECT_CALL(server_.random_, random()).Times(0); + EXPECT_CALL(server_.api_.random_, random()).Times(0); EXPECT_TRUE(drain_manager.drainClose()); simTime().advanceTimeWait(std::chrono::seconds(2)); EXPECT_TRUE(drain_manager.drainClose()); diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index a04047346b30..8fc4e49bc620 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -116,6 +116,7 @@ class MockConnectionSocket : public Network::ConnectionSocket { return {0, 0}; } Api::SysCallIntResult setBlockingForTest(bool) override { return {0, 0}; } + absl::optional lastRoundTripTime() override { return {}; } private: Network::IoHandlePtr io_handle_; @@ -131,14 +132,14 @@ const char YamlHeader[] = R"EOF( socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty transport_socket: - name: tls + name: "envoy.transport_sockets.tls" typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" common_tls_context: tls_certificates: - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" } @@ -151,9 +152,9 @@ const char YamlSingleServer[] = R"EOF( server_names: "server1.example.com" transport_protocol: "tls" transport_socket: - name: tls + name: "envoy.transport_sockets.tls" typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" common_tls_context: tls_certificates: - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" } @@ -166,9 +167,9 @@ const char YamlSingleDstPortTop[] = R"EOF( destination_port: )EOF"; const char YamlSingleDstPortBottom[] = R"EOF( transport_socket: - name: tls + name: "envoy.transport_sockets.tls" typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" common_tls_context: tls_certificates: - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem" } @@ -218,7 +219,8 @@ BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) FilterChainManagerImpl filter_chain_manager{ std::make_shared("127.0.0.1", 1234), factory_context, init_manager_}; - filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager); + filter_chain_manager.addFilterChains(filter_chains_, nullptr, dummy_builder_, + filter_chain_manager); } } @@ -241,8 +243,10 @@ BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainFindTest) std::make_shared("127.0.0.1", 1234), factory_context, init_manager_}; - filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager); + filter_chain_manager.addFilterChains(filter_chains_, nullptr, dummy_builder_, + filter_chain_manager); for (auto _ : state) { + UNREFERENCED_PARAMETER(_); for (int i = 
0; i < state.range(0); i++) { filter_chain_manager.findFilterChain(sockets[i]); } diff --git a/test/server/filter_chain_manager_impl_test.cc b/test/server/filter_chain_manager_impl_test.cc index 4b78f2a70d88..92fdec6d8997 100644 --- a/test/server/filter_chain_manager_impl_test.cc +++ b/test/server/filter_chain_manager_impl_test.cc @@ -99,10 +99,12 @@ class FilterChainManagerImplTest : public testing::Test { return filter_chain_manager_.findFilterChain(*mock_socket); } - void addSingleFilterChainHelper(const envoy::config::listener::v3::FilterChain& filter_chain) { - filter_chain_manager_.addFilterChain( + void addSingleFilterChainHelper( + const envoy::config::listener::v3::FilterChain& filter_chain, + const envoy::config::listener::v3::FilterChain* fallback_filter_chain = nullptr) { + filter_chain_manager_.addFilterChains( std::vector{&filter_chain}, - filter_chain_factory_builder_, filter_chain_manager_); + fallback_filter_chain, filter_chain_factory_builder_, filter_chain_manager_); } // Intermediate states. @@ -128,6 +130,12 @@ class FilterChainManagerImplTest : public testing::Test { )EOF"; Init::ManagerImpl init_manager_{"for_filter_chain_manager_test"}; envoy::config::listener::v3::FilterChain filter_chain_template_; + std::shared_ptr build_out_filter_chain_{ + std::make_shared()}; + envoy::config::listener::v3::FilterChain fallback_filter_chain_; + std::shared_ptr build_out_fallback_filter_chain_{ + std::make_shared()}; + NiceMock filter_chain_factory_builder_; NiceMock parent_context_; // Test target. @@ -147,21 +155,37 @@ TEST_F(FilterChainManagerImplTest, AddSingleFilterChain) { EXPECT_NE(filter_chain, nullptr); } +TEST_F(FilterChainManagerImplTest, FilterChainUseFallbackIfNoFilterChainMatches) { + // The build helper will build matchable filter chain and then build the default filter chain. + EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)) + .WillOnce(Return(build_out_fallback_filter_chain_)); + EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)) + .WillOnce(Return(std::make_shared())) + .RetiresOnSaturation(); + addSingleFilterChainHelper(filter_chain_template_, &fallback_filter_chain_); + + auto filter_chain = findFilterChainHelper(10000, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); + EXPECT_NE(filter_chain, nullptr); + auto fallback_filter_chain = + findFilterChainHelper(9999, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); + EXPECT_EQ(fallback_filter_chain, build_out_fallback_filter_chain_.get()); +} + TEST_F(FilterChainManagerImplTest, LookupFilterChainContextByFilterChainMessage) { std::vector filter_chain_messages; for (int i = 0; i < 2; i++) { envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_; new_filter_chain.set_name(absl::StrCat("filter_chain_", i)); - // For sanity check + // For sanity check. 
new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i); filter_chain_messages.push_back(std::move(new_filter_chain)); } EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2); - filter_chain_manager_.addFilterChain( + filter_chain_manager_.addFilterChains( std::vector{&filter_chain_messages[0], &filter_chain_messages[1]}, - filter_chain_factory_builder_, filter_chain_manager_); + nullptr, filter_chain_factory_builder_, filter_chain_manager_); } TEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) { @@ -176,9 +200,9 @@ TEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) { } EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(1); - filter_chain_manager_.addFilterChain( + filter_chain_manager_.addFilterChains( std::vector{&filter_chain_messages[0]}, - filter_chain_factory_builder_, filter_chain_manager_); + nullptr, filter_chain_factory_builder_, filter_chain_manager_); FilterChainManagerImpl new_filter_chain_manager{ std::make_shared("127.0.0.1", 1234), parent_context_, @@ -186,10 +210,10 @@ TEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) { // The new filter chain manager maintains 3 filter chains, but only 2 filter chain context is // built because it reuse the filter chain context in the previous filter chain manager EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2); - new_filter_chain_manager.addFilterChain( + new_filter_chain_manager.addFilterChains( std::vector{ &filter_chain_messages[0], &filter_chain_messages[1], &filter_chain_messages[2]}, - filter_chain_factory_builder_, new_filter_chain_manager); + nullptr, filter_chain_factory_builder_, new_filter_chain_manager); } TEST_F(FilterChainManagerImplTest, CreatedFilterChainFactoryContextHasIndependentDrainClose) { diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index cd284c895ad9..1709c0972c89 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -84,7 +84,7 @@ class GuardDogTestBase : public testing::TestWithParam { } void initGuardDog(Stats::Scope& stats_scope, const Server::Configuration::Watchdog& config) { - guard_dog_ = std::make_unique(stats_scope, config, *api_, + guard_dog_ = std::make_unique(stats_scope, config, *api_, "server", std::make_unique()); } @@ -445,15 +445,15 @@ class RecordGuardDogAction : public Configuration::GuardDogAction { RecordGuardDogAction(std::vector& events) : events_(events) {} void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, - const std::vector>& thread_ltt_pairs, + const std::vector>& thread_last_checkin_pairs, MonotonicTime /*now*/) override { std::string event_string = envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent_Name(event); absl::StrAppend(&event_string, " : "); std::vector output_string_parts; - output_string_parts.reserve(thread_ltt_pairs.size()); + output_string_parts.reserve(thread_last_checkin_pairs.size()); - for (const auto& thread_ltt_pair : thread_ltt_pairs) { + for (const auto& thread_ltt_pair : thread_last_checkin_pairs) { output_string_parts.push_back(thread_ltt_pair.first.debugString()); } @@ -470,9 +470,10 @@ class AssertGuardDogAction : public Configuration::GuardDogAction { public: AssertGuardDogAction() = default; - void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/, - const std::vector>& /*thread_ltt_pairs*/, - MonotonicTime /*now*/) override { + void + 
run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/, + const std::vector>& /*thread_last_checkin_pairs*/, + MonotonicTime /*now*/) override { RELEASE_ASSERT(false, "ASSERT_GUARDDOG_ACTION"); } }; diff --git a/test/server/hot_restart_impl_test.cc b/test/server/hot_restart_impl_test.cc index 1d127c2572c4..14133189d03a 100644 --- a/test/server/hot_restart_impl_test.cc +++ b/test/server/hot_restart_impl_test.cc @@ -43,7 +43,7 @@ class HotRestartImplTest : public testing::Test { EXPECT_CALL(os_sys_calls_, bind(_, _, _)).Times(2); // Test we match the correct stat with empty-slots before, after, or both. - hot_restart_ = std::make_unique(0, 0); + hot_restart_ = std::make_unique(0, 0, "@envoy_domain_socket", 0); hot_restart_->drainParentListeners(); // We close both sockets. @@ -87,7 +87,7 @@ TEST_F(HotRestartImplTest, DomainSocketAlreadyInUse) { .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE})); EXPECT_CALL(os_sys_calls_, close(_)).Times(1); - EXPECT_THROW(std::make_unique(0, 0), + EXPECT_THROW(std::make_unique(0, 0, "@envoy_domain_socket", 0), Server::HotRestartDomainSocketInUseException); } @@ -98,7 +98,7 @@ TEST_F(HotRestartImplTest, DomainSocketError) { .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ACCESS})); EXPECT_CALL(os_sys_calls_, close(_)).Times(1); - EXPECT_THROW(std::make_unique(0, 0), EnvoyException); + EXPECT_THROW(std::make_unique(0, 0, "@envoy_domain_socket", 0), EnvoyException); } } // namespace diff --git a/test/server/hot_restarting_parent_test.cc b/test/server/hot_restarting_parent_test.cc index a3f405d550db..32485e964092 100644 --- a/test/server/hot_restarting_parent_test.cc +++ b/test/server/hot_restarting_parent_test.cc @@ -143,7 +143,7 @@ TEST_F(HotRestartingParentTest, RetainDynamicStats) { Stats::Gauge& g2 = child_store.gaugeFromStatName(dynamic.add("g2"), Stats::Gauge::ImportMode::Accumulate); - HotRestartingChild hot_restarting_child(0, 0); + HotRestartingChild hot_restarting_child(0, 0, "@envoy_domain_socket", 0); hot_restarting_child.mergeParentStats(child_store, stats_proto); EXPECT_EQ(1, c1.value()); EXPECT_EQ(1, c2.value()); diff --git a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index 5b8efae5b756..1cfeea582ca8 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -40,8 +40,8 @@ class LdsApiTest : public testing::Test { void setup() { envoy::config::core::v3::ConfigSource lds_config; EXPECT_CALL(init_manager_, add(_)); - lds_ = std::make_unique(lds_config, cluster_manager_, init_manager_, store_, - listener_manager_, validation_visitor_); + lds_ = std::make_unique(lds_config, nullptr, cluster_manager_, init_manager_, + store_, listener_manager_, validation_visitor_); EXPECT_CALL(*cluster_manager_.subscription_factory_.subscription_, start(_, _)); init_target_handle_->initialize(init_watcher_); lds_callbacks_ = cluster_manager_.subscription_factory_.callbacks_; diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index 3a2283fa4cc7..891949861423 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -62,7 +62,7 @@ reuse_port: true envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml); ON_CALL(udp_gso_syscall_, supportsUdpGso()).WillByDefault(Return(true)); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); 
expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, #ifdef SO_RXQ_OVFL // SO_REUSEPORT is on as configured /* expected_num_options */ diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 1f871adffcd2..bbf8d6b03a1f 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -163,7 +163,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EmptyFilter) { - filters: [] )EOF"; - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -336,7 +336,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { envoy::config::listener::v3::Listener listener_proto; EXPECT_TRUE(Protobuf::TextFormat::ParseFromString(proto_text, &listener_proto)); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(listener_factory_, createListenSocket(_, Network::Socket::Type::Datagram, _, {{true, false}})) @@ -536,6 +536,18 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, StatsScopeTest) { EXPECT_EQ(1UL, server_.stats_store_.counterFromString("listener.127.0.0.1_1234.foo").value()); } +TEST_F(ListenerManagerImplTest, UnsupportedInternalListener) { + const std::string yaml = R"EOF( +address: + envoy_internal_address: + server_listener_name: a_listener_name +filter_chains: +- filters: [] + )EOF"; + + ASSERT_DEATH(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), ""); +} + TEST_F(ListenerManagerImplTest, NotDefaultListenerFiltersTimeout) { const std::string yaml = R"EOF( name: "foo" @@ -751,9 +763,9 @@ TEST_F(ListenerManagerImplTest, ListenerTeardownNotifiesServerInitManager) { InSequence s; auto* lds_api = new MockLdsApi(); - EXPECT_CALL(listener_factory_, createLdsApi_(_)).WillOnce(Return(lds_api)); + EXPECT_CALL(listener_factory_, createLdsApi_(_, _)).WillOnce(Return(lds_api)); envoy::config::core::v3::ConfigSource lds_config; - manager_->createLdsApi(lds_config); + manager_->createLdsApi(lds_config, nullptr); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("")); checkConfigDump(R"EOF( @@ -883,9 +895,9 @@ TEST_F(ListenerManagerImplTest, OverrideListener) { time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); auto* lds_api = new MockLdsApi(); - EXPECT_CALL(listener_factory_, createLdsApi_(_)).WillOnce(Return(lds_api)); + EXPECT_CALL(listener_factory_, createLdsApi_(_, _)).WillOnce(Return(lds_api)); envoy::config::core::v3::ConfigSource lds_config; - manager_->createLdsApi(lds_config); + manager_->createLdsApi(lds_config, nullptr); // Add foo listener. 
const std::string listener_foo_yaml = R"EOF( @@ -957,9 +969,9 @@ TEST_F(ListenerManagerImplTest, AddOrUpdateListener) { InSequence s; auto* lds_api = new MockLdsApi(); - EXPECT_CALL(listener_factory_, createLdsApi_(_)).WillOnce(Return(lds_api)); + EXPECT_CALL(listener_factory_, createLdsApi_(_, _)).WillOnce(Return(lds_api)); envoy::config::core::v3::ConfigSource lds_config; - manager_->createLdsApi(lds_config); + manager_->createLdsApi(lds_config, nullptr); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("")); checkConfigDump(R"EOF( @@ -2132,7 +2144,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2178,7 +2190,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2224,7 +2236,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2271,7 +2283,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2314,7 +2326,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2360,7 +2372,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2421,7 +2433,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2481,7 +2493,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma 
)EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2520,7 +2532,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2587,7 +2599,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2674,7 +2686,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2760,7 +2772,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2855,7 +2867,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2927,7 +2939,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2972,7 +2984,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3022,7 +3034,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); 
manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3097,7 +3109,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3138,7 +3150,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3260,7 +3272,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsFilterChainWithoutTlsInspector )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3293,7 +3305,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3321,7 +3333,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SniFilterChainWithoutTlsInspector )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3349,7 +3361,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AlpnFilterChainWithoutTlsInspecto )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3378,7 +3390,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CustomTransportProtocolWithSniWit )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3417,7 +3429,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInline) { absl::CEscape(ca), R"EOF(" } )EOF"); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3442,7 +3454,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateChainInlinePrivateK )EOF"), Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + 
EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3615,7 +3627,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3692,7 +3704,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) { )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3766,7 +3778,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { )EOF", Network::Address::IpVersion::v6); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3964,7 +3976,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLFilename) { )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3992,7 +4004,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLInline) { )EOF"), Network::Address::IpVersion::v4); - EXPECT_CALL(server_.random_, uuid()); + EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -4519,6 +4531,38 @@ TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentNameNotEquivalent) { EXPECT_FALSE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); } +TEST(ListenerMessageUtilTest, ListenerDefaultFilterChainChangeIsAlwaysFilterChainOnlyChange) { + envoy::config::listener::v3::Listener listener1; + listener1.set_name("common"); + envoy::config::listener::v3::FilterChain default_filter_chain_1; + default_filter_chain_1.set_name("127.0.0.1"); + envoy::config::listener::v3::Listener listener2; + listener2.set_name("common"); + envoy::config::listener::v3::FilterChain default_filter_chain_2; + default_filter_chain_2.set_name("127.0.0.2"); + + { + listener1.clear_default_filter_chain(); + listener2.clear_default_filter_chain(); + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } + { + *listener1.mutable_default_filter_chain() = default_filter_chain_1; + listener2.clear_default_filter_chain(); + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } + { + listener1.clear_default_filter_chain(); + *listener2.mutable_default_filter_chain() = default_filter_chain_2; + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } + { + 
*listener1.mutable_default_filter_chain() = default_filter_chain_1; + *listener2.mutable_default_filter_chain() = default_filter_chain_2; + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } +} + TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentFilterChainsAreEquivalent) { envoy::config::listener::v3::Listener listener1; listener1.set_name("common"); diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 747859f669a1..9b22f3109bf8 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -53,7 +53,7 @@ class ListenerHandle { class ListenerManagerImplTest : public testing::Test { protected: - ListenerManagerImplTest() : api_(Api::createApiForTest()) {} + ListenerManagerImplTest() : api_(Api::createApiForTest(server_.api_.random_)) {} void SetUp() override { ON_CALL(server_, api()).WillByDefault(ReturnRef(*api_)); diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index 04343efc658c..3ab6be0e647c 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -69,6 +69,12 @@ TEST_F(OptionsImplTest, InvalidCommandLine) { "Couldn't find match for argument"); } +TEST_F(OptionsImplTest, InvalidSocketMode) { + EXPECT_THROW_WITH_REGEX( + createOptionsImpl("envoy --socket-path /foo/envoy_domain_socket --socket-mode foo"), + MalformedArgvException, "error: invalid socket-mode 'foo'"); +} + TEST_F(OptionsImplTest, V1Disallowed) { std::unique_ptr options = createOptionsImpl( "envoy --mode validate --concurrency 2 -c hello --admin-address-path path --restart-epoch 1 " @@ -89,7 +95,8 @@ TEST_F(OptionsImplTest, All) { "/foo/bar " "--disable-hot-restart --cpuset-threads --allow-unknown-static-fields " "--reject-unknown-dynamic-fields --use-fake-symbol-table 0 --base-id 5 " - "--use-dynamic-base-id --base-id-path /foo/baz"); + "--use-dynamic-base-id --base-id-path /foo/baz " + "--socket-path /foo/envoy_domain_socket --socket-mode 644"); EXPECT_EQ(Server::Mode::Validate, options->mode()); EXPECT_EQ(2U, options->concurrency()); EXPECT_EQ("hello", options->configPath()); @@ -115,11 +122,20 @@ TEST_F(OptionsImplTest, All) { EXPECT_EQ(5U, options->baseId()); EXPECT_TRUE(options->useDynamicBaseId()); EXPECT_EQ("/foo/baz", options->baseIdPath()); + EXPECT_EQ("/foo/envoy_domain_socket", options->socketPath()); + EXPECT_EQ(0644, options->socketMode()); options = createOptionsImpl("envoy --mode init_only"); EXPECT_EQ(Server::Mode::InitOnly, options->mode()); } +// TODO(#13399): remove this test once we remove the option. +TEST_F(OptionsImplTest, FakeSymtabWarning) { + EXPECT_LOG_CONTAINS("warning", "Fake symbol tables have been removed", + createOptionsImpl("envoy --use-fake-symbol-table 1")); + EXPECT_NO_LOGS(createOptionsImpl("envoy --use-fake-symbol-table 0")); +} + // Either variants of allow-unknown-[static-]-fields works. 
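// A minimal sketch (separate from the patch above) of the behaviour the options
// tests expect: "--socket-mode 644" should surface as 0644 and "--socket-mode foo"
// should be rejected, which falls out naturally from parsing the flag as an octal
// string. The helper name and the use of std::optional are illustrative
// assumptions, not Envoy's actual OptionsImpl code.
#include <cstdlib>
#include <optional>
#include <string>

std::optional<unsigned int> parseOctalSocketMode(const std::string& text) {
  if (text.empty()) {
    return std::nullopt;
  }
  char* end = nullptr;
  const unsigned long mode = std::strtoul(text.c_str(), &end, 8); // base 8: "644" -> 0644
  if (*end != '\0') {
    return std::nullopt; // non-octal input such as "foo" is rejected
  }
  return static_cast<unsigned int>(mode);
}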
TEST_F(OptionsImplTest, AllowUnknownFields) { { @@ -174,6 +190,8 @@ TEST_F(OptionsImplTest, SetAll) { options->setAllowUnkownFields(true); options->setRejectUnknownFieldsDynamic(true); options->setFakeSymbolTableEnabled(!options->fakeSymbolTableEnabled()); + options->setSocketPath("/foo/envoy_domain_socket"); + options->setSocketMode(0644); EXPECT_EQ(109876, options->baseId()); EXPECT_EQ(42U, options->concurrency()); @@ -202,6 +220,8 @@ TEST_F(OptionsImplTest, SetAll) { EXPECT_TRUE(options->allowUnknownStaticFields()); EXPECT_TRUE(options->rejectUnknownDynamicFields()); EXPECT_EQ(!fake_symbol_table_enabled, options->fakeSymbolTableEnabled()); + EXPECT_EQ("/foo/envoy_domain_socket", options->socketPath()); + EXPECT_EQ(0644, options->socketMode()); // Validate that CommandLineOptions is constructed correctly. Server::CommandLineOptionsPtr command_line_options = options->toCommandLineOptions(); @@ -231,6 +251,8 @@ TEST_F(OptionsImplTest, SetAll) { EXPECT_EQ(options->hotRestartDisabled(), command_line_options->disable_hot_restart()); EXPECT_EQ(options->mutexTracingEnabled(), command_line_options->enable_mutex_tracing()); EXPECT_EQ(options->cpusetThreadsEnabled(), command_line_options->cpuset_threads()); + EXPECT_EQ(options->socketPath(), command_line_options->socket_path()); + EXPECT_EQ(options->socketMode(), command_line_options->socket_mode()); } TEST_F(OptionsImplTest, DefaultParams) { @@ -242,6 +264,8 @@ TEST_F(OptionsImplTest, DefaultParams) { EXPECT_EQ(Network::Address::IpVersion::v4, options->localAddressIpVersion()); EXPECT_EQ(Server::Mode::Serve, options->mode()); EXPECT_EQ(spdlog::level::warn, options->logLevel()); + EXPECT_EQ("@envoy_domain_socket", options->socketPath()); + EXPECT_EQ(0, options->socketMode()); EXPECT_FALSE(options->hotRestartDisabled()); EXPECT_FALSE(options->cpusetThreadsEnabled()); @@ -254,6 +278,8 @@ TEST_F(OptionsImplTest, DefaultParams) { EXPECT_EQ(envoy::admin::v3::CommandLineOptions::v4, command_line_options->local_address_ip_version()); EXPECT_EQ(envoy::admin::v3::CommandLineOptions::Serve, command_line_options->mode()); + EXPECT_EQ("@envoy_domain_socket", command_line_options->socket_path()); + EXPECT_EQ(0, command_line_options->socket_mode()); EXPECT_FALSE(command_line_options->disable_hot_restart()); EXPECT_FALSE(command_line_options->cpuset_threads()); EXPECT_FALSE(command_line_options->allow_unknown_static_fields()); diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index 5dbbd0c88baa..8d730c656d1f 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -119,60 +119,10 @@ class OverloadManagerImplTest : public testing::Test { envoy::config::overload::v3::OverloadManager parseConfig(const std::string& config) { envoy::config::overload::v3::OverloadManager proto; - bool success = Protobuf::TextFormat::ParseFromString(config, &proto); - ASSERT(success); + TestUtility::loadFromYaml(config, proto); return proto; } - std::string getConfig() { - return R"EOF( - refresh_interval { - seconds: 1 - } - resource_monitors { - name: "envoy.resource_monitors.fake_resource1" - } - resource_monitors { - name: "envoy.resource_monitors.fake_resource2" - } - resource_monitors { - name: "envoy.resource_monitors.fake_resource3" - } - resource_monitors { - name: "envoy.resource_monitors.fake_resource4" - } - actions { - name: "envoy.overload_actions.dummy_action" - triggers { - name: "envoy.resource_monitors.fake_resource1" - threshold { - value: 0.9 - } - } - triggers { - 
name: "envoy.resource_monitors.fake_resource2" - threshold { - value: 0.8 - } - } - triggers { - name: "envoy.resource_monitors.fake_resource3" - scaled { - scaling_threshold: 0.5 - saturation_threshold: 0.8 - } - } - triggers { - name: "envoy.resource_monitors.fake_resource4" - scaled { - scaling_threshold: 0.5 - saturation_threshold: 0.8 - } - } - } - )EOF"; - } - std::unique_ptr createOverloadManager(const std::string& config) { return std::make_unique(dispatcher_, stats_, thread_local_, parseConfig(config), validation_visitor_, *api_); @@ -195,10 +145,37 @@ class OverloadManagerImplTest : public testing::Test { Api::ApiPtr api_; }; +constexpr char kRegularStateConfig[] = R"YAML( + refresh_interval: + seconds: 1 + resource_monitors: + - name: envoy.resource_monitors.fake_resource1 + - name: envoy.resource_monitors.fake_resource2 + - name: envoy.resource_monitors.fake_resource3 + - name: envoy.resource_monitors.fake_resource4 + actions: + - name: envoy.overload_actions.dummy_action + triggers: + - name: envoy.resource_monitors.fake_resource1 + threshold: + value: 0.9 + - name: envoy.resource_monitors.fake_resource2 + threshold: + value: 0.8 + - name: envoy.resource_monitors.fake_resource3 + scaled: + scaling_threshold: 0.5 + saturation_threshold: 0.8 + - name: envoy.resource_monitors.fake_resource4 + scaled: + scaling_threshold: 0.5 + saturation_threshold: 0.8 +)YAML"; + TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); bool is_active = false; int cb_count = 0; manager->registerForAction("envoy.overload_actions.dummy_action", dispatcher_, @@ -306,7 +283,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { TEST_F(OverloadManagerImplTest, ScaledTrigger) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); manager->start(); const auto& action_state = manager->getThreadLocalOverloadState().getState("envoy.overload_actions.dummy_action"); @@ -350,7 +327,7 @@ TEST_F(OverloadManagerImplTest, ScaledTrigger) { TEST_F(OverloadManagerImplTest, FailedUpdates) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); manager->start(); Stats::Counter& failed_updates = stats_.counter("overload.envoy.resource_monitors.fake_resource1.failed_updates"); @@ -366,7 +343,7 @@ TEST_F(OverloadManagerImplTest, FailedUpdates) { TEST_F(OverloadManagerImplTest, AggregatesMultipleResourceUpdates) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); manager->start(); const OverloadActionState& action_state = @@ -388,7 +365,7 @@ TEST_F(OverloadManagerImplTest, AggregatesMultipleResourceUpdates) { TEST_F(OverloadManagerImplTest, DelayedUpdatesAreCoalesced) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); manager->start(); const OverloadActionState& action_state = @@ -412,7 +389,7 @@ TEST_F(OverloadManagerImplTest, DelayedUpdatesAreCoalesced) { TEST_F(OverloadManagerImplTest, FlushesUpdatesEvenWithOneUnresponsive) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); manager->start(); const 
OverloadActionState& action_state = @@ -436,7 +413,7 @@ TEST_F(OverloadManagerImplTest, FlushesUpdatesEvenWithOneUnresponsive) { TEST_F(OverloadManagerImplTest, SkippedUpdates) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); manager->start(); Stats::Counter& skipped_updates = stats_.counter("overload.envoy.resource_monitors.fake_resource1.skipped_updates"); @@ -466,12 +443,9 @@ TEST_F(OverloadManagerImplTest, SkippedUpdates) { TEST_F(OverloadManagerImplTest, DuplicateResourceMonitor) { const std::string config = R"EOF( - resource_monitors { - name: "envoy.resource_monitors.fake_resource1" - } - resource_monitors { - name: "envoy.resource_monitors.fake_resource1" - } + resource_monitors: + - name: "envoy.resource_monitors.fake_resource1" + - name: "envoy.resource_monitors.fake_resource1" )EOF"; EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, @@ -480,12 +454,9 @@ TEST_F(OverloadManagerImplTest, DuplicateResourceMonitor) { TEST_F(OverloadManagerImplTest, DuplicateOverloadAction) { const std::string config = R"EOF( - actions { - name: "envoy.overload_actions.dummy_action" - } - actions { - name: "envoy.overload_actions.dummy_action" - } + actions: + - name: "envoy.overload_actions.dummy_action" + - name: "envoy.overload_actions.dummy_action" )EOF"; EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, @@ -495,19 +466,15 @@ TEST_F(OverloadManagerImplTest, DuplicateOverloadAction) { // A scaled trigger action's thresholds must conform to scaling < saturation. TEST_F(OverloadManagerImplTest, ScaledTriggerSaturationLessThanScalingThreshold) { const std::string config = R"EOF( - resource_monitors { - name: "envoy.resource_monitors.fake_resource1" - } - actions { - name: "envoy.overload_actions.dummy_action" - triggers { - name: "envoy.resource_monitors.fake_resource1" - scaled { - scaling_threshold: 0.9 - saturation_threshold: 0.8 - } - } - } + resource_monitors: + - name: "envoy.resource_monitors.fake_resource1" + actions: + - name: "envoy.overload_actions.dummy_action" + triggers: + - name: "envoy.resource_monitors.fake_resource1" + scaled: + scaling_threshold: 0.9 + saturation_threshold: 0.8 )EOF"; EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, @@ -517,19 +484,15 @@ TEST_F(OverloadManagerImplTest, ScaledTriggerSaturationLessThanScalingThreshold) // A scaled trigger action can't have threshold values that are equal. 
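// A minimal sketch (separate from the patch above) of the pattern these converted
// tests now rely on: loading an inline YAML snippet into the typed overload config
// with TestUtility::loadFromYaml, which raises an exception on malformed input,
// instead of the old TextFormat::ParseFromString + ASSERT combination. The include
// paths below are assumptions based on the surrounding test files.
#include <string>

#include "envoy/config/overload/v3/overload.pb.h"

#include "test/test_common/utility.h"

namespace Envoy {

envoy::config::overload::v3::OverloadManager parseOverloadConfig(const std::string& yaml) {
  envoy::config::overload::v3::OverloadManager proto;
  // A bad fixture now fails the test with a parse error rather than tripping an ASSERT.
  TestUtility::loadFromYaml(yaml, proto);
  return proto;
}

} // namespace Envoy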
TEST_F(OverloadManagerImplTest, ScaledTriggerThresholdsEqual) { const std::string config = R"EOF( - resource_monitors { - name: "envoy.resource_monitors.fake_resource1" - } - actions { - name: "envoy.overload_actions.dummy_action" - triggers { - name: "envoy.resource_monitors.fake_resource1" - scaled { - scaling_threshold: 0.9 - saturation_threshold: 0.9 - } - } - } + resource_monitors: + - name: "envoy.resource_monitors.fake_resource1" + actions: + - name: "envoy.overload_actions.dummy_action" + triggers: + - name: "envoy.resource_monitors.fake_resource1" + scaled: + scaling_threshold: 0.9 + saturation_threshold: 0.9 )EOF"; EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, @@ -538,15 +501,12 @@ TEST_F(OverloadManagerImplTest, ScaledTriggerThresholdsEqual) { TEST_F(OverloadManagerImplTest, UnknownTrigger) { const std::string config = R"EOF( - actions { - name: "envoy.overload_actions.dummy_action" - triggers { - name: "envoy.resource_monitors.fake_resource1" - threshold { - value: 0.9 - } - } - } + actions: + - name: "envoy.overload_actions.dummy_action" + triggers: + - name: "envoy.resource_monitors.fake_resource1" + threshold: + value: 0.9 )EOF"; EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, @@ -555,24 +515,17 @@ TEST_F(OverloadManagerImplTest, UnknownTrigger) { TEST_F(OverloadManagerImplTest, DuplicateTrigger) { const std::string config = R"EOF( - resource_monitors { - name: "envoy.resource_monitors.fake_resource1" - } - actions { - name: "envoy.overload_actions.dummy_action" - triggers { - name: "envoy.resource_monitors.fake_resource1" - threshold { - value: 0.9 - } - } - triggers { - name: "envoy.resource_monitors.fake_resource1" - threshold { - value: 0.8 - } - } - } + resource_monitors: + - name: "envoy.resource_monitors.fake_resource1" + actions: + - name: "envoy.overload_actions.dummy_action" + triggers: + - name: "envoy.resource_monitors.fake_resource1" + threshold: + value: 0.9 + - name: "envoy.resource_monitors.fake_resource1" + threshold: + value: 0.8 )EOF"; EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, "Duplicate trigger .*"); @@ -581,7 +534,7 @@ TEST_F(OverloadManagerImplTest, DuplicateTrigger) { TEST_F(OverloadManagerImplTest, Shutdown) { setDispatcherExpectation(); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(kRegularStateConfig)); manager->start(); EXPECT_CALL(*timer_, disableTimer()); diff --git a/test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-4788023076847616 b/test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-4788023076847616 new file mode 100644 index 000000000000..e6c808c6cd5f --- /dev/null +++ b/test/server/server_corpus/clusterfuzz-testcase-config_fuzz_test-4788023076847616 @@ -0,0 +1,188 @@ +static_resources { + clusters { + name: ";" + connect_timeout { + seconds: 2304 + nanos: 132 + } + health_checks { + timeout { + nanos: 262144 + } + interval { + seconds: 2559 + nanos: 67154560 + } + unhealthy_threshold { + value: 122 + } + healthy_threshold { + value: 1728053248 + } + alt_port { + value: 4 + } + http_health_check { + path: "0.0.0.0" + receive { + text: "@B\017\000\000\000\000\000" + } + request_headers_to_add { + header { + key: ";" + value: 
"\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } + append { + value: true + } + } + request_headers_to_remove: ";x" + codec_client_type: HTTP3 + } + no_traffic_interval { + nanos: 917760 + } + unhealthy_edge_interval { + seconds: 2559 + nanos: 16384 + } + event_log_path: "]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]" + interval_jitter_percent: 524288 + tls_options { + alpn_protocols: "/" + } + } + http2_protocol_options { + } + upstream_bind_config { + source_address { + address: "0.0.0.0" + port_value: 0 + } + freebind { + value: true + } + } + common_http_protocol_options { + idle_timeout { + seconds: 2304 + nanos: 132 + } + } + load_assignment { + cluster_name: "domains" + endpoints { + locality { + zone: "6" + } + load_balancing_weight { + value: 122 + } + priority: 122 + } + endpoints { + lb_endpoints { + endpoint { + address { + socket_address { + protocol: UDP + address: "0.0.0.0" + port_value: 122 + } + } + } + health_status: TIMEOUT + load_balancing_weight { + value: 8960 + } + } + priority: 122 + } + endpoints { + locality { + zone: "\n\000\000\000" + } + lb_endpoints { + endpoint { + address { + socket_address { + address: "0.0.0.0" + port_value: 0 + } + } + } + health_status: TIMEOUT + } + load_balancing_weight { + value: 122 + } + priority: 122 + } + endpoints { + locality { + sub_zone: "|" + } + priority: 122 + proximity { + value: 664184 + } + } + endpoints { + locality { + zone: "77777777" + } + lb_endpoints { + endpoint { + address { + socket_address { + address: "0.0.0.1" + port_value: 0 + ipv4_compat: true + } + } + } + health_status: TIMEOUT + } + lb_endpoints { + endpoint { + address { + socket_address { + address: "0.0.0.0" + port_value: 0 + } + } + } + health_status: TIMEOUT + } + load_balancing_weight { + value: 1728053248 + } + priority: 106 + } + policy { + endpoint_stale_after { + nanos: 262144 + } + } + } + dns_failure_refresh_rate { + base_interval { + seconds: 8 + nanos: 812933685 + } + } + upstream_http_protocol_options { + } + } +} +cluster_manager { + load_stats_config { + transport_api_version: V3 + } +} +stats_sinks { + name: "type.googleapis.com/envoy.api.v2.route.Route" + typed_config { + type_url: "IIIIIIIIIIIIIIII" + } +} diff --git a/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5084029869883392 b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5084029869883392 new file mode 100644 index 000000000000..948e34d2ce4f --- /dev/null +++ b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5084029869883392 @@ -0,0 +1,9 @@ +static_resources { + listeners { + address { + envoy_internal_address { + server_listener_name: "ipv6" + } + } + } +} \ No newline at end of file diff --git a/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5733243234811904 b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5733243234811904 new file mode 100644 index 000000000000..0b3fd14152e2 --- /dev/null +++ b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5733243234811904 @@ -0,0 +1,204 @@ +node { +} +static_resources { + clusters { + name: ";" + connect_timeout { + seconds: 8 + nanos: 132 + } + 
per_connection_buffer_limit_bytes { + value: 1728053248 + } + health_checks { + timeout { + seconds: 8 + nanos: 25 + } + interval { + seconds: 2559 + nanos: 67154560 + } + unhealthy_threshold { + value: 1728053248 + } + healthy_threshold { + value: 1728053248 + } + alt_port { + value: 4 + } + http_health_check { + path: ":" + receive { + text: "@B\017\000\000\000\000\000" + } + request_headers_to_add { + header { + key: "\361\211\211\211\t\341\211\211\tt" + value: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\337\205\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } + } + request_headers_to_remove: ";" + } + unhealthy_interval { + seconds: 8 + nanos: 132 + } + unhealthy_edge_interval { + seconds: 2299 + nanos: 16384 + } + event_log_path: "%" + always_log_health_check_failures: true + } + max_requests_per_connection { + value: 842473504 + } + http2_protocol_options { + max_inbound_priority_frames_per_stream { + value: 1701737468 + } + } + alt_stat_name: "=" + common_http_protocol_options { + max_headers_count { + value: 842473504 + } + } + load_assignment { + cluster_name: "domains" + endpoints { + locality { + zone: "6" + } + load_balancing_weight { + value: 842473504 + } + priority: 122 + } + endpoints { + lb_endpoints { + endpoint { + address { + socket_address { + address: "0.0.0.0" + port_value: 0 + } + } + } + health_status: TIMEOUT + } + priority: 122 + proximity { + value: 28732523 + } + } + endpoints { + locality { + sub_zone: "|" + } + lb_endpoints { + endpoint { + address { + pipe { + path: "$node {\n}\ns" + } + } + } + health_status: UNHEALTHY + } + load_balancing_weight { + value: 122 + } + priority: 122 + } + endpoints { + locality { + sub_zone: "|" + } + lb_endpoints { + endpoint { + address { + socket_address { + address: "0.0.0.0" + port_value: 0 + } + } + } + health_status: DEGRADED + } + priority: 122 + } + endpoints { + lb_endpoints { + health_status: HEALTHY + endpoint_name: "\021\000\000\000\000\000\000\000" + } + lb_endpoints { + endpoint { + address { + envoy_internal_address { + server_listener_name: "\001\000\000\001" + } + } + } + health_status: TIMEOUT + } + priority: 106 + } + policy { + drop_overloads { + category: "U" + } + } + } + dns_failure_refresh_rate { + base_interval { + seconds: 8 + nanos: 132 + } + } + track_cluster_stats { + } + } +} +stats_sinks { + name: "type.googleapis.com/envoy.api.v2.route.Route" + typed_config { + type_url: "IIIIIIIIIIIIIIII" + } +} +stats_sinks { + name: "=" + typed_config { + value: "\000\037" + } +} +stats_sinks { +} +stats_sinks { +} +stats_sinks { + name: "[" +} +stats_sinks { +} +stats_sinks { + name: "z" +} +stats_sinks { + name: "z" +} +stats_sinks { + name: "z" +} +stats_sinks { + name: "z" +} +stats_sinks { + name: "z" +} +stats_sinks { + name: "z" +} \ No newline at end of file diff --git a/test/server/server_corpus/not_implemented_envoy_internal b/test/server/server_corpus/not_implemented_envoy_internal new file mode 100644 index 000000000000..2fd02520683c --- /dev/null +++ b/test/server/server_corpus/not_implemented_envoy_internal @@ -0,0 +1,103 @@ +static_resources { + clusters { + name: "ser" + connect_timeout { + nanos: 813 + } + lb_policy: RING_HASH + health_checks { + timeout { + seconds: 1000000 + nanos: 262239 + } + interval { + seconds: 10838081697 
+ nanos: 95 + } + unhealthy_threshold { + } + healthy_threshold { + } + http_health_check { + host: "\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\001\037\037\037\037\037\037\037\037\037\037\037\037\037f\037\037\037\037" + path: "&" + } + healthy_edge_interval { + nanos: 95 + } + } + circuit_breakers { + } + http_protocol_options { + allow_absolute_url { + value: true + } + } + load_assignment { + cluster_name: "." + endpoints { + } + endpoints { + lb_endpoints { + endpoint { + address { + pipe { + path: "f" + } + } + } + health_status: DRAINING + } + } + endpoints { + lb_endpoints { + endpoint { + address { + envoy_internal_address { + server_listener_name: "\000\000\000\003" + } + } + } + } + priority: 16 + } + endpoints { + proximity { + value: 10240 + } + } + endpoints { + lb_endpoints { + endpoint { + address { + socket_address { + address: "127.0.0.1" + port_value: 9901 + } + } + } + health_status: HEALTHY + } + priority: 16 + } + } + use_tcp_for_dns_lookups: true + } +} +stats_flush_interval { + nanos: 16777216 +} +admin { + access_log_path: "f" + address { + socket_address { + address: "\024" + } + } + socket_options { + description: "=" + level: 4702337453602635775 + int_value: 4702337453602635775 + } +} +use_tcp_for_dns_lookups: true \ No newline at end of file diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index 4859db0e97cf..f96f3b17c8d6 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -27,7 +27,9 @@ void makePortHermetic(Fuzz::PerTestEnvironment& test_env, envoy::config::core::v3::Address& address) { if (address.has_socket_address()) { address.mutable_socket_address()->set_port_value(0); - } else if (address.has_pipe()) { + } else if (address.has_pipe() || address.has_envoy_internal_address()) { + // TODO(asraa): Remove this work-around to replace EnvoyInternalAddress when implemented and + // remove condition at line 74. address.mutable_pipe()->set_path("@" + test_env.testId() + address.pipe().path()); } } @@ -57,7 +59,10 @@ makeHermeticPathsAndPorts(Fuzz::PerTestEnvironment& test_env, for (auto& health_check : *cluster.mutable_health_checks()) { // TODO(asraa): QUIC is not enabled in production code yet, so remove references for HTTP3. // Tracked at https://github.com/envoyproxy/envoy/issues/9513. - health_check.mutable_http_health_check()->clear_codec_client_type(); + if (health_check.http_health_check().codec_client_type() == + envoy::type::v3::CodecClientType::HTTP3) { + health_check.mutable_http_health_check()->clear_codec_client_type(); + } } // We may have both deprecated hosts() or load_assignment(). 
for (auto& host : *cluster.mutable_hidden_envoy_deprecated_hosts()) { @@ -67,7 +72,8 @@ makeHermeticPathsAndPorts(Fuzz::PerTestEnvironment& test_env, auto* locality_lb = cluster.mutable_load_assignment()->mutable_endpoints(j); for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) { auto* lb_endpoint = locality_lb->mutable_lb_endpoints(k); - if (lb_endpoint->endpoint().address().has_socket_address()) { + if (lb_endpoint->endpoint().address().has_socket_address() || + lb_endpoint->endpoint().address().has_envoy_internal_address()) { makePortHermetic(test_env, *lb_endpoint->mutable_endpoint()->mutable_address()); } } diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 3f037e702330..a26da0f646e6 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -25,6 +25,7 @@ #include "test/mocks/server/overload_manager.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" +#include "test/test_common/logging.h" #include "test/test_common/registry.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/test_time.h" @@ -475,7 +476,8 @@ TEST_P(ServerInstanceImplTest, Stats) { options_.concurrency_ = 2; options_.hot_restart_epoch_ = 3; EXPECT_NO_THROW(initialize("test/server/test_data/server/empty_bootstrap.yaml")); - EXPECT_NE(nullptr, TestUtility::findCounter(stats_store_, "server.watchdog_miss")); + EXPECT_NE(nullptr, TestUtility::findCounter(stats_store_, "main_thread.watchdog_miss")); + EXPECT_NE(nullptr, TestUtility::findCounter(stats_store_, "workers.watchdog_miss")); EXPECT_EQ(2L, TestUtility::findGauge(stats_store_, "server.concurrency")->value()); EXPECT_EQ(3L, TestUtility::findGauge(stats_store_, "server.hot_restart_epoch")->value()); @@ -503,18 +505,8 @@ TEST_P(ServerInstanceImplTest, Stats) { #endif } -class TestWithSimTimeAndRealSymbolTables : public Event::TestUsingSimulatedTime { -protected: - TestWithSimTimeAndRealSymbolTables() { - symbol_table_creator_test_peer_.setUseFakeSymbolTables(false); - } - -private: - Stats::TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; -}; - class ServerStatsTest - : public TestWithSimTimeAndRealSymbolTables, + : public Event::TestUsingSimulatedTime, public ServerInstanceImplTestBase, public testing::TestWithParam> { protected: @@ -526,11 +518,12 @@ class ServerStatsTest void flushStats() { if (manual_flush_) { server_->flushStats(); + server_->dispatcher().run(Event::Dispatcher::RunType::Block); } else { // Default flush interval is 5 seconds. - simTime().advanceTimeAsync(std::chrono::seconds(6)); + simTime().advanceTimeAndRun(std::chrono::seconds(6), server_->dispatcher(), + Event::Dispatcher::RunType::Block); } - server_->dispatcher().run(Event::Dispatcher::RunType::Block); } bool manual_flush_; @@ -663,13 +656,24 @@ TEST_P(ServerInstanceImplTest, BootstrapNode) { // Validate that bootstrap pb_text loads. TEST_P(ServerInstanceImplTest, LoadsBootstrapFromPbText) { - initialize("test/server/test_data/server/node_bootstrap.pb_text"); + EXPECT_LOG_NOT_CONTAINS("trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/node_bootstrap.pb_text")); EXPECT_EQ("bootstrap_id", server_->localInfo().node().id()); } // Validate that bootstrap v2 pb_text with deprecated fields loads. 
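// A self-contained sketch (separate from the patch) of the advance-then-run pattern
// that flushStats() above and the simulated-time helpers further below consolidate
// into advanceTimeAndRun(): bump a fake clock, then immediately drain the timers
// that became due. FakeClock and FakeScheduler are illustrative stand-ins, not
// Envoy's classes.
#include <chrono>
#include <functional>
#include <map>

struct FakeClock {
  std::chrono::milliseconds now{0};
};

struct FakeScheduler {
  // Timers keyed by their absolute deadline on the fake clock.
  std::multimap<std::chrono::milliseconds, std::function<void()>> timers;

  // Run every callback whose deadline is at or before the current fake time.
  // For simplicity the callbacks are assumed not to re-arm timers while running.
  void run(const FakeClock& clock) {
    auto due_end = timers.upper_bound(clock.now);
    for (auto it = timers.begin(); it != due_end; ++it) {
      it->second();
    }
    timers.erase(timers.begin(), due_end);
  }
};

// Equivalent in spirit to advanceTimeAndRun(): advance the clock, then run the
// event loop so newly due timers fire before the test continues.
void advanceTimeAndRun(FakeClock& clock, FakeScheduler& scheduler,
                       std::chrono::milliseconds delta) {
  clock.now += delta;
  scheduler.run(clock);
}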
TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2BootstrapFromPbText)) { - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"); + EXPECT_LOG_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text")); + EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty()); +} + +// Validate that bootstrap v2 YAML with deprecated fields loads. +TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2BootstrapFromYaml)) { + EXPECT_LOG_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml")); EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty()); } @@ -682,19 +686,65 @@ TEST_P(ServerInstanceImplTest, FailToLoadV3ConfigWhenV2SelectedFromPbText) { EnvoyException, "Unable to parse file"); } -// Validate that we correctly parse a V2 file when configured to do so. +// Validate that bootstrap v3 YAML with new fields loads fails if V2 config is specified. +TEST_P(ServerInstanceImplTest, FailToLoadV3ConfigWhenV2SelectedFromYaml) { + options_.bootstrap_version_ = 2; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml"), + EnvoyException, "has unknown fields"); +} + +// Validate that we correctly parse a V2 pb_text file when configured to do so. TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2ConfigWhenV2SelectedFromPbText)) { options_.bootstrap_version_ = 2; - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"); + EXPECT_LOG_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text")); + EXPECT_EQ(server_->localInfo().node().id(), "bootstrap_id"); +} + +// Validate that we correctly parse a V2 YAML file when configured to do so. +TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2ConfigWhenV2SelectedFromYaml)) { + options_.bootstrap_version_ = 2; + + EXPECT_LOG_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml")); EXPECT_EQ(server_->localInfo().node().id(), "bootstrap_id"); } -// Validate that we correctly parse a V3 file when configured to do so. -TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV2SelectedFromPbText) { +// Validate that we correctly parse a V3 pb_text file without explicit version configuration. +TEST_P(ServerInstanceImplTest, LoadsV3ConfigFromPbText) { + EXPECT_LOG_NOT_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text")); +} + +// Validate that we correctly parse a V3 YAML file without explicit version configuration. +TEST_P(ServerInstanceImplTest, LoadsV3ConfigFromYaml) { + EXPECT_LOG_NOT_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml")); +} + +// Validate that we correctly parse a V3 pb_text file when configured to do so. 
+TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV3SelectedFromPbText) { options_.bootstrap_version_ = 3; - initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text"); + EXPECT_LOG_NOT_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text")); +} + +// Validate that we correctly parse a V3 YAML file when configured to do so. +TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV3SelectedFromYaml) { + options_.bootstrap_version_ = 3; + + EXPECT_LOG_NOT_CONTAINS( + "trace", "Configuration does not parse cleanly as v3", + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml")); } // Validate that bootstrap v2 pb_text with deprecated fields loads fails if V3 config is specified. @@ -706,6 +756,15 @@ TEST_P(ServerInstanceImplTest, FailToLoadV2ConfigWhenV3SelectedFromPbText) { EnvoyException, "Unable to parse file"); } +// Validate that bootstrap v2 YAML with deprecated fields loads fails if V3 config is specified. +TEST_P(ServerInstanceImplTest, FailToLoadV2ConfigWhenV3SelectedFromYaml) { + options_.bootstrap_version_ = 3; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml"), + EnvoyException, "has unknown fields"); +} + // Validate that we blow up on invalid version number. TEST_P(ServerInstanceImplTest, InvalidBootstrapVersion) { options_.bootstrap_version_ = 1; @@ -790,8 +849,8 @@ TEST_P(ServerInstanceImplTest, BootstrapRtdsThroughAdsViaEdsFails) { TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(InvalidLegacyBootstrapRuntime)) { EXPECT_THROW_WITH_MESSAGE( - initialize("test/server/test_data/server/invalid_runtime_bootstrap.yaml"), EnvoyException, - "Invalid runtime entry value for foo"); + initialize("test/server/test_data/server/invalid_legacy_runtime_bootstrap.yaml"), + EnvoyException, "Invalid runtime entry value for foo"); } // Validate invalid runtime in bootstrap is rejected. diff --git a/test/server/ssl_context_manager_test.cc b/test/server/ssl_context_manager_test.cc index 31e57bb9732d..ba37581664e5 100644 --- a/test/server/ssl_context_manager_test.cc +++ b/test/server/ssl_context_manager_test.cc @@ -22,6 +22,7 @@ TEST(SslContextManager, createStub) { // Check we've created a stub, not real manager. 
EXPECT_EQ(manager->daysUntilFirstCertExpires(), std::numeric_limits::max()); + EXPECT_EQ(manager->secondsUntilFirstOcspResponseExpires(), absl::nullopt); EXPECT_THROW_WITH_MESSAGE(manager->createSslClientContext(scope, client_config), EnvoyException, "SSL is not supported in this configuration"); EXPECT_THROW_WITH_MESSAGE(manager->createSslServerContext(scope, server_config, server_names), diff --git a/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml b/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml new file mode 100644 index 000000000000..f8ff1b6a9375 --- /dev/null +++ b/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml @@ -0,0 +1,3 @@ +node: + id: "bootstrap_id" + build_version: "foo" diff --git a/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml b/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml new file mode 100644 index 000000000000..79e53f61565b --- /dev/null +++ b/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml @@ -0,0 +1,5 @@ +static_resources: + clusters : + name: "cluster" + ignore_health_on_host_removal: true + connect_timeout: 1s diff --git a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml index 07b6220e447f..e79bd34b52e7 100644 --- a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml +++ b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml @@ -6,7 +6,7 @@ tracing: http: name: zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v1/spans" collector_endpoint_version: HTTP_JSON diff --git a/test/server/test_data/static_validation/network_filter_unknown_field.yaml b/test/server/test_data/static_validation/network_filter_unknown_field.yaml index 7c17f16cee52..6535893cce4b 100644 --- a/test/server/test_data/static_validation/network_filter_unknown_field.yaml +++ b/test/server/test_data/static_validation/network_filter_unknown_field.yaml @@ -9,7 +9,7 @@ static_resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: HTTP2 stat_prefix: blah route_config: {} diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 97be022e4d02..8e175627ab9c 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -137,7 +137,6 @@ envoy_cc_test_library( "//source/common/network:address_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", - "//source/common/stats:fake_symbol_table_lib", "//source/common/stats:stats_lib", "//test/mocks/stats:stats_mocks", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", @@ -256,6 +255,7 @@ envoy_cc_test_library( deps = [ ":global_lib", ":only_one_thread_lib", + "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", "//source/common/common:thread_lib", ], @@ -274,6 +274,11 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "status_utility_lib", + hdrs = ["status_utility.h"], +) + envoy_cc_test( name = "simulated_time_system_test", srcs = ["simulated_time_system_test.cc"], @@ -296,6 +301,27 @@ envoy_cc_test( ], ) +envoy_cc_test_library( + name = "wasm_lib", + hdrs = 
["wasm_base.h"], + deps = [ + "//source/common/stream_info:stream_info_lib", + "//source/extensions/common/wasm:wasm_interoperation_lib", + "//source/extensions/common/wasm:wasm_lib", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/ssl:ssl_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + ], +) + envoy_basic_cc_library( name = "test_version_linkstamp", srcs = ["test_version_linkstamp.cc"], diff --git a/test/test_common/logging.cc b/test/test_common/logging.cc index 8e398ce52947..f014241fb198 100644 --- a/test/test_common/logging.cc +++ b/test/test_common/logging.cc @@ -7,20 +7,31 @@ namespace Envoy { LogLevelSetter::LogLevelSetter(spdlog::level::level_enum log_level) { - for (Logger::Logger& logger : Logger::Registry::loggers()) { - previous_levels_.push_back(logger.level()); - logger.setLevel(log_level); + if (Logger::Context::useFancyLogger()) { + previous_fancy_levels_ = getFancyContext().getAllFancyLogLevelsForTest(); + getFancyContext().setAllFancyLoggers(log_level); + } else { + for (Logger::Logger& logger : Logger::Registry::loggers()) { + previous_levels_.push_back(logger.level()); + logger.setLevel(log_level); + } } } LogLevelSetter::~LogLevelSetter() { - auto prev_level = previous_levels_.begin(); - for (Logger::Logger& logger : Logger::Registry::loggers()) { - ASSERT(prev_level != previous_levels_.end()); - logger.setLevel(*prev_level); - ++prev_level; + if (Logger::Context::useFancyLogger()) { + for (const auto& it : previous_fancy_levels_) { + getFancyContext().setFancyLogger(it.first, it.second); + } + } else { + auto prev_level = previous_levels_.begin(); + for (Logger::Logger& logger : Logger::Registry::loggers()) { + ASSERT(prev_level != previous_levels_.end()); + logger.setLevel(*prev_level); + ++prev_level; + } + ASSERT(prev_level == previous_levels_.end()); } - ASSERT(prev_level == previous_levels_.end()); } LogRecordingSink::LogRecordingSink(Logger::DelegatingLogSinkSharedPtr log_sink) diff --git a/test/test_common/logging.h b/test/test_common/logging.h index d2c79a0a5901..698fba241aad 100644 --- a/test/test_common/logging.h +++ b/test/test_common/logging.h @@ -35,6 +35,7 @@ class LogLevelSetter { private: std::vector previous_levels_; + FancyLogLevelMap previous_fancy_levels_; }; /** diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index 03a697e056bb..af27468a6a79 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -67,7 +67,7 @@ class SimulatedTimeSystemHelper::SimulatedScheduler : public Scheduler { // Implementation of SimulatedTimeSystemHelper::Alarm methods. 
bool isEnabled(Alarm& alarm) ABSL_LOCKS_EXCLUDED(mutex_); - void enableAlarm(Alarm& alarm, const std::chrono::microseconds& duration) + void enableAlarm(Alarm& alarm, const std::chrono::microseconds duration) ABSL_LOCKS_EXCLUDED(mutex_); void disableAlarm(Alarm& alarm) ABSL_LOCKS_EXCLUDED(mutex_) { absl::MutexLock lock(&mutex_); @@ -235,11 +235,11 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { // Timer void disableTimer() override; - void enableTimer(const std::chrono::milliseconds& duration, + void enableTimer(const std::chrono::milliseconds duration, const ScopeTrackedObject* scope) override { enableHRTimer(duration, scope); }; - void enableHRTimer(const std::chrono::microseconds& duration, + void enableHRTimer(const std::chrono::microseconds duration, const ScopeTrackedObject* scope) override; bool enabled() override { return simulated_scheduler_.isEnabled(*this); } @@ -264,7 +264,7 @@ bool SimulatedTimeSystemHelper::SimulatedScheduler::isEnabled(Alarm& alarm) { } void SimulatedTimeSystemHelper::SimulatedScheduler::enableAlarm( - Alarm& alarm, const std::chrono::microseconds& duration) { + Alarm& alarm, const std::chrono::microseconds duration) { { absl::MutexLock lock(&mutex_); if (duration.count() == 0 && triggered_alarms_.contains(alarm)) { @@ -348,7 +348,7 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() { } void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( - const std::chrono::microseconds& duration, const ScopeTrackedObject* /*scope*/) { + const std::chrono::microseconds duration, const ScopeTrackedObject* /*scope*/) { simulated_scheduler_.enableAlarm(*this, duration); } @@ -361,7 +361,7 @@ static int instance_count = 0; // When we initialize our simulated time, we'll start the current time based on // the real current time. But thereafter, real-time will not be used, and time -// will march forward only by calling.advanceTimeAsync(). +// will march forward only by calling advanceTimeAndRun() or advanceTimeWait(). SimulatedTimeSystemHelper::SimulatedTimeSystemHelper() : monotonic_time_(MonotonicTime(std::chrono::seconds(0))), system_time_(real_time_source_.systemTime()), pending_updates_(0) { diff --git a/test/test_common/simulated_time_system_test.cc b/test/test_common/simulated_time_system_test.cc index 7451ce8711f4..4234142114af 100644 --- a/test/test_common/simulated_time_system_test.cc +++ b/test/test_common/simulated_time_system_test.cc @@ -58,8 +58,8 @@ class SimulatedTimeSystemTest : public testing::TestWithParam { } void advanceMsAndLoop(int64_t delay_ms) { - time_system_.advanceTimeAsync(std::chrono::milliseconds(delay_ms)); - base_scheduler_.run(Dispatcher::RunType::NonBlock); + time_system_.advanceTimeAndRun(std::chrono::milliseconds(delay_ms), base_scheduler_, + Dispatcher::RunType::NonBlock); } void advanceSystemMsAndLoop(int64_t delay_ms) { diff --git a/test/test_common/status_utility.h b/test/test_common/status_utility.h new file mode 100644 index 000000000000..5a48a7c2e0f3 --- /dev/null +++ b/test/test_common/status_utility.h @@ -0,0 +1,42 @@ +#pragma once + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace StatusHelpers { + +// Check that a StatusOr is OK and has a value equal to its argument. 
+// +// For example: +// +// StatusOr status(3); +// EXPECT_THAT(status, IsOkAndHolds(3)); +MATCHER_P(IsOkAndHolds, expected, "") { + if (!arg) { + *result_listener << "which has unexpected status: " << arg.status(); + return false; + } + if (*arg != expected) { + *result_listener << "which has wrong value: " << *arg; + return false; + } + return true; +} + +// Check that a StatusOr as a status code equal to its argument. +// +// For example: +// +// StatusOr status(absl::InvalidArgumentError("bad argument!")); +// EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument)); +MATCHER_P(StatusIs, expected_code, "") { + if (arg.status().code() != expected_code) { + *result_listener << "which has unexpected status: " << arg.status(); + return false; + } + return true; +} + +} // namespace StatusHelpers +} // namespace Envoy diff --git a/test/test_common/test_time_system.h b/test/test_common/test_time_system.h index 4cbd3d5c4f39..932f7083260d 100644 --- a/test/test_common/test_time_system.h +++ b/test/test_common/test_time_system.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/common/time.h" +#include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" #include "common/common/assert.h" @@ -48,13 +49,14 @@ class TestTimeSystem : public Event::TimeSystem { /** * Advances time forward by the specified duration, running any timers * scheduled to fire, and blocking until the timer callbacks are complete. - * See also advanceTimeAsync(), which does not block. + * See also advanceTimeAndRun(), which provides the option to run a specific + * dispatcher or scheduler after advancing the time. * * This function should be used in multi-threaded tests, where other * threads are running dispatcher loops. Integration tests should usually * use this variant. * - * @param duration The amount of time to sleep. + * @param duration The amount of time to advance. */ virtual void advanceTimeWaitImpl(const Duration& duration) PURE; template void advanceTimeWait(const D& duration = false) { @@ -62,21 +64,33 @@ class TestTimeSystem : public Event::TimeSystem { } /** - * Advances time forward by the specified duration. Timers may be triggered on - * their threads, but unlike advanceTimeWait(), this method does not block - * waiting for them to complete. + * Advances time forward by the specified duration. Timers on event loops outside the current + * thread may trigger, but unlike advanceTimeWait(), this method does not block waiting for them + * to complete. This method also takes in a parameter the dispatcher or scheduler for the current + * thread, which will be run in the requested mode after advancing the time forward. * - * This function should be used in single-threaded tests, in scenarios where - * after time is advanced, the main test thread will run a dispatcher - * loop. Unit tests will often use this variant. + * This function should be used in single-threaded tests that want to advance time and then run + * the test thread event loop. Unit tests will often use this variant. * - * @param duration The amount of time to sleep. + * @param duration The amount of time to advance. + * @param dispatcher_or_scheduler The event loop to run after advancing time forward. + * @param mode The mode to use when running the event loop. 
*/ - virtual void advanceTimeAsyncImpl(const Duration& duration) PURE; - template void advanceTimeAsync(const D& duration = false) { + template + void advanceTimeAndRun(const D& duration, DispatcherOrScheduler& dispatcher_or_scheduler, + Dispatcher::RunType mode) { advanceTimeAsyncImpl(std::chrono::duration_cast(duration)); + dispatcher_or_scheduler.run(mode); } + /** + * Helper function used by the implementation of advanceTimeAndRun which just advances time + * forward by the specified amount. + * + * @param duration The amount of time to advance. + */ + virtual void advanceTimeAsyncImpl(const Duration& duration) PURE; + /** * Waits for the specified duration to expire, or for the condition to be satisfied, whichever * comes first. diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 0909ac0bc44a..928cb440c5f2 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -28,10 +28,12 @@ #include "common/config/resource_name.h" #include "common/filesystem/directory.h" #include "common/filesystem/filesystem_impl.h" +#include "common/http/header_utility.h" #include "common/json/json_loader.h" #include "common/network/address_impl.h" #include "common/network/utility.h" +#include "test/mocks/common.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/resources.h" @@ -64,22 +66,29 @@ uint64_t TestRandomGenerator::random() { return generator_(); } bool TestUtility::headerMapEqualIgnoreOrder(const Http::HeaderMap& lhs, const Http::HeaderMap& rhs) { - if (lhs.size() != rhs.size()) { - return false; - } - - bool equal = true; - rhs.iterate([&lhs, &equal](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { - const Http::HeaderEntry* entry = - lhs.get(Http::LowerCaseString(std::string(header.key().getStringView()))); - if (entry == nullptr || (entry->value() != header.value().getStringView())) { - equal = false; + absl::flat_hash_set lhs_keys; + absl::flat_hash_set rhs_keys; + lhs.iterate([&lhs_keys](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const std::string key{header.key().getStringView()}; + lhs_keys.insert(key); + return Http::HeaderMap::Iterate::Continue; + }); + rhs.iterate([&lhs, &rhs, &rhs_keys](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const std::string key{header.key().getStringView()}; + // Compare with canonicalized multi-value headers. This ensures we respect order within + // a header. 
+ const auto lhs_entry = + Http::HeaderUtility::getAllOfHeaderAsString(lhs, Http::LowerCaseString(key)); + const auto rhs_entry = + Http::HeaderUtility::getAllOfHeaderAsString(rhs, Http::LowerCaseString(key)); + ASSERT(rhs_entry.result()); + if (lhs_entry.result() != rhs_entry.result()) { return Http::HeaderMap::Iterate::Break; } + rhs_keys.insert(key); return Http::HeaderMap::Iterate::Continue; }); - - return equal; + return lhs_keys.size() == rhs_keys.size(); } bool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs) { @@ -401,18 +410,17 @@ class TestImplProvider { protected: Event::GlobalTimeSystem global_time_system_; testing::NiceMock default_stats_store_; + testing::NiceMock mock_random_generator_; }; class TestImpl : public TestImplProvider, public Impl { public: - TestImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, - Filesystem::Instance& file_system) - : Impl(thread_factory, stats_store, global_time_system_, file_system) {} - TestImpl(Thread::ThreadFactory& thread_factory, Event::TimeSystem& time_system, - Filesystem::Instance& file_system) - : Impl(thread_factory, default_stats_store_, time_system, file_system) {} - TestImpl(Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system) - : Impl(thread_factory, default_stats_store_, global_time_system_, file_system) {} + TestImpl(Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, + Stats::Store* stats_store = nullptr, Event::TimeSystem* time_system = nullptr, + Random::RandomGenerator* random = nullptr) + : Impl(thread_factory, stats_store ? *stats_store : default_stats_store_, + time_system ? *time_system : global_time_system_, file_system, + random ? *random : mock_random_generator_) {} }; ApiPtr createApiForTest() { @@ -420,19 +428,29 @@ ApiPtr createApiForTest() { Filesystem::fileSystemForTest()); } +ApiPtr createApiForTest(Random::RandomGenerator& random) { + return std::make_unique(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), + nullptr, nullptr, &random); +} + ApiPtr createApiForTest(Stats::Store& stat_store) { - return std::make_unique(Thread::threadFactoryForTest(), stat_store, - Filesystem::fileSystemForTest()); + return std::make_unique(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), + &stat_store); +} + +ApiPtr createApiForTest(Stats::Store& stat_store, Random::RandomGenerator& random) { + return std::make_unique(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), + &stat_store, nullptr, &random); } ApiPtr createApiForTest(Event::TimeSystem& time_system) { - return std::make_unique(Thread::threadFactoryForTest(), time_system, - Filesystem::fileSystemForTest()); + return std::make_unique(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), + nullptr, &time_system); } ApiPtr createApiForTest(Stats::Store& stat_store, Event::TimeSystem& time_system) { - return std::make_unique(Thread::threadFactoryForTest(), stat_store, time_system, - Filesystem::fileSystemForTest()); + return std::make_unique(Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), + &stat_store, &time_system); } } // namespace Api diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 20a20246ef82..3dc15c8ccac0 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -25,7 +25,7 @@ #include "common/http/header_map_impl.h" #include "common/protobuf/message_validator_impl.h" #include "common/protobuf/utility.h" -#include "common/stats/fake_symbol_table_impl.h" 
+#include "common/stats/symbol_table_impl.h" #include "test/test_common/file_system_for_test.h" #include "test/test_common/printers.h" @@ -880,15 +880,16 @@ template class TestHeaderMapImplBase : public Inte } std::string get_(const std::string& key) const { return get_(LowerCaseString(key)); } std::string get_(const LowerCaseString& key) const { - const HeaderEntry* header = get(key); - if (!header) { + // TODO(mattklein123): Possibly allow getting additional headers beyond the first. + auto headers = get(key); + if (headers.empty()) { return EMPTY_STRING; } else { - return std::string(header->value().getStringView()); + return std::string(headers[0]->value().getStringView()); } } - bool has(const std::string& key) const { return get(LowerCaseString(key)) != nullptr; } - bool has(const LowerCaseString& key) const { return get(key) != nullptr; } + bool has(const std::string& key) const { return !get(LowerCaseString(key)).empty(); } + bool has(const LowerCaseString& key) const { return !get(key).empty(); } size_t remove(const std::string& key) { return remove(LowerCaseString(key)); } // HeaderMap @@ -934,7 +935,7 @@ template class TestHeaderMapImplBase : public Inte header_map_->verifyByteSizeInternalForTest(); } uint64_t byteSize() const override { return header_map_->byteSize(); } - const HeaderEntry* get(const LowerCaseString& key) const override { + HeaderMap::GetResult get(const LowerCaseString& key) const override { return header_map_->get(key); } void iterate(HeaderMap::ConstIterateCb cb) const override { header_map_->iterate(cb); } @@ -1040,7 +1041,9 @@ makeHeaderMap(const std::initializer_list>& namespace Api { ApiPtr createApiForTest(); +ApiPtr createApiForTest(Random::RandomGenerator& random); ApiPtr createApiForTest(Stats::Store& stat_store); +ApiPtr createApiForTest(Stats::Store& stat_store, Random::RandomGenerator& random); ApiPtr createApiForTest(Event::TimeSystem& time_system); ApiPtr createApiForTest(Stats::Store& stat_store, Event::TimeSystem& time_system); } // namespace Api diff --git a/test/test_common/utility_test.cc b/test/test_common/utility_test.cc index 648d65cda365..71190a50317e 100644 --- a/test/test_common/utility_test.cc +++ b/test/test_common/utility_test.cc @@ -28,6 +28,24 @@ TEST(HeaderMapEqualIgnoreOrder, NotEqual) { EXPECT_FALSE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); } +TEST(HeaderMapEqualIgnoreOrder, MultiValue) { + { + Http::TestRequestHeaderMapImpl lhs{{"bar", "a"}, {"foo", "1"}, {"foo", "2"}}; + Http::TestRequestHeaderMapImpl rhs{{"foo", "1"}, {"bar", "a"}, {"foo", "2"}}; + EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); + } + { + Http::TestRequestHeaderMapImpl lhs{{"bar", "a"}, {"foo", "1"}, {"foo", "2"}}; + Http::TestRequestHeaderMapImpl rhs{{"foo", "2"}, {"bar", "a"}, {"foo", "1"}}; + EXPECT_FALSE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); + } + { + Http::TestRequestHeaderMapImpl lhs{{"bar", "a"}, {"foo", "1"}, {"foo", "2"}}; + Http::TestRequestHeaderMapImpl rhs{{"foo", "1,2"}, {"bar", "a"}}; + EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); + } +} + TEST(ProtoEqIgnoreField, ActuallyEqual) { // Ignored field equal { diff --git a/test/test_common/wasm_base.h b/test/test_common/wasm_base.h new file mode 100644 index 000000000000..d049460d3e41 --- /dev/null +++ b/test/test_common/wasm_base.h @@ -0,0 +1,150 @@ +#include + +#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/server/lifecycle_notifier.h" + +#include "common/buffer/buffer_impl.h" +#include "common/http/message_impl.h" 
+#include "common/stats/isolated_store_impl.h" +#include "common/stream_info/stream_info_impl.h" + +#include "extensions/common/wasm/wasm.h" +#include "extensions/common/wasm/wasm_state.h" + +#include "test/mocks/grpc/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/ssl/mocks.h" +#include "test/mocks/stream_info/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Wasm { + +#define MOCK_CONTEXT_LOG_ \ + using Context::log; \ + proxy_wasm::WasmResult log(uint32_t level, absl::string_view message) override { \ + log_(static_cast(level), message); \ + return proxy_wasm::WasmResult::Ok; \ + } \ + MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)) + +class DeferredRunner { +public: + ~DeferredRunner() { + if (f_) { + f_(); + } + } + void setFunction(std::function f) { f_ = f; } + +private: + std::function f_; +}; + +template class WasmTestBase : public Base { +public: + // NOLINTNEXTLINE(readability-identifier-naming) + void SetUp() override { clearCodeCacheForTesting(); } + + void setupBase(const std::string& runtime, const std::string& code, CreateContextFn create_root, + std::string root_id = "", std::string vm_configuration = "", + bool fail_open = false, std::string plugin_configuration = "") { + envoy::extensions::wasm::v3::VmConfig vm_config; + vm_config.set_vm_id("vm_id"); + vm_config.set_runtime(absl::StrCat("envoy.wasm.runtime.", runtime)); + ProtobufWkt::StringValue vm_configuration_string; + vm_configuration_string.set_value(vm_configuration); + vm_config.mutable_configuration()->PackFrom(vm_configuration_string); + vm_config.mutable_code()->mutable_local()->set_inline_bytes(code); + Api::ApiPtr api = Api::createApiForTest(stats_store_); + scope_ = Stats::ScopeSharedPtr(stats_store_.createScope("wasm.")); + auto name = "plugin_name"; + auto vm_id = ""; + plugin_ = std::make_shared( + name, root_id, vm_id, runtime, plugin_configuration, fail_open, + envoy::config::core::v3::TrafficDirection::INBOUND, local_info_, &listener_metadata_); + // Passes ownership of root_context_. + Extensions::Common::Wasm::createWasm( + vm_config, plugin_, scope_, cluster_manager_, init_manager_, dispatcher_, *api, + lifecycle_notifier_, remote_data_provider_, + [this](WasmHandleSharedPtr wasm) { wasm_ = wasm; }, create_root); + if (wasm_) { + wasm_ = getOrCreateThreadLocalWasm( + wasm_, plugin_, dispatcher_, + [this, create_root](Wasm* wasm, const std::shared_ptr& plugin) { + root_context_ = static_cast(create_root(wasm, plugin)); + return root_context_; + }); + } + } + + WasmHandleSharedPtr& wasm() { return wasm_; } + Context* rootContext() { return root_context_; } + + DeferredRunner deferred_runner_; + Stats::IsolatedStoreImpl stats_store_; + Stats::ScopeSharedPtr scope_; + NiceMock tls_; + NiceMock dispatcher_; + NiceMock cluster_manager_; + NiceMock init_manager_; + WasmHandleSharedPtr wasm_; + PluginSharedPtr plugin_; + NiceMock ssl_; + NiceMock connection_; + NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; + NiceMock local_info_; + NiceMock lifecycle_notifier_; + envoy::config::core::v3::Metadata listener_metadata_; + Context* root_context_ = nullptr; // Unowned. 
+ Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; +}; + +template class WasmHttpFilterTestBase : public WasmTestBase { +public: + template void setupFilterBase(const std::string root_id = "") { + auto wasm = WasmTestBase::wasm_ ? WasmTestBase::wasm_->wasm().get() : nullptr; + int root_context_id = wasm ? wasm->getRootContext(root_id)->id() : 0; + context_ = std::make_unique(wasm, root_context_id, WasmTestBase::plugin_); + context_->setDecoderFilterCallbacks(decoder_callbacks_); + context_->setEncoderFilterCallbacks(encoder_callbacks_); + } + + std::unique_ptr context_; + NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; + NiceMock request_stream_info_; +}; + +template +class WasmNetworkFilterTestBase : public WasmTestBase { +public: + template void setupFilterBase(const std::string root_id = "") { + auto wasm = WasmTestBase::wasm_ ? WasmTestBase::wasm_->wasm().get() : nullptr; + int root_context_id = wasm ? wasm->getRootContext(root_id)->id() : 0; + context_ = std::make_unique(wasm, root_context_id, WasmTestBase::plugin_); + context_->initializeReadFilterCallbacks(read_filter_callbacks_); + context_->initializeWriteFilterCallbacks(write_filter_callbacks_); + } + + std::unique_ptr context_; + NiceMock read_filter_callbacks_; + NiceMock write_filter_callbacks_; +}; + +} // namespace Wasm +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/tools/router_check/coverage.cc b/test/tools/router_check/coverage.cc index f079f0319fd7..687e7ae3aa75 100644 --- a/test/tools/router_check/coverage.cc +++ b/test/tools/router_check/coverage.cc @@ -5,7 +5,7 @@ namespace Envoy { double RouteCoverage::report() { uint64_t route_weight = 0; - for (const auto& covered_field : coverageFields()) { + for (auto covered_field : coverageFields()) { if (covered_field) { route_weight += 1; } diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 87ad88f40d3f..3b5178ff87fa 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -51,8 +51,9 @@ toString(envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase speci NOT_REACHED_GCOVR_EXCL_LINE; } -const std::string toString(const Envoy::Http::HeaderEntry* entry) { - return entry == nullptr ? "NULL" : std::string(entry->value().getStringView()); +const std::string toString(const Envoy::Http::HeaderMap::GetResult& entry) { + // TODO(mattklein123): Print multiple header values. + return entry.empty() ? 
"NULL" : std::string(entry[0]->value().getStringView()); } } // namespace diff --git a/test/tools/router_check/router.h b/test/tools/router_check/router.h index bb3e50064cc4..a566b0bb7224 100644 --- a/test/tools/router_check/router.h +++ b/test/tools/router_check/router.h @@ -12,7 +12,7 @@ #include "common/http/headers.h" #include "common/json/json_loader.h" #include "common/router/config_impl.h" -#include "common/stats/fake_symbol_table_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/stream_info/stream_info_impl.h" #include "test/mocks/server/instance.h" diff --git a/test/tools/router_check/test/route_tests.sh b/test/tools/router_check/test/route_tests.sh index fff340590727..7d7e6bdb55f6 100755 --- a/test/tools/router_check/test/route_tests.sh +++ b/test/tools/router_check/test/route_tests.sh @@ -13,7 +13,7 @@ TESTS=("ContentType" "ClusterHeader" "DirectResponse" "HeaderMatchedRouting" "Re # Testing expected matches for t in "${TESTS[@]}" do - TEST_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/${t}.yaml" "-t" "${PATH_CONFIG}/${t}.golden.proto.json" "--details") + "${PATH_BIN}" "-c" "${PATH_CONFIG}/${t}.yaml" "-t" "${PATH_CONFIG}/${t}.golden.proto.json" "--details" done # Testing coverage flag passes @@ -48,14 +48,14 @@ if [[ "${COVERAGE_OUTPUT}" != *"Failed to meet coverage requirement: 100%"* ]] ; fi # Test the yaml test file support -TEST_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/Weighted.yaml" "-t" "${PATH_CONFIG}/Weighted.golden.proto.yaml" "--details") +"${PATH_BIN}" "-c" "${PATH_CONFIG}/Weighted.yaml" "-t" "${PATH_CONFIG}/Weighted.golden.proto.yaml" "--details" # Test the proto text test file support -TEST_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/Weighted.yaml" "-t" "${PATH_CONFIG}/Weighted.golden.proto.pb_text" "--details") +"${PATH_BIN}" "-c" "${PATH_CONFIG}/Weighted.yaml" "-t" "${PATH_CONFIG}/Weighted.golden.proto.pb_text" "--details" # Bad config file echo "testing bad config output" -BAD_CONFIG_OUTPUT=$(("${PATH_BIN}" "-c" "${PATH_CONFIG}/Redirect.golden.proto.json" "-t" "${PATH_CONFIG}/TestRoutes.yaml") 2>&1) || +BAD_CONFIG_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/Redirect.golden.proto.json" "-t" "${PATH_CONFIG}/TestRoutes.yaml" 2>&1) || echo "${BAD_CONFIG_OUTPUT:-no-output}" if [[ "${BAD_CONFIG_OUTPUT}" != *"Protobuf message (type envoy.config.route.v3.RouteConfiguration reason INVALID_ARGUMENT:tests: Cannot find field.) has unknown fields"* ]]; then exit 1 diff --git a/test/tools/router_check/validation.proto b/test/tools/router_check/validation.proto index 9702e223f1a5..d88774641c31 100644 --- a/test/tools/router_check/validation.proto +++ b/test/tools/router_check/validation.proto @@ -21,7 +21,7 @@ message Validation { message ValidationItem { // Name of the test case. There is no uniqueness constraint among the test case names. // The name has to be non empty. - string test_name = 1 [(validate.rules).string.min_bytes = 1]; + string test_name = 1 [(validate.rules).string.min_len = 1]; // The input constraints of the test case. ValidationInput input = 2 [(validate.rules).message.required = true]; @@ -38,17 +38,17 @@ message ValidationInput { // This pseudo-header field includes the authority portion of the target URI. // Clients that generate HTTP/2 requests directly SHOULD use the :authority pseudo-header field // instead of the Host header field. 
- string authority = 1 [(validate.rules).string.min_bytes = 1]; + string authority = 1 [(validate.rules).string.min_len = 1]; // The :path pseudo-header field includes the path and query parts of the target URI. // This pseudo-header field MUST NOT be empty for http or https URIs. // http or https URIs that do not contain a path component MUST include a value of '/' // The exception to this rule is an OPTIONS request for an http or https URI that does not include // a path component. - string path = 2 [(validate.rules).string.min_bytes = 1]; + string path = 2 [(validate.rules).string.min_len = 1]; // This pseudo-header field includes the HTTP method. - string method = 4 [(validate.rules).string.min_bytes = 3]; + string method = 4 [(validate.rules).string.min_len = 3]; // An integer used to identify the target for weighted cluster selection. // The default value of random_value is 0. diff --git a/test/tools/wee8_compile/BUILD b/test/tools/wee8_compile/BUILD index d1184b071750..0f9fa2f4cf55 100644 --- a/test/tools/wee8_compile/BUILD +++ b/test/tools/wee8_compile/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_test_binary", - "envoy_cc_test_library", + "envoy_cc_binary", + "envoy_cc_library", "envoy_package", ) @@ -9,12 +9,12 @@ licenses(["notice"]) # Apache 2 envoy_package() -envoy_cc_test_binary( +envoy_cc_binary( name = "wee8_compile_tool", deps = [":wee8_compile_lib"], ) -envoy_cc_test_library( +envoy_cc_library( name = "wee8_compile_lib", srcs = ["wee8_compile.cc"], external_deps = ["wee8"], diff --git a/test/tools/wee8_compile/wee8_compile.cc b/test/tools/wee8_compile/wee8_compile.cc index 499311b5418b..42cbfea08a18 100644 --- a/test/tools/wee8_compile/wee8_compile.cc +++ b/test/tools/wee8_compile/wee8_compile.cc @@ -1,14 +1,3 @@ -/* - * A tool to precompile Wasm modules. - * - * This is accomplished by loading and instantiating the Wasm module, serializing - * the V8 Isolate containing compiled code, and saving it in Wasm module's Custom - * Section under the "precompiled_v8_v_" name. - * - * Such precompiled Wasm module can be deserialized and loaded by V8, without the - * need to compile Wasm bytecode each time it's loaded. - */ - // NOLINT(namespace-envoy) #include @@ -21,19 +10,16 @@ #include "v8-version.h" #include "wasm-api/wasm.hh" -uint32_t InvalidVarint = ~uint32_t{0}; - -uint32_t parseVarint(const byte_t** pos, const byte_t* end) { +uint32_t parseVarint(const byte_t*& pos, const byte_t* end) { uint32_t n = 0; uint32_t shift = 0; byte_t b; do { - if (*pos >= end) { - return InvalidVarint; + if (pos + 1 > end) { + return static_cast(-1); } - b = **pos; - (*pos)++; + b = *pos++; n += (b & 0x7f) << shift; shift += 7; } while ((b & 0x80) != 0); @@ -45,11 +31,15 @@ wasm::vec getVarint(uint32_t value) { byte_t bytes[5]; int pos = 0; - while (value >= 0x80) { - bytes[pos++] = static_cast(0x80 | (value & 0x7f)); + while (pos < 5) { + if ((value & ~0x7F) == 0) { + bytes[pos++] = static_cast(value); + break; + } + + bytes[pos++] = static_cast(value & 0x7F) | 0x80; value >>= 7; } - bytes[pos++] = static_cast(value & 0x7f); auto vec = wasm::vec::make_uninitialized(pos); ::memcpy(vec.get(), bytes, pos); @@ -79,20 +69,24 @@ wasm::vec readWasmModule(const char* path, const std::string& name) { return wasm::vec::invalid(); } - // Parse Custom Sections to see if precompiled module already exists. + // Parse custom sections to see if precompiled module already exists. 
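  // (Aside, not part of this change: after the 8-byte header, a Wasm module is a sequence of
  // sections, each a 1-byte section id followed by a LEB128-encoded payload size; a custom
  // section (id 0) then starts its payload with a LEB128 name length and the name bytes.
  // For example, a 300-byte custom section begins 0x00 0xAC 0x02, since 300 = 0x2C | (0x02 << 7)
  // and the low group carries the continuation bit: 0x80 | 0x2C = 0xAC.)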
const byte_t* pos = content.get() + 8 /* Wasm header */; const byte_t* end = content.get() + content.size(); while (pos < end) { - const byte_t section_type = *pos++; - const uint32_t section_len = parseVarint(&pos, end); - if (section_len == InvalidVarint || section_len > static_cast(end - pos)) { + if (pos + 1 > end) { std::cerr << "ERROR: Failed to parse corrupted Wasm module from: " << path << std::endl; return wasm::vec::invalid(); } - if (section_type == 0 /* Custom Section */) { - const byte_t* section_data_start = pos; - const uint32_t section_name_len = parseVarint(&pos, end); - if (section_name_len == InvalidVarint || section_name_len > static_cast(end - pos)) { + const auto section_type = *pos++; + const auto section_len = parseVarint(pos, end); + if (section_len == static_cast(-1) || pos + section_len > end) { + std::cerr << "ERROR: Failed to parse corrupted Wasm module from: " << path << std::endl; + return wasm::vec::invalid(); + } + if (section_type == 0 /* custom section */) { + const auto section_data_start = pos; + const auto section_name_len = parseVarint(pos, end); + if (section_name_len == static_cast(-1) || pos + section_name_len > end) { std::cerr << "ERROR: Failed to parse corrupted Wasm module from: " << path << std::endl; return wasm::vec::invalid(); } @@ -121,14 +115,18 @@ wasm::vec stripWasmModule(const wasm::vec& module) { pos += 8; while (pos < end) { - const byte_t* section_start = pos; - const byte_t section_type = *pos++; - const uint32_t section_len = parseVarint(&pos, end); - if (section_len == InvalidVarint || section_len > static_cast(end - pos)) { + const auto section_start = pos; + if (pos + 1 > end) { std::cerr << "ERROR: Failed to parse corrupted Wasm module." << std::endl; return wasm::vec::invalid(); } - if (section_type != 0 /* Custom Section */) { + const auto section_type = *pos++; + const auto section_len = parseVarint(pos, end); + if (section_len == static_cast(-1) || pos + section_len > end) { + std::cerr << "ERROR: Failed to parse corrupted Wasm module." << std::endl; + return wasm::vec::invalid(); + } + if (section_type != 0 /* custom section */) { stripped.insert(stripped.end(), section_start, pos + section_len); } pos += section_len; @@ -156,8 +154,7 @@ wasm::vec serializeWasmModule(const char* path, const wasm::vec& return wasm::vec::invalid(); } - // TODO(PiotrSikora): figure out how to wait until the backgrounded (optimized) compilation is - // finished, or ideally, how to run the optimized synchronous compilation right away. + // TODO(PiotrSikora): figure out how to hook the completion callback. 
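  // (Aside, not part of this change: the 3s sleep gives V8's background optimized compilation a
  // chance to finish before serialize() captures the Isolate, as the removed comment explained.)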
sleep(3); return module->serialize(); @@ -167,11 +164,11 @@ bool writeWasmModule(const char* path, const wasm::vec& module, size_t s const std::string& section_name, const wasm::vec& serialized) { auto file = std::fstream(path, std::ios::out | std::ios::binary); file.write(module.get(), module.size()); - const char section_type = '\0'; // Custom Section + const char section_type = '\0'; // custom section file.write(§ion_type, 1); - const wasm::vec section_name_len = getVarint(static_cast(section_name.size())); - const wasm::vec section_size = getVarint( - static_cast(section_name_len.size() + section_name.size() + serialized.size())); + const auto section_name_len = getVarint(section_name.size()); + const auto section_size = + getVarint(section_name_len.size() + section_name.size() + serialized.size()); file.write(section_size.get(), section_size.size()); file.write(section_name_len.get(), section_name_len.size()); file.write(section_name.data(), section_name.size()); @@ -183,48 +180,46 @@ bool writeWasmModule(const char* path, const wasm::vec& module, size_t s return false; } - const size_t total_size = module.size() + 1 + section_size.size() + section_name_len.size() + - section_name.size() + serialized.size(); + const auto total_size = module.size() + 1 + section_size.size() + section_name_len.size() + + section_name.size() + serialized.size(); std::cout << "Written " << total_size << " bytes (bytecode: " << stripped_module_size << " bytes," << " precompiled: " << serialized.size() << " bytes)." << std::endl; return true; } #if defined(__linux__) && defined(__x86_64__) -#define WEE8_WASM_PRECOMPILE_PLATFORM "linux_x86_64" -#endif - -#ifndef WEE8_WASM_PRECOMPILE_PLATFORM - -int main(int, char**) { - std::cerr << "Unsupported platform." << std::endl; - return EXIT_FAILURE; -} - +#define WEE8_PLATFORM "linux_x86_64" #else +#define WEE8_PLATFORM "" +#endif int main(int argc, char* argv[]) { + if (sizeof(WEE8_PLATFORM) - 1 == 0) { + std::cerr << "Unsupported platform." << std::endl; + return EXIT_FAILURE; + } + if (argc != 3) { std::cerr << "Usage: " << argv[0] << " " << std::endl; return EXIT_FAILURE; } - const std::string section_name = - "precompiled_v8_v" + std::to_string(V8_MAJOR_VERSION) + "." + - std::to_string(V8_MINOR_VERSION) + "." + std::to_string(V8_BUILD_NUMBER) + "." + - std::to_string(V8_PATCH_LEVEL) + "_" + WEE8_WASM_PRECOMPILE_PLATFORM; + const std::string section_name = "precompiled_wee8_v" + std::to_string(V8_MAJOR_VERSION) + "." + + std::to_string(V8_MINOR_VERSION) + "." + + std::to_string(V8_BUILD_NUMBER) + "." 
+ + std::to_string(V8_PATCH_LEVEL) + "_" + WEE8_PLATFORM; - const wasm::vec module = readWasmModule(argv[1], section_name); + const auto module = readWasmModule(argv[1], section_name); if (!module) { return EXIT_FAILURE; } - const wasm::vec stripped_module = stripWasmModule(module); + const auto stripped_module = stripWasmModule(module); if (!stripped_module) { return EXIT_FAILURE; } - const wasm::vec serialized = serializeWasmModule(argv[1], stripped_module); + const auto serialized = serializeWasmModule(argv[1], stripped_module); if (!serialized) { return EXIT_FAILURE; } @@ -235,5 +230,3 @@ int main(int argc, char* argv[]) { return EXIT_SUCCESS; } - -#endif diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py index 620e80bb32c5..5b25de2dbb0a 100755 --- a/tools/api/generate_go_protobuf.py +++ b/tools/api/generate_go_protobuf.py @@ -4,17 +4,21 @@ from subprocess import check_call import glob import os +import shlex import shutil import sys import re +# Needed for CI to pass down bazel options. +BAZEL_BUILD_OPTIONS = shlex.split(os.environ.get('BAZEL_BUILD_OPTIONS', '')) + TARGETS = '@envoy_api//...' IMPORT_BASE = 'github.com/envoyproxy/go-control-plane' OUTPUT_BASE = 'build_go' REPO_BASE = 'go-control-plane' BRANCH = 'master' MIRROR_MSG = 'Mirrored from envoyproxy/envoy @ ' -USER_NAME = 'go-control-plane(CircleCI)' +USER_NAME = 'go-control-plane(Azure Pipelines)' USER_EMAIL = 'go-control-plane@users.noreply.github.com' @@ -28,7 +32,11 @@ def generateProtobufs(output): # Each rule has the form @envoy_api//foo/bar:baz_go_proto. # First build all the rules to ensure we have the output files. - check_call(['bazel', 'build', '-c', 'fastbuild'] + go_protos) + # We preserve source info so comments are retained on generated code. + check_call([ + 'bazel', 'build', '-c', 'fastbuild', + '--experimental_proto_descriptor_sets_include_source_info' + ] + BAZEL_BUILD_OPTIONS + go_protos) for rule in go_protos: # Example rule: @@ -63,9 +71,7 @@ def git(repo, *args): def cloneGoProtobufs(repo): # Create a local clone of go-control-plane - git(None, 'clone', 'git@github.com:envoyproxy/go-control-plane', repo) - git(repo, 'fetch') - git(repo, 'checkout', '-B', BRANCH, 'origin/master') + git(None, 'clone', 'git@github.com:envoyproxy/go-control-plane', repo, '-b', BRANCH) def findLastSyncSHA(repo): diff --git a/tools/api_boost/api_boost.py b/tools/api_boost/api_boost.py index eda6eaf94088..5cd9846bcf21 100755 --- a/tools/api_boost/api_boost.py +++ b/tools/api_boost/api_boost.py @@ -132,7 +132,6 @@ def ApiBoostTree(target_paths, sp.run([ 'bazel', 'build', - '--config=libc++', '--strip=always', ] + BAZEL_BUILD_OPTIONS + dep_lib_build_targets, check=True) diff --git a/tools/check_repositories.sh b/tools/check_repositories.sh index e3f1c1f82b17..0503f8153f67 100755 --- a/tools/check_repositories.sh +++ b/tools/check_repositories.sh @@ -4,7 +4,7 @@ set -eu # Check whether any git repositories are defined. # Git repository definition contains `commit` and `remote` fields. -if git grep -n "commit =\|remote =" *.bzl; then +if git grep -n "commit =\|remote =" -- '*.bzl'; then echo "Using git repositories is not allowed." echo "To ensure that all dependencies can be stored offline in distdir, only HTTP repositories are allowed." exit 1 @@ -12,10 +12,10 @@ fi # Check whether number of defined `url =` or `urls =` and `sha256 =` kwargs in # repository definitions is equal. -urls_count=$(git grep -E "\` +REF_WITH_PUNCTUATION_REGEX = re.compile(".*\. 
<[^<]*>`\s*") +DOT_MULTI_SPACE_REGEX = re.compile("\\. +") # yapf: disable PROTOBUF_TYPE_ERRORS = { @@ -183,841 +226,826 @@ # yapf: enable -# Map a line transformation function across each line of a file, -# writing the result lines as requested. -# If there is a clang format nesting or mismatch error, return the first occurrence -def evaluateLines(path, line_xform, write=True): - error_message = None - format_flag = True - output_lines = [] - for line_number, line in enumerate(readLines(path)): - if line.find("// clang-format off") != -1: - if not format_flag and error_message is None: - error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format nested off") - format_flag = False - if line.find("// clang-format on") != -1: - if format_flag and error_message is None: - error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format nested on") - format_flag = True - if format_flag: - output_lines.append(line_xform(line, line_number)) - else: - output_lines.append(line) - # We used to use fileinput in the older Python 2.7 script, but this doesn't do - # inplace mode and UTF-8 in Python 3, so doing it the manual way. - if write: - pathlib.Path(path).write_text('\n'.join(output_lines), encoding='utf-8') - if not format_flag and error_message is None: - error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format remains off") - return error_message - - -# Obtain all the lines in a given file. -def readLines(path): - return readFile(path).split('\n') - - -# Read a UTF-8 encoded file as a str. -def readFile(path): - return pathlib.Path(path).read_text(encoding='utf-8') - - -# lookPath searches for the given executable in all directories in PATH -# environment variable. If it cannot be found, empty string is returned. -def lookPath(executable): - return shutil.which(executable) or '' - - -# pathExists checks whether the given path exists. This function assumes that -# the path is absolute and evaluates environment variables. -def pathExists(executable): - return os.path.exists(os.path.expandvars(executable)) +class FormatChecker: - -# executableByOthers checks whether the given path has execute permission for -# others. -def executableByOthers(executable): - st = os.stat(os.path.expandvars(executable)) - return bool(st.st_mode & stat.S_IXOTH) - - -# Check whether all needed external tools (clang-format, buildifier, buildozer) are -# available. -def checkTools(): - error_messages = [] - - clang_format_abs_path = lookPath(CLANG_FORMAT_PATH) - if clang_format_abs_path: - if not executableByOthers(clang_format_abs_path): - error_messages.append("command {} exists, but cannot be executed by other " - "users".format(CLANG_FORMAT_PATH)) - else: - error_messages.append( - "Command {} not found. If you have clang-format in version 10.x.x " - "installed, but the binary name is different or it's not available in " - "PATH, please use CLANG_FORMAT environment variable to specify the path. 
" - "Examples:\n" - " export CLANG_FORMAT=clang-format-10.0.0\n" - " export CLANG_FORMAT=/opt/bin/clang-format-10\n" - " export CLANG_FORMAT=/usr/local/opt/llvm@10/bin/clang-format".format(CLANG_FORMAT_PATH)) - - def checkBazelTool(name, path, var): - bazel_tool_abs_path = lookPath(path) - if bazel_tool_abs_path: - if not executableByOthers(bazel_tool_abs_path): - error_messages.append("command {} exists, but cannot be executed by other " - "users".format(path)) - elif pathExists(path): - if not executableByOthers(path): + def __init__(self, args): + self.operation_type = args.operation_type + self.target_path = args.target_path + self.api_prefix = args.api_prefix + self.api_shadow_root = args.api_shadow_prefix + self.envoy_build_rule_check = not args.skip_envoy_build_rule_check + self.namespace_check = args.namespace_check + self.namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ + "./tools/api_boost/testdata/", + "./tools/clang_tools/", + ] + self.build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [ + "./bazel/external/", + "./bazel/toolchains/", + "./bazel/BUILD", + "./tools/clang_tools", + ] + self.include_dir_order = args.include_dir_order + + # Map a line transformation function across each line of a file, + # writing the result lines as requested. + # If there is a clang format nesting or mismatch error, return the first occurrence + def evaluateLines(self, path, line_xform, write=True): + error_message = None + format_flag = True + output_lines = [] + for line_number, line in enumerate(self.readLines(path)): + if line.find("// clang-format off") != -1: + if not format_flag and error_message is None: + error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format nested off") + format_flag = False + if line.find("// clang-format on") != -1: + if format_flag and error_message is None: + error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format nested on") + format_flag = True + if format_flag: + output_lines.append(line_xform(line, line_number)) + else: + output_lines.append(line) + # We used to use fileinput in the older Python 2.7 script, but this doesn't do + # inplace mode and UTF-8 in Python 3, so doing it the manual way. + if write: + pathlib.Path(path).write_text('\n'.join(output_lines), encoding='utf-8') + if not format_flag and error_message is None: + error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format remains off") + return error_message + + # Obtain all the lines in a given file. + def readLines(self, path): + return self.readFile(path).split('\n') + + # Read a UTF-8 encoded file as a str. + def readFile(self, path): + return pathlib.Path(path).read_text(encoding='utf-8') + + # lookPath searches for the given executable in all directories in PATH + # environment variable. If it cannot be found, empty string is returned. + def lookPath(self, executable): + return shutil.which(executable) or '' + + # pathExists checks whether the given path exists. This function assumes that + # the path is absolute and evaluates environment variables. + def pathExists(self, executable): + return os.path.exists(os.path.expandvars(executable)) + + # executableByOthers checks whether the given path has execute permission for + # others. + def executableByOthers(self, executable): + st = os.stat(os.path.expandvars(executable)) + return bool(st.st_mode & stat.S_IXOTH) + + # Check whether all needed external tools (clang-format, buildifier, buildozer) are + # available. 
+ def checkTools(self): + error_messages = [] + + clang_format_abs_path = self.lookPath(CLANG_FORMAT_PATH) + if clang_format_abs_path: + if not self.executableByOthers(clang_format_abs_path): error_messages.append("command {} exists, but cannot be executed by other " - "users".format(path)) + "users".format(CLANG_FORMAT_PATH)) else: + error_messages.append( + "Command {} not found. If you have clang-format in version 10.x.x " + "installed, but the binary name is different or it's not available in " + "PATH, please use CLANG_FORMAT environment variable to specify the path. " + "Examples:\n" + " export CLANG_FORMAT=clang-format-10.0.0\n" + " export CLANG_FORMAT=/opt/bin/clang-format-10\n" + " export CLANG_FORMAT=/usr/local/opt/llvm@10/bin/clang-format".format( + CLANG_FORMAT_PATH)) + + def checkBazelTool(name, path, var): + bazel_tool_abs_path = self.lookPath(path) + if bazel_tool_abs_path: + if not self.executableByOthers(bazel_tool_abs_path): + error_messages.append("command {} exists, but cannot be executed by other " + "users".format(path)) + elif self.pathExists(path): + if not self.executableByOthers(path): + error_messages.append("command {} exists, but cannot be executed by other " + "users".format(path)) + else: - error_messages.append("Command {} not found. If you have {} installed, but the binary " - "name is different or it's not available in $GOPATH/bin, please use " - "{} environment variable to specify the path. Example:\n" - " export {}=`which {}`\n" - "If you don't have {} installed, you can install it by:\n" - " go get -u github.com/bazelbuild/buildtools/{}".format( - path, name, var, var, name, name, name)) - - checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN') - checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN') - - return error_messages - - -def checkNamespace(file_path): - for excluded_path in namespace_check_excluded_paths: - if file_path.startswith(excluded_path): - return [] - - nolint = "NOLINT(namespace-%s)" % namespace_check.lower() - text = readFile(file_path) - if not re.search("^\s*namespace\s+%s\s*{" % namespace_check, text, re.MULTILINE) and \ - not nolint in text: - return ["Unable to find %s namespace or %s for file: %s" % (namespace_check, nolint, file_path)] - return [] + error_messages.append("Command {} not found. If you have {} installed, but the binary " + "name is different or it's not available in $GOPATH/bin, please use " + "{} environment variable to specify the path. 
Example:\n" + " export {}=`which {}`\n" + "If you don't have {} installed, you can install it by:\n" + " go get -u github.com/bazelbuild/buildtools/{}".format( + path, name, var, var, name, name, name)) + checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN') + checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN') -def packageNameForProto(file_path): - package_name = None - error_message = [] - result = PROTO_PACKAGE_REGEX.search(readFile(file_path)) - if result is not None and len(result.groups()) == 1: - package_name = result.group(1) - if package_name is None: - error_message = ["Unable to find package name for proto file: %s" % file_path] + return error_messages - return [package_name, error_message] + def checkNamespace(self, file_path): + for excluded_path in self.namespace_check_excluded_paths: + if file_path.startswith(excluded_path): + return [] + + nolint = "NOLINT(namespace-%s)" % self.namespace_check.lower() + text = self.readFile(file_path) + if not re.search("^\s*namespace\s+%s\s*{" % self.namespace_check, text, re.MULTILINE) and \ + not nolint in text: + return [ + "Unable to find %s namespace or %s for file: %s" % + (self.namespace_check, nolint, file_path) + ] + return [] + def packageNameForProto(self, file_path): + package_name = None + error_message = [] + result = PROTO_PACKAGE_REGEX.search(self.readFile(file_path)) + if result is not None and len(result.groups()) == 1: + package_name = result.group(1) + if package_name is None: + error_message = ["Unable to find package name for proto file: %s" % file_path] -# To avoid breaking the Lyft import, we just check for path inclusion here. -def allowlistedForProtobufDeps(file_path): - return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \ - any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_ALLOWLIST)) + return [package_name, error_message] + # To avoid breaking the Lyft import, we just check for path inclusion here. + def allowlistedForProtobufDeps(self, file_path): + return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \ + any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_ALLOWLIST)) -# Real-world time sources should not be instantiated in the source, except for a few -# specific cases. They should be passed down from where they are instantied to where -# they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager. -def allowlistedForRealTime(file_path): - if file_path.endswith(".md"): - return True - return file_path in REAL_TIME_ALLOWLIST + # Real-world time sources should not be instantiated in the source, except for a few + # specific cases. They should be passed down from where they are instantied to where + # they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager. 
+ def allowlistedForRealTime(self, file_path): + if file_path.endswith(".md"): + return True + return file_path in REAL_TIME_ALLOWLIST + def allowlistedForRegisterFactory(self, file_path): + if not file_path.startswith("./test/"): + return True -def allowlistedForRegisterFactory(file_path): - if not file_path.startswith("./test/"): - return True + return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_ALLOWLIST) - return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_ALLOWLIST) + def allowlistedForSerializeAsString(self, file_path): + return file_path in SERIALIZE_AS_STRING_ALLOWLIST or file_path.endswith(DOCS_SUFFIX) + def allowlistedForJsonStringToMessage(self, file_path): + return file_path in JSON_STRING_TO_MESSAGE_ALLOWLIST -def allowlistedForSerializeAsString(file_path): - return file_path in SERIALIZE_AS_STRING_ALLOWLIST or file_path.endswith(DOCS_SUFFIX) + def allowlistedForHistogramSiSuffix(self, name): + return name in HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST + def allowlistedForStdRegex(self, file_path): + return file_path.startswith("./test") or file_path in STD_REGEX_ALLOWLIST or file_path.endswith( + DOCS_SUFFIX) -def allowlistedForJsonStringToMessage(file_path): - return file_path in JSON_STRING_TO_MESSAGE_ALLOWLIST + def allowlistedForGrpcInit(self, file_path): + return file_path in GRPC_INIT_ALLOWLIST + def allowlistedForUnpackTo(self, file_path): + return file_path.startswith("./test") or file_path in [ + "./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h" + ] -def allowlistedForHistogramSiSuffix(name): - return name in HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST + def denylistedForExceptions(self, file_path): + # Returns true when it is a non test header file or the file_path is in DENYLIST or + # it is under toos/testdata subdirectory. + if file_path.endswith(DOCS_SUFFIX): + return False + return (file_path.endswith('.h') and not file_path.startswith("./test/")) or file_path in EXCEPTION_DENYLIST \ + or self.isInSubdir(file_path, 'tools/testdata') -def allowlistedForStdRegex(file_path): - return file_path.startswith("./test") or file_path in STD_REGEX_ALLOWLIST or file_path.endswith( - DOCS_SUFFIX) + def allowlistedForBuildUrls(self, file_path): + return file_path in BUILD_URLS_ALLOWLIST + def isApiFile(self, file_path): + return file_path.startswith(self.api_prefix) or file_path.startswith(self.api_shadow_root) -def allowlistedForGrpcInit(file_path): - return file_path in GRPC_INIT_ALLOWLIST + def isBuildFile(self, file_path): + basename = os.path.basename(file_path) + if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"): + return True + return False + def isExternalBuildFile(self, file_path): + return self.isBuildFile(file_path) and (file_path.startswith("./bazel/external/") or + file_path.startswith("./tools/clang_tools")) -def allowlistedForUnpackTo(file_path): - return file_path.startswith("./test") or file_path in [ - "./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h" - ] + def isStarlarkFile(self, file_path): + return file_path.endswith(".bzl") + def isWorkspaceFile(self, file_path): + return os.path.basename(file_path) == "WORKSPACE" -def denylistedForExceptions(file_path): - # Returns true when it is a non test header file or the file_path is in DENYLIST or - # it is under toos/testdata subdirectory. 
- if file_path.endswith(DOCS_SUFFIX): + def isBuildFixerExcludedFile(self, file_path): + for excluded_path in self.build_fixer_check_excluded_paths: + if file_path.startswith(excluded_path): + return True return False - return (file_path.endswith('.h') and not file_path.startswith("./test/")) or file_path in EXCEPTION_DENYLIST \ - or isInSubdir(file_path, 'tools/testdata') - + def hasInvalidAngleBracketDirectory(self, line): + if not line.startswith(INCLUDE_ANGLE): + return False + path = line[INCLUDE_ANGLE_LEN:] + slash = path.find("/") + if slash == -1: + return False + subdir = path[0:slash] + return subdir in SUBDIR_SET + + def checkCurrentReleaseNotes(self, file_path, error_messages): + first_word_of_prior_line = '' + next_word_to_check = '' # first word after : + prior_line = '' + + def endsWithPeriod(prior_line): + if not prior_line: + return True # Don't punctuation-check empty lines. + if prior_line.endswith('.'): + return True # Actually ends with . + if prior_line.endswith('`') and REF_WITH_PUNCTUATION_REGEX.match(prior_line): + return True # The text in the :ref ends with a . + return False + + for line_number, line in enumerate(self.readLines(file_path)): + + def reportError(message): + error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) + + if VERSION_HISTORY_SECTION_NAME.match(line): + if line == "Deprecated": + # The deprecations section is last, and does not have enforced formatting. + break + + # Reset all parsing at the start of a section. + first_word_of_prior_line = '' + next_word_to_check = '' # first word after : + prior_line = '' + + invalid_reflink_match = INVALID_REFLINK.match(line) + if invalid_reflink_match: + reportError("Found text \" ref:\". This should probably be \" :ref:\"\n%s" % line) + + # make sure flags are surrounded by ``s + flag_match = RELOADABLE_FLAG_REGEX.match(line) + if flag_match: + if not flag_match.groups()[0].startswith(' `'): + reportError("Flag `%s` should be enclosed in a single set of back ticks" % + flag_match.groups()[1]) + + if line.startswith("* "): + if not endsWithPeriod(prior_line): + reportError("The following release note does not end with a '.'\n %s" % prior_line) + + match = VERSION_HISTORY_NEW_LINE_REGEX.match(line) + if not match: + reportError("Version history line malformed. " + "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % + line) + else: + first_word = match.groups()[0] + next_word = match.groups()[1] + # Do basic alphabetization checks of the first word on the line and the + # first word after the : + if first_word_of_prior_line and first_word_of_prior_line > first_word: + reportError( + "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. " + % (first_word_of_prior_line, first_word, line)) + if first_word_of_prior_line == first_word and next_word_to_check and next_word_to_check > next_word: + reportError( + "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. " + % (next_word_to_check, next_word, line)) + first_word_of_prior_line = first_word + next_word_to_check = next_word + + prior_line = line + elif not line: + # If we hit the end of this release note block block, check the prior line. 
+ if not endsWithPeriod(prior_line): + reportError("The following release note does not end with a '.'\n %s" % prior_line) + elif prior_line: + prior_line += line + + def checkFileContents(self, file_path, checker): + error_messages = [] + + if file_path.endswith("version_history/current.rst"): + # Version file checking has enough special cased logic to merit its own checks. + # This only validates entries for the current release as very old release + # notes have a different format. + self.checkCurrentReleaseNotes(file_path, error_messages) + + def checkFormatErrors(line, line_number): + + def reportError(message): + error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) + + checker(line, file_path, reportError) + + evaluate_failure = self.evaluateLines(file_path, checkFormatErrors, False) + if evaluate_failure is not None: + error_messages.append(evaluate_failure) -def findSubstringAndReturnError(pattern, file_path, error_message): - text = readFile(file_path) - if pattern in text: - error_messages = [file_path + ": " + error_message] - for i, line in enumerate(text.splitlines()): - if pattern in line: - error_messages.append(" %s:%s" % (file_path, i + 1)) return error_messages - return [] - - -def errorIfNoSubstringFound(pattern, file_path, error_message): - return [] if pattern in readFile(file_path) else [file_path + ": " + error_message] - -def isApiFile(file_path): - return file_path.startswith(args.api_prefix) or file_path.startswith(args.api_shadow_prefix) - - -def isBuildFile(file_path): - basename = os.path.basename(file_path) - if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"): + def fixSourceLine(self, line, line_number): + # Strip double space after '.' This may prove overenthusiastic and need to + # be restricted to comments and metadata files but works for now. + line = re.sub(DOT_MULTI_SPACE_REGEX, ". ", line) + + if self.hasInvalidAngleBracketDirectory(line): + line = line.replace("<", '"').replace(">", '"') + + # Fix incorrect protobuf namespace references. + for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items(): + line = line.replace(invalid_construct, valid_construct) + + # Use recommended cpp stdlib + for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items(): + line = line.replace(invalid_construct, valid_construct) + + return line + + # We want to look for a call to condvar.waitFor, but there's no strong pattern + # to the variable name of the condvar. If we just look for ".waitFor" we'll also + # pick up time_system_.waitFor(...), and we don't want to return true for that + # pattern. But in that case there is a strong pattern of using time_system in + # various spellings as the variable name. 
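  # (Aside, not part of this change: e.g. "cv_.waitFor(mutex_, timeout)" should be flagged,
  # while "time_system_.waitFor(...)" and "timeSystem().waitFor(...)" are let through.)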
+ def hasCondVarWaitFor(self, line): + wait_for = line.find(".waitFor(") + if wait_for == -1: + return False + preceding = line[0:wait_for] + if preceding.endswith("time_system") or preceding.endswith("timeSystem()") or \ + preceding.endswith("time_system_"): + return False return True - return False - - -def isExternalBuildFile(file_path): - return isBuildFile(file_path) and (file_path.startswith("./bazel/external/") or - file_path.startswith("./tools/clang_tools")) - -def isStarlarkFile(file_path): - return file_path.endswith(".bzl") - - -def isWorkspaceFile(file_path): - return os.path.basename(file_path) == "WORKSPACE" - - -def isBuildFixerExcludedFile(file_path): - for excluded_path in build_fixer_check_excluded_paths: - if file_path.startswith(excluded_path): + # Determines whether the filename is either in the specified subdirectory, or + # at the top level. We consider files in the top level for the benefit of + # the check_format testcases in tools/testdata/check_format. + def isInSubdir(self, filename, *subdirs): + # Skip this check for check_format's unit-tests. + if filename.count("/") <= 1: return True - return False - - -def hasInvalidAngleBracketDirectory(line): - if not line.startswith(INCLUDE_ANGLE): - return False - path = line[INCLUDE_ANGLE_LEN:] - slash = path.find("/") - if slash == -1: - return False - subdir = path[0:slash] - return subdir in SUBDIR_SET - - -VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* ([a-z \-_]+): ([a-z:`]+)") -VERSION_HISTORY_SECTION_NAME = re.compile("^[A-Z][A-Za-z ]*$") -RELOADABLE_FLAG_REGEX = re.compile(".*(.)(envoy.reloadable_features.[^ ]*)\s.*") -# Check for punctuation in a terminal ref clause, e.g. -# :ref:`panic mode. ` -REF_WITH_PUNCTUATION_REGEX = re.compile(".*\. <[^<]*>`\s*") - - -def checkCurrentReleaseNotes(file_path, error_messages): - first_word_of_prior_line = '' - next_word_to_check = '' # first word after : - prior_line = '' - - def endsWithPeriod(prior_line): - if not prior_line: - return True # Don't punctuation-check empty lines. - if prior_line.endswith('.'): - return True # Actually ends with . - if prior_line.endswith('`') and REF_WITH_PUNCTUATION_REGEX.match(prior_line): - return True # The text in the :ref ends with a . + for subdir in subdirs: + if filename.startswith('./' + subdir + '/'): + return True return False - for line_number, line in enumerate(readLines(file_path)): - - def reportError(message): - error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) - - if VERSION_HISTORY_SECTION_NAME.match(line): - if line == "Deprecated": - # The deprecations section is last, and does not have enforced formatting. + # Determines if given token exists in line without leading or trailing token characters + # e.g. will return True for a line containing foo() but not foo_bar() or baz_foo + def tokenInLine(self, token, line): + index = 0 + while True: + index = line.find(token, index) + # the following check has been changed from index < 1 to index < 0 because + # this function incorrectly returns false when the token in question is the + # first one in a line. The following line returns false when the token is present: + # (no leading whitespace) violating_symbol foo; + if index < 0: break + if index == 0 or not (line[index - 1].isalnum() or line[index - 1] == '_'): + if index + len(token) >= len(line) or not (line[index + len(token)].isalnum() or + line[index + len(token)] == '_'): + return True + index = index + 1 + return False - # Reset all parsing at the start of a section. 
- first_word_of_prior_line = '' - next_word_to_check = '' # first word after : - prior_line = '' - - # make sure flags are surrounded by ``s - flag_match = RELOADABLE_FLAG_REGEX.match(line) - if flag_match: - if not flag_match.groups()[0].startswith('`'): - reportError("Flag `%s` should be enclosed in back ticks" % flag_match.groups()[1]) - - if line.startswith("* "): - if not endsWithPeriod(prior_line): - reportError("The following release note does not end with a '.'\n %s" % prior_line) - - match = VERSION_HISTORY_NEW_LINE_REGEX.match(line) - if not match: - reportError("Version history line malformed. " - "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % line) + def checkSourceLine(self, line, file_path, reportError): + # Check fixable errors. These may have been fixed already. + if line.find(". ") != -1: + reportError("over-enthusiastic spaces") + if self.isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line): + reportError( + "Please do not use the raw literal x-envoy in source code. See Envoy::Http::PrefixValue." + ) + if self.hasInvalidAngleBracketDirectory(line): + reportError("envoy includes should not have angle brackets") + for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items(): + if invalid_construct in line: + reportError("incorrect protobuf type reference %s; " + "should be %s" % (invalid_construct, valid_construct)) + for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items(): + if invalid_construct in line: + reportError("term %s should be replaced with standard library term %s" % + (invalid_construct, valid_construct)) + # Do not include the virtual_includes headers. + if re.search("#include.*/_virtual_includes/", line): + reportError("Don't include the virtual includes headers.") + + # Some errors cannot be fixed automatically, and actionable, consistent, + # navigable messages should be emitted to make it easy to find and fix + # the errors by hand. + if not self.allowlistedForProtobufDeps(file_path): + if '"google/protobuf' in line or "google::protobuf" in line: + reportError("unexpected direct dependency on google.protobuf, use " + "the definitions in common/protobuf/protobuf.h instead.") + if line.startswith("#include ") or line.startswith("#include or , switch to " + "Thread::MutexBasicLockable in source/common/common/thread.h") + if line.startswith("#include "): + # We don't check here for std::shared_timed_mutex because that may + # legitimately show up in comments, for example this one. + reportError("Don't use , use absl::Mutex for reader/writer locks.") + if not self.allowlistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line: + if "RealTimeSource" in line or \ + ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \ + "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \ + "std::this_thread::sleep_for" in line or self.hasCondVarWaitFor(line): + reportError("Don't reference real-world time sources from production code; use injection") + duration_arg = DURATION_VALUE_REGEX.search(line) + if duration_arg and duration_arg.group(1) != "0" and duration_arg.group(1) != "0.0": + # Matching duration(int-const or float-const) other than zero + reportError( + "Don't use ambiguous duration(value), use an explicit duration type, e.g. 
Event::TimeSystem::Milliseconds(value)" + ) + if not self.allowlistedForRegisterFactory(file_path): + if "Registry::RegisterFactory<" in line or "REGISTER_FACTORY" in line: + reportError("Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, " + "use Registry::InjectFactory instead.") + if not self.allowlistedForUnpackTo(file_path): + if "UnpackTo" in line: + reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead") + # Check that we use the absl::Time library + if self.tokenInLine("std::get_time", line): + if "test/" in file_path: + reportError("Don't use std::get_time; use TestUtility::parseTime in tests") else: - first_word = match.groups()[0] - next_word = match.groups()[1] - # Do basic alphabetization checks of the first word on the line and the - # first word after the : - if first_word_of_prior_line and first_word_of_prior_line > first_word: - reportError( - "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. " - % (first_word_of_prior_line, first_word, line)) - if first_word_of_prior_line == first_word and next_word_to_check and next_word_to_check > next_word: - reportError( - "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. " - % (next_word_to_check, next_word, line)) - first_word_of_prior_line = first_word - next_word_to_check = next_word - - prior_line = line - elif not line: - # If we hit the end of this release note block block, check the prior line. - if not endsWithPeriod(prior_line): - reportError("The following release note does not end with a '.'\n %s" % prior_line) - elif prior_line: - prior_line += line - - -def checkFileContents(file_path, checker): - error_messages = [] - - if file_path.endswith("version_history/current.rst"): - # Version file checking has enough special cased logic to merit its own checks. - # This only validates entries for the current release as very old release - # notes have a different format. - checkCurrentReleaseNotes(file_path, error_messages) - - def checkFormatErrors(line, line_number): - - def reportError(message): - error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) - - checker(line, file_path, reportError) - - evaluate_failure = evaluateLines(file_path, checkFormatErrors, False) - if evaluate_failure is not None: - error_messages.append(evaluate_failure) - - return error_messages - - -DOT_MULTI_SPACE_REGEX = re.compile("\\. +") + reportError("Don't use std::get_time; use the injectable time system") + if self.tokenInLine("std::put_time", line): + reportError("Don't use std::put_time; use absl::Time equivalent instead") + if self.tokenInLine("gmtime", line): + reportError("Don't use gmtime; use absl::Time equivalent instead") + if self.tokenInLine("mktime", line): + reportError("Don't use mktime; use absl::Time equivalent instead") + if self.tokenInLine("localtime", line): + reportError("Don't use localtime; use absl::Time equivalent instead") + if self.tokenInLine("strftime", line): + reportError("Don't use strftime; use absl::FormatTime instead") + if self.tokenInLine("strptime", line): + reportError("Don't use strptime; use absl::FormatTime instead") + if self.tokenInLine("strerror", line): + reportError("Don't use strerror; use Envoy::errorDetails instead") + # Prefer using abseil hash maps/sets over std::unordered_map/set for performance optimizations and + # non-deterministic iteration order that exposes faulty assertions. 
+ # See: https://abseil.io/docs/cpp/guides/container#hash-tables + if "std::unordered_map" in line: + reportError("Don't use std::unordered_map; use absl::flat_hash_map instead or " + "absl::node_hash_map if pointer stability of keys/values is required") + if "std::unordered_set" in line: + reportError("Don't use std::unordered_set; use absl::flat_hash_set instead or " + "absl::node_hash_set if pointer stability of keys/values is required") + if "std::atomic_" in line: + # The std::atomic_* free functions are functionally equivalent to calling + # operations on std::atomic objects, so prefer to use that instead. + reportError("Don't use free std::atomic_* functions, use std::atomic members instead.") + # Block usage of certain std types/functions as iOS 11 and macOS 10.13 + # do not support these at runtime. + # See: https://github.com/envoyproxy/envoy/issues/12341 + if self.tokenInLine("std::any", line): + reportError("Don't use std::any; use absl::any instead") + if self.tokenInLine("std::get_if", line): + reportError("Don't use std::get_if; use absl::get_if instead") + if self.tokenInLine("std::holds_alternative", line): + reportError("Don't use std::holds_alternative; use absl::holds_alternative instead") + if self.tokenInLine("std::make_optional", line): + reportError("Don't use std::make_optional; use absl::make_optional instead") + if self.tokenInLine("std::monostate", line): + reportError("Don't use std::monostate; use absl::monostate instead") + if self.tokenInLine("std::optional", line): + reportError("Don't use std::optional; use absl::optional instead") + if self.tokenInLine("std::string_view", line): + reportError("Don't use std::string_view; use absl::string_view instead") + if self.tokenInLine("std::variant", line): + reportError("Don't use std::variant; use absl::variant instead") + if self.tokenInLine("std::visit", line): + reportError("Don't use std::visit; use absl::visit instead") + if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h": + # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that + # can be used instead + reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined " + "in include/envoy/common/platform.h instead") + if DESIGNATED_INITIALIZER_REGEX.search(line): + # Designated initializers are not part of the C++14 standard and are not supported + # by MSVC + reportError("Don't use designated initializers in struct initialization, " + "they are not part of C++14") + if " ?: " in line: + # The ?: operator is non-standard, it is a GCC extension + reportError("Don't use the '?:' operator, it is a non-standard GCC extension") + if line.startswith("using testing::Test;"): + reportError("Don't use 'using testing::Test;, elaborate the type instead") + if line.startswith("using testing::TestWithParams;"): + reportError("Don't use 'using testing::Test;, elaborate the type instead") + if TEST_NAME_STARTING_LOWER_CASE_REGEX.search(line): + # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins + # with a lowercase letter. + reportError("Test names should be CamelCase, starting with a capital letter") + if not self.allowlistedForSerializeAsString(file_path) and "SerializeAsString" in line: + # The MessageLite::SerializeAsString doesn't generate deterministic serialization, + # use MessageUtil::hash instead. + reportError( + "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead." 
+ ) + if not self.allowlistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: + # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing + # behavior. + reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") + + if self.isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \ + ('.counterFromString(' in line or '.gaugeFromString(' in line or \ + '.histogramFromString(' in line or '.textReadoutFromString(' in line or \ + '->counterFromString(' in line or '->gaugeFromString(' in line or \ + '->histogramFromString(' in line or '->textReadoutFromString(' in line): + reportError("Don't lookup stats by name at runtime; use StatName saved during construction") + + if MANGLED_PROTOBUF_NAME_REGEX.search(line): + reportError("Don't use mangled Protobuf names for enum constants") + + hist_m = HISTOGRAM_SI_SUFFIX_REGEX.search(line) + if hist_m and not self.allowlistedForHistogramSiSuffix(hist_m.group(0)): + reportError( + "Don't suffix histogram names with the unit symbol, " + "it's already part of the histogram object and unit-supporting sinks can use this information natively, " + "other sinks can add the suffix automatically on flush should they prefer to do so.") + + if not self.allowlistedForStdRegex(file_path) and "std::regex" in line: + reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher") + + if not self.allowlistedForGrpcInit(file_path): + grpc_init_or_shutdown = line.find("grpc_init()") + grpc_shutdown = line.find("grpc_shutdown()") + if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and + grpc_shutdown < grpc_init_or_shutdown): + grpc_init_or_shutdown = grpc_shutdown + if grpc_init_or_shutdown != -1: + comment = line.find("// ") + if comment == -1 or comment > grpc_init_or_shutdown: + reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " + + "Grpc::GoogleGrpcContext. See #8282") + + if self.denylistedForExceptions(file_path): + # Skpping cases where 'throw' is a substring of a symbol like in "foothrowBar". + if "throw" in line.split(): + comment_match = COMMENT_REGEX.search(line) + if comment_match is None or comment_match.start(0) > line.find("throw"): + reportError("Don't introduce throws into exception-free files, use error " + + "statuses instead.") + + if "lua_pushlightuserdata" in line: + reportError( + "Don't use lua_pushlightuserdata, since it can cause unprotected error in call to" + + "Lua API (bad light userdata pointer) on ARM64 architecture. 
See " + + "https://github.com/LuaJIT/LuaJIT/issues/450#issuecomment-433659873 for details.") + + if file_path.endswith(PROTO_SUFFIX): + exclude_path = ['v1', 'v2', 'generated_api_shadow'] + result = PROTO_VALIDATION_STRING.search(line) + if result is not None: + if not any(x in file_path for x in exclude_path): + reportError("min_bytes is DEPRECATED, Use min_len.") + + def checkBuildLine(self, line, file_path, reportError): + if "@bazel_tools" in line and not (self.isStarlarkFile(file_path) or + file_path.startswith("./bazel/") or + "python/runfiles" in line): + reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel") + if not self.allowlistedForProtobufDeps(file_path) and '"protobuf"' in line: + reportError("unexpected direct external dependency on protobuf, use " + "//source/common/protobuf instead.") + if (self.envoy_build_rule_check and not self.isStarlarkFile(file_path) and + not self.isWorkspaceFile(file_path) and not self.isExternalBuildFile(file_path) and + "@envoy//" in line): + reportError("Superfluous '@envoy//' prefix") + if not self.allowlistedForBuildUrls(file_path) and (" urls = " in line or " url = " in line): + reportError("Only repository_locations.bzl may contains URL references") + + def fixBuildLine(self, file_path, line, line_number): + if (self.envoy_build_rule_check and not self.isStarlarkFile(file_path) and + not self.isWorkspaceFile(file_path) and not self.isExternalBuildFile(file_path)): + line = line.replace("@envoy//", "//") + return line + + def fixBuildPath(self, file_path): + self.evaluateLines(file_path, functools.partial(self.fixBuildLine, file_path)) + + error_messages = [] + + # TODO(htuch): Add API specific BUILD fixer script. + if not self.isBuildFixerExcludedFile(file_path) and not self.isApiFile( + file_path) and not self.isStarlarkFile(file_path) and not self.isWorkspaceFile(file_path): + if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0: + error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path] + + if os.system("%s -lint=fix -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: + error_messages += ["buildifier rewrite failed for file: %s" % file_path] + return error_messages + def checkBuildPath(self, file_path): + error_messages = [] + + if not self.isBuildFixerExcludedFile(file_path) and not self.isApiFile( + file_path) and not self.isStarlarkFile(file_path) and not self.isWorkspaceFile(file_path): + command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path) + error_messages += self.executeCommand(command, "envoy_build_fixer check failed", file_path) + + if self.isBuildFile(file_path) and (file_path.startswith(self.api_prefix + "envoy") or + file_path.startswith(self.api_shadow_root + "envoy")): + found = False + for line in self.readLines(file_path): + if "api_proto_package(" in line: + found = True + break + if not found: + error_messages += ["API build file does not provide api_proto_package()"] + + command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path) + error_messages += self.executeCommand(command, "buildifier check failed", file_path) + error_messages += self.checkFileContents(file_path, self.checkBuildLine) + return error_messages -def fixSourceLine(line, line_number): - # Strip double space after '.' This may prove overenthusiastic and need to - # be restricted to comments and metadata files but works for now. - line = re.sub(DOT_MULTI_SPACE_REGEX, ". 
", line) + def fixSourcePath(self, file_path): + self.evaluateLines(file_path, self.fixSourceLine) - if hasInvalidAngleBracketDirectory(line): - line = line.replace("<", '"').replace(">", '"') + error_messages = [] - # Fix incorrect protobuf namespace references. - for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items(): - line = line.replace(invalid_construct, valid_construct) + if not file_path.endswith(DOCS_SUFFIX): + if not file_path.endswith(PROTO_SUFFIX): + error_messages += self.fixHeaderOrder(file_path) + error_messages += self.clangFormat(file_path) + if file_path.endswith(PROTO_SUFFIX) and self.isApiFile(file_path): + package_name, error_message = self.packageNameForProto(file_path) + if package_name is None: + error_messages += error_message + return error_messages - # Use recommended cpp stdlib - for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items(): - line = line.replace(invalid_construct, valid_construct) + def checkSourcePath(self, file_path): + error_messages = self.checkFileContents(file_path, self.checkSourceLine) + + if not file_path.endswith(DOCS_SUFFIX): + if not file_path.endswith(PROTO_SUFFIX): + error_messages += self.checkNamespace(file_path) + command = ("%s --include_dir_order %s --path %s | diff %s -" % + (HEADER_ORDER_PATH, self.include_dir_order, file_path, file_path)) + error_messages += self.executeCommand(command, "header_order.py check failed", file_path) + command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path)) + error_messages += self.executeCommand(command, "clang-format check failed", file_path) + + if file_path.endswith(PROTO_SUFFIX) and self.isApiFile(file_path): + package_name, error_message = self.packageNameForProto(file_path) + if package_name is None: + error_messages += error_message + return error_messages - return line + # Example target outputs are: + # - "26,27c26" + # - "12,13d13" + # - "7a8,9" + def executeCommand(self, + command, + error_message, + file_path, + regex=re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")): + try: + output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip() + if output: + return output.decode('utf-8').split("\n") + return [] + except subprocess.CalledProcessError as e: + if (e.returncode != 0 and e.returncode != 1): + return ["ERROR: something went wrong while executing: %s" % e.cmd] + # In case we can't find any line numbers, record an error message first. + error_messages = ["%s for file: %s" % (error_message, file_path)] + for line in e.output.decode('utf-8').splitlines(): + for num in regex.findall(line): + error_messages.append(" %s:%s" % (file_path, num)) + return error_messages + + def fixHeaderOrder(self, file_path): + command = "%s --rewrite --include_dir_order %s --path %s" % (HEADER_ORDER_PATH, + self.include_dir_order, file_path) + if os.system(command) != 0: + return ["header_order.py rewrite error: %s" % (file_path)] + return [] + def clangFormat(self, file_path): + command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path) + if os.system(command) != 0: + return ["clang-format rewrite error: %s" % (file_path)] + return [] -# We want to look for a call to condvar.waitFor, but there's no strong pattern -# to the variable name of the condvar. If we just look for ".waitFor" we'll also -# pick up time_system_.waitFor(...), and we don't want to return true for that -# pattern. But in that case there is a strong pattern of using time_system in -# various spellings as the variable name. 
-def hasCondVarWaitFor(line):
-  wait_for = line.find(".waitFor(")
-  if wait_for == -1:
-    return False
-  preceding = line[0:wait_for]
-  if preceding.endswith("time_system") or preceding.endswith("timeSystem()") or \
-      preceding.endswith("time_system_"):
-    return False
-  return True
+  def checkFormat(self, file_path):
+    if file_path.startswith(EXCLUDED_PREFIXES):
+      return []
+    if not file_path.endswith(SUFFIXES):
+      return []
-# Determines whether the filename is either in the specified subdirectory, or
-# at the top level. We consider files in the top level for the benefit of
-# the check_format testcases in tools/testdata/check_format.
-def isInSubdir(filename, *subdirs):
-  # Skip this check for check_format's unit-tests.
-  if filename.count("/") <= 1:
-    return True
-  for subdir in subdirs:
-    if filename.startswith('./' + subdir + '/'):
-      return True
-  return False
-
-
-# Determines if given token exists in line without leading or trailing token characters
-# e.g. will return True for a line containing foo() but not foo_bar() or baz_foo
-def tokenInLine(token, line):
-  index = 0
-  while True:
-    index = line.find(token, index)
-    # the following check has been changed from index < 1 to index < 0 because
-    # this function incorrectly returns false when the token in question is the
-    # first one in a line. The following line returns false when the token is present:
-    # (no leading whitespace) violating_symbol foo;
-    if index < 0:
-      break
-    if index == 0 or not (line[index - 1].isalnum() or line[index - 1] == '_'):
-      if index + len(token) >= len(line) or not (line[index + len(token)].isalnum() or
-                                                 line[index + len(token)] == '_'):
-        return True
-    index = index + 1
-  return False
-
-
-def checkSourceLine(line, file_path, reportError):
-  # Check fixable errors. These may have been fixed already.
-  if line.find(".  ") != -1:
-    reportError("over-enthusiastic spaces")
-  if isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line):
-    reportError(
-        "Please do not use the raw literal x-envoy in source code. See Envoy::Http::PrefixValue.")
-  if hasInvalidAngleBracketDirectory(line):
-    reportError("envoy includes should not have angle brackets")
-  for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():
-    if invalid_construct in line:
-      reportError("incorrect protobuf type reference %s; "
-                  "should be %s" % (invalid_construct, valid_construct))
-  for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():
-    if invalid_construct in line:
-      reportError("term %s should be replaced with standard library term %s" %
-                  (invalid_construct, valid_construct))
-  # Do not include the virtual_includes headers.
-  if re.search("#include.*/_virtual_includes/", line):
-    reportError("Don't include the virtual includes headers.")
-
-  # Some errors cannot be fixed automatically, and actionable, consistent,
-  # navigable messages should be emitted to make it easy to find and fix
-  # the errors by hand.
-  if not allowlistedForProtobufDeps(file_path):
-    if '"google/protobuf' in line or "google::protobuf" in line:
-      reportError("unexpected direct dependency on google.protobuf, use "
-                  "the definitions in common/protobuf/protobuf.h instead.")
-  if line.startswith("#include <mutex>") or line.startswith("#include <condition_variable"):
-    # We don't check here for std::mutex because that may legitimately show up in
-    # comments, for example this one.
-    reportError("Don't use <mutex> or <condition_variable*>, switch to "
-                "Thread::MutexBasicLockable in source/common/common/thread.h")
-  if line.startswith("#include <shared_mutex>"):
-    # We don't check here for std::shared_timed_mutex because that may
-    # legitimately show up in comments, for example this one.
-    reportError("Don't use <shared_mutex>, use absl::Mutex for reader/writer locks.")
-  if not allowlistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line:
-    if "RealTimeSource" in line or \
-        ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \
-        "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \
-        "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line):
-      reportError("Don't reference real-world time sources from production code; use injection")
-    duration_arg = DURATION_VALUE_REGEX.search(line)
-    if duration_arg and duration_arg.group(1) != "0" and duration_arg.group(1) != "0.0":
-      # Matching duration(int-const or float-const) other than zero
-      reportError(
-          "Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)"
-      )
-  if not allowlistedForRegisterFactory(file_path):
-    if "Registry::RegisterFactory<" in line or "REGISTER_FACTORY" in line:
-      reportError("Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, "
-                  "use Registry::InjectFactory instead.")
-  if not allowlistedForUnpackTo(file_path):
-    if "UnpackTo" in line:
-      reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
-  # Check that we use the absl::Time library
-  if tokenInLine("std::get_time", line):
-    if "test/" in file_path:
-      reportError("Don't use std::get_time; use TestUtility::parseTime in tests")
+    error_messages = []
+    # Apply fixes first, if asked, and then run checks. If we wind up attempting to fix
+    # an issue, but there's still an error, that's a problem.
+    try_to_fix = self.operation_type == "fix"
+    if self.isBuildFile(file_path) or self.isStarlarkFile(file_path) or self.isWorkspaceFile(
+        file_path):
+      if try_to_fix:
+        error_messages += self.fixBuildPath(file_path)
+      error_messages += self.checkBuildPath(file_path)
+    else:
-      reportError("Don't use std::get_time; use the injectable time system")
-  if tokenInLine("std::put_time", line):
-    reportError("Don't use std::put_time; use absl::Time equivalent instead")
-  if tokenInLine("gmtime", line):
-    reportError("Don't use gmtime; use absl::Time equivalent instead")
-  if tokenInLine("mktime", line):
-    reportError("Don't use mktime; use absl::Time equivalent instead")
-  if tokenInLine("localtime", line):
-    reportError("Don't use localtime; use absl::Time equivalent instead")
-  if tokenInLine("strftime", line):
-    reportError("Don't use strftime; use absl::FormatTime instead")
-  if tokenInLine("strptime", line):
-    reportError("Don't use strptime; use absl::FormatTime instead")
-  if tokenInLine("strerror", line):
-    reportError("Don't use strerror; use Envoy::errorDetails instead")
-  # Prefer using abseil hash maps/sets over std::unordered_map/set for performance optimizations and
-  # non-deterministic iteration order that exposes faulty assertions.
-  # See: https://abseil.io/docs/cpp/guides/container#hash-tables
-  if "std::unordered_map" in line:
-    reportError("Don't use std::unordered_map; use absl::flat_hash_map instead or "
-                "absl::node_hash_map if pointer stability of keys/values is required")
-  if "std::unordered_set" in line:
-    reportError("Don't use std::unordered_set; use absl::flat_hash_set instead or "
-                "absl::node_hash_set if pointer stability of keys/values is required")
-  if "std::atomic_" in line:
-    # The std::atomic_* free functions are functionally equivalent to calling
-    # operations on std::atomic objects, so prefer to use that instead.
- reportError("Don't use free std::atomic_* functions, use std::atomic members instead.") - # Blocking the use of std::any, std::optional, std::variant for now as iOS 11/macOS 10.13 - # does not support these functions at runtime. - # See: https://github.com/envoyproxy/envoy/issues/12341 - if tokenInLine("std::any", line): - reportError("Don't use std::any; use absl::any instead") - if tokenInLine("std::make_optional", line): - reportError("Don't use std::make_optional; use absl::make_optional instead") - if tokenInLine("std::optional", line): - reportError("Don't use std::optional; use absl::optional instead") - if tokenInLine("std::variant", line): - reportError("Don't use std::variant; use absl::variant instead") - if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h": - # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that - # can be used instead - reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined " - "in include/envoy/common/platform.h instead") - if DESIGNATED_INITIALIZER_REGEX.search(line): - # Designated initializers are not part of the C++14 standard and are not supported - # by MSVC - reportError("Don't use designated initializers in struct initialization, " - "they are not part of C++14") - if " ?: " in line: - # The ?: operator is non-standard, it is a GCC extension - reportError("Don't use the '?:' operator, it is a non-standard GCC extension") - if line.startswith("using testing::Test;"): - reportError("Don't use 'using testing::Test;, elaborate the type instead") - if line.startswith("using testing::TestWithParams;"): - reportError("Don't use 'using testing::Test;, elaborate the type instead") - if TEST_NAME_STARTING_LOWER_CASE_REGEX.search(line): - # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins - # with a lowercase letter. - reportError("Test names should be CamelCase, starting with a capital letter") - if not allowlistedForSerializeAsString(file_path) and "SerializeAsString" in line: - # The MessageLite::SerializeAsString doesn't generate deterministic serialization, - # use MessageUtil::hash instead. - reportError( - "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead." - ) - if not allowlistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: - # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing - # behavior. 
- reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") - - if isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \ - ('.counterFromString(' in line or '.gaugeFromString(' in line or \ - '.histogramFromString(' in line or '.textReadoutFromString(' in line or \ - '->counterFromString(' in line or '->gaugeFromString(' in line or \ - '->histogramFromString(' in line or '->textReadoutFromString(' in line): - reportError("Don't lookup stats by name at runtime; use StatName saved during construction") - - if MANGLED_PROTOBUF_NAME_REGEX.search(line): - reportError("Don't use mangled Protobuf names for enum constants") - - hist_m = HISTOGRAM_SI_SUFFIX_REGEX.search(line) - if hist_m and not allowlistedForHistogramSiSuffix(hist_m.group(0)): - reportError( - "Don't suffix histogram names with the unit symbol, " - "it's already part of the histogram object and unit-supporting sinks can use this information natively, " - "other sinks can add the suffix automatically on flush should they prefer to do so.") - - if not allowlistedForStdRegex(file_path) and "std::regex" in line: - reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher") - - if not allowlistedForGrpcInit(file_path): - grpc_init_or_shutdown = line.find("grpc_init()") - grpc_shutdown = line.find("grpc_shutdown()") - if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and - grpc_shutdown < grpc_init_or_shutdown): - grpc_init_or_shutdown = grpc_shutdown - if grpc_init_or_shutdown != -1: - comment = line.find("// ") - if comment == -1 or comment > grpc_init_or_shutdown: - reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " + - "Grpc::GoogleGrpcContext. See #8282") - - if denylistedForExceptions(file_path): - # Skpping cases where 'throw' is a substring of a symbol like in "foothrowBar". - if "throw" in line.split(): - comment_match = COMMENT_REGEX.search(line) - if comment_match is None or comment_match.start(0) > line.find("throw"): - reportError("Don't introduce throws into exception-free files, use error " + - "statuses instead.") - - if "lua_pushlightuserdata" in line: - reportError("Don't use lua_pushlightuserdata, since it can cause unprotected error in call to" + - "Lua API (bad light userdata pointer) on ARM64 architecture. See " + - "https://github.com/LuaJIT/LuaJIT/issues/450#issuecomment-433659873 for details.") - - -def checkBuildLine(line, file_path, reportError): - if "@bazel_tools" in line and not (isStarlarkFile(file_path) or - file_path.startswith("./bazel/") or "python/runfiles" in line): - reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel") - if not allowlistedForProtobufDeps(file_path) and '"protobuf"' in line: - reportError("unexpected direct external dependency on protobuf, use " - "//source/common/protobuf instead.") - if (envoy_build_rule_check and not isStarlarkFile(file_path) and - not isWorkspaceFile(file_path) and not isExternalBuildFile(file_path) and "@envoy//" in line): - reportError("Superfluous '@envoy//' prefix") - - -def fixBuildLine(file_path, line, line_number): - if (envoy_build_rule_check and not isStarlarkFile(file_path) and - not isWorkspaceFile(file_path) and not isExternalBuildFile(file_path)): - line = line.replace("@envoy//", "//") - return line - - -def fixBuildPath(file_path): - evaluateLines(file_path, functools.partial(fixBuildLine, file_path)) - - error_messages = [] - - # TODO(htuch): Add API specific BUILD fixer script. 
- if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isStarlarkFile( - file_path) and not isWorkspaceFile(file_path): - if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0: - error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path] - - if os.system("%s -lint=fix -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: - error_messages += ["buildifier rewrite failed for file: %s" % file_path] - return error_messages - + if try_to_fix: + error_messages += self.fixSourcePath(file_path) + error_messages += self.checkSourcePath(file_path) -def checkBuildPath(file_path): - error_messages = [] - - if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isStarlarkFile( - file_path) and not isWorkspaceFile(file_path): - command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path) - error_messages += executeCommand(command, "envoy_build_fixer check failed", file_path) + if error_messages: + return ["From %s" % file_path] + error_messages + return error_messages - if isBuildFile(file_path) and (file_path.startswith(args.api_prefix + "envoy") or - file_path.startswith(args.api_shadow_prefix + "envoy")): + def checkFormatReturnTraceOnError(self, file_path): + """Run checkFormat and return the traceback of any exception.""" + try: + return self.checkFormat(file_path) + except: + return traceback.format_exc().split("\n") + + def checkOwners(self, dir_name, owned_directories, error_messages): + """Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS + Args: + dir_name: the directory being checked. + owned_directories: directories currently listed in CODEOWNERS. + error_messages: where to put an error message for new unowned directories. 
+ """ found = False - for line in readLines(file_path): - if "api_proto_package(" in line: + for owned in owned_directories: + if owned.startswith(dir_name) or dir_name.startswith(owned): found = True - break - if not found: - error_messages += ["API build file does not provide api_proto_package()"] + if not found and dir_name not in UNOWNED_EXTENSIONS: + error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name) - command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path) - error_messages += executeCommand(command, "buildifier check failed", file_path) - error_messages += checkFileContents(file_path, checkBuildLine) - return error_messages - - -def fixSourcePath(file_path): - evaluateLines(file_path, fixSourceLine) - - error_messages = [] + def checkApiShadowStarlarkFiles(self, file_path, error_messages): + command = "diff -u " + command += file_path + " " + api_shadow_starlark_path = self.api_shadow_root + re.sub(r"\./api/", '', file_path) + command += api_shadow_starlark_path - if not file_path.endswith(DOCS_SUFFIX): - if not file_path.endswith(PROTO_SUFFIX): - error_messages += fixHeaderOrder(file_path) - error_messages += clangFormat(file_path) - if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path): - package_name, error_message = packageNameForProto(file_path) - if package_name is None: + error_message = self.executeCommand(command, "invalid .bzl in generated_api_shadow", file_path) + if self.operation_type == "check": error_messages += error_message - return error_messages - - -def checkSourcePath(file_path): - error_messages = checkFileContents(file_path, checkSourceLine) + elif self.operation_type == "fix" and len(error_message) != 0: + shutil.copy(file_path, api_shadow_starlark_path) - if not file_path.endswith(DOCS_SUFFIX): - if not file_path.endswith(PROTO_SUFFIX): - error_messages += checkNamespace(file_path) - command = ("%s --include_dir_order %s --path %s | diff %s -" % - (HEADER_ORDER_PATH, include_dir_order, file_path, file_path)) - error_messages += executeCommand(command, "header_order.py check failed", file_path) - command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path)) - error_messages += executeCommand(command, "clang-format check failed", file_path) - - if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path): - package_name, error_message = packageNameForProto(file_path) - if package_name is None: - error_messages += error_message - return error_messages - - -# Example target outputs are: -# - "26,27c26" -# - "12,13d13" -# - "7a8,9" -def executeCommand(command, - error_message, - file_path, - regex=re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")): - try: - output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip() - if output: - return output.decode('utf-8').split("\n") - return [] - except subprocess.CalledProcessError as e: - if (e.returncode != 0 and e.returncode != 1): - return ["ERROR: something went wrong while executing: %s" % e.cmd] - # In case we can't find any line numbers, record an error message first. 
- error_messages = ["%s for file: %s" % (error_message, file_path)] - for line in e.output.decode('utf-8').splitlines(): - for num in regex.findall(line): - error_messages.append(" %s:%s" % (file_path, num)) return error_messages - -def fixHeaderOrder(file_path): - command = "%s --rewrite --include_dir_order %s --path %s" % (HEADER_ORDER_PATH, include_dir_order, - file_path) - if os.system(command) != 0: - return ["header_order.py rewrite error: %s" % (file_path)] - return [] - - -def clangFormat(file_path): - command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path) - if os.system(command) != 0: - return ["clang-format rewrite error: %s" % (file_path)] - return [] - - -def checkFormat(file_path): - if file_path.startswith(EXCLUDED_PREFIXES): - return [] - - if not file_path.endswith(SUFFIXES): - return [] - - error_messages = [] - # Apply fixes first, if asked, and then run checks. If we wind up attempting to fix - # an issue, but there's still an error, that's a problem. - try_to_fix = operation_type == "fix" - if isBuildFile(file_path) or isStarlarkFile(file_path) or isWorkspaceFile(file_path): - if try_to_fix: - error_messages += fixBuildPath(file_path) - error_messages += checkBuildPath(file_path) - else: - if try_to_fix: - error_messages += fixSourcePath(file_path) - error_messages += checkSourcePath(file_path) - - if error_messages: - return ["From %s" % file_path] + error_messages - return error_messages - - -def checkFormatReturnTraceOnError(file_path): - """Run checkFormat and return the traceback of any exception.""" - try: - return checkFormat(file_path) - except: - return traceback.format_exc().split("\n") - - -def checkOwners(dir_name, owned_directories, error_messages): - """Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS - - Args: - dir_name: the directory being checked. - owned_directories: directories currently listed in CODEOWNERS. - error_messages: where to put an error message for new unowned directories. - """ - found = False - for owned in owned_directories: - if owned.startswith(dir_name) or dir_name.startswith(owned): - found = True - if not found and dir_name not in UNOWNED_EXTENSIONS: - error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name) - - -def checkApiShadowStarlarkFiles(api_shadow_root, file_path, error_messages): - command = "diff -u " - command += file_path + " " - api_shadow_starlark_path = api_shadow_root + re.sub(r"\./api/", '', file_path) - command += api_shadow_starlark_path - - error_message = executeCommand(command, "invalid .bzl in generated_api_shadow", file_path) - if operation_type == "check": - error_messages += error_message - elif operation_type == "fix" and len(error_message) != 0: - shutil.copy(file_path, api_shadow_starlark_path) - - return error_messages - - -def checkFormatVisitor(arg, dir_name, names): - """Run checkFormat in parallel for the given files. - - Args: - arg: a tuple (pool, result_list, owned_directories, error_messages) - pool and result_list are for starting tasks asynchronously. - owned_directories tracks directories listed in the CODEOWNERS file. - error_messages is a list of string format errors. - dir_name: the parent directory of the given files. - names: a list of file names. - """ - - # Unpack the multiprocessing.Pool process pool and list of results. Since - # python lists are passed as references, this is used to collect the list of - # async results (futures) from running checkFormat and passing them back to - # the caller. 
- pool, result_list, owned_directories, api_shadow_root, error_messages = arg - - # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded - # manner as it is a small and limited list. - source_prefix = './source/' - full_prefix = './source/extensions/' - # Check to see if this directory is a subdir under /source/extensions - # Also ignore top level directories under /source/extensions since we don't - # need owners for source/extensions/access_loggers etc, just the subdirectories. - if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]: - checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages) - - for file_name in names: - if dir_name.startswith("./api") and isStarlarkFile(file_name): - result = pool.apply_async(checkApiShadowStarlarkFiles, - args=(api_shadow_root, dir_name + "/" + file_name, error_messages)) + def checkFormatVisitor(self, arg, dir_name, names): + """Run checkFormat in parallel for the given files. + Args: + arg: a tuple (pool, result_list, owned_directories, error_messages) + pool and result_list are for starting tasks asynchronously. + owned_directories tracks directories listed in the CODEOWNERS file. + error_messages is a list of string format errors. + dir_name: the parent directory of the given files. + names: a list of file names. + """ + + # Unpack the multiprocessing.Pool process pool and list of results. Since + # python lists are passed as references, this is used to collect the list of + # async results (futures) from running checkFormat and passing them back to + # the caller. + pool, result_list, owned_directories, error_messages = arg + + # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded + # manner as it is a small and limited list. + source_prefix = './source/' + full_prefix = './source/extensions/' + # Check to see if this directory is a subdir under /source/extensions + # Also ignore top level directories under /source/extensions since we don't + # need owners for source/extensions/access_loggers etc, just the subdirectories. + if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]: + self.checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages) + + for file_name in names: + if dir_name.startswith("./api") and self.isStarlarkFile(file_name): + result = pool.apply_async(self.checkApiShadowStarlarkFiles, + args=(dir_name + "/" + file_name, error_messages)) + result_list.append(result) + result = pool.apply_async(self.checkFormatReturnTraceOnError, + args=(dir_name + "/" + file_name,)) result_list.append(result) - result = pool.apply_async(checkFormatReturnTraceOnError, args=(dir_name + "/" + file_name,)) - result_list.append(result) - -# checkErrorMessages iterates over the list with error messages and prints -# errors and returns a bool based on whether there were any errors. -def checkErrorMessages(error_messages): - if error_messages: - for e in error_messages: - print("ERROR: %s" % e) - return True - return False + # checkErrorMessages iterates over the list with error messages and prints + # errors and returns a bool based on whether there were any errors. 
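+  # For example, checkErrorMessages(["foo failed"]) prints "ERROR: foo failed" and
+  # returns True, while checkErrorMessages([]) prints nothing and returns False.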
+ def checkErrorMessages(self, error_messages): + if error_messages: + for e in error_messages: + print("ERROR: %s" % e) + return True + return False if __name__ == "__main__": @@ -1064,34 +1092,23 @@ def checkErrorMessages(error_messages): nargs="+", default=[], help="exclude paths from envoy_build_fixer check.") + parser.add_argument("--bazel_tools_check_excluded_paths", + type=str, + nargs="+", + default=[], + help="exclude paths from bazel_tools check.") parser.add_argument("--include_dir_order", type=str, default=",".join(common.includeDirOrder()), help="specify the header block include directory order.") args = parser.parse_args() - - operation_type = args.operation_type - target_path = args.target_path - api_shadow_root = args.api_shadow_prefix - envoy_build_rule_check = not args.skip_envoy_build_rule_check - namespace_check = args.namespace_check - namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ - "./tools/api_boost/testdata/", - "./tools/clang_tools/", - ] - build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [ - "./bazel/external/", - "./bazel/toolchains/", - "./bazel/BUILD", - "./tools/clang_tools", - ] - include_dir_order = args.include_dir_order if args.add_excluded_prefixes: EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes) + format_checker = FormatChecker(args) # Check whether all needed external tools are available. - ct_error_messages = checkTools() - if checkErrorMessages(ct_error_messages): + ct_error_messages = format_checker.checkTools() + if format_checker.checkErrorMessages(ct_error_messages): sys.exit(1) # Returns the list of directories with owners listed in CODEOWNERS. May append errors to @@ -1100,7 +1117,7 @@ def ownedDirectories(error_messages): owned = [] maintainers = [ '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@asraa', - '@yavlasov', '@junr03', '@dio', '@jmarantz' + '@yavlasov', '@junr03', '@dio', '@jmarantz', '@antoniovicente' ] try: @@ -1128,8 +1145,8 @@ def ownedDirectories(error_messages): error_messages = [] owned_directories = ownedDirectories(error_messages) - if os.path.isfile(target_path): - error_messages += checkFormat("./" + target_path) + if os.path.isfile(args.target_path): + error_messages += format_checker.checkFormat("./" + args.target_path) else: results = [] @@ -1137,9 +1154,9 @@ def PooledCheckFormat(path_predicate): pool = multiprocessing.Pool(processes=args.num_workers) # For each file in target_path, start a new task in the pool and collect the # results (results is passed by reference, and is used as an output). - for root, _, files in os.walk(target_path): - checkFormatVisitor((pool, results, owned_directories, api_shadow_root, error_messages), - root, [f for f in files if path_predicate(f)]) + for root, _, files in os.walk(args.target_path): + format_checker.checkFormatVisitor((pool, results, owned_directories, error_messages), root, + [f for f in files if path_predicate(f)]) # Close the pool to new tasks, wait for all of the running tasks to finish, # then collect the error messages. @@ -1149,14 +1166,14 @@ def PooledCheckFormat(path_predicate): # We first run formatting on non-BUILD files, since the BUILD file format # requires analysis of srcs/hdrs in the BUILD file, and we don't want these # to be rewritten by other multiprocessing pooled processes. 
- PooledCheckFormat(lambda f: not isBuildFile(f)) - PooledCheckFormat(lambda f: isBuildFile(f)) + PooledCheckFormat(lambda f: not format_checker.isBuildFile(f)) + PooledCheckFormat(lambda f: format_checker.isBuildFile(f)) error_messages += sum((r.get() for r in results), []) - if checkErrorMessages(error_messages): + if format_checker.checkErrorMessages(error_messages): print("ERROR: check format failed. run 'tools/code_format/check_format.py fix'") sys.exit(1) - if operation_type == "check": + if args.operation_type == "check": print("PASS") diff --git a/tools/code_format/check_format_test.sh b/tools/code_format/check_format_test.sh index 4a262013268e..5958606dd77f 100755 --- a/tools/code_format/check_format_test.sh +++ b/tools/code_format/check_format_test.sh @@ -1,7 +1,8 @@ #!/bin/bash -tools=$(dirname $(dirname $(realpath $0))) -root=$(realpath $tools/..) -ci=$root/ci -cd $root -exec ./ci/run_envoy_docker.sh ./tools/code_format/check_format_test_helper.sh "$@" \ No newline at end of file +tools="$(dirname "$(dirname "$(realpath "$0")")")" +root=$(realpath "$tools/..") +ci="${root}/ci" +export ci +cd "$root" || exit 1 +exec ./ci/run_envoy_docker.sh ./tools/code_format/check_format_test_helper.sh "$@" diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index fcfaf8b9218e..aa90d12848ec 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -243,15 +243,29 @@ def runChecks(): "std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead " + "or absl::node_hash_set if pointer stability of keys/values is required") errors += checkUnfixableError("std_any.cc", "Don't use std::any; use absl::any instead") + errors += checkUnfixableError("std_get_if.cc", "Don't use std::get_if; use absl::get_if instead") + errors += checkUnfixableError( + "std_holds_alternative.cc", + "Don't use std::holds_alternative; use absl::holds_alternative instead") errors += checkUnfixableError("std_make_optional.cc", "Don't use std::make_optional; use absl::make_optional instead") + errors += checkUnfixableError("std_monostate.cc", + "Don't use std::monostate; use absl::monostate instead") errors += checkUnfixableError("std_optional.cc", "Don't use std::optional; use absl::optional instead") + errors += checkUnfixableError("std_string_view.cc", + "Don't use std::string_view; use absl::string_view instead") errors += checkUnfixableError("std_variant.cc", "Don't use std::variant; use absl::variant instead") + errors += checkUnfixableError("std_visit.cc", "Don't use std::visit; use absl::visit instead") errors += checkUnfixableError( "throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.") + errors += checkUnfixableError("pgv_string.proto", "min_bytes is DEPRECATED, Use min_len.") errors += checkFileExpectingOK("commented_throw.cc") + errors += checkUnfixableError("repository_url.bzl", + "Only repository_locations.bzl may contains URL references") + errors += checkUnfixableError("repository_urls.bzl", + "Only repository_locations.bzl may contains URL references") # The following files have errors that can be automatically fixed. 
errors += checkAndFixError("over_enthusiastic_spaces.cc", diff --git a/tools/code_format/check_format_test_helper.sh b/tools/code_format/check_format_test_helper.sh index 3a62c96f9fcf..d1f484d3a0cd 100755 --- a/tools/code_format/check_format_test_helper.sh +++ b/tools/code_format/check_format_test_helper.sh @@ -1,9 +1,9 @@ #!/bin/bash -tools=$(dirname $(dirname $(realpath $0))) -root=$(realpath $tools/..) +tools="$(dirname "$(dirname "$(realpath "$0")")")" +root=$(realpath "$tools/..") -cd $root +cd "$root" || exit 1 # to satisfy dependency on run_command export PYTHONPATH="$tools" ./tools/code_format/check_format_test_helper.py "$@" diff --git a/tools/code_format/check_shellcheck_format.sh b/tools/code_format/check_shellcheck_format.sh index 454be30cedbc..3a3a424d4af4 100755 --- a/tools/code_format/check_shellcheck_format.sh +++ b/tools/code_format/check_shellcheck_format.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -EXCLUDED_SHELLFILES=${EXCLUDED_SHELLFILES:-"^tools|^test|^examples|^ci|^bin|^source|^bazel|^.github"} +EXCLUDED_SHELLFILES=${EXCLUDED_SHELLFILES:-"^.github|.rst$|.md$"} find_shell_files () { @@ -19,15 +19,19 @@ run_shellcheck_on () { local file file="$1" echo "Shellcheck: ${file}" - # TODO: add -f diff when shellcheck version allows (ubuntu > bionic) shellcheck -x "$file" } run_shellchecks () { - local all_shellfiles failed failure filtered_shellfiles skipped_count success_count - failed=() - readarray -t all_shellfiles <<< "$(find_shell_files)" - readarray -t filtered_shellfiles <<< "$(find_shell_files | grep -vE "${EXCLUDED_SHELLFILES}")" + local all_shellfiles=() failed=() failure \ + filtered_shellfiles=() found_shellfiles \ + line skipped_count success_count + + found_shellfiles=$(find_shell_files) + while read -r line; do all_shellfiles+=("$line"); done \ + <<< "$found_shellfiles" + while read -r line; do filtered_shellfiles+=("$line"); done \ + <<< "$(echo -e "$found_shellfiles" | grep -vE "${EXCLUDED_SHELLFILES}")" for file in "${filtered_shellfiles[@]}"; do run_shellcheck_on "$file" || { diff --git a/tools/code_format/format_python_tools.sh b/tools/code_format/format_python_tools.sh index ac755e3dff52..48014de30279 100755 --- a/tools/code_format/format_python_tools.sh +++ b/tools/code_format/format_python_tools.sh @@ -9,7 +9,7 @@ set -e echo "Running Python format check..." -python_venv format_python_tools $1 +python_venv format_python_tools "$1" echo "Running Python3 flake8 check..." 
python3 -m flake8 --version diff --git a/tools/code_format/requirements.txt b/tools/code_format/requirements.txt index c27e0d44afaa..7ac1cb042498 100644 --- a/tools/code_format/requirements.txt +++ b/tools/code_format/requirements.txt @@ -1,2 +1,21 @@ -flake8==3.8.3 -yapf==0.30.0 +flake8==3.8.3 \ + --hash=sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c \ + --hash=sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208 +importlib-metadata==2.0.0 \ + --hash=sha256:77a540690e24b0305878c37ffd421785a6f7e53c8b5720d211b211de8d0e95da \ + --hash=sha256:cefa1a2f919b866c5beb7c9f7b0ebb4061f30a8a9bf16d609b000e2dfaceb9c3 +mccabe==0.6.1 \ + --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ + --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f +pycodestyle==2.6.0 \ + --hash=sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367 \ + --hash=sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e +pyflakes==2.2.0 \ + --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \ + --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8 +yapf==0.30.0 \ + --hash=sha256:3000abee4c28daebad55da6c85f3cd07b8062ce48e2e9943c8da1b9667d48427 \ + --hash=sha256:3abf61ba67cf603069710d30acbc88cfe565d907e16ad81429ae90ce9651e0c9 +zipp==3.2.0 \ + --hash=sha256:43f4fa8d8bb313e65d8323a3952ef8756bf40f9a5c3ea7334be23ee4ec8278b6 \ + --hash=sha256:b52f22895f4cfce194bc8172f3819ee8de7540aa6d873535a8668b730b8b411f diff --git a/tools/config_validation/validate_fragment.py b/tools/config_validation/validate_fragment.py index d272f37fb006..a14464ba7d46 100644 --- a/tools/config_validation/validate_fragment.py +++ b/tools/config_validation/validate_fragment.py @@ -3,7 +3,7 @@ # Example usage: # # bazel run //tools/config_validation:validate_fragment -- \ -# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/google_com_proxy.v2.yaml +# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/google_com_proxy.yaml import json import pathlib diff --git a/tools/dependency/cve_scan.py b/tools/dependency/cve_scan.py new file mode 100755 index 000000000000..8dea63c8cc47 --- /dev/null +++ b/tools/dependency/cve_scan.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 + +# Scan for any external dependencies that were last updated before known CVEs +# (and near relatives). We also try a fuzzy match on version information. + +from collections import defaultdict, namedtuple +import datetime as dt +import gzip +import json +import re +import sys +import textwrap +import urllib.request + +import utils as dep_utils + +# These CVEs are false positives for the match heuristics. An explanation is +# required when adding a new entry to this list as a comment. +IGNORES_CVES = set([ + # Node.js issue unrelated to http-parser (napi_ API implementation). + 'CVE-2020-8174', + # Node.js HTTP desync attack. Request smuggling due to CR and hyphen + # conflation in llhttp + # (https://github.com/nodejs/llhttp/commit/9d9da1d0f18599ceddd8f484df5a5ad694d23361). + # This was a result of using llparse's toLowerUnsafe() for header keys. + # http-parser uses a TOKEN method that doesn't have the same issue for + # header fields. + 'CVE-2020-8201', + # Node.js issue unrelated to http-parser. This is a DoS due to a lack of + # request/connection timeouts, see + # https://github.com/nodejs/node/commit/753f3b247a. + 'CVE-2020-8251', + # Node.js issue unrelated to http-parser (libuv). 
+ 'CVE-2020-8252', + # Fixed via the nghttp2 1.41.0 bump in Envoy 8b6ea4. + 'CVE-2020-11080', +]) + +# Subset of CVE fields that are useful below. +Cve = namedtuple( + 'Cve', + ['id', 'description', 'cpes', 'score', 'severity', 'published_date', 'last_modified_date']) + + +class Cpe(namedtuple('CPE', ['part', 'vendor', 'product', 'version'])): + '''Model a subset of CPE fields that are used in CPE matching.''' + + @classmethod + def FromString(cls, cpe_str): + assert (cpe_str.startswith('cpe:2.3:')) + components = cpe_str.split(':') + assert (len(components) >= 6) + return cls(*components[2:6]) + + def __str__(self): + return f'cpe:2.3:{self.part}:{self.vendor}:{self.product}:{self.version}' + + def VendorNormalized(self): + '''Return a normalized CPE where only part and vendor are significant.''' + return Cpe(self.part, self.vendor, '*', '*') + + +def ParseCveJson(cve_json, cves, cpe_revmap): + '''Parse CVE JSON dictionary. + + Args: + cve_json: a NIST CVE JSON dictionary. + cves: dictionary mapping CVE ID string to Cve object (output). + cpe_revmap: a reverse map from vendor normalized CPE to CVE ID string. + ''' + + # This provides an over-approximation of possible CPEs affected by CVE nodes + # metadata; it traverses the entire AND-OR tree and just gathers every CPE + # observed. Generally we expect that most of Envoy's CVE-CPE matches to be + # simple, plus it's interesting to consumers of this data to understand when a + # CPE pops up, even in a conditional setting. + def GatherCpes(nodes, cpe_set): + for node in nodes: + for cpe_match in node.get('cpe_match', []): + cpe_set.add(Cpe.FromString(cpe_match['cpe23Uri'])) + GatherCpes(node.get('children', []), cpe_set) + + for cve in cve_json['CVE_Items']: + cve_id = cve['cve']['CVE_data_meta']['ID'] + description = cve['cve']['description']['description_data'][0]['value'] + cpe_set = set() + GatherCpes(cve['configurations']['nodes'], cpe_set) + if len(cpe_set) == 0: + continue + cvss_v3_score = cve['impact']['baseMetricV3']['cvssV3']['baseScore'] + cvss_v3_severity = cve['impact']['baseMetricV3']['cvssV3']['baseSeverity'] + + def ParseCveDate(date_str): + assert (date_str.endswith('Z')) + return dt.date.fromisoformat(date_str.split('T')[0]) + + published_date = ParseCveDate(cve['publishedDate']) + last_modified_date = ParseCveDate(cve['lastModifiedDate']) + cves[cve_id] = Cve(cve_id, description, cpe_set, cvss_v3_score, cvss_v3_severity, + published_date, last_modified_date) + for cpe in cpe_set: + cpe_revmap[str(cpe.VendorNormalized())].add(cve_id) + return cves, cpe_revmap + + +def DownloadCveData(urls): + '''Download NIST CVE JSON databases from given URLs and parse. + + Args: + urls: a list of URLs. + Returns: + cves: dictionary mapping CVE ID string to Cve object (output). + cpe_revmap: a reverse map from vendor normalized CPE to CVE ID string. 
+ ''' + cves = {} + cpe_revmap = defaultdict(set) + for url in urls: + print(f'Loading NIST CVE database from {url}...') + with urllib.request.urlopen(url) as request: + with gzip.GzipFile(fileobj=request) as json_data: + ParseCveJson(json.loads(json_data.read()), cves, cpe_revmap) + return cves, cpe_revmap + + +def FormatCveDetails(cve, deps): + formatted_deps = ', '.join(sorted(deps)) + wrapped_description = '\n '.join(textwrap.wrap(cve.description)) + return f''' + CVE ID: {cve.id} + CVSS v3 score: {cve.score} + Severity: {cve.severity} + Published date: {cve.published_date} + Last modified date: {cve.last_modified_date} + Dependencies: {formatted_deps} + Description: {wrapped_description} + Affected CPEs: + ''' + '\n '.join(f'- {cpe}' for cpe in cve.cpes) + + +FUZZY_DATE_RE = re.compile('(\d{4}).?(\d{2}).?(\d{2})') +FUZZY_SEMVER_RE = re.compile('(\d+)[:\.\-_](\d+)[:\.\-_](\d+)') + + +def RegexGroupsMatch(regex, lhs, rhs): + '''Do two strings match modulo a regular expression? + + Args: + regex: regular expression + lhs: LHS string + rhs: RHS string + Returns: + A boolean indicating match. + ''' + lhs_match = regex.search(lhs) + if lhs_match: + rhs_match = regex.search(rhs) + if rhs_match and lhs_match.groups() == rhs_match.groups(): + return True + return False + + +def CpeMatch(cpe, dep_metadata): + '''Heuristically match dependency metadata against CPE. + + We have a number of rules below that should are easy to compute without having + to look at the dependency metadata. In the future, with additional access to + repository information we could do the following: + - For dependencies at a non-release version, walk back through git history to + the last known release version and attempt a match with this. + - For dependencies at a non-release version, use the commit date to look for a + version match where version is YYYY-MM-DD. + + Args: + cpe: Cpe object to match against. + dep_metadata: dependency metadata dictionary. + Returns: + A boolean indicating a match. + ''' + dep_cpe = Cpe.FromString(dep_metadata['cpe']) + dep_version = dep_metadata['version'] + # The 'part' and 'vendor' must be an exact match. + if cpe.part != dep_cpe.part: + return False + if cpe.vendor != dep_cpe.vendor: + return False + # We allow Envoy dependency CPEs to wildcard the 'product', this is useful for + # LLVM where multiple product need to be covered. + if dep_cpe.product != '*' and cpe.product != dep_cpe.product: + return False + # Wildcard versions always match. + if cpe.version == '*': + return True + # An exact version match is a hit. + if cpe.version == dep_version: + return True + # Allow the 'last_updated' dependency metadata to substitute for date. + # TODO(htuch): Make a finer grained distinction between Envoy update date and dependency + # release date in 'last_updated'. + # TODO(htuch): Consider fuzzier date ranges. + if cpe.version == dep_metadata['last_updated']: + return True + # Try a fuzzy date match to deal with versions like fips-20190304 in dependency version. + if RegexGroupsMatch(FUZZY_DATE_RE, dep_version, cpe.version): + return True + # Try a fuzzy semver match to deal with things like 2.1.0-beta3. + if RegexGroupsMatch(FUZZY_SEMVER_RE, dep_version, cpe.version): + return True + # Fall-thru. + return False + + +def CveMatch(cve, dep_metadata): + '''Heuristically match dependency metadata against CVE. + + In general, we allow false positives but want to keep the noise low, to avoid + the toil around having to populate IGNORES_CVES. + + Args: + cve: Cve object to match against. 
+ dep_metadata: dependency metadata dictionary. + Returns: + A boolean indicating a match. + ''' + wildcard_version_match = False + # Consider each CPE attached to the CVE for a match against the dependency CPE. + for cpe in cve.cpes: + if CpeMatch(cpe, dep_metadata): + # Wildcard version matches need additional heuristics unrelated to CPE to + # qualify, e.g. last updated date. + if cpe.version == '*': + wildcard_version_match = True + else: + return True + if wildcard_version_match: + # If the CVE was published after the dependency was last updated, it's a + # potential match. + last_dep_update = dt.date.fromisoformat(dep_metadata['last_updated']) + if last_dep_update <= cve.published_date: + return True + return False + + +def CveScan(cves, cpe_revmap, cve_allowlist, repository_locations): + '''Scan for CVEs in a parsed NIST CVE database. + + Args: + cves: CVE dictionary as provided by DownloadCveData(). + cve_revmap: CPE-CVE reverse map as provided by DownloadCveData(). + cve_allowlist: an allowlist of CVE IDs to ignore. + repository_locations: a dictionary of dependency metadata in the format + described in api/bazel/external_deps.bzl. + Returns: + possible_cves: a dictionary mapping CVE IDs to Cve objects. + cve_deps: a dictionary mapping CVE IDs to dependency names. + ''' + possible_cves = {} + cve_deps = defaultdict(list) + for dep, metadata in repository_locations.items(): + cpe = metadata.get('cpe', 'N/A') + if cpe == 'N/A': + continue + candidate_cve_ids = cpe_revmap.get(str(Cpe.FromString(cpe).VendorNormalized()), []) + for cve_id in candidate_cve_ids: + cve = cves[cve_id] + if cve.id in cve_allowlist: + continue + if CveMatch(cve, metadata): + possible_cves[cve_id] = cve + cve_deps[cve_id].append(dep) + return possible_cves, cve_deps + + +if __name__ == '__main__': + # Allow local overrides for NIST CVE database URLs via args. + urls = sys.argv[1:] + if not urls: + # We only look back a few years, since we shouldn't have any ancient deps. 
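+    # For example, a run in 2020 scans the 2018, 2019 and 2020 feeds via URLs like
+    # https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2020.json.gz, following the
+    # URL template below.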
+ current_year = dt.datetime.now().year + scan_years = range(2018, current_year + 1) + urls = [ + f'https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-{year}.json.gz' for year in scan_years + ] + cves, cpe_revmap = DownloadCveData(urls) + possible_cves, cve_deps = CveScan(cves, cpe_revmap, IGNORES_CVES, dep_utils.RepositoryLocations()) + if possible_cves: + print('\nBased on heuristic matching with the NIST CVE database, Envoy may be vulnerable to:') + for cve_id in sorted(possible_cves): + print(f'{FormatCveDetails(possible_cves[cve_id], cve_deps[cve_id])}') + sys.exit(1) diff --git a/tools/dependency/cve_scan_test.py b/tools/dependency/cve_scan_test.py new file mode 100755 index 000000000000..afb89f83b829 --- /dev/null +++ b/tools/dependency/cve_scan_test.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 +"""Tests for cve_scan.""" + +from collections import defaultdict +import datetime as dt +import unittest + +import cve_scan + + +class CveScanTest(unittest.TestCase): + + def test_parse_cve_json(self): + cve_json = { + 'CVE_Items': [ + { + 'cve': { + 'CVE_data_meta': { + 'ID': 'CVE-2020-1234' + }, + 'description': { + 'description_data': [{ + 'value': 'foo' + }] + } + }, + 'configurations': { + 'nodes': [{ + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:bar:1.2.3' + }], + }], + }, + 'impact': { + 'baseMetricV3': { + 'cvssV3': { + 'baseScore': 3.4, + 'baseSeverity': 'LOW' + } + } + }, + 'publishedDate': '2020-03-17T00:59Z', + 'lastModifiedDate': '2020-04-17T00:59Z' + }, + { + 'cve': { + 'CVE_data_meta': { + 'ID': 'CVE-2020-1235' + }, + 'description': { + 'description_data': [{ + 'value': 'bar' + }] + } + }, + 'configurations': { + 'nodes': [{ + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:bar:1.2.3' + }], + 'children': [ + { + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:baz:3.2.3' + }] + }, + { + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:*:*' + }, { + 'cpe23Uri': 'cpe:2.3:a:wat:bar:1.2.3' + }] + }, + ], + }], + }, + 'impact': { + 'baseMetricV3': { + 'cvssV3': { + 'baseScore': 9.9, + 'baseSeverity': 'HIGH' + } + } + }, + 'publishedDate': '2020-03-18T00:59Z', + 'lastModifiedDate': '2020-04-18T00:59Z' + }, + ] + } + cves = {} + cpe_revmap = defaultdict(set) + cve_scan.ParseCveJson(cve_json, cves, cpe_revmap) + self.maxDiff = None + self.assertDictEqual( + cves, { + 'CVE-2020-1234': + cve_scan.Cve(id='CVE-2020-1234', + description='foo', + cpes=set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]), + score=3.4, + severity='LOW', + published_date=dt.date(2020, 3, 17), + last_modified_date=dt.date(2020, 4, 17)), + 'CVE-2020-1235': + cve_scan.Cve(id='CVE-2020-1235', + description='bar', + cpes=set( + map(self.BuildCpe, [ + 'cpe:2.3:a:foo:bar:1.2.3', 'cpe:2.3:a:foo:baz:3.2.3', + 'cpe:2.3:a:foo:*:*', 'cpe:2.3:a:wat:bar:1.2.3' + ])), + score=9.9, + severity='HIGH', + published_date=dt.date(2020, 3, 18), + last_modified_date=dt.date(2020, 4, 18)) + }) + self.assertDictEqual(cpe_revmap, { + 'cpe:2.3:a:foo:*:*': {'CVE-2020-1234', 'CVE-2020-1235'}, + 'cpe:2.3:a:wat:*:*': {'CVE-2020-1235'} + }) + + def BuildCpe(self, cpe_str): + return cve_scan.Cpe.FromString(cpe_str) + + def BuildDep(self, cpe_str, version=None, last_updated=None): + return {'cpe': cpe_str, 'version': version, 'last_updated': last_updated} + + def CpeMatch(self, cpe_str, dep_cpe_str, version=None, last_updated=None): + return cve_scan.CpeMatch(self.BuildCpe(cpe_str), + self.BuildDep(dep_cpe_str, version=version, last_updated=last_updated)) + + def test_cpe_match(self): + # Mismatched part + 
self.assertFalse(self.CpeMatch('cpe:2.3:o:foo:bar:*', 'cpe:2.3:a:foo:bar:*')) + # Mismatched vendor + self.assertFalse(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foz:bar:*')) + # Mismatched product + self.assertFalse(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:baz:*')) + # Wildcard product + self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:*:*')) + # Wildcard version match + self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:bar:*')) + # Exact version match + self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:1.2.3', 'cpe:2.3:a:foo:bar:*', + version='1.2.3')) + # Date version match + self.assertTrue( + self.CpeMatch('cpe:2.3:a:foo:bar:2020-03-05', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-03-05')) + fuzzy_version_matches = [ + ('2020-03-05', '2020-03-05'), + ('2020-03-05', '20200305'), + ('2020-03-05', 'foo-20200305-bar'), + ('2020-03-05', 'foo-2020_03_05-bar'), + ('2020-03-05', 'foo-2020-03-05-bar'), + ('1.2.3', '1.2.3'), + ('1.2.3', '1-2-3'), + ('1.2.3', '1_2_3'), + ('1.2.3', '1:2:3'), + ('1.2.3', 'foo-1-2-3-bar'), + ] + for cpe_version, dep_version in fuzzy_version_matches: + self.assertTrue( + self.CpeMatch(f'cpe:2.3:a:foo:bar:{cpe_version}', + 'cpe:2.3:a:foo:bar:*', + version=dep_version)) + fuzzy_version_no_matches = [ + ('2020-03-05', '2020-3.5'), + ('2020-03-05', '2020--03-05'), + ('1.2.3', '1@2@3'), + ('1.2.3', '1..2.3'), + ] + for cpe_version, dep_version in fuzzy_version_no_matches: + self.assertFalse( + self.CpeMatch(f'cpe:2.3:a:foo:bar:{cpe_version}', + 'cpe:2.3:a:foo:bar:*', + version=dep_version)) + + def BuildCve(self, cve_id, cpes, published_date): + return cve_scan.Cve(cve_id, + description=None, + cpes=cpes, + score=None, + severity=None, + published_date=dt.date.fromisoformat(published_date), + last_modified_date=None) + + def CveMatch(self, cve_id, cpes, published_date, dep_cpe_str, version=None, last_updated=None): + return cve_scan.CveMatch(self.BuildCve(cve_id, cpes=cpes, published_date=published_date), + self.BuildDep(dep_cpe_str, version=version, last_updated=last_updated)) + + def test_cve_match(self): + # Empty CPEs, no match + self.assertFalse(self.CveMatch('CVE-2020-123', set(), '2020-05-03', 'cpe:2.3:a:foo:bar:*')) + # Wildcard version, stale dependency match + self.assertTrue( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-05-02')) + self.assertTrue( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-05-03')) + # Wildcard version, recently updated + self.assertFalse( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-05-04')) + # Version match + self.assertTrue( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + version='1.2.3')) + # Version mismatch + self.assertFalse( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + version='1.2.4', + last_updated='2020-05-02')) + # Multiple CPEs, match first, don't match later. 
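+    # The foo:bar:1.2.3 CPE below matches the dependency's 1.2.3 version, so the
+    # non-matching foo:baz:3.2.1 CPE attached to the same CVE does not prevent a hit.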
+ self.assertTrue( + self.CveMatch('CVE-2020-123', + set([ + self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'), + self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1') + ]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + version='1.2.3')) + + def test_cve_scan(self): + cves = { + 'CVE-2020-1234': + self.BuildCve( + 'CVE-2020-1234', + set([ + self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'), + self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1') + ]), '2020-05-03'), + 'CVE-2020-1235': + self.BuildCve( + 'CVE-2020-1235', + set([ + self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'), + self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1') + ]), '2020-05-03'), + 'CVE-2020-1236': + self.BuildCve('CVE-2020-1236', set([ + self.BuildCpe('cpe:2.3:a:foo:wat:1.2.3'), + ]), '2020-05-03'), + } + cpe_revmap = { + 'cpe:2.3:a:foo:*:*': ['CVE-2020-1234', 'CVE-2020-1235', 'CVE-2020-1236'], + } + cve_allowlist = ['CVE-2020-1235'] + repository_locations = { + 'bar': self.BuildDep('cpe:2.3:a:foo:bar:*', version='1.2.3'), + 'baz': self.BuildDep('cpe:2.3:a:foo:baz:*', version='3.2.1'), + 'foo': self.BuildDep('cpe:2.3:a:foo:*:*', version='1.2.3'), + 'blah': self.BuildDep('N/A'), + } + possible_cves, cve_deps = cve_scan.CveScan(cves, cpe_revmap, cve_allowlist, + repository_locations) + self.assertListEqual(sorted(possible_cves.keys()), ['CVE-2020-1234', 'CVE-2020-1236']) + self.assertDictEqual(cve_deps, { + 'CVE-2020-1234': ['bar', 'baz', 'foo'], + 'CVE-2020-1236': ['foo'] + }) + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/dependency/utils.py b/tools/dependency/utils.py new file mode 100644 index 000000000000..df1930d5cd32 --- /dev/null +++ b/tools/dependency/utils.py @@ -0,0 +1,28 @@ +# Utilities for reasoning about dependencies. + +from importlib.util import spec_from_loader, module_from_spec +from importlib.machinery import SourceFileLoader + + +# Shared Starlark/Python files must have a .bzl suffix for Starlark import, so +# we are forced to do this workaround. +def LoadModule(name, path): + spec = spec_from_loader(name, SourceFileLoader(name, path)) + module = module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +envoy_repository_locations = LoadModule('envoy_repository_locations', + 'bazel/repository_locations.bzl') +api_repository_locations = LoadModule('api_repository_locations', + 'api/bazel/repository_locations.bzl') +repository_locations_utils = LoadModule('repository_locations_utils', + 'api/bazel/repository_locations_utils.bzl') + + +def RepositoryLocations(): + spec_loader = repository_locations_utils.load_repository_locations_spec + locations = spec_loader(envoy_repository_locations.REPOSITORY_LOCATIONS_SPEC) + locations.update(spec_loader(api_repository_locations.REPOSITORY_LOCATIONS_SPEC)) + return locations diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py new file mode 100755 index 000000000000..92178b450074 --- /dev/null +++ b/tools/dependency/validate.py @@ -0,0 +1,308 @@ +#!/usr/bin/env python3 +"""Validate the relationship between Envoy dependencies and core/extensions. + +This script verifies that bazel query of the build graph is consistent with +the use_category metadata in bazel/repository_locations.bzl. +""" + +import re +import subprocess +import sys + +from importlib.machinery import SourceFileLoader +from importlib.util import spec_from_loader, module_from_spec + + +# Shared Starlark/Python files must have a .bzl suffix for Starlark import, so +# we are forced to do this workaround. 
+def LoadModule(name, path): + spec = spec_from_loader(name, SourceFileLoader(name, path)) + module = module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +envoy_repository_locations = LoadModule('envoy_repository_locations', + 'bazel/repository_locations.bzl') +api_repository_locations = LoadModule('api_repository_locations', + 'api/bazel/repository_locations.bzl') +extensions_build_config = LoadModule('extensions_build_config', + 'source/extensions/extensions_build_config.bzl') + +REPOSITORY_LOCATIONS_SPEC = dict(envoy_repository_locations.REPOSITORY_LOCATIONS_SPEC) +REPOSITORY_LOCATIONS_SPEC.update(api_repository_locations.REPOSITORY_LOCATIONS_SPEC) + +BAZEL_QUERY_EXTERNAL_DEP_RE = re.compile('@(\w+)//') +EXTENSION_LABEL_RE = re.compile('(//source/extensions/.*):') + +# We can safely ignore these as they are from Bazel or internal repository structure. +IGNORE_DEPS = set([ + 'envoy', + 'envoy_api', + 'envoy_api_canonical', + 'platforms', + 'bazel_tools', + 'local_config_cc', + 'remote_coverage_tools', + 'foreign_cc_platform_utils', +]) + + +# Should a dependency be ignored if it's only used in test? Any changes to this +# allowlist method should be accompanied by an update to the explanation in the +# "Test only" section of +# docs/root/intro/arch_overview/security/external_deps.rst. +def TestOnlyIgnore(dep): + # Rust + if dep.startswith('raze__'): + return True + # Java + if dep.startswith('remotejdk'): + return True + # Python (pip3) + if '_pip3_' in dep: + return True + return False + + +class DependencyError(Exception): + """Error in dependency relationships.""" + pass + + +class DependencyInfo(object): + """Models dependency info in bazel/repositories.bzl.""" + + def DepsByUseCategory(self, use_category): + """Find the set of external dependencies in a given use_category. + + Args: + use_category: string providing use_category. + + Returns: + Set of dependency identifiers that match use_category. + """ + return set(name for name, metadata in REPOSITORY_LOCATIONS_SPEC.items() + if use_category in metadata['use_category']) + + def GetMetadata(self, dependency): + """Obtain repository metadata for a dependency. + + Args: + dependency: string providing dependency identifier. + + Returns: + A dictionary with the repository metadata as defined in + bazel/repository_locations.bzl. + """ + return REPOSITORY_LOCATIONS_SPEC.get(dependency) + + +class BuildGraph(object): + """Models the Bazel build graph.""" + + def __init__(self, ignore_deps=IGNORE_DEPS, repository_locations_spec=REPOSITORY_LOCATIONS_SPEC): + self._ignore_deps = ignore_deps + # Reverse map from untracked dependencies implied by other deps back to the dep. + self._implied_untracked_deps_revmap = {} + for dep, metadata in repository_locations_spec.items(): + implied_untracked_deps = metadata.get('implied_untracked_deps', []) + for untracked_dep in implied_untracked_deps: + assert (untracked_dep not in self._implied_untracked_deps_revmap) + self._implied_untracked_deps_revmap[untracked_dep] = dep + + def QueryExternalDeps(self, *targets): + """Query the build graph for transitive external dependencies. + + Args: + targets: Bazel targets. + + Returns: + A set of dependency identifiers that are reachable from targets. 
+ """ + deps_query = ' union '.join(f'deps({l})' for l in targets) + deps = subprocess.check_output(['bazel', 'query', deps_query], + stderr=subprocess.PIPE).decode().splitlines() + ext_deps = set() + implied_untracked_deps = set() + for d in deps: + match = BAZEL_QUERY_EXTERNAL_DEP_RE.match(d) + if match: + ext_dep = match.group(1) + if ext_dep in self._ignore_deps: + continue + # If the dependency is untracked, add the source dependency that loaded + # it transitively. + if ext_dep in self._implied_untracked_deps_revmap: + ext_dep = self._implied_untracked_deps_revmap[ext_dep] + ext_deps.add(ext_dep) + return set(ext_deps) + + def ListExtensions(self): + """List all extensions. + + Returns: + Dictionary items from source/extensions/extensions_build_config.bzl. + """ + return extensions_build_config.EXTENSIONS.items() + + +class Validator(object): + """Collection of validation methods.""" + + def __init__(self, dep_info, build_graph): + self._dep_info = dep_info + self._build_graph = build_graph + self._queried_core_deps = build_graph.QueryExternalDeps( + '//source/exe:envoy_main_common_with_core_extensions_lib') + + def ValidateBuildGraphStructure(self): + """Validate basic assumptions about dependency relationship in the build graph. + + Raises: + DependencyError: on a dependency validation error. + """ + print('Validating build dependency structure...') + queried_core_ext_deps = self._build_graph.QueryExternalDeps( + '//source/exe:envoy_main_common_with_core_extensions_lib', '//source/extensions/...') + queried_all_deps = self._build_graph.QueryExternalDeps('//source/...') + if queried_all_deps != queried_core_ext_deps: + raise DependencyError('Invalid build graph structure. deps(//source/...) != ' + 'deps(//source/exe:envoy_main_common_with_core_extensions_lib) ' + 'union deps(//source/extensions/...)') + + def ValidateTestOnlyDeps(self): + """Validate that test-only dependencies aren't included in //source/... + + Raises: + DependencyError: on a dependency validation error. + """ + print('Validating test-only dependencies...') + # Validate that //source doesn't depend on test_only + queried_source_deps = self._build_graph.QueryExternalDeps('//source/...') + expected_test_only_deps = self._dep_info.DepsByUseCategory('test_only') + bad_test_only_deps = expected_test_only_deps.intersection(queried_source_deps) + if len(bad_test_only_deps) > 0: + raise DependencyError(f'//source depends on test-only dependencies: {bad_test_only_deps}') + # Validate that //test deps additional to those of //source are captured in + # test_only. + test_only_deps = self._build_graph.QueryExternalDeps('//test/...') + source_deps = self._build_graph.QueryExternalDeps('//source/...') + marginal_test_deps = test_only_deps.difference(source_deps) + bad_test_deps = marginal_test_deps.difference(expected_test_only_deps) + unknown_bad_test_deps = [dep for dep in bad_test_deps if not TestOnlyIgnore(dep)] + if len(unknown_bad_test_deps) > 0: + raise DependencyError(f'Missing deps in test_only "use_category": {unknown_bad_test_deps}') + + def ValidateDataPlaneCoreDeps(self): + """Validate dataplane_core dependencies. + + Check that we at least tag as dataplane_core dependencies that match some + well-known targets for the data-plane. + + Raises: + DependencyError: on a dependency validation error. + """ + print('Validating data-plane dependencies...') + # Necessary but not sufficient for dataplane. 
With some refactoring we could + # probably have more precise tagging of dataplane/controlplane/other deps in + # these paths. + queried_dataplane_core_min_deps = self._build_graph.QueryExternalDeps( + '//source/common/api/...', '//source/common/buffer/...', '//source/common/chromium_url/...', + '//source/common/crypto/...', '//source/common/conn_pool/...', + '//source/common/formatter/...', '//source/common/http/...', '//source/common/ssl/...', + '//source/common/tcp/...', '//source/common/tcp_proxy/...', '//source/common/network/...') + # It's hard to disentangle API and dataplane today. + expected_dataplane_core_deps = self._dep_info.DepsByUseCategory('dataplane_core').union( + self._dep_info.DepsByUseCategory('api')) + bad_dataplane_core_deps = queried_dataplane_core_min_deps.difference( + expected_dataplane_core_deps) + if len(bad_dataplane_core_deps) > 0: + raise DependencyError( + f'Observed dataplane core deps {queried_dataplane_core_min_deps} is not covered by ' + f'"use_category" implied core deps {expected_dataplane_core_deps}: {bad_dataplane_core_deps} ' + 'are missing') + + def ValidateControlPlaneDeps(self): + """Validate controlplane dependencies. + + Check that we at least tag as controlplane dependencies that match some + well-known targets for the control-plane. + + Raises: + DependencyError: on a dependency validation error. + """ + print('Validating control-plane dependencies...') + # Necessary but not sufficient for controlplane. With some refactoring we could + # probably have more precise tagging of dataplane/controlplane/other deps in + # these paths. + queried_controlplane_core_min_deps = self._build_graph.QueryExternalDeps( + '//source/common/config/...') + # Controlplane will always depend on API. + expected_controlplane_core_deps = self._dep_info.DepsByUseCategory('controlplane').union( + self._dep_info.DepsByUseCategory('api')) + bad_controlplane_core_deps = queried_controlplane_core_min_deps.difference( + expected_controlplane_core_deps) + if len(bad_controlplane_core_deps) > 0: + raise DependencyError( + f'Observed controlplane core deps {queried_controlplane_core_min_deps} is not covered ' + f'by "use_category" implied core deps {expected_controlplane_core_deps}: ' + f'{bad_controlplane_core_deps} are missing') + + def ValidateExtensionDeps(self, name, target): + """Validate that extensions are correctly declared for dataplane_ext and observability_ext. + + Args: + name: extension name. + target: extension Bazel target. + + Raises: + DependencyError: on a dependency validation error. + """ + print(f'Validating extension {name} dependencies...') + queried_deps = self._build_graph.QueryExternalDeps(target) + marginal_deps = queried_deps.difference(self._queried_core_deps) + expected_deps = [] + for d in marginal_deps: + metadata = self._dep_info.GetMetadata(d) + if metadata: + use_category = metadata['use_category'] + valid_use_category = any( + c in use_category for c in ['dataplane_ext', 'observability_ext', 'other', 'api']) + if not valid_use_category: + raise DependencyError( + f'Extension {name} depends on {d} with "use_category" not including ' + '["dataplane_ext", "observability_ext", "api", "other"]') + if 'extensions' in metadata: + allowed_extensions = metadata['extensions'] + if name not in allowed_extensions: + raise DependencyError( + f'Extension {name} depends on {d} but {d} does not list {name} in its allowlist') + + def ValidateAll(self): + """Collection of all validations. + + Raises: + DependencyError: on a dependency validation error.
+ """ + self.ValidateBuildGraphStructure() + self.ValidateTestOnlyDeps() + self.ValidateDataPlaneCoreDeps() + self.ValidateControlPlaneDeps() + # Validate the marginal dependencies introduced for each extension. + for name, target in sorted(build_graph.ListExtensions()): + target_all = EXTENSION_LABEL_RE.match(target).group(1) + '/...' + self.ValidateExtensionDeps(name, target_all) + + +if __name__ == '__main__': + dep_info = DependencyInfo() + build_graph = BuildGraph() + validator = Validator(dep_info, build_graph) + try: + validator.ValidateAll() + except DependencyError as e: + print('Dependency validation failed, please check metadata in bazel/repository_locations.bzl') + print(e) + sys.exit(1) diff --git a/tools/dependency/validate_test.py b/tools/dependency/validate_test.py new file mode 100755 index 000000000000..4474c6442760 --- /dev/null +++ b/tools/dependency/validate_test.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 +"""Tests for validate.py""" + +import unittest + +import validate + + +class FakeDependencyInfo(object): + """validate.DependencyInfo fake.""" + + def __init__(self, deps): + self._deps = deps + + def DepsByUseCategory(self, use_category): + return set(n for n, m in self._deps.items() if use_category in m['use_category']) + + def GetMetadata(self, dependency): + return self._deps.get(dependency) + + +class FakeBuildGraph(object): + """validate.BuildGraph fake.""" + + def __init__(self, reachable_deps, extensions): + self._reachable_deps = reachable_deps + self._extensions = extensions + + def QueryExternalDeps(self, *targets): + return set(sum((self._reachable_deps.get(t, []) for t in targets), [])) + + def ListExtensions(self): + return self._extensions + + +def FakeDep(use_category, extensions=[]): + return {'use_category': use_category, 'extensions': extensions} + + +class ValidateTest(unittest.TestCase): + + def BuildValidator(self, deps, reachable_deps, extensions=[]): + return validate.Validator(FakeDependencyInfo(deps), FakeBuildGraph(reachable_deps, extensions)) + + def test_valid_build_graph_structure(self): + validator = self.BuildValidator({}, { + '//source/exe:envoy_main_common_with_core_extensions_lib': ['a'], + '//source/extensions/...': ['b'], + '//source/...': ['a', 'b'] + }) + validator.ValidateBuildGraphStructure() + + def test_invalid_build_graph_structure(self): + validator = self.BuildValidator({}, { + '//source/exe:envoy_main_common_with_core_extensions_lib': ['a'], + '//source/extensions/...': ['b'], + '//source/...': ['a', 'b', 'c'] + }) + self.assertRaises(validate.DependencyError, lambda: validator.ValidateBuildGraphStructure()) + + def test_valid_test_only_deps(self): + validator = self.BuildValidator({'a': FakeDep('dataplane_core')}, {'//source/...': ['a']}) + validator.ValidateTestOnlyDeps() + validator = self.BuildValidator({'a': FakeDep('test_only')}, {'//test/...': ['a', 'b__pip3_']}) + validator.ValidateTestOnlyDeps() + + def test_invalid_test_only_deps(self): + validator = self.BuildValidator({'a': FakeDep('test_only')}, {'//source/...': ['a']}) + self.assertRaises(validate.DependencyError, lambda: validator.ValidateTestOnlyDeps()) + validator = self.BuildValidator({'a': FakeDep('test_only')}, {'//test/...': ['b']}) + self.assertRaises(validate.DependencyError, lambda: validator.ValidateTestOnlyDeps()) + + def test_valid_dataplane_core_deps(self): + validator = self.BuildValidator({'a': FakeDep('dataplane_core')}, + {'//source/common/http/...': ['a']}) + validator.ValidateDataPlaneCoreDeps() + + def 
test_invalid_dataplane_core_deps(self): + validator = self.BuildValidator({'a': FakeDep('controlplane')}, + {'//source/common/http/...': ['a']}) + self.assertRaises(validate.DependencyError, lambda: validator.ValidateDataPlaneCoreDeps()) + + def test_valid_controlplane_deps(self): + validator = self.BuildValidator({'a': FakeDep('controlplane')}, + {'//source/common/config/...': ['a']}) + validator.ValidateControlPlaneDeps() + + def test_invalid_controlplane_deps(self): + validator = self.BuildValidator({'a': FakeDep('other')}, {'//source/common/config/...': ['a']}) + self.assertRaises(validate.DependencyError, lambda: validator.ValidateControlPlaneDeps()) + + def test_valid_extension_deps(self): + validator = self.BuildValidator( + { + 'a': FakeDep('controlplane'), + 'b': FakeDep('dataplane_ext', ['foo']) + }, { + '//source/extensions/foo/...': ['a', 'b'], + '//source/exe:envoy_main_common_with_core_extensions_lib': ['a'] + }) + validator.ValidateExtensionDeps('foo', '//source/extensions/foo/...') + + def test_invalid_extension_deps_wrong_category(self): + validator = self.BuildValidator( + { + 'a': FakeDep('controlplane'), + 'b': FakeDep('controlplane', ['foo']) + }, { + '//source/extensions/foo/...': ['a', 'b'], + '//source/exe:envoy_main_common_with_core_extensions_lib': ['a'] + }) + self.assertRaises(validate.DependencyError, + lambda: validator.ValidateExtensionDeps('foo', '//source/extensions/foo/...')) + + def test_invalid_extension_deps_allowlist(self): + validator = self.BuildValidator( + { + 'a': FakeDep('controlplane'), + 'b': FakeDep('dataplane_ext', ['bar']) + }, { + '//source/extensions/foo/...': ['a', 'b'], + '//source/exe:envoy_main_common_with_core_extensions_lib': ['a'] + }) + self.assertRaises(validate.DependencyError, + lambda: validator.ValidateExtensionDeps('foo', '//source/extensions/foo/...')) + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/deprecate_features/requirements.txt b/tools/deprecate_features/requirements.txt index 05ae861486e8..0cd0b4272768 100644 --- a/tools/deprecate_features/requirements.txt +++ b/tools/deprecate_features/requirements.txt @@ -1 +1,3 @@ -six==1.15.0 +six==1.15.0 \ + --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \ + --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced diff --git a/tools/deprecate_version/requirements.txt b/tools/deprecate_version/requirements.txt index 0fa52aeb712c..e37631303df1 100644 --- a/tools/deprecate_version/requirements.txt +++ b/tools/deprecate_version/requirements.txt @@ -1,2 +1,35 @@ -GitPython==3.1.7 -PyGithub==1.43.8 +certifi==2020.6.20 \ + --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \ + --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 +chardet==3.0.4 \ + --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ + --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 +Deprecated==1.2.10 \ + --hash=sha256:525ba66fb5f90b07169fdd48b6373c18f1ee12728ca277ca44567a367d9d7f74 \ + --hash=sha256:a766c1dccb30c5f6eb2b203f87edd1d8588847709c78589e1521d769addc8218 +gitdb==4.0.5 \ + --hash=sha256:91f36bfb1ab7949b3b40e23736db18231bf7593edada2ba5c3a174a7b23657ac \ + --hash=sha256:c9e1f2d0db7ddb9a704c2a0217be31214e91a4fe1dea1efad19ae42ba0c285c9 +GitPython==3.1.8 \ + --hash=sha256:080bf8e2cf1a2b907634761c2eaefbe83b69930c94c66ad11b65a8252959f912 \ + --hash=sha256:1858f4fd089abe92ae465f01d5aaaf55e937eca565fb2c1fce35a51b5f85c910 +idna==2.10 
\ + --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ + --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 +PyGithub==1.53 \ + --hash=sha256:776befaddab9d8fddd525d52a6ca1ac228cf62b5b1e271836d766f4925e1452e \ + --hash=sha256:8ad656bf79958e775ec59f7f5a3dbcbadac12147ae3dc42708b951064096af15 +PyJWT==1.7.1 \ + --hash=sha256:5c6eca3c2940464d106b99ba83b00c6add741c9becaec087fb7ccdefea71350e \ + --hash=sha256:8d59a976fb773f3e6a39c85636357c4f0e242707394cadadd9814f5cbaa20e96 +requests==2.24.0 \ + --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b \ + --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898 +smmap==3.0.4 \ + --hash=sha256:54c44c197c819d5ef1991799a7e30b662d1e520f2ac75c9efbeb54a742214cf4 \ + --hash=sha256:9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24 +urllib3==1.25.10 \ + --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \ + --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461 +wrapt==1.12.1 \ + --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 diff --git a/tools/docker_wrapper.sh b/tools/docker_wrapper.sh index 7b9a4329be3f..8b2f77fb0846 100755 --- a/tools/docker_wrapper.sh +++ b/tools/docker_wrapper.sh @@ -14,6 +14,7 @@ if [ "${RUN_REMOTE}" == "yes" ]; then echo "Using docker environment from ${DOCKER_ENV}:" cat "${DOCKER_ENV}" fi +# shellcheck disable=SC1090 . "${DOCKER_ENV}" CONTAINER_NAME="envoy-test-runner" @@ -27,28 +28,27 @@ function cleanup() { trap cleanup EXIT -cat > ${ENVFILE} < "${ENVFILE}" < tar -tvf ./envoy.tar -rw------- htuch/eng 0 2017-08-13 21:13 access_0.log diff --git a/tools/git/modified_since_last_github_commit.sh b/tools/git/modified_since_last_github_commit.sh index e8e805ce9c97..bdfc2dd35449 100755 --- a/tools/git/modified_since_last_github_commit.sh +++ b/tools/git/modified_since_last_github_commit.sh @@ -1,7 +1,10 @@ #!/bin/bash -declare -r BASE="$(dirname "$0")" +BASE="$(dirname "$0")" +declare -r BASE declare -r TARGET_PATH=$1 declare -r EXTENSION=$2 +export TARGET_PATH -git diff --name-only $("${BASE}"/last_github_commit.sh) | grep "\.${EXTENSION}$" + +git diff --name-only "$("${BASE}"/last_github_commit.sh)" | grep "\.${EXTENSION}$" diff --git a/tools/github/requirements.txt b/tools/github/requirements.txt index e1b66335b79b..31b48c9997e2 100644 --- a/tools/github/requirements.txt +++ b/tools/github/requirements.txt @@ -1 +1,3 @@ -PyGithub==1.43.8 +PyGithub==1.53 \ + --hash=sha256:776befaddab9d8fddd525d52a6ca1ac228cf62b5b1e271836d766f4925e1452e \ + --hash=sha256:8ad656bf79958e775ec59f7f5a3dbcbadac12147ae3dc42708b951064096af15 diff --git a/tools/path_fix.sh b/tools/path_fix.sh index 65240c210214..122948708717 100755 --- a/tools/path_fix.sh +++ b/tools/path_fix.sh @@ -7,16 +7,15 @@ # # NOTE: This implementation is far from perfect and will need to be refined to cover all cases. -$* 2>&1 | +"$@" 2>&1 | while IFS= read -r LINE do if [[ "${LINE}" =~ [[:space:]]*([^:[:space:]]+):[[:digit:]]+:[[:digit:]]+: ]]; then - REAL_PATH=$(readlink -f "${BASH_REMATCH[1]}") # Bazel now appears to sometimes spit out paths that don't actually exist on disk at all. I # have no idea why this is happening (sigh). This check makes it so that if readlink fails we # don't attempt to fix the path and just print out what we got. - if [[ $? 
== 0 ]]; then - LINE=${LINE//${BASH_REMATCH[1]}/${REAL_PATH}} + if REAL_PATH=$(readlink -f "${BASH_REMATCH[1]}"); then + LINE=${LINE//${BASH_REMATCH[1]}/${REAL_PATH}} fi fi echo "${LINE}" diff --git a/tools/proto_format/proto_format.sh b/tools/proto_format/proto_format.sh index 2dfcb1e37840..def0d2a6195b 100755 --- a/tools/proto_format/proto_format.sh +++ b/tools/proto_format/proto_format.sh @@ -5,6 +5,9 @@ set -e set -x +read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTIONS:-}" + + [[ "$1" == "check" || "$1" == "fix" || "$1" == "freeze" ]] || \ (echo "Usage: $0 "; exit 1) @@ -18,14 +21,14 @@ if [[ "$2" == "--test" ]] then echo "protoxform_test..." ./tools/protoxform/protoxform_test.sh - bazel test ${BAZEL_BUILD_OPTIONS} //tools/protoxform:merge_active_shadow_test + bazel test "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:merge_active_shadow_test fi # Generate //versioning:active_protos. ./tools/proto_format/active_protos_gen.py ./api > ./api/versioning/BUILD # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. -BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all" +BAZEL_BUILD_OPTIONS+=("--remote_download_outputs=all") # If the specified command is 'freeze', we tell protoxform to adjust package version status to # reflect a major version freeze and then do a regular 'fix'. @@ -37,24 +40,28 @@ then fi # Invoke protoxform aspect. -bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=@envoy_api_canonical//versioning:active_protos ${FREEZE_ARG} \ +bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=@envoy_api_canonical//versioning:active_protos ${FREEZE_ARG} \ @envoy_api_canonical//versioning:active_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto # Find all source protos. -declare -r ACTIVE_PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//versioning:active_protos))") -declare -r FROZEN_PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//versioning:frozen_protos))") +PROTO_TARGETS=() +for proto_type in active frozen; do + protos=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//versioning:${proto_type}_protos))") + while read -r line; do PROTO_TARGETS+=("$line"); done \ + <<< "$protos" +done # Setup for proto_sync.py. -TOOLS=$(dirname $(dirname $(realpath $0))) +TOOLS="$(dirname "$(dirname "$(realpath "$0")")")" # To satisfy dependency on api_proto_plugin. export PYTHONPATH="$TOOLS" # Build protoprint and merge_active_shadow_tools for use in proto_sync.py. -bazel build ${BAZEL_BUILD_OPTIONS} //tools/protoxform:protoprint //tools/protoxform:merge_active_shadow +bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint //tools/protoxform:merge_active_shadow # Copy back the FileDescriptorProtos that protoxform emittted to the source tree. This involves # pretty-printing to format with protoprint and potentially merging active/shadow versions of protos # with merge_active_shadow. -./tools/proto_format/proto_sync.py "--mode=${PROTO_SYNC_CMD}" ${ACTIVE_PROTO_TARGETS} ${FROZEN_PROTO_TARGETS} +./tools/proto_format/proto_sync.py "--mode=${PROTO_SYNC_CMD}" "${PROTO_TARGETS[@]}" # Need to regenerate //versioning:active_protos before building type DB below if freezing. if [[ "$1" == "freeze" ]] @@ -63,7 +70,7 @@ then fi # Generate api/BUILD file based on updated type database. 
-bazel build ${BAZEL_BUILD_OPTIONS} //tools/type_whisperer:api_build_file +bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/type_whisperer:api_build_file cp -f bazel-bin/tools/type_whisperer/BUILD.api_build_file api/BUILD # Misc. manual copies to keep generated_api_shadow/ in sync with api/. diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index 8eeeceb9e225..be2adf389428 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -452,6 +452,7 @@ def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, pro rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or + (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index 092c86d6bca7..95a72b0ea51b 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -204,7 +204,16 @@ def CamelCase(s): file_block = '\n'.join(['syntax = "proto3";\n', package_line]) options = descriptor_pb2.FileOptions() + options.java_outer_classname = CamelCase(os.path.basename(file_proto.name)) + for msg in file_proto.message_type: + if msg.name == options.java_outer_classname: + # This is a workaround for Java outer class names that would otherwise + # conflict with types defined within the same proto file, see + # https://github.com/envoyproxy/envoy/pull/13378. + # TODO: in next major version, make this consistent. + options.java_outer_classname += "OuterClass" + options.java_multiple_files = True options.java_package = 'io.envoyproxy.' 
+ file_proto.package diff --git a/tools/protoxform/protoxform_test.sh b/tools/protoxform/protoxform_test.sh index 3fe0a5319757..66e6fc4efbac 100755 --- a/tools/protoxform/protoxform_test.sh +++ b/tools/protoxform/protoxform_test.sh @@ -4,23 +4,30 @@ set -e rm -rf bazel-bin/tools -BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all" - -TOOLS=$(dirname $(dirname $(realpath $0))) +read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTIONS:-}" +BAZEL_BUILD_OPTIONS+=("--remote_download_outputs=all") +TOOLS="$(dirname "$(dirname "$(realpath "$0")")")" # to satisfy dependency on run_command export PYTHONPATH="$TOOLS" + # protoxform fix test cases -PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:fix_protos))") -bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:fix_protos \ +PROTO_TARGETS=() +protos=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:fix_protos))") +while read -r line; do PROTO_TARGETS+=("$line"); done \ + <<< "$protos" +bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:fix_protos \ //tools/testdata/protoxform:fix_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto -bazel build ${BAZEL_BUILD_OPTIONS} //tools/protoxform:protoprint -./tools/protoxform/protoxform_test_helper.py fix ${PROTO_TARGETS} +bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint +./tools/protoxform/protoxform_test_helper.py fix "${PROTO_TARGETS[@]}" # protoxform freeze test cases -PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:freeze_protos))") -bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:freeze_protos \ +PROTO_TARGETS=() +protos=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:freeze_protos))") +while read -r line; do PROTO_TARGETS+=("$line"); done \ + <<< "$protos" +bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:freeze_protos \ --//tools/api_proto_plugin:extra_args=freeze \ //tools/testdata/protoxform:freeze_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto -bazel build ${BAZEL_BUILD_OPTIONS} //tools/protoxform:protoprint -./tools/protoxform/protoxform_test_helper.py freeze ${PROTO_TARGETS} +bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint +./tools/protoxform/protoxform_test_helper.py freeze "${PROTO_TARGETS[@]}" diff --git a/tools/shell_utils.sh b/tools/shell_utils.sh index a4b3e252a17a..470b9c6fc078 100644 --- a/tools/shell_utils.sh +++ b/tools/shell_utils.sh @@ -1,10 +1,14 @@ +#!/bin/bash + + source_venv() { VENV_DIR=$1 if [[ "${VIRTUAL_ENV}" == "" ]]; then if [[ ! 
-d "${VENV_DIR}"/venv ]]; then virtualenv "${VENV_DIR}"/venv --python=python3 fi - source "${VENV_DIR}"/venv/bin/activate + # shellcheck disable=SC1090 + source "${VENV_DIR}/venv/bin/activate" else echo "Found existing virtualenv" fi @@ -21,5 +25,5 @@ python_venv() { pip install -r "${SCRIPT_DIR}"/requirements.txt shift - python3 "${SCRIPT_DIR}/${PY_NAME}.py" $* + python3 "${SCRIPT_DIR}/${PY_NAME}.py" "$*" } diff --git a/tools/spelling/check_spelling.sh b/tools/spelling/check_spelling.sh index f6a7eea839c0..b5a0a9847d0b 100755 --- a/tools/spelling/check_spelling.sh +++ b/tools/spelling/check_spelling.sh @@ -19,7 +19,7 @@ MISSPELL_ARGS="-error -o stderr" if [[ "$#" -lt 1 ]]; then echo "Usage: $0 check|fix" - exit -1 + exit 1 fi if [[ "$1" == "fix" ]]; then @@ -39,13 +39,13 @@ SCRIPTPATH=$( cd "$(dirname "$0")" ; pwd -P ) ROOTDIR="${SCRIPTPATH}/../.." cd "$ROOTDIR" -BIN_FILENAME="misspell_"${VERSION}"_"${OS}"_64bit.tar.gz" +BIN_FILENAME="misspell_${VERSION}_${OS}_64bit.tar.gz" # Install tools we need if [[ ! -e "${TMP_DIR}/misspell" ]]; then if ! wget https://github.com/client9/misspell/releases/download/v"${VERSION}"/"${BIN_FILENAME}" \ -O "${TMP_DIR}/${BIN_FILENAME}" --no-verbose --tries=3 -o "${TMP_DIR}/wget.log"; then cat "${TMP_DIR}/wget.log" - exit -1 + exit 1 fi tar -xvf "${TMP_DIR}/${BIN_FILENAME}" -C "${TMP_DIR}" &> /dev/null fi @@ -61,7 +61,7 @@ else EXPECT_SHA="${MAC_MISSPELL_SHA}" fi -if [[ ! ${ACTUAL_SHA} == ${EXPECT_SHA} ]]; then +if [[ ! ${ACTUAL_SHA} == "${EXPECT_SHA}" ]]; then echo "Expect shasum is ${ACTUAL_SHA}, but actual is shasum ${EXPECT_SHA}" exit 1 fi @@ -70,13 +70,12 @@ chmod +x "${TMP_DIR}/misspell" # Spell checking # All the skipping files are defined in tools/spelling/spelling_skip_files.txt -SPELLING_SKIP_FILES="${ROOTDIR}/tools/spelling/spelling_skip_files.txt" +read -ra SKIP_FILES < "${ROOTDIR}/tools/spelling/spelling_skip_files.txt" +read -ra SKIP_FILES <<< "${SKIP_FILES[@]/#/-e }" # All the ignore words are defined in tools/spelling/spelling_allowlist_words.txt SPELLING_ALLOWLIST_WORDS_FILE="${ROOTDIR}/tools/spelling/spelling_allowlist_words.txt" +ALLOWLIST_WORDS=$(grep -vE '^#|^$' "${SPELLING_ALLOWLIST_WORDS_FILE}" | xargs | tr ' ' ',') -ALLOWLIST_WORDS=$(echo -n $(cat "${SPELLING_ALLOWLIST_WORDS_FILE}" | \ - grep -v "^#"|grep -v "^$") | tr ' ' ',') -SKIP_FILES=$(echo $(cat "${SPELLING_SKIP_FILES}") | sed "s| | -e |g") -git ls-files | grep -v -e ${SKIP_FILES} | xargs "${TMP_DIR}/misspell" -i \ +git ls-files | grep -v "${SKIP_FILES[@]}" | xargs "${TMP_DIR}/misspell" -i \ "${ALLOWLIST_WORDS}" ${MISSPELL_ARGS} diff --git a/tools/spelling/check_spelling_pedantic_test.sh b/tools/spelling/check_spelling_pedantic_test.sh index b6fdb596958e..900784ed999c 100755 --- a/tools/spelling/check_spelling_pedantic_test.sh +++ b/tools/spelling/check_spelling_pedantic_test.sh @@ -1,9 +1,9 @@ #!/bin/bash -tools=$(dirname $(dirname $(realpath $0))) -root=$(realpath $tools/..) 
+tools=$(dirname "$(dirname "$(realpath "$0")")") +root=$(realpath "$tools/..") -cd $root +cd "$root" || exit 1 # to satisfy dependency on run_command export PYTHONPATH="$tools" ./tools/spelling/check_spelling_pedantic_test.py "$@" diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index d2bf372e07cb..5eaaceff978f 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -15,6 +15,7 @@ API ARN ASAN ASCII +ASM ASSERTs AST AWS @@ -24,10 +25,14 @@ BSON BPF CAS CB +CDN CDS CEL DSR +HEXDIG +HEXDIGIT LTT +OWS TIDs ceil CHACHA @@ -67,6 +72,7 @@ DFATAL DGRAM DLOG DNS +DNSSEC DQUOTE DRYs DS @@ -78,6 +84,7 @@ EADDRNOTAVAIL EAGAIN ECDH ECDHE +ECDS ECDSA ECDSAs ECMP @@ -88,6 +95,7 @@ EINPROGRESS EINVAL ELB EMSGSIZE +ENOENT ENOTFOUND ENOTSUP ENV @@ -217,6 +225,7 @@ Nilsson Nonhashable Oauth OCSP +OID OK OOM OOMs @@ -300,6 +309,7 @@ SPIFFE SPKI SQL SR +SRCDIR SRDS SRV SS @@ -398,6 +408,7 @@ argv artisanal ary asctime +asm async atoi atomicity @@ -422,6 +433,7 @@ backtracing balancer balancers barbaz +basename baz bazel behaviour @@ -468,6 +480,7 @@ canonicalizing cardinality casted charset +checkin checksum chrono chroot @@ -565,7 +578,9 @@ dgst dir dirname djb +downcalls downcasted +downcased downstreams drainable dtor @@ -576,6 +591,7 @@ dynamodb emplace emplaced emscripten +emsdk enablement encodings endian @@ -607,9 +623,11 @@ failover fallbacks fastbuild favicon +fbs fcntl fd fds +fdstat filename filenames fileno @@ -617,6 +635,8 @@ filesystem firefox fixdate fixup +flatbuffer +flatc fmt fmtlib fn @@ -722,6 +742,8 @@ kv kvs lala latencies +ld +ldd len lenenc lexically @@ -755,6 +777,7 @@ lua lyft maglev malloc +marshaller matchable matcher matchers @@ -799,6 +822,7 @@ mysql namelen nameserver namespace +namespaced namespaces namespacing nan @@ -862,6 +886,7 @@ pausable pcall pcap pclose +performant pfctl pipelined pipelining @@ -894,6 +919,7 @@ preflight preorder prepend prepended +prepends prev probabilistically proc @@ -982,6 +1008,7 @@ resolvers responder restarter resync +retransmitting retriable retriggers revalidated @@ -1010,6 +1037,7 @@ sched schedulable schemas scopekey +sd secp sendmsg sendmmsg @@ -1022,6 +1050,7 @@ setsockopt sig sigaction sigactions +siginfo signalstack siloed sim @@ -1048,6 +1077,7 @@ stateful statsd stderr stdev +stdin stdout stmt str diff --git a/tools/testdata/.DS_Store b/tools/testdata/.DS_Store new file mode 100644 index 000000000000..f24b79727921 Binary files /dev/null and b/tools/testdata/.DS_Store differ diff --git a/tools/testdata/check_format/pgv_string.proto b/tools/testdata/check_format/pgv_string.proto new file mode 100644 index 000000000000..2a73f8a6a830 --- /dev/null +++ b/tools/testdata/check_format/pgv_string.proto @@ -0,0 +1 @@ +// this proto file is used to check proto validation ERROR min_bytes diff --git a/tools/testdata/check_format/repository_url.bzl b/tools/testdata/check_format/repository_url.bzl new file mode 100644 index 000000000000..8450c6aa3161 --- /dev/null +++ b/tools/testdata/check_format/repository_url.bzl @@ -0,0 +1,5 @@ +http_archive( + name = "foo", + url = "http://foo.com", + sha256 = "blah", +) diff --git a/tools/testdata/check_format/repository_urls.bzl b/tools/testdata/check_format/repository_urls.bzl new file mode 100644 index 000000000000..c67406076ab3 --- /dev/null +++ b/tools/testdata/check_format/repository_urls.bzl @@ -0,0 +1,5 @@ +http_archive( + name = "foo", + urls = ["http://foo.com"] + sha256 = "blah", +) diff --git 
a/tools/testdata/check_format/std_get_if.cc b/tools/testdata/check_format/std_get_if.cc new file mode 100644 index 000000000000..289ff5727c6e --- /dev/null +++ b/tools/testdata/check_format/std_get_if.cc @@ -0,0 +1,8 @@ +#include + +namespace Envoy { + void foo() { + absl::variant x{12}; + auto y = std::get_if(&x); + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_holds_alternative.cc b/tools/testdata/check_format/std_holds_alternative.cc new file mode 100644 index 000000000000..25e8468548d6 --- /dev/null +++ b/tools/testdata/check_format/std_holds_alternative.cc @@ -0,0 +1,8 @@ +#include + +namespace Envoy { + void foo() { + absl::variant x{12}; + auto y = std::holds_alternative(x); + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_monostate.cc b/tools/testdata/check_format/std_monostate.cc new file mode 100644 index 000000000000..e2bf8d42daeb --- /dev/null +++ b/tools/testdata/check_format/std_monostate.cc @@ -0,0 +1,12 @@ +#include + +namespace Envoy { + struct S { + S(int i) : i(i) {} + int i; + }; + + void foo() { + absl::variant x; + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_string_view.cc b/tools/testdata/check_format/std_string_view.cc new file mode 100644 index 000000000000..f92585f74cf7 --- /dev/null +++ b/tools/testdata/check_format/std_string_view.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + void foo() { + std::string_view x("a string literal"); + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_visit.cc b/tools/testdata/check_format/std_visit.cc new file mode 100644 index 000000000000..ee1ac550e898 --- /dev/null +++ b/tools/testdata/check_format/std_visit.cc @@ -0,0 +1,14 @@ +#include + +namespace Envoy { + struct SomeVisitorFunctor { + template + void operator()(const T& i) const {} + }; + + void foo() { + absl::variant x{12}; + SomeVisitorFunctor visitor; + std::visit(visitor, x); + } +} // namespace Envoy diff --git a/tools/vscode/README.md b/tools/vscode/README.md new file mode 100644 index 000000000000..237c39d9ea8a --- /dev/null +++ b/tools/vscode/README.md @@ -0,0 +1,35 @@ +# Tools for VSCode + +This directory contains tools which is useful for developers using VSCode. + +## Recommended VSCode setup + +It is recommended to use [devcontainer](../.devcontainer/README.md), or setting up an equivalent +environment. Recommended extensions and settings are listed in +[devcontainer.json](../.devcontainer/devcontainer.json). + +## Refresh compilation database + +`tools/vscode/refresh_compdb.sh` is a script to refresh compilation database, it may take a while +to generate all dependencies for code completion, such as protobuf generated codes, external dependencies. +If you changed proto definition, or changed any bazel structure, rerun this to get code completion +correctly. + +Note that it is recommended to disable VSCode Microsoft C/C++ extension and use `vscode-clangd` instead for +C/C++ code completion. + +## Generate debug config + +`tools/vscode/generate_debug_config.py` is a script to generate VSCode debug config in `.vscode/launch.json`. +The generated config will be named ` `. + +For example: +``` +tools/vscode/generate_debug_config.py //source/exe:envoy-static --args "-c envoy.yaml" +``` + +Generates an entry named `gdb //source/exe:envoy-static` for GDB in `launch.json`. It can be +used to generate config for tests also. 
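For reference, the `gdb` entry that ends up in `.vscode/launch.json` is the dictionary built by `gdbConfig()` in `generate_debug_config.py`; a rough sketch is shown below, where the `target` and `debugger_args` paths are placeholders that depend on your Bazel output tree and execution root:

```
# Approximate shape of the entry gdbConfig() adds to launch.json; paths are placeholders.
{
    "name": "gdb //source/exe:envoy-static",
    "request": "launch",
    "arguments": "-c envoy.yaml",
    "type": "gdb",
    "target": "/path/to/bazel-bin/source/exe/envoy-static",
    "debugger_args": ["--directory=/path/to/execution_root"],
    "cwd": "${workspaceFolder}",
    "valuesFormatting": "disabled"
}
```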
+ +The generated `gdb` config are compatible with [Native Debug](https://marketplace.visualstudio.com/items?itemName=webfreak.debug) extension, +`lldb` config are compatible with [VSCode LLDB](https://marketplace.visualstudio.com/items?itemName=vadimcn.vscode-lldb) extension. diff --git a/tools/vscode/generate_debug_config.py b/tools/vscode/generate_debug_config.py new file mode 100755 index 000000000000..7a57f2b648f3 --- /dev/null +++ b/tools/vscode/generate_debug_config.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 + +import argparse +import json +import os +import pathlib +import shlex +import shutil +import subprocess + +BAZEL_OPTIONS = shlex.split(os.environ.get("BAZEL_BUILD_OPTIONS", "")) + + +def bazelInfo(name, bazel_extra_options=[]): + return subprocess.check_output(["bazel", "info", name] + BAZEL_OPTIONS + + bazel_extra_options).decode().strip() + + +def getWorkspace(): + return bazelInfo("workspace") + + +def getExecutionRoot(workspace): + # If compilation database exists, use its execution root, this allows setting + # breakpoints with clangd navigation easier. + try: + compdb = pathlib.Path(workspace, "compile_commands.json").read_text() + return json.loads(compdb)[0]['directory'] + except: + return bazelInfo("execution_root") + + +def binaryPath(bazel_bin, target): + return pathlib.Path( + bazel_bin, + *[s for s in target.replace('@', 'external/').replace(':', '/').split('/') if s != '']) + + +def buildBinaryWithDebugInfo(target): + targets = [target, target + ".dwp"] + subprocess.check_call(["bazel", "build", "-c", "dbg"] + BAZEL_OPTIONS + targets) + + bazel_bin = bazelInfo("bazel-bin", ["-c", "dbg"]) + return binaryPath(bazel_bin, target) + + +def getLaunchJson(workspace): + try: + return json.loads(pathlib.Path(workspace, ".vscode", "launch.json").read_text()) + except: + return {"version": "0.2.0"} + + +def writeLaunchJson(workspace, launch): + launch_json = pathlib.Path(workspace, ".vscode", "launch.json") + backup_launch_json = pathlib.Path(workspace, ".vscode", "launch.json.bak") + if launch_json.exists(): + shutil.move(str(launch_json), str(backup_launch_json)) + + launch_json.write_text(json.dumps(launch, indent=4)) + + +def gdbConfig(target, binary, workspace, execroot, arguments): + return { + "name": "gdb " + target, + "request": "launch", + "arguments": arguments, + "type": "gdb", + "target": str(binary), + "debugger_args": ["--directory=" + execroot], + "cwd": "${workspaceFolder}", + "valuesFormatting": "disabled" + } + + +def lldbConfig(target, binary, workspace, execroot, arguments): + return { + "name": "lldb " + target, + "program": str(binary), + "sourceMap": { + "/proc/self/cwd": workspace, + "/proc/self/cwd/external": execroot + "/external", + "/proc/self/cwd/bazel-out": execroot + "/bazel-out" + }, + "cwd": "${workspaceFolder}", + "args": shlex.split(arguments), + "type": "lldb", + "request": "launch" + } + + +def addToLaunchJson(target, binary, workspace, execroot, arguments, debugger_type): + launch = getLaunchJson(workspace) + new_config = {} + if debugger_type == "lldb": + new_config = lldbConfig(target, binary, workspace, execroot, arguments) + else: + new_config = gdbConfig(target, binary, workspace, execroot, arguments) + + configurations = launch.get("configurations", []) + for config in configurations: + if config.get("name", None) == new_config["name"]: + config.clear() + config.update(new_config) + break + else: + configurations.append(new_config) + + launch["configurations"] = configurations + writeLaunchJson(workspace, launch) + + +if 
__name__ == "__main__": + parser = argparse.ArgumentParser(description='Build and generate launch config for VSCode') + parser.add_argument('--debugger', default="gdb") + parser.add_argument('--args', default='') + parser.add_argument('target') + args = parser.parse_args() + + workspace = getWorkspace() + execution_root = getExecutionRoot(workspace) + debug_binary = buildBinaryWithDebugInfo(args.target) + addToLaunchJson(args.target, debug_binary, workspace, execution_root, args.args, args.debugger)
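As a quick sanity check after running the generator, a small hypothetical helper along these lines (not part of this change, and assuming `.vscode/launch.json` already exists) can list the debug entries the script has written:

```
#!/usr/bin/env python3
# Hypothetical helper, not part of this change: print the debug configurations
# currently present in .vscode/launch.json.
import json
import pathlib

launch = json.loads(pathlib.Path(".vscode", "launch.json").read_text())
for config in launch.get("configurations", []):
    print(config.get("name"), "->", config.get("type"))
```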